diff --git a/kvdo.spec b/kvdo.spec index 9b29ecdd..046adf14 100644 --- a/kvdo.spec +++ b/kvdo.spec @@ -1,6 +1,6 @@ %define spec_release 1 %define kmod_name kvdo -%define kmod_driver_version 6.2.0.219 +%define kmod_driver_version 6.2.0.239 %define kmod_rpm_release %{spec_release} %define kmod_kernel_version 3.10.0-693.el7 @@ -85,5 +85,5 @@ rm -rf $RPM_BUILD_ROOT %{_usr}/src/%{kmod_name}-%{version}-%{kmod_driver_version}/* %changelog -* Fri Sep 14 2018 - J. corwin Coburn - 6.2.0.219-1 -HASH(0x1b30ad8) \ No newline at end of file +* Fri Oct 05 2018 - J. corwin Coburn - 6.2.0.239-1 +HASH(0x1b8ead8) \ No newline at end of file diff --git a/vdo/Makefile b/vdo/Makefile index f14882e0..9e1cd3e7 100644 --- a/vdo/Makefile +++ b/vdo/Makefile @@ -1,4 +1,4 @@ -VDO_VERSION = 6.2.0.219 +VDO_VERSION = 6.2.0.239 VDO_VERSION_MAJOR = $(word 1,$(subst ., ,$(VDO_VERSION))) VDO_VERSION_MINOR = $(word 2,$(subst ., ,$(VDO_VERSION))) diff --git a/vdo/base/extent.c b/vdo/base/extent.c index a412f1c2..d26fdfe3 100644 --- a/vdo/base/extent.c +++ b/vdo/base/extent.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/extent.c#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/extent.c#2 $ */ #include "extent.h" @@ -93,16 +93,22 @@ void freeExtent(VDOExtent **extentPtr) * @param extent The extent * @param startBlock The absolute physical block at which the extent should * begin its I/O + * @param count The number of blocks to write * @param operation The operation to perform on the extent **/ static void launchMetadataExtent(VDOExtent *extent, PhysicalBlockNumber startBlock, + BlockCount count, VIOOperation operation) { resetCompletion(&extent->completion); - extent->completeCount = 0; - BlockCount vioCount = extent->count; - for (BlockCount i = 0; i < vioCount; i++) { + if (count > extent->count) { + finishCompletion(&extent->completion, VDO_OUT_OF_RANGE); + return; + } + + extent->completeCount = extent->count - count; + for (BlockCount i = 0; i < count; i++) { VIO *vio = extent->vios[i]; vio->completion.callbackThreadID = extent->completion.callbackThreadID; launchMetadataVIO(vio, startBlock++, handleVIOCompletion, @@ -111,15 +117,19 @@ static void launchMetadataExtent(VDOExtent *extent, } /**********************************************************************/ -void readMetadataExtent(VDOExtent *extent, PhysicalBlockNumber startBlock) +void readPartialMetadataExtent(VDOExtent *extent, + PhysicalBlockNumber startBlock, + BlockCount count) { - launchMetadataExtent(extent, startBlock, VIO_READ); + launchMetadataExtent(extent, startBlock, count, VIO_READ); } /**********************************************************************/ -void writeMetadataExtent(VDOExtent *extent, PhysicalBlockNumber startBlock) +void writePartialMetadataExtent(VDOExtent *extent, + PhysicalBlockNumber startBlock, + BlockCount count) { - launchMetadataExtent(extent, startBlock, VIO_WRITE); + launchMetadataExtent(extent, startBlock, count, VIO_WRITE); } /**********************************************************************/ diff --git a/vdo/base/extent.h b/vdo/base/extent.h index 688fa25d..a0c4a6f3 100644 --- a/vdo/base/extent.h +++ b/vdo/base/extent.h @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
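// Usage illustration, not part of the patch: with the new count parameter a
// caller can touch just a prefix of an already-sized extent. Requests larger
// than the extent fail up front with VDO_OUT_OF_RANGE, and
// launchMetadataExtent() pre-credits the VIOs it does not launch by starting
// completeCount at (extent->count - count). Variable names below are
// placeholders; error handling and callback wiring are elided.
BlockCount blocksNeeded = extent->count / 2;
readPartialMetadataExtent(extent, startBlock, blocksNeeded);
// The whole-extent read is now just the count == extent->count case:
readPartialMetadataExtent(extent, startBlock, extent->count);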
* - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/extent.h#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/extent.h#2 $ */ #ifndef EXTENT_H @@ -105,8 +105,25 @@ void freeExtent(VDOExtent **extentPtr); * @param extent The extent to read * @param startBlock The physical block number of the first block * in the extent + * @param count The number of blocks to read (must be less than or + * equal to the length of the extent) **/ -void readMetadataExtent(VDOExtent *extent, PhysicalBlockNumber startBlock); +void readPartialMetadataExtent(VDOExtent *extent, + PhysicalBlockNumber startBlock, + BlockCount count); + +/** + * Read metadata from the underlying storage. + * + * @param extent The extent to read + * @param startBlock The physical block number of the first block + * in the extent + **/ +static inline void readMetadataExtent(VDOExtent *extent, + PhysicalBlockNumber startBlock) +{ + readPartialMetadataExtent(extent, startBlock, extent->count); +} /** * Write metadata to the underlying storage. @@ -114,8 +131,24 @@ void readMetadataExtent(VDOExtent *extent, PhysicalBlockNumber startBlock); * @param extent The extent to write * @param startBlock The physical block number of the first block in the * extent + * @param count The number of blocks to read (must be less than or + * equal to the length of the extent) **/ -void writeMetadataExtent(VDOExtent *extent, PhysicalBlockNumber startBlock); +void writePartialMetadataExtent(VDOExtent *extent, + PhysicalBlockNumber startBlock, + BlockCount count); +/** + * Write metadata to the underlying storage. + * + * @param extent The extent to write + * @param startBlock The physical block number of the first block in the + * extent + **/ +static inline void writeMetadataExtent(VDOExtent *extent, + PhysicalBlockNumber startBlock) +{ + writePartialMetadataExtent(extent, startBlock, extent->count); +} /** * Notify an extent that one of its VIOs has completed. If the signaling VIO diff --git a/vdo/base/partitionCopy.c b/vdo/base/partitionCopy.c index bf2430f3..94ad5c52 100644 --- a/vdo/base/partitionCopy.c +++ b/vdo/base/partitionCopy.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/partitionCopy.c#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/partitionCopy.c#2 $ */ #include "partitionCopy.h" @@ -26,6 +26,7 @@ #include "completion.h" #include "constants.h" #include "extent.h" +#include "numUtils.h" enum { STRIDE_LENGTH = 2048 @@ -67,27 +68,65 @@ CopyCompletion *asCopyCompletion(VDOCompletion *completion) return (CopyCompletion *) completion; } -/** - * Free the copy completion now that we've finished copying. 
- * - * @param completion The copy completion - **/ -static void finishCopy(VDOCompletion *completion) +/**********************************************************************/ +int makeCopyCompletion(PhysicalLayer *layer, VDOCompletion **completionPtr) { - VDOCompletion *parent = completion->parent; - int result = completion->result; + CopyCompletion *copy; + int result = ALLOCATE(1, CopyCompletion, __func__, ©); + if (result != VDO_SUCCESS) { + return result; + } + initializeCompletion(©->completion, PARTITION_COPY_COMPLETION, layer); - CopyCompletion *copy = asCopyCompletion(completion); + result = ALLOCATE((VDO_BLOCK_SIZE * STRIDE_LENGTH), char, + "partition copy extent", ©->data); + if (result != VDO_SUCCESS) { + VDOCompletion *completion = ©->completion; + freeCopyCompletion(&completion); + return result; + } + + result = createExtent(layer, VIO_TYPE_PARTITION_COPY, VIO_PRIORITY_HIGH, + STRIDE_LENGTH, copy->data, ©->extent); + if (result != VDO_SUCCESS) { + VDOCompletion *completion = ©->completion; + freeCopyCompletion(&completion); + return result; + } + + *completionPtr = ©->completion; + return VDO_SUCCESS; +} + +/**********************************************************************/ +void freeCopyCompletion(VDOCompletion **completionPtr) +{ + if (*completionPtr == NULL) { + return; + } + + CopyCompletion *copy = asCopyCompletion(*completionPtr); freeExtent(©->extent); FREE(copy->data); FREE(copy); - - finishCompletion(parent, result); + *completionPtr = NULL; } /**********************************************************************/ static void copyPartitionStride(CopyCompletion *copy); +/** + * Determine the number of blocks to copy in the current stride. + * + * @param copy The copy completion + * + * @return The number of blocks to copy in the current stride + **/ +static inline BlockCount getStrideSize(CopyCompletion *copy) +{ + return minBlockCount(STRIDE_LENGTH, copy->endingIndex - copy->currentIndex); +} + /** * Process a completed write during a partition copy. * @@ -96,7 +135,7 @@ static void copyPartitionStride(CopyCompletion *copy); static void completeWriteForCopy(VDOCompletion *completion) { CopyCompletion *copy = asCopyCompletion(completion->parent); - copy->currentIndex += copy->extent->count; + copy->currentIndex += getStrideSize(copy); if (copy->currentIndex >= copy->endingIndex) { // We're done. finishCompletion(completion->parent, VDO_SUCCESS); @@ -123,7 +162,8 @@ static void completeReadForCopy(VDOCompletion *completion) } completion->callback = completeWriteForCopy; - writeMetadataExtent(asVDOExtent(completion), layerStartBlock); + writePartialMetadataExtent(asVDOExtent(completion), layerStartBlock, + getStrideSize(copy)); } /** @@ -133,21 +173,6 @@ static void completeReadForCopy(VDOCompletion *completion) **/ static void copyPartitionStride(CopyCompletion *copy) { - PhysicalBlockNumber blocksRemaining - = (copy->endingIndex - copy->currentIndex); - if (blocksRemaining < STRIDE_LENGTH) { - // There is less than a whole stride left to copy, so the extent must be - // resized down to the remaining length. 
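// Illustration, not patch code: getStrideSize() clamps each step to what
// remains, so a 4196-block copy proceeds as strides of 2048, 2048, and 100
// while reusing the same STRIDE_LENGTH-sized extent throughout. The loop
// below only sketches that progression.
for (BlockCount index = copy->currentIndex; index < copy->endingIndex; ) {
  BlockCount stride = minBlockCount(STRIDE_LENGTH, copy->endingIndex - index);
  // read and then write exactly 'stride' blocks via the partial-extent calls
  index += stride;
}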
- freeExtent(©->extent); - int result = createExtent(copy->completion.layer, VIO_TYPE_PARTITION_COPY, - VIO_PRIORITY_HIGH, blocksRemaining, copy->data, - ©->extent); - if (result != VDO_SUCCESS) { - finishCompletion(©->completion, result); - return; - } - } - PhysicalBlockNumber layerStartBlock; int result = translateToPBN(copy->source, copy->currentIndex, &layerStartBlock); @@ -159,48 +184,8 @@ static void copyPartitionStride(CopyCompletion *copy) prepareCompletion(©->extent->completion, completeReadForCopy, finishParentCallback, copy->completion.callbackThreadID, ©->completion); - readMetadataExtent(copy->extent, layerStartBlock); -} - -/** - * Initialize a copy completion. - * - * @param layer The layer in question - * @param source The partition to copy from - * @param target The partition to copy to - * @param parent The parent to finish when the copy is complete - * @param copy The copy completion to initialize - * - * @return VDO_SUCCESS or an error - **/ -__attribute__((warn_unused_result)) -static int initializeCopyCompletion(PhysicalLayer *layer, - Partition *source, - Partition *target, - VDOCompletion *parent, - CopyCompletion *copy) -{ - initializeCompletion(©->completion, PARTITION_COPY_COMPLETION, layer); - prepareCompletion(©->completion, finishCopy, finishCopy, - parent->callbackThreadID, parent); - - int result = ALLOCATE((VDO_BLOCK_SIZE * STRIDE_LENGTH), char, - "partition copy extent", ©->data); - if (result != VDO_SUCCESS) { - return result; - } - - result = createExtent(layer, VIO_TYPE_PARTITION_COPY, VIO_PRIORITY_HIGH, - STRIDE_LENGTH, copy->data, ©->extent); - if (result != VDO_SUCCESS) { - return result; - } - - copy->source = source; - copy->target = target; - copy->currentIndex = 0; - copy->endingIndex = getFixedLayoutPartitionSize(source); - return VDO_SUCCESS; + readPartialMetadataExtent(copy->extent, layerStartBlock, + getStrideSize(copy)); } /** @@ -233,7 +218,7 @@ static int validatePartitionCopy(Partition *source, Partition *target) } /**********************************************************************/ -void copyPartitionAsync(PhysicalLayer *layer, +void copyPartitionAsync(VDOCompletion *completion, Partition *source, Partition *target, VDOCompletion *parent) @@ -244,18 +229,11 @@ void copyPartitionAsync(PhysicalLayer *layer, return; } - CopyCompletion *copy; - result = ALLOCATE(1, CopyCompletion, __func__, ©); - if (result != VDO_SUCCESS) { - finishCompletion(parent, result); - return; - } - - result = initializeCopyCompletion(layer, source, target, parent, copy); - if (result != VDO_SUCCESS) { - finishCompletion(©->completion, result); - return; - } - + CopyCompletion *copy = asCopyCompletion(completion); + prepareToFinishParent(©->completion, parent); + copy->source = source; + copy->target = target; + copy->currentIndex = 0; + copy->endingIndex = getFixedLayoutPartitionSize(source); copyPartitionStride(copy); } diff --git a/vdo/base/partitionCopy.h b/vdo/base/partitionCopy.h index 36f87ec7..5f62303d 100644 --- a/vdo/base/partitionCopy.h +++ b/vdo/base/partitionCopy.h @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/partitionCopy.h#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/partitionCopy.h#2 $ */ #ifndef PARTITION_COPY_H @@ -26,15 +26,33 @@ #include "physicalLayer.h" #include "types.h" +/** + * Make a copy completion. 
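// Hypothetical caller of the lifecycle declared below, assembled from the
// signatures in this patch; error handling after the allocation check is
// elided:
VDOCompletion *copyCompletion;
int result = makeCopyCompletion(layer, &copyCompletion);
if (result != VDO_SUCCESS) {
  return result;
}
// One completion can drive any number of partition copies over its lifetime.
copyPartitionAsync(copyCompletion, source, target, parent);
// Freed by the owner once no copy is outstanding (vdoLayout.c does this in
// freeVDOLayout() and finishVDOLayoutGrowth()).
freeCopyCompletion(&copyCompletion);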
+ * + * @param [in] layer The layer on which the partitions reside + * @param [out] completionPtr A pointer to hold the copy completion + * + * @return VDO_SUCCESS or an error + **/ +int makeCopyCompletion(PhysicalLayer *layer, VDOCompletion **completionPtr) + __attribute__((warn_unused_result)); + +/** + * Free a copy completion and NULL out the reference to it. + * + * @param completionPtr A pointer to the complete to be freed + **/ +void freeCopyCompletion(VDOCompletion **completionPtr); + /** * Copy a partition. * - * @param layer The layer in question + * @param completion The copy completion to use * @param source The partition to copy from * @param target The partition to copy to * @param parent The parent to finish when the copy is complete **/ -void copyPartitionAsync(PhysicalLayer *layer, +void copyPartitionAsync(VDOCompletion *completion, Partition *source, Partition *target, VDOCompletion *parent); diff --git a/vdo/base/recoveryJournal.c b/vdo/base/recoveryJournal.c index 1fc7e98f..1dc75165 100644 --- a/vdo/base/recoveryJournal.c +++ b/vdo/base/recoveryJournal.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/recoveryJournal.c#12 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/recoveryJournal.c#13 $ */ #include "recoveryJournal.h" @@ -435,8 +435,6 @@ int makeRecoveryJournal(Nonce nonce, return result; } - setActiveBlock(journal); - result = ALLOCATE(VDO_BLOCK_SIZE, char, "journal flush data", &journal->unusedFlushVIOData); if (result != VDO_SUCCESS) { @@ -452,6 +450,7 @@ int makeRecoveryJournal(Nonce nonce, return result; } + setActiveBlock(journal); journal->flushVIO->completion.callbackThreadID = journal->threadID; } diff --git a/vdo/base/slabDepot.c b/vdo/base/slabDepot.c index 8debc919..8269176b 100644 --- a/vdo/base/slabDepot.c +++ b/vdo/base/slabDepot.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/slabDepot.c#4 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/slabDepot.c#5 $ */ #include "slabDepot.h" @@ -1046,8 +1046,6 @@ void updateSlabDepotSize(SlabDepot *depot, bool reverting) /**********************************************************************/ int prepareToGrowSlabDepot(SlabDepot *depot, BlockCount newSize) { - abandonNewSlabs(depot); - if ((newSize >> depot->slabSizeShift) <= depot->slabCount) { return VDO_INCREMENT_TOO_SMALL; } @@ -1067,7 +1065,12 @@ int prepareToGrowSlabDepot(SlabDepot *depot, BlockCount newSize) return logErrorWithStringError(VDO_INCREMENT_TOO_SMALL, "Depot can only grow"); } + if (newSlabCount == depot->newSlabCount) { + // Check it out, we've already got all the new slabs allocated! + return VDO_SUCCESS; + } + abandonNewSlabs(depot); result = allocateSlabs(depot, depot->completion.layer, newSlabCount); if (result != VDO_SUCCESS) { abandonNewSlabs(depot); diff --git a/vdo/base/threadData.c b/vdo/base/threadData.c index 64d5c124..2ac65eee 100644 --- a/vdo/base/threadData.c +++ b/vdo/base/threadData.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
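// Illustration, not patch code, of the slabDepot.c change above: preparing to
// grow to a size whose slabs have already been allocated is now an immediate
// success instead of abandoning and re-allocating them.
int first  = prepareToGrowSlabDepot(depot, newSize);  // allocates the new slabs
int second = prepareToGrowSlabDepot(depot, newSize);  // short-circuits with VDO_SUCCESS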
* - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/threadData.c#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/threadData.c#2 $ */ #include "threadData.h" @@ -68,11 +68,11 @@ static ThreadData *asThreadData(VDOCompletion *completion) } /**********************************************************************/ -int initializeThreadData(ThreadData *threadData, - ThreadID threadID, - bool isReadOnly, - const ThreadConfig *threadConfig, - PhysicalLayer *layer) +static int initializeThreadData(ThreadData *threadData, + ThreadID threadID, + bool isReadOnly, + const ThreadConfig *threadConfig, + PhysicalLayer *layer) { threadData->threadID = threadID; threadData->isReadOnly = isReadOnly; @@ -85,9 +85,45 @@ int initializeThreadData(ThreadData *threadData, } /**********************************************************************/ -void uninitializeThreadData(ThreadData *threadData) +int makeThreadDataArray(bool isReadOnly, + const ThreadConfig *threadConfig, + PhysicalLayer *layer, + ThreadData **threadsPtr) { - destroyEnqueueable(&threadData->completion); + ThreadData *threads; + int result = ALLOCATE(threadConfig->baseThreadCount, ThreadData, __func__, + &threads); + if (result != VDO_SUCCESS) { + return result; + } + + for (ThreadCount id = 0; id < threadConfig->baseThreadCount; id++) { + result = initializeThreadData(&threads[id], id, isReadOnly, + threadConfig, layer); + if (result != VDO_SUCCESS) { + freeThreadDataArray(&threads, id); + return result; + } + } + + *threadsPtr = threads; + return VDO_SUCCESS; +} + +/**********************************************************************/ +void freeThreadDataArray(ThreadData **threadsPtr, ThreadCount count) +{ + ThreadData *threads = *threadsPtr; + if (threads == NULL) { + return; + } + + for (ThreadCount id = 0; id < count; id++) { + destroyEnqueueable(&threads[id].completion); + } + + FREE(threads); + *threadsPtr = NULL; } /**********************************************************************/ diff --git a/vdo/base/threadData.h b/vdo/base/threadData.h index cbd2f9e1..e70d30f8 100644 --- a/vdo/base/threadData.h +++ b/vdo/base/threadData.h @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/threadData.h#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/threadData.h#2 $ */ #ifndef THREAD_DATA_H @@ -61,30 +61,31 @@ struct threadData { }; /** - * Initialize the data for a thread. + * Create and initialize an array of ThreadData structures for all the base + * threads in the VDO. * - * @param threadData The data to initialize - * @param threadID The ID of the thread this data is for - * @param isReadOnly true if this thread should be in - * read-only mode - * @param threadConfig The thread configuration of the VDO - * @param layer The physical layer of the VDO + * @param [in] isReadOnly true if the threads should be in + * read-only mode + * @param [in] threadConfig The thread configuration of the VDO + * @param [in] layer The physical layer of the VDO + * @param [out] threadsPtr A pointer to receive the new array * * @return VDO_SUCCESS or an error **/ -int initializeThreadData(ThreadData *threadData, - ThreadID threadID, - bool isReadOnly, - const ThreadConfig *threadConfig, - PhysicalLayer *layer) +int makeThreadDataArray(bool isReadOnly, + const ThreadConfig *threadConfig, + PhysicalLayer *layer, + ThreadData **threadsPtr) __attribute__((warn_unused_result)); /** - * Clean up thread data resources. 
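// Hypothetical caller of the pair defined above; this is the pattern that
// upgrade.c, vdoLoad.c, and vdo.c adopt later in the patch:
ThreadData *threads;
int result = makeThreadDataArray(isReadOnly, threadConfig, layer, &threads);
if (result != VDO_SUCCESS) {
  return result;   // partial initialization is cleaned up internally
}
// ... use threads[0 .. threadConfig->baseThreadCount - 1] ...
freeThreadDataArray(&threads, threadConfig->baseThreadCount);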
+ * Destroy and free an array of ThreadData structures, then null out the + * reference to it. * - * @param threadData The thread data to uninitialize + * @param threadsPtr The reference to the array to free + * @param count The number of thread structures in the array **/ -void uninitializeThreadData(ThreadData *threadData); +void freeThreadDataArray(ThreadData **threadsPtr, ThreadCount count); /** * Get the next physical zone from which to allocate. diff --git a/vdo/base/upgrade.c b/vdo/base/upgrade.c index a9e45927..24360abc 100644 --- a/vdo/base/upgrade.c +++ b/vdo/base/upgrade.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/upgrade.c#3 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/upgrade.c#4 $ */ #include "upgrade.h" @@ -259,25 +259,14 @@ int upgradePriorVDO(PhysicalLayer *layer) return result; } - const ThreadConfig *threadConfig = getThreadConfig(vdo); - result = ALLOCATE(threadConfig->baseThreadCount, ThreadData, __func__, - &vdo->threadData); + result = makeThreadDataArray((vdo->state == VDO_READ_ONLY_MODE), + getThreadConfig(vdo), vdo->layer, + &vdo->threadData); if (result != VDO_SUCCESS) { freeVDO(&vdo); return result; } - for (ThreadCount thread = 0; thread < threadConfig->baseThreadCount; - thread++) { - result = initializeThreadData(&vdo->threadData[thread], thread, - (vdo->state == VDO_READ_ONLY_MODE), - threadConfig, vdo->layer); - if (result != VDO_SUCCESS) { - freeVDO(&vdo); - return result; - } - } - result = finishSodiumDecode(vdo); if (result != VDO_SUCCESS) { freeVDO(&vdo); diff --git a/vdo/base/vdo.c b/vdo/base/vdo.c index 16ea4859..54bbaaab 100644 --- a/vdo/base/vdo.c +++ b/vdo/base/vdo.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdo.c#8 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdo.c#10 $ */ /* @@ -189,13 +189,8 @@ void destroyVDO(VDO *vdo) FREE(vdo->physicalZones); vdo->physicalZones = NULL; - if (vdo->threadData != NULL) { - for (ThreadID thread = 0; thread < threadConfig->baseThreadCount; - thread++) { - uninitializeThreadData(&vdo->threadData[thread]); - } - FREE(vdo->threadData); - vdo->threadData = NULL; + if (threadConfig != NULL) { + freeThreadDataArray(&vdo->threadData, threadConfig->baseThreadCount); } freeAdminCompletion(&vdo->adminCompletion); @@ -626,9 +621,11 @@ int validateVDOConfig(const VDOConfig *config, return VDO_OUT_OF_RANGE; } + // This can't check equality because FileLayer et al can only known about + // the storage size, which may not match the super block size. result = ASSERT(config->physicalBlocks <= blockCount, - "Physical size %" PRIu64 " in super block greater than" - " storage size %" PRIu64, config->physicalBlocks, + "Physical size %" PRIu64 " in super block smaller than" + " expected size %" PRIu64, config->physicalBlocks, blockCount); if (result != UDS_SUCCESS) { return VDO_PARAMETER_MISMATCH; diff --git a/vdo/base/vdoLayout.c b/vdo/base/vdoLayout.c index f34d09ef..4221cf78 100644 --- a/vdo/base/vdoLayout.c +++ b/vdo/base/vdoLayout.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
* - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoLayout.c#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoLayout.c#2 $ */ #include "vdoLayout.h" @@ -202,6 +202,7 @@ void freeVDOLayout(VDOLayout **vdoLayoutPtr) return; } + freeCopyCompletion(&vdoLayout->copyCompletion); freeFixedLayout(&vdoLayout->nextLayout); freeFixedLayout(&vdoLayout->layout); freeFixedLayout(&vdoLayout->previousLayout); @@ -266,15 +267,24 @@ static BlockCount getPartitionSize(VDOLayout *layout, PartitionID partitionID) } /**********************************************************************/ -int prepareToGrowVDOLayout(VDOLayout *vdoLayout, - BlockCount oldPhysicalBlocks, - BlockCount newPhysicalBlocks) +int prepareToGrowVDOLayout(VDOLayout *vdoLayout, + BlockCount oldPhysicalBlocks, + BlockCount newPhysicalBlocks, + PhysicalLayer *layer) { if (getNextVDOLayoutSize(vdoLayout) == newPhysicalBlocks) { // We are already prepared to grow to the new size, so we're done. return VDO_SUCCESS; } + // Make a copy completion if there isn't one + if (vdoLayout->copyCompletion == NULL) { + int result = makeCopyCompletion(layer, &vdoLayout->copyCompletion); + if (result != VDO_SUCCESS) { + return result; + } + } + // Free any unused preparation. freeFixedLayout(&vdoLayout->nextLayout); @@ -290,6 +300,7 @@ int prepareToGrowVDOLayout(VDOLayout *vdoLayout, SLAB_SUMMARY_PARTITION), &vdoLayout->nextLayout); if (result != VDO_SUCCESS) { + freeCopyCompletion(&vdoLayout->copyCompletion); return result; } @@ -305,6 +316,7 @@ int prepareToGrowVDOLayout(VDOLayout *vdoLayout, if (minNewSize > newPhysicalBlocks) { // Copying the journal and summary would destroy some old metadata. freeFixedLayout(&vdoLayout->nextLayout); + freeCopyCompletion(&vdoLayout->copyCompletion); return VDO_INCREMENT_TOO_SMALL; } @@ -383,6 +395,8 @@ void finishVDOLayoutGrowth(VDOLayout *vdoLayout) if (vdoLayout->layout != vdoLayout->nextLayout) { freeFixedLayout(&vdoLayout->nextLayout); } + + freeCopyCompletion(&vdoLayout->copyCompletion); } /**********************************************************************/ @@ -390,7 +404,8 @@ void copyPartition(VDOLayout *layout, PartitionID partitionID, VDOCompletion *parent) { - copyPartitionAsync(parent->layer, getVDOPartition(layout, partitionID), + copyPartitionAsync(layout->copyCompletion, + getVDOPartition(layout, partitionID), getPartitionFromNextLayout(layout, partitionID), parent); } diff --git a/vdo/base/vdoLayout.h b/vdo/base/vdoLayout.h index dae0cce7..9f2ad0f0 100644 --- a/vdo/base/vdoLayout.h +++ b/vdo/base/vdoLayout.h @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
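// Sketch, not patch code, of the grow-physical flow as restructured in
// vdoLayout.c above; the partition ID is only an example:
int result = prepareToGrowVDOLayout(vdoLayout, oldPhysicalBlocks,
                                    newPhysicalBlocks, layer);  // allocates copyCompletion
if (result != VDO_SUCCESS) {
  return result;            // error paths above release the completion again
}
copyPartition(vdoLayout, SLAB_SUMMARY_PARTITION, parent);  // reuses layout->copyCompletion
// ...
finishVDOLayoutGrowth(vdoLayout);                          // frees the completion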
* - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoLayout.h#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoLayout.h#2 $ */ /** @@ -93,12 +93,14 @@ Partition *getVDOPartition(VDOLayout *vdoLayout, PartitionID id) * @param vdoLayout The layout to grow * @param oldPhysicalBlocks The current size of the VDO * @param newPhysicalBlocks The size to which the VDO will be grown + * @param layer The layer being grown * * @return VDO_SUCCESS or an error code **/ -int prepareToGrowVDOLayout(VDOLayout *vdoLayout, - BlockCount oldPhysicalBlocks, - BlockCount newPhysicalBlocks) +int prepareToGrowVDOLayout(VDOLayout *vdoLayout, + BlockCount oldPhysicalBlocks, + BlockCount newPhysicalBlocks, + PhysicalLayer *layer) __attribute__((warn_unused_result)); /** diff --git a/vdo/base/vdoLayoutInternals.h b/vdo/base/vdoLayoutInternals.h index 0bfe7979..ecdfa002 100644 --- a/vdo/base/vdoLayoutInternals.h +++ b/vdo/base/vdoLayoutInternals.h @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoLayoutInternals.h#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoLayoutInternals.h#2 $ */ #ifndef VDO_LAYOUT_INTERNALS_H @@ -34,6 +34,8 @@ struct vdoLayout { FixedLayout *previousLayout; // The first block in the layouts PhysicalBlockNumber startingOffset; + // A pointer to the copy completion (if there is one) + VDOCompletion *copyCompletion; }; #endif // VDO_LAYOUT_INTERNALS_H diff --git a/vdo/base/vdoLoad.c b/vdo/base/vdoLoad.c index 96e3edfe..b2383a3a 100644 --- a/vdo/base/vdoLoad.c +++ b/vdo/base/vdoLoad.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoLoad.c#4 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoLoad.c#6 $ */ #include "vdoLoad.h" @@ -342,22 +342,12 @@ static int decodeVDO(VDO *vdo, bool validateConfig) } const ThreadConfig *threadConfig = getThreadConfig(vdo); - result = ALLOCATE(threadConfig->baseThreadCount, ThreadData, __func__, - &vdo->threadData); + result = makeThreadDataArray((vdo->state == VDO_READ_ONLY_MODE), + threadConfig, vdo->layer, &vdo->threadData); if (result != VDO_SUCCESS) { return result; } - for (ThreadCount thread = 0; thread < threadConfig->baseThreadCount; - thread++) { - result = initializeThreadData(&vdo->threadData[thread], thread, - (vdo->state == VDO_READ_ONLY_MODE), - threadConfig, vdo->layer); - if (result != VDO_SUCCESS) { - return result; - } - } - result = finishVDODecode(vdo); if (result != VDO_SUCCESS) { return result; @@ -488,12 +478,6 @@ static void loadCallback(VDOCompletion *completion) int performVDOLoad(VDO *vdo, const VDOLoadConfig *loadConfig) { vdo->loadConfig = *loadConfig; - int result = copyThreadConfig(loadConfig->threadConfig, - &vdo->loadConfig.threadConfig); - if (result != VDO_SUCCESS) { - return result; - } - return performAdminOperation(vdo, ADMIN_OPERATION_LOAD, loadCallback); } diff --git a/vdo/base/vdoResize.c b/vdo/base/vdoResize.c index 4259a4c3..87425d93 100644 --- a/vdo/base/vdoResize.c +++ b/vdo/base/vdoResize.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
* - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoResize.c#4 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/base/vdoResize.c#5 $ */ #include "vdoResize.h" @@ -266,7 +266,7 @@ int prepareToGrowPhysical(VDO *vdo, BlockCount newPhysicalBlocks) } int result = prepareToGrowVDOLayout(vdo->layout, currentPhysicalBlocks, - newPhysicalBlocks); + newPhysicalBlocks, vdo->layer); if (result != VDO_SUCCESS) { return result; } diff --git a/vdo/kernel/bio.c b/vdo/kernel/bio.c index d6bf65cb..24b732df 100644 --- a/vdo/kernel/bio.c +++ b/vdo/kernel/bio.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/bio.c#2 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/bio.c#3 $ */ #include "bio.h" @@ -254,7 +254,7 @@ static void initializeBio(BIO *bio, KernelLayer *layer) #endif bio->bi_end_io = completeAsyncBio; setBioSector(bio, (sector_t) -1); // Sector will be set later on. - setBioBlockDevice(bio, layer->dev->bdev); + setBioBlockDevice(bio, getKernelLayerBdev(layer)); } /**********************************************************************/ diff --git a/vdo/kernel/dataKVIO.c b/vdo/kernel/dataKVIO.c index be3a2cb2..d219317c 100644 --- a/vdo/kernel/dataKVIO.c +++ b/vdo/kernel/dataKVIO.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/dataKVIO.c#8 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/dataKVIO.c#10 $ */ #include "dataKVIO.h" @@ -599,7 +599,7 @@ static int kvdoCreateKVIOFromBio(KernelLayer *layer, dataKVIO->readBlock.data = dataKVIO->dataBlock; } - setBioBlockDevice(bio, layer->dev->bdev); + setBioBlockDevice(bio, getKernelLayerBdev(layer)); bio->bi_end_io = completeAsyncBio; *dataKVIOPtr = dataKVIO; return VDO_SUCCESS; @@ -867,7 +867,7 @@ static int allocatePooledDataKVIO(KernelLayer *layer, DataKVIO **dataKVIOPtr) "DataKVIO data bio allocation failure"); } - if (layer->readCacheBlocks == 0) { + if (!layer->deviceConfig->readCacheEnabled) { result = allocateMemory(VDO_BLOCK_SIZE, 0, "kvio read buffer", &dataKVIO->readBlock.buffer); if (result != VDO_SUCCESS) { diff --git a/vdo/kernel/deviceConfig.c b/vdo/kernel/deviceConfig.c index e125ffdd..cdbf94bf 100644 --- a/vdo/kernel/deviceConfig.c +++ b/vdo/kernel/deviceConfig.c @@ -16,20 +16,27 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/deviceConfig.c#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/deviceConfig.c#4 $ */ #include "deviceConfig.h" +#include + #include "logger.h" #include "memoryAlloc.h" #include "stringUtils.h" #include "vdoStringUtils.h" +#include "constants.h" + +// The index of the pool name within the table line for the two known versions +static const uint8_t POOL_NAME_ARG_INDEX[2] = {8, 10}; + enum { - REQUIRED_ARGC = 10, - POOL_NAME_ARG_INDEX = 8, + V0_REQUIRED_ARGC = 10, + V1_REQUIRED_ARGC = 12, // Arbitrary limit used when parsing thread-count config spec strings THREAD_COUNT_LIMIT = 100, BIO_ROTATION_INTERVAL_LIMIT = 1024, @@ -43,20 +50,108 @@ enum { DEFAULT_BIO_SUBMIT_QUEUE_ROTATE_INTERVAL = 64, }; +static const char TABLE_VERSION_STRING[] = "V1"; + +/** + * Decide the version number from argv. 
+ * + * @param [in] argc The number of table values + * @param [in] argv The array of table values + * @param [out] errorPtr A pointer to return a error string in + * @param [out] versionPtr A pointer to return the version + * + * @return VDO_SUCCESS or an error code + **/ +static int getVersionNumber(int argc, + char **argv, + char **errorPtr, + TableVersion *versionPtr) +{ + if (strcmp(argv[0], TABLE_VERSION_STRING) != 0) { + if (argc == V0_REQUIRED_ARGC) { + logWarning("Detected version mismatch between kernel module and tools."); + logWarning("Please consider upgrading management tools to match kernel."); + *versionPtr = 0; + return VDO_SUCCESS; + } + *errorPtr = "Incorrect number of arguments for any known format"; + return VDO_BAD_CONFIGURATION; + } + + if (argc == V1_REQUIRED_ARGC) { + *versionPtr = 1; + return VDO_SUCCESS; + } + + *errorPtr = "Incorrect number of arguments"; + return VDO_BAD_CONFIGURATION; +} + /**********************************************************************/ int getPoolNameFromArgv(int argc, char **argv, char **errorPtr, char **poolNamePtr) { - if (argc != REQUIRED_ARGC) { - *errorPtr = "Incorrect number of arguments"; - return VDO_BAD_CONFIGURATION; + TableVersion version; + int result = getVersionNumber(argc, argv, errorPtr, &version); + if (result != VDO_SUCCESS) { + return result; } - *poolNamePtr = argv[POOL_NAME_ARG_INDEX]; + *poolNamePtr = argv[POOL_NAME_ARG_INDEX[version]]; return VDO_SUCCESS; } +/** + * Resolve the config with write policy, physical size, and other unspecified + * fields based on the device, if needed. + * + * @param [in,out] config The config possibly missing values + * @param [in] verbose Whether to log about the underlying device + **/ +static void resolveConfigWithDevice(DeviceConfig *config, + bool verbose) +{ + struct dm_dev *dev = config->ownedDevice; + struct request_queue *requestQueue = bdev_get_queue(dev->bdev); +#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0) + bool flushSupported + = ((requestQueue->queue_flags & (1ULL << QUEUE_FLAG_WC)) != 0); + bool fuaSupported + = ((requestQueue->queue_flags & (1ULL << QUEUE_FLAG_FUA)) != 0); +#else + bool flushSupported = ((requestQueue->flush_flags & REQ_FLUSH) == REQ_FLUSH); + bool fuaSupported = ((requestQueue->flush_flags & REQ_FUA) == REQ_FUA); +#endif + if (verbose) { + logInfo("underlying device, REQ_FLUSH: %s, REQ_FUA: %s", + (flushSupported ? "supported" : "not supported"), + (fuaSupported ? "supported" : "not supported")); + } else { + // We should probably always log, but need to make sure that makes sense + // before changing behavior. + } + + if (config->writePolicy == WRITE_POLICY_AUTO) { + config->writePolicy + = (flushSupported ? WRITE_POLICY_ASYNC : WRITE_POLICY_SYNC); + logInfo("Using write policy %s automatically.", + getConfigWritePolicyString(config)); + } else { + logInfo("Using write policy %s.", getConfigWritePolicyString(config)); + } + + if (flushSupported && (config->writePolicy == WRITE_POLICY_SYNC)) { + logWarning("WARNING: Running in sync mode atop a device supporting flushes" + " is dangerous!"); + } + + if (config->version == 0) { + uint64_t deviceSize = i_size_read(dev->bdev->bd_inode); + config->physicalBlocks = deviceSize / VDO_BLOCK_SIZE; + } +} + /** * Parse a two-valued option into a bool. 
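// Illustration, not patch code: a V0 table has 10 arguments and no version
// string, so the pool name is argv[8]; a V1 table leads with the literal
// "V1", has 12 arguments, and carries the pool name at argv[10].
char *error    = NULL;
char *poolName = NULL;
int result = getPoolNameFromArgv(argc, argv, &error, &poolName);
if (result != VDO_SUCCESS) {
  logError("invalid VDO table line: %s", error);
}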
* @@ -254,23 +349,22 @@ static void handleParseError(DeviceConfig **configPtr, } /**********************************************************************/ -int parseDeviceConfig(int argc, - char **argv, - char **errorPtr, - DeviceConfig **configPtr) +int parseDeviceConfig(int argc, + char **argv, + struct dm_target *ti, + bool verbose, + DeviceConfig **configPtr) { + char **errorPtr = &ti->error; DeviceConfig *config = NULL; - if (argc != REQUIRED_ARGC) { - handleParseError(&config, errorPtr, "Incorrect number of arguments"); - return VDO_BAD_CONFIGURATION; - } - int result = ALLOCATE(1, DeviceConfig, "DeviceConfig", &config); if (result != VDO_SUCCESS) { handleParseError(&config, errorPtr, "Could not allocate config structure"); return VDO_BAD_CONFIGURATION; } + config->owningTarget = ti; + // Save the original string. result = joinStrings(argv, argc, ' ', &config->originalString); if (result != VDO_SUCCESS) { @@ -299,6 +393,14 @@ int parseDeviceConfig(int argc, char **argumentPtr = argv; + result = getVersionNumber(argc, argv, errorPtr, &config->version); + if (result != VDO_SUCCESS) { + // getVersionNumber sets errorPtr itself. + handleParseError(&config, errorPtr, *errorPtr); + return result; + } + argumentPtr++; + result = duplicateString(*argumentPtr++, "parent device name", &config->parentDeviceName); if (result != VDO_SUCCESS) { @@ -306,6 +408,15 @@ int parseDeviceConfig(int argc, return VDO_BAD_CONFIGURATION; } + // Get the physical blocks, if known. + if (config->version == 1) { + result = kstrtoull(*argumentPtr++, 10, &config->physicalBlocks); + if (result != VDO_SUCCESS) { + handleParseError(&config, errorPtr, "Invalid physical block count"); + return VDO_BAD_CONFIGURATION; + } + } + // Get the logical block size and validate bool enable512e; result = parseBool(*argumentPtr++, "512", "4096", &enable512e); @@ -366,7 +477,9 @@ int parseDeviceConfig(int argc, } argumentPtr++; - if (argumentPtr != &argv[POOL_NAME_ARG_INDEX]) { + // Make sure the enum to get the pool name from argv directly is still in + // sync with the parsing of the table line. + if (argumentPtr != &argv[POOL_NAME_ARG_INDEX[config->version]]) { handleParseError(&config, errorPtr, "Pool name not in expected location"); return VDO_BAD_CONFIGURATION; } @@ -411,6 +524,17 @@ int parseDeviceConfig(int argc, return VDO_BAD_CONFIGURATION; } + result = dm_get_device(ti, config->parentDeviceName, + dm_table_get_mode(ti->table), &config->ownedDevice); + if (result != 0) { + logError("couldn't open device \"%s\": error %d", + config->parentDeviceName, result); + handleParseError(&config, errorPtr, "Unable to open storage device"); + return VDO_BAD_CONFIGURATION; + } + + resolveConfigWithDevice(config, verbose); + *configPtr = config; return result; } @@ -428,6 +552,10 @@ void freeDeviceConfig(DeviceConfig **configPtr) return; } + if (config->ownedDevice != NULL) { + dm_put_device(config->owningTarget, config->ownedDevice); + } + FREE(config->threadConfigString); FREE(config->poolName); FREE(config->parentDeviceName); @@ -446,21 +574,4 @@ const char *getConfigWritePolicyString(DeviceConfig *config) return ((config->writePolicy == WRITE_POLICY_ASYNC) ? "async" : "sync"); } -/**********************************************************************/ -void resolveConfigWithFlushSupport(DeviceConfig *config, bool flushSupported) -{ - if (config->writePolicy == WRITE_POLICY_AUTO) { - config->writePolicy - = (flushSupported ? 
WRITE_POLICY_ASYNC : WRITE_POLICY_SYNC); - logInfo("Using write policy %s automatically.", - getConfigWritePolicyString(config)); - } else { - logInfo("Using write policy %s.", getConfigWritePolicyString(config)); - } - - if (flushSupported && (config->writePolicy == WRITE_POLICY_SYNC)) { - logWarning("WARNING: Running in sync mode atop a device supporting flushes" - " is dangerous!"); - } -} diff --git a/vdo/kernel/deviceConfig.h b/vdo/kernel/deviceConfig.h index 87ebc730..f19026cf 100644 --- a/vdo/kernel/deviceConfig.h +++ b/vdo/kernel/deviceConfig.h @@ -16,11 +16,13 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/deviceConfig.h#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/deviceConfig.h#5 $ */ #ifndef DEVICE_CONFIG_H #define DEVICE_CONFIG_H +#include + #include "kernelTypes.h" typedef struct { @@ -34,9 +36,16 @@ typedef struct { int hashZones; } ThreadCountConfig; +typedef uint32_t TableVersion; + typedef struct { + struct dm_target *owningTarget; + struct dm_dev *ownedDevice; + KernelLayer *layer; char *originalString; + TableVersion version; char *parentDeviceName; + BlockCount physicalBlocks; unsigned int logicalBlockSize; WritePolicy writePolicy; unsigned int cacheSize; @@ -66,19 +75,21 @@ int getPoolNameFromArgv(int argc, __attribute__((warn_unused_result)); /** - * Convert the dmsetup argument list into a DeviceConfig. + * Convert the dmsetup table into a DeviceConfig. * * @param [in] argc The number of table values * @param [in] argv The array of table values - * @param [out] errorPtr A pointer to return a error string in + * @param [in] ti The target structure for this table + * @param [in] verbose Whether to log about the underlying device * @param [out] configPtr A pointer to return the allocated config * * @return VDO_SUCCESS or an error code **/ -int parseDeviceConfig(int argc, - char **argv, - char **errorPtr, - DeviceConfig **configPtr) +int parseDeviceConfig(int argc, + char **argv, + struct dm_target *ti, + bool verbose, + DeviceConfig **configPtr) __attribute__((warn_unused_result)); /** @@ -98,12 +109,4 @@ void freeDeviceConfig(DeviceConfig **configPtr); const char *getConfigWritePolicyString(DeviceConfig *config) __attribute__((warn_unused_result)); -/** - * Resolve the write policy if needed. - * - * @param [in,out] config The config to resolve - * @param [in] flushSupported Whether flushes are supported. - **/ -void resolveConfigWithFlushSupport(DeviceConfig *config, bool flushSupported); - #endif // DEVICE_CONFIG_H diff --git a/vdo/kernel/dmvdo.c b/vdo/kernel/dmvdo.c index b17b2774..6be2ed10 100644 --- a/vdo/kernel/dmvdo.c +++ b/vdo/kernel/dmvdo.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/dmvdo.c#8 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/dmvdo.c#20 $ */ #include "dmvdo.h" @@ -116,6 +116,18 @@ unsigned int maxDiscardSectors = VDO_SECTORS_PER_BLOCK; /**********************************************************************/ +/** + * Get the kernel layer associated with a dm target structure. + * + * @param ti The dm target structure + * + * @return The kernel layer, or NULL. + **/ +static KernelLayer *getKernelLayerForTarget(struct dm_target *ti) +{ + return ((DeviceConfig *) ti->private)->layer; +} + /** * Begin VDO processing of a bio. 
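// Note, not patch code: ti->private changes meaning in this patch.
//   old:  KernelLayer *layer = ti->private;
//   new:  KernelLayer *layer = ((DeviceConfig *) ti->private)->layer;
// The target now hands out its DeviceConfig, and the config records the
// layer it is attached to.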
This is called by the device mapper * through the "map" function, and has resulted from a call to either @@ -151,16 +163,16 @@ static int vdoMapBio(struct dm_target *ti, BIO *bio, union map_info *unused) static int vdoMapBio(struct dm_target *ti, BIO *bio) #endif { - KernelLayer *layer = ti->private; + KernelLayer *layer = getKernelLayerForTarget(ti); return kvdoMapBio(layer, bio); } /**********************************************************************/ static void vdoIoHints(struct dm_target *ti, struct queue_limits *limits) { - KernelLayer *layer = ti->private; + KernelLayer *layer = getKernelLayerForTarget(ti); - limits->logical_block_size = layer->logicalBlockSize; + limits->logical_block_size = layer->deviceConfig->logicalBlockSize; limits->physical_block_size = VDO_BLOCK_SIZE; // The minimum io size for random io @@ -181,11 +193,11 @@ static int vdoIterateDevices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { - KernelLayer *layer = ti->private; - sector_t len = blockToSector(layer, layer->blockCount); + KernelLayer *layer = getKernelLayerForTarget(ti); + sector_t len = blockToSector(layer, layer->deviceConfig->physicalBlocks); #if HAS_FLUSH_SUPPORTED - return fn(ti, layer->dev, 0, len, data); + return fn(ti, layer->deviceConfig->ownedDevice, 0, len, data); #else if (!shouldProcessFlush(layer)) { // In sync mode, if the underlying device needs flushes, accept flushes. @@ -221,7 +233,7 @@ static void vdoStatus(struct dm_target *ti, char *result, unsigned int maxlen) { - KernelLayer *layer = ti->private; + KernelLayer *layer = getKernelLayerForTarget(ti); char nameBuffer[BDEVNAME_SIZE]; // N.B.: The DMEMIT macro uses the variables named "sz", "result", "maxlen". int sz = 0; @@ -233,7 +245,7 @@ static void vdoStatus(struct dm_target *ti, getKVDOStatistics(&layer->kvdo, &layer->vdoStatsStorage); VDOStatistics *stats = &layer->vdoStatsStorage; DMEMIT("/dev/%s %s %s %s %s %" PRIu64 " %" PRIu64, - bdevname(layer->dev->bdev, nameBuffer), + bdevname(getKernelLayerBdev(layer), nameBuffer), stats->mode, stats->inRecoveryMode ? "recovering" : "-", getDedupeStateName(layer->dedupeIndex), @@ -245,7 +257,7 @@ static void vdoStatus(struct dm_target *ti, case STATUSTYPE_TABLE: // Report the string actually specified in the beginning. - DMEMIT("%s", layer->deviceConfig->originalString); + DMEMIT("%s", ((DeviceConfig *) ti->private)->originalString); break; } @@ -254,15 +266,15 @@ static void vdoStatus(struct dm_target *ti, /** - * Get the size of a device, in blocks. + * Get the size of the underlying device, in blocks. * - * @param [in] dev The device object. + * @param [in] layer The layer * * @return The size in blocks **/ -static BlockCount getDeviceBlockCount(struct dm_dev *dev) +static BlockCount getUnderlyingDeviceBlockCount(KernelLayer *layer) { - uint64_t physicalSize = i_size_read(dev->bdev->bd_inode); + uint64_t physicalSize = i_size_read(getKernelLayerBdev(layer)->bd_inode); return physicalSize / VDO_BLOCK_SIZE; } @@ -332,11 +344,26 @@ static int processVDOMessageLocked(KernelLayer *layer, } if (strcasecmp(argv[0], "prepareToGrowPhysical") == 0) { - return prepareToResizePhysical(layer, getDeviceBlockCount(layer->dev)); + return prepareToResizePhysical(layer, + getUnderlyingDeviceBlockCount(layer)); } if (strcasecmp(argv[0], "growPhysical") == 0) { - return resizePhysical(layer, getDeviceBlockCount(layer->dev)); + // The actual growPhysical will happen when the device is resumed. 
+ + if (layer->deviceConfig->version != 0) { + // XXX Uncomment this branch when new VDO manager is updated to not + // send this message. + + // Old style message on new style table is unexpected; it means the + // user started the VDO with new manager and is growing with old. + // logInfo("Mismatch between growPhysical method and table version."); + // return -EINVAL; + } else { + layer->deviceConfig->physicalBlocks + = getUnderlyingDeviceBlockCount(layer); + } + return 0; } break; @@ -454,7 +481,7 @@ static int vdoMessage(struct dm_target *ti, unsigned int argc, char **argv) return -EINVAL; } - KernelLayer *layer = (KernelLayer *) ti->private; + KernelLayer *layer = getKernelLayerForTarget(ti); RegisteredThread allocatingThread, instanceThread; registerAllocatingThread(&allocatingThread, NULL); registerThreadDevice(&instanceThread, layer); @@ -464,44 +491,6 @@ static int vdoMessage(struct dm_target *ti, unsigned int argc, char **argv) return mapToSystemError(result); } -/** - * Get the device beneath this device mapper target, given its name and - * this target. - * - * @param [in] ti This device mapper target - * @param [in] name The name of the device beneath ti - * @param [out] devPtr A pointer to return the device structure - * - * @return a system error code - **/ -static int getUnderlyingDevice(struct dm_target *ti, - const char *name, - struct dm_dev **devPtr) -{ - int result; -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,34) - result = dm_get_device(ti, name, dm_table_get_mode(ti->table), devPtr); -#else -#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32) -// The signature of dm_get_device differs between kernel 2.6.32 on -// debian squeeze and kernel 2.6.32 on RHEL (CentOS) -#if defined(RHEL_RELEASE_CODE) - result = dm_get_device(ti, name, dm_table_get_mode(ti->table), devPtr); -#else - result = dm_get_device(ti, name, 0, 0, dm_table_get_mode(ti->table), devPtr); -#endif /* RHEL */ -#else -#error "unsupported linux kernel version" -#endif /* kernel 2.6.32 */ -#endif /* kernel version > 2.6.34 */ - - if (result != 0) { - logError("couldn't open device \"%s\": error %d", name, result); - return -EINVAL; - } - return result; -} - /** * Configure the dm_target with our capabilities. * @@ -552,22 +541,17 @@ static void configureTargetCapabilities(struct dm_target *ti, * Handle a vdoInitialize failure, freeing all appropriate structures. * * @param ti The device mapper target representing our device - * @param config The parsed config for the instance - * @param dev The device under our device (possibly NULL) * @param threadConfig The thread config (possibly NULL) * @param layer The kernel layer (possibly NULL) * @param instance The instance number to be released * @param why The reason for failure **/ static void cleanupInitialize(struct dm_target *ti, - DeviceConfig *config, - struct dm_dev *dev, ThreadConfig *threadConfig, KernelLayer *layer, unsigned int instance, char *why) { - freeDeviceConfig(&config); if (threadConfig != NULL) { freeThreadConfig(&threadConfig); } @@ -578,9 +562,6 @@ static void cleanupInitialize(struct dm_target *ti, // With no KernelLayer taking ownership we have to release explicitly. 
releaseKVDOInstance(instance); } - if (dev != NULL) { - dm_put_device(ti, dev); - } ti->error = why; } @@ -601,40 +582,15 @@ static int vdoInitialize(struct dm_target *ti, { logInfo("starting device '%s'", config->poolName); - struct dm_dev *dev; - int result = getUnderlyingDevice(ti, config->parentDeviceName, &dev); - if (result != 0) { - cleanupInitialize(ti, config, NULL, NULL, NULL, instance, - "Device lookup failed"); - return result; - } - - struct request_queue *requestQueue = bdev_get_queue(dev->bdev); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0) - bool flushSupported - = ((requestQueue->queue_flags & (1ULL << QUEUE_FLAG_WC)) != 0); - bool fuaSupported - = ((requestQueue->queue_flags & (1ULL << QUEUE_FLAG_FUA)) != 0); -#else - bool flushSupported = ((requestQueue->flush_flags & REQ_FLUSH) == REQ_FLUSH); - bool fuaSupported = ((requestQueue->flush_flags & REQ_FUA) == REQ_FUA); -#endif - logInfo("underlying device, REQ_FLUSH: %s, REQ_FUA: %s", - (flushSupported ? "supported" : "not supported"), - (fuaSupported ? "supported" : "not supported")); - - resolveConfigWithFlushSupport(config, flushSupported); - uint64_t blockSize = VDO_BLOCK_SIZE; uint64_t logicalSize = to_bytes(ti->len); BlockCount logicalBlocks = logicalSize / blockSize; - BlockCount physicalBlocks = getDeviceBlockCount(dev); logDebug("Logical block size = %" PRIu64, (uint64_t) config->logicalBlockSize); logDebug("Logical blocks = %" PRIu64, logicalBlocks); logDebug("Physical block size = %" PRIu64, (uint64_t) blockSize); - logDebug("Physical blocks = %" PRIu64, physicalBlocks); + logDebug("Physical blocks = %" PRIu64, config->physicalBlocks); logDebug("Block map cache blocks = %u", config->cacheSize); logDebug("Block map maximum age = %u", config->blockMapMaximumAge); logDebug("MD RAID5 mode = %s", (config->mdRaid5ModeEnabled @@ -653,17 +609,15 @@ static int vdoInitialize(struct dm_target *ti, .maximumAge = config->blockMapMaximumAge, }; - // Henceforth it is the kernel layer's responsibility to clean up the - // DeviceConfig in case of error. char *failureReason; KernelLayer *layer; - result = makeKernelLayer(physicalBlocks, ti->begin, dev, instance, - config, &kvdoDevice.kobj, &loadConfig.threadConfig, - &failureReason, &layer); + int result = makeKernelLayer(ti->begin, instance, config, + &kvdoDevice.kobj, &loadConfig.threadConfig, + &failureReason, &layer); if (result != VDO_SUCCESS) { logError("Could not create kernel physical layer. (VDO error %d," " message %s)", result, failureReason); - cleanupInitialize(ti, NULL, dev, loadConfig.threadConfig, NULL, instance, + cleanupInitialize(ti, loadConfig.threadConfig, NULL, instance, failureReason); return result; } @@ -675,55 +629,30 @@ static int vdoInitialize(struct dm_target *ti, if (config->cacheSize < (2 * MAXIMUM_USER_VIOS * loadConfig.threadConfig->logicalZoneCount)) { logWarning("Insufficient block map cache for logical zones"); - cleanupInitialize(ti, NULL, dev, loadConfig.threadConfig, layer, instance, + cleanupInitialize(ti, loadConfig.threadConfig, layer, instance, "Insufficient block map cache for logical zones"); return VDO_BAD_CONFIGURATION; } + // Henceforth it is the kernel layer's responsibility to clean up the + // ThreadConfig. result = startKernelLayer(layer, &loadConfig, &failureReason); - freeThreadConfig(&loadConfig.threadConfig); if (result != VDO_SUCCESS) { logError("Could not start kernel physical layer. 
(VDO error %d," " message %s)", result, failureReason); - cleanupInitialize(ti, NULL, dev, NULL, layer, instance, failureReason); + cleanupInitialize(ti, NULL, layer, instance, failureReason); return result; } - layer->ti = ti; - ti->private = layer; + acquireKernelLayerReference(layer, config); + setKernelLayerActiveConfig(layer, config); + ti->private = config; configureTargetCapabilities(ti, layer); logInfo("device '%s' started", config->poolName); return VDO_SUCCESS; } -/** - * Release our reference to the old device and swap in the new dm target - * structure and underlying device. If there is no old reference increment - * the reference count on the layer to prevent it being released before the - * destructor for the old target occurs. - * - * @param layer The layer in question - * @param ti The new target structure - * @param dev The new underlying device - **/ -static void convertToNewDevice(KernelLayer *layer, - struct dm_target *ti, - struct dm_dev *dev) -{ - if (layer->oldTI != NULL) { - logDebug("Releasing ref by %" PRIptr " to %" PRIptr, layer->oldTI, - layer->oldDev); - dm_put_device(layer->oldTI, layer->oldDev); - } else { - kobject_get(&layer->kobj); - } - layer->oldTI = layer->ti; - layer->oldDev = layer->dev; - layer->ti = ti; - layer->dev = dev; -} - /**********************************************************************/ static int vdoCtr(struct dm_target *ti, unsigned int argc, char **argv) { @@ -752,8 +681,9 @@ static int vdoCtr(struct dm_target *ti, unsigned int argc, char **argv) RegisteredThread instanceThread; registerThreadDeviceID(&instanceThread, &instance); + bool verbose = (oldLayer == NULL); DeviceConfig *config = NULL; - result = parseDeviceConfig(argc, argv, &ti->error, &config); + result = parseDeviceConfig(argc, argv, ti, verbose, &config); if (result != VDO_SUCCESS) { unregisterThreadDeviceID(); unregisterAllocatingThread(); @@ -765,68 +695,33 @@ static int vdoCtr(struct dm_target *ti, unsigned int argc, char **argv) // Is there already a device of this name? if (oldLayer != NULL) { - if (getKernelLayerState(oldLayer) != LAYER_SUSPENDED) { - logError("Can't modify already-existing VDO named %s that isn't" - " suspended", poolName); - freeDeviceConfig(&config); - unregisterThreadDeviceID(); - unregisterAllocatingThread(); - return -EINVAL; - } - /* - * Applying the new table here is technically incorrect. Most - * devices don't apply the new table until they go through a - * suspend/resume cycle, and if they fail to apply the new table - * in their preresume step, they remain in a suspended state without a - * valid table. We want to apply some modifications without a suspend - * and resume cycle, and if the modifications are invalid we want to - * remain active rather than suspended, so we apply the changes here - * instead of in preresume. + * To preserve backward compatibility with old VDO Managers, we need to + * allow this to happen when either suspended or not. We could assert + * that if the config is version 0, we are suspended, and if not, we + * are not, but we can't do that till new VDO Manager does the right + * order. 
*/ - logInfo("modifying device '%s'", config->poolName); - ti->private = oldLayer; - - struct dm_dev *newDev; - result = getUnderlyingDevice(ti, config->parentDeviceName, &newDev); - if (result == 0) { - struct request_queue *requestQueue = bdev_get_queue(newDev->bdev); -#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0) - bool flushSupported - = ((requestQueue->queue_flags & (1ULL << QUEUE_FLAG_WC)) != 0); -#else - bool flushSupported - = ((requestQueue->flush_flags & REQ_FLUSH) == REQ_FLUSH); -#endif - resolveConfigWithFlushSupport(config, flushSupported); - - result = modifyKernelLayer(oldLayer, ti, config, &ti->error); - if (result != VDO_SUCCESS) { - result = mapToSystemError(result); - dm_put_device(ti, newDev); - freeDeviceConfig(&config); - } else { - configureTargetCapabilities(ti, oldLayer); - convertToNewDevice(oldLayer, ti, newDev); - DeviceConfig *oldConfig = oldLayer->deviceConfig; - oldLayer->deviceConfig = config; - freeDeviceConfig(&oldConfig); - } - } else { + logInfo("preparing to modify device '%s'", config->poolName); + result = prepareToModifyKernelLayer(oldLayer, config, &ti->error); + if (result != VDO_SUCCESS) { + result = mapToSystemError(result); freeDeviceConfig(&config); - logError("Could not find underlying device"); + } else { + acquireKernelLayerReference(oldLayer, config); + ti->private = config; + configureTargetCapabilities(ti, oldLayer); } unregisterThreadDeviceID(); unregisterAllocatingThread(); return result; } - // Henceforth, the config will be freed within any failing function and it - // is the kernel layer's responsibility to free when appropriate. result = vdoInitialize(ti, instance, config); if (result != VDO_SUCCESS) { // vdoInitialize calls into various VDO routines, so map error result = mapToSystemError(result); + freeDeviceConfig(&config); } unregisterThreadDeviceID(); @@ -837,71 +732,39 @@ static int vdoCtr(struct dm_target *ti, unsigned int argc, char **argv) /**********************************************************************/ static void vdoDtr(struct dm_target *ti) { - KernelLayer *layer = ti->private; - if (layer->ti != ti) { - /* - * This must be the destructor associated with a reload. - * - * We cannot access anything that may have been cleaned up by a previous - * invocation of the destructor. That is, there's no guarantee that this - * code path is being executed before one that actually tore down the - * internals of the layer. - * - * Only perform the put on the device and kobject if the dm_target is the - * specific target tracked in the layer's oldTI field. This allows - * multiple construction/destruction associated with the same layer (e.g., - * as a result of multiple dmsetup reloads) without incorrectly destructing - * the layer. 
- */ - if (layer->oldTI == ti) { - logDebug("Releasing reference by old ti %" PRIptr " to dev %" PRIptr, - layer->oldTI, layer->oldDev); - dm_put_device(layer->oldTI, layer->oldDev); - layer->oldTI = NULL; - layer->oldDev = NULL; - kobject_put(&layer->kobj); - } - return; - } - - struct dm_dev *dev = layer->dev; - unsigned int instance = layer->instance; - - RegisteredThread allocatingThread, instanceThread; - registerThreadDeviceID(&instanceThread, &instance); - registerAllocatingThread(&allocatingThread, NULL); + DeviceConfig *config = ti->private; + KernelLayer *layer = getKernelLayerForTarget(ti); - waitForNoRequestsActive(layer); - logInfo("stopping device '%s'", layer->deviceConfig->poolName); + releaseKernelLayerReference(layer, config); - if (layer->dumpOnShutdown) { - vdoDumpAll(layer, "device shutdown"); - } + if (layer->configReferences == 0) { + // This was the last config referencing the layer. Free it. + unsigned int instance = layer->instance; + RegisteredThread allocatingThread, instanceThread; + registerThreadDeviceID(&instanceThread, &instance); + registerAllocatingThread(&allocatingThread, NULL); - // Copy the device name (for logging) out of the layer since it's about to be - // freed. - const char *poolName; - char *poolNameToFree; - int result = duplicateString(layer->deviceConfig->poolName, "pool name", - &poolNameToFree); - poolName = (result == VDO_SUCCESS) ? poolNameToFree : "unknown"; + waitForNoRequestsActive(layer); + logInfo("stopping device '%s'", config->poolName); - freeKernelLayer(layer); - dm_put_device(ti, dev); + if (layer->dumpOnShutdown) { + vdoDumpAll(layer, "device shutdown"); + } - logInfo("device '%s' stopped", poolName); - poolName = NULL; - FREE(poolNameToFree); - poolNameToFree = NULL; + freeKernelLayer(layer); + logInfo("device '%s' stopped", config->poolName); + unregisterThreadDeviceID(); + unregisterAllocatingThread(); + } - unregisterThreadDeviceID(); - unregisterAllocatingThread(); + freeDeviceConfig(&config); + ti->private = NULL; } /**********************************************************************/ static void vdoPostsuspend(struct dm_target *ti) { - KernelLayer *layer = ti->private; + KernelLayer *layer = getKernelLayerForTarget(ti); RegisteredThread instanceThread; registerThreadDevice(&instanceThread, layer); const char *poolName = layer->deviceConfig->poolName; @@ -918,11 +781,23 @@ static void vdoPostsuspend(struct dm_target *ti) /**********************************************************************/ static int vdoPreresume(struct dm_target *ti) { - KernelLayer *layer = ti->private; + KernelLayer *layer = getKernelLayerForTarget(ti); + DeviceConfig *config = ti->private; RegisteredThread instanceThread; registerThreadDevice(&instanceThread, layer); - logInfo("resuming device '%s'", layer->deviceConfig->poolName); - int result = resumeKernelLayer(layer); + logInfo("resuming device '%s'", config->poolName); + + // This is a noop if nothing has changed, and by calling it every time + // we capture old-style growPhysicals, which change the config in place. 
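(Editor's illustration, not taken from the patch.) The vdoCtr/vdoDtr rework above replaces the old ti/oldTI/oldDev bookkeeping with a count of DeviceConfigs referencing the KernelLayer: each table load acquires a reference, each destructor releases one, and only the release that drops configReferences to zero tears the layer down. A minimal user-space sketch of that lifecycle, using hypothetical stand-in types (struct layer, struct config) rather than the real kvdo structures:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for KernelLayer and DeviceConfig. */
struct layer  { unsigned int config_references; };
struct config { struct layer *layer; };

/* Roughly what acquireKernelLayerReference() does: the new table pins the layer. */
static void acquire_reference(struct layer *layer, struct config *config)
{
  layer->config_references++;
  config->layer = layer;
}

/* Roughly the vdoDtr() path: drop the reference, and free the layer only
 * when the last referencing config goes away. */
static void release_reference(struct layer **layerPtr, struct config *config)
{
  struct layer *layer = *layerPtr;
  config->layer = NULL;
  if (--layer->config_references == 0) {
    printf("last config released: freeing layer\n");
    free(layer);
    *layerPtr = NULL;
  }
}

int main(void)
{
  struct layer *layer = calloc(1, sizeof(*layer));
  if (layer == NULL) {
    return 1;
  }

  struct config old_table = { 0 };
  struct config new_table = { 0 };

  acquire_reference(layer, &old_table);   /* original construction                 */
  acquire_reference(layer, &new_table);   /* dmsetup reload: two configs, one layer */
  release_reference(&layer, &old_table);  /* old target destroyed: layer survives   */
  release_reference(&layer, &new_table);  /* new target destroyed: layer freed      */
  return 0;
}

With this scheme a dmsetup reload can create and destroy targets in any order; the shared layer only goes away with its last config, which is what the configReferences check in vdoDtr above expresses.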
+ int result = modifyKernelLayer(layer, config); + if (result != VDO_SUCCESS) { + logErrorWithStringError(result, "Commit of modifications to device '%s'" + " failed", config->poolName); + return result; + } + setKernelLayerActiveConfig(layer, config); + + result = resumeKernelLayer(layer); if (result != VDO_SUCCESS) { logError("resume of device '%s' failed with error: %d", layer->deviceConfig->poolName, result); @@ -934,7 +809,7 @@ static int vdoPreresume(struct dm_target *ti) /**********************************************************************/ static void vdoResume(struct dm_target *ti) { - KernelLayer *layer = ti->private; + KernelLayer *layer = getKernelLayerForTarget(ti); RegisteredThread instanceThread; registerThreadDevice(&instanceThread, layer); logInfo("device '%s' resumed", layer->deviceConfig->poolName); diff --git a/vdo/kernel/ioSubmitter.c b/vdo/kernel/ioSubmitter.c index d6c95544..7e07062c 100644 --- a/vdo/kernel/ioSubmitter.c +++ b/vdo/kernel/ioSubmitter.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/ioSubmitter.c#2 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/ioSubmitter.c#4 $ */ #include "ioSubmitterInternals.h" @@ -237,7 +237,7 @@ static void processBioMap(KvdoWorkItem *item) KVIO *kvioBio = bio->bi_private; BIO *next = bio->bi_next; bio->bi_next = NULL; - setBioBlockDevice(bio, kvioBio->layer->dev->bdev); + setBioBlockDevice(bio, getKernelLayerBdev(kvioBio->layer)); kvioBio->bioSubmissionCallback(&kvioBio->enqueueable.workItem); bio = next; } @@ -429,7 +429,7 @@ void enqueueBioMap(BIO *bio, * Setting the sync-flag on journal-related bios is expected to reduce * latency on journal updates submitted to an MD RAID5 device. */ - if (layer->mdRaid5ModeEnabled) { + if (layer->deviceConfig->mdRaid5ModeEnabled) { if (isData(kvio)) { // Clear the bits for sync I/O RW flags on data block bios. clearBioOperationFlagSync(bio); @@ -489,6 +489,7 @@ int makeIOSubmitter(const char *threadNamePrefix, unsigned int threadCount, unsigned int rotationInterval, unsigned int maxRequestsActive, + unsigned int readCacheBlocks, KernelLayer *layer, IOSubmitter **ioSubmitterPtr) { @@ -502,8 +503,8 @@ int makeIOSubmitter(const char *threadNamePrefix, return result; } - if (layer->readCacheBlocks > 0) { - result = makeReadCache(layer, layer->readCacheBlocks, threadCount, + if (readCacheBlocks > 0) { + result = makeReadCache(layer, readCacheBlocks, threadCount, &ioSubmitter->readCache); if (result != VDO_SUCCESS) { FREE(ioSubmitter); diff --git a/vdo/kernel/ioSubmitter.h b/vdo/kernel/ioSubmitter.h index c9e5650f..63371124 100644 --- a/vdo/kernel/ioSubmitter.h +++ b/vdo/kernel/ioSubmitter.h @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. 
* - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/ioSubmitter.h#1 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/ioSubmitter.h#2 $ */ #ifndef IOSUBMITTER_H @@ -75,6 +75,7 @@ void completeAsyncBio(BIO *bio, int error); * bio-submission threads when enqueuing work * items * @param [in] maxRequestsActive Number of bios for merge tracking + * @param [in] readCacheBlocks Number of read cache blocks * @param [in] layer The kernel layer * @param [out] ioSubmitter Pointer to the new data structure * @@ -84,6 +85,7 @@ int makeIOSubmitter(const char *threadNamePrefix, unsigned int threadCount, unsigned int rotationInterval, unsigned int maxRequestsActive, + unsigned int readCacheBlocks, KernelLayer *layer, IOSubmitter **ioSubmitter); diff --git a/vdo/kernel/kernelLayer.c b/vdo/kernel/kernelLayer.c index 8f0beacb..dd44556d 100644 --- a/vdo/kernel/kernelLayer.c +++ b/vdo/kernel/kernelLayer.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/kernelLayer.c#12 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/kernelLayer.c#18 $ */ #include "kernelLayer.h" @@ -38,6 +38,7 @@ #include "bio.h" #include "dataKVIO.h" #include "dedupeIndex.h" +#include "deviceConfig.h" #include "deviceRegistry.h" #include "instanceNumber.h" #include "ioSubmitterInternals.h" @@ -97,7 +98,7 @@ static CRC32Checksum kvdoUpdateCRC32(CRC32Checksum crc, /**********************************************************************/ static BlockCount kvdoGetBlockCount(PhysicalLayer *header) { - return asKernelLayer(header)->blockCount; + return asKernelLayer(header)->deviceConfig->physicalBlocks; } /**********************************************************************/ @@ -248,7 +249,7 @@ int kvdoMapBio(KernelLayer *layer, BIO *bio) // again, so this is the last chance to account for it. 
countBios(&layer->biosAcknowledged, bio); atomic64_inc(&layer->flushOut); - setBioBlockDevice(bio, layer->dev->bdev); + setBioBlockDevice(bio, getKernelLayerBdev(layer)); return DM_MAPIO_REMAPPED; } } @@ -289,6 +290,12 @@ int kvdoMapBio(KernelLayer *layer, BIO *bio) return DM_MAPIO_SUBMITTED; } +/**********************************************************************/ +struct block_device *getKernelLayerBdev(const KernelLayer *layer) +{ + return layer->deviceConfig->ownedDevice->bdev; +} + /**********************************************************************/ void completeManyRequests(KernelLayer *layer, uint32_t count) { @@ -471,7 +478,7 @@ static int kvdoSynchronousRead(PhysicalLayer *layer, setBioOperationRead(bio); bio->bi_end_io = endSyncRead; bio->bi_private = &bioWait; - setBioBlockDevice(bio, kernelLayer->dev->bdev); + setBioBlockDevice(bio, getKernelLayerBdev(kernelLayer)); setBioSector(bio, blockToSector(kernelLayer, startBlock)); generic_make_request(bio); wait_for_completion(&bioWait); @@ -562,7 +569,7 @@ static void waitForSyncOperation(PhysicalLayer *common) * * @returns VDO_SUCCESS if bio set created, error code otherwise **/ -static int makeDedupeBioSet(KernelLayer *layer) +static int makeDedupeBioSet(KernelLayer *layer) { #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0) int result = ALLOCATE(1, struct bio_set, "bio set", &layer->bioset); @@ -572,7 +579,7 @@ static int makeDedupeBioSet(KernelLayer *layer) result = bioset_init(layer->bioset, 0, 0, BIOSET_NEED_BVECS); if (result != 0) { - return result; + return result; } #else #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0) @@ -584,14 +591,12 @@ static int makeDedupeBioSet(KernelLayer *layer) return -ENOMEM; } #endif - + return VDO_SUCCESS; } - + /**********************************************************************/ -int makeKernelLayer(BlockCount blockCount, - uint64_t startingSector, - struct dm_dev *dev, +int makeKernelLayer(uint64_t startingSector, unsigned int instance, DeviceConfig *config, struct kobject *parentKobject, @@ -612,7 +617,6 @@ int makeKernelLayer(BlockCount blockCount, int result = ALLOCATE(1, KernelLayer, "VDO configuration", &layer); if (result != UDS_SUCCESS) { *reason = "Cannot allocate VDO configuration"; - freeDeviceConfig(&config); return result; } @@ -625,7 +629,6 @@ int makeKernelLayer(BlockCount blockCount, result = allocateVDO(&layer->common, &layer->kvdo.vdo); if (result != VDO_SUCCESS) { *reason = "Cannot allocate VDO"; - freeDeviceConfig(&config); FREE(layer); return result; } @@ -637,7 +640,6 @@ int makeKernelLayer(BlockCount blockCount, result = kobject_add(&layer->kobj, parentKobject, config->poolName); if (result != 0) { *reason = "Cannot add sysfs node"; - freeDeviceConfig(&config); kobject_put(&layer->kobj); return result; } @@ -645,7 +647,6 @@ int makeKernelLayer(BlockCount blockCount, result = kobject_add(&layer->wqDirectory, &layer->kobj, "work_queues"); if (result != 0) { *reason = "Cannot add sysfs node"; - freeDeviceConfig(&config); kobject_put(&layer->wqDirectory); kobject_put(&layer->kobj); return result; @@ -668,16 +669,9 @@ int makeKernelLayer(BlockCount blockCount, int requestLimit = defaultMaxRequestsActive; initializeLimiter(&layer->requestLimiter, requestLimit); initializeLimiter(&layer->discardLimiter, requestLimit * 3 / 4); - layer->readCacheBlocks - = (config->readCacheEnabled - ? 
(requestLimit + config->readCacheExtraBlocks) : 0); layer->allocationsAllowed = true; - layer->dev = dev; - layer->blockCount = blockCount; layer->instance = instance; - layer->logicalBlockSize = config->logicalBlockSize; - layer->mdRaid5ModeEnabled = config->mdRaid5ModeEnabled; layer->deviceConfig = config; layer->startingSectorOffset = startingSector; @@ -826,10 +820,10 @@ int makeKernelLayer(BlockCount blockCount, } // KVIO and VIO pool - BUG_ON(layer->logicalBlockSize <= 0); + BUG_ON(layer->deviceConfig->logicalBlockSize <= 0); BUG_ON(layer->requestLimiter.limit <= 0); BUG_ON(layer->bioset == NULL); - BUG_ON(layer->dev == NULL); + BUG_ON(layer->deviceConfig->ownedDevice == NULL); result = makeDataKVIOBufferPool(layer, layer->requestLimiter.limit, &layer->dataKVIOPool); if (result != VDO_SUCCESS) { @@ -853,11 +847,15 @@ int makeKernelLayer(BlockCount blockCount, setKernelLayerState(layer, LAYER_REQUEST_QUEUE_INITIALIZED); - // Bio queue + // Bio queue and read cache + unsigned int readCacheBlocks + = (config->readCacheEnabled + ? (requestLimit + config->readCacheExtraBlocks) : 0); result = makeIOSubmitter(layer->threadNamePrefix, config->threadCounts.bioThreads, config->threadCounts.bioRotationInterval, layer->requestLimiter.limit, + readCacheBlocks, layer, &layer->ioSubmitter); if (result != VDO_SUCCESS) { @@ -901,62 +899,97 @@ int makeKernelLayer(BlockCount blockCount, } /**********************************************************************/ -int modifyKernelLayer(KernelLayer *layer, - struct dm_target *ti, - DeviceConfig *config, - char **why) +int prepareToModifyKernelLayer(KernelLayer *layer, + DeviceConfig *config, + char **errorPtr) { - if (ti->begin != layer->ti->begin) { - *why = "Starting sector cannot change"; + DeviceConfig *extantConfig = layer->deviceConfig; + if (config->owningTarget->begin != extantConfig->owningTarget->begin) { + *errorPtr = "Starting sector cannot change"; return VDO_PARAMETER_MISMATCH; } - DeviceConfig *extantConfig = layer->deviceConfig; - if (strcmp(config->parentDeviceName, extantConfig->parentDeviceName) != 0) { - *why = "Underlying device cannot change"; + *errorPtr = "Underlying device cannot change"; return VDO_PARAMETER_MISMATCH; } if (config->logicalBlockSize != extantConfig->logicalBlockSize) { - *why = "Logical block size cannot change"; + *errorPtr = "Logical block size cannot change"; return VDO_PARAMETER_MISMATCH; } if (config->cacheSize != extantConfig->cacheSize) { - *why = "Block map cache size cannot change"; + *errorPtr = "Block map cache size cannot change"; return VDO_PARAMETER_MISMATCH; } if (config->blockMapMaximumAge != extantConfig->blockMapMaximumAge) { - *why = "Block map maximum age cannot change"; + *errorPtr = "Block map maximum age cannot change"; return VDO_PARAMETER_MISMATCH; } if (config->mdRaid5ModeEnabled != extantConfig->mdRaid5ModeEnabled) { - *why = "mdRaid5Mode cannot change"; + *errorPtr = "mdRaid5Mode cannot change"; return VDO_PARAMETER_MISMATCH; } if (config->readCacheEnabled != extantConfig->readCacheEnabled) { - *why = "Read cache enabled cannot change"; + *errorPtr = "Read cache enabled cannot change"; return VDO_PARAMETER_MISMATCH; } if (config->readCacheExtraBlocks != extantConfig->readCacheExtraBlocks) { - *why = "Read cache size cannot change"; + *errorPtr = "Read cache size cannot change"; return VDO_PARAMETER_MISMATCH; } if (strcmp(config->threadConfigString, extantConfig->threadConfigString) != 0) { - *why = "Thread configuration cannot change"; + *errorPtr = "Thread configuration cannot change"; 
     return VDO_PARAMETER_MISMATCH;
   }
 
   // Below here are the actions to take when a non-immutable property changes.
 
-  if (config->writePolicy != layer->deviceConfig->writePolicy) {
+  if (config->writePolicy != extantConfig->writePolicy) {
+    // Nothing needs doing right now for a write policy change.
+  }
+
+  if (config->owningTarget->len != extantConfig->owningTarget->len) {
+    size_t logicalBytes = to_bytes(config->owningTarget->len);
+    if ((logicalBytes % VDO_BLOCK_SIZE) != 0) {
+      *errorPtr = "Logical size must be a multiple of 4096";
+      return VDO_PARAMETER_MISMATCH;
+    }
+
+    int result = prepareToResizeLogical(layer, logicalBytes / VDO_BLOCK_SIZE);
+    if (result != VDO_SUCCESS) {
+      *errorPtr = "Device prepareToGrowLogical failed";
+      return result;
+    }
+  }
+
+  if (config->physicalBlocks != extantConfig->physicalBlocks) {
+    int result = prepareToResizePhysical(layer, config->physicalBlocks);
+    if (result != VDO_SUCCESS) {
+      *errorPtr = "Device prepareToGrowPhysical failed";
+      return result;
+    }
+  }
+
+  return VDO_SUCCESS;
+}
+
+/**********************************************************************/
+int modifyKernelLayer(KernelLayer *layer,
+                      DeviceConfig *config)
+{
+  DeviceConfig *extantConfig = layer->deviceConfig;
+
+  // A failure here is unrecoverable, so there is no point in trying to roll
+  // back any partial change if one happens.
+
+  if (config->writePolicy != extantConfig->writePolicy) {
     /*
      * Ordinarily, when going from async to sync, we must flush any metadata
      * written. However, because the underlying storage must have gone into
@@ -965,33 +998,26 @@ int modifyKernelLayer(KernelLayer *layer,
      * by the suspend and all metadata between the suspend and the write
      * policy change is written to synchronous storage.
      */
-    if (getKernelLayerState(layer) != LAYER_SUSPENDED) {
-      *why = "Device must be suspended before changing write policy";
-      return VDO_COMPONENT_BUSY;
-    }
-
     logInfo("Modifying device '%s' write policy from %s to %s",
-            config->poolName, getConfigWritePolicyString(layer->deviceConfig),
+            config->poolName, getConfigWritePolicyString(extantConfig),
             getConfigWritePolicyString(config));
     setWritePolicy(layer->kvdo.vdo, config->writePolicy);
-    return VDO_SUCCESS;
   }
 
-  if (ti->len != layer->ti->len) {
-    if (getKernelLayerState(layer) != LAYER_SUSPENDED) {
-      *why = "Device must be suspended before changing logical size";
-      return VDO_COMPONENT_BUSY;
-    }
-
-    size_t logicalBytes = to_bytes(ti->len);
-    if ((logicalBytes % VDO_BLOCK_SIZE) != 0) {
-      *why = "Logical size must be a multiple of 4096";
-      return VDO_PARAMETER_MISMATCH;
+  if (config->owningTarget->len != extantConfig->owningTarget->len) {
+    size_t logicalBytes = to_bytes(config->owningTarget->len);
+    int result = resizeLogical(layer, logicalBytes / VDO_BLOCK_SIZE);
+    if (result != VDO_SUCCESS) {
+      return result;
    }
+  }
 
-    int result = resizeLogical(layer, logicalBytes / VDO_BLOCK_SIZE);
+  // Also grow physical when the config version is 0, since we can't tell
+  // whether we got an old-style growPhysical command or the size changed.
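(Editor's illustration, not taken from the patch.) The logical-size handling above converts the device-mapper target length, which dm expresses in 512-byte sectors, into bytes via to_bytes() and then into 4 KB VDO blocks, rejecting any length that is not a multiple of 4096. A self-contained sketch of that arithmetic, with hypothetical names (SECTOR_SIZE, logical_blocks_from_sectors):

#include <stdint.h>
#include <stdio.h>

#define SECTOR_SIZE    512   /* device-mapper target lengths are in 512-byte sectors */
#define VDO_BLOCK_SIZE 4096  /* per the "multiple of 4096" check above */

/* Convert a dm sector count to 4 KB logical blocks, rejecting unaligned sizes. */
static int logical_blocks_from_sectors(uint64_t sectors, uint64_t *blocks)
{
  uint64_t bytes = sectors * SECTOR_SIZE;
  if ((bytes % VDO_BLOCK_SIZE) != 0) {
    return -1;  /* "Logical size must be a multiple of 4096" */
  }
  *blocks = bytes / VDO_BLOCK_SIZE;
  return 0;
}

int main(void)
{
  uint64_t blocks;

  /* 20971520 sectors = 10 GiB = 2621440 logical 4 KB blocks. */
  if (logical_blocks_from_sectors(20971520, &blocks) == 0) {
    printf("%llu blocks\n", (unsigned long long) blocks);
  }

  /* 9 sectors = 4608 bytes, which is not 4 KB aligned, so it is rejected. */
  if (logical_blocks_from_sectors(9, &blocks) != 0) {
    printf("rejected: not a multiple of 4096\n");
  }
  return 0;
}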
+ if ((config->physicalBlocks != extantConfig->physicalBlocks) + || (config->version == 0)) { + int result = resizePhysical(layer, config->physicalBlocks); if (result != VDO_SUCCESS) { - *why = "Device growLogical failed"; return result; } } @@ -1094,7 +1120,6 @@ void freeKernelLayer(KernelLayer *layer) } freeDedupeIndex(&layer->dedupeIndex); - freeDeviceConfig(&layer->deviceConfig); stopPeriodicEventReporter(&layer->albireoTimeoutReporter); if (releaseInstance) { @@ -1247,7 +1272,7 @@ int prepareToResizePhysical(KernelLayer *layer, BlockCount physicalCount) // If we don't trap this case, mapToSystemError() will remap it to -EIO, // which is misleading and ahistorical. return -EINVAL; - } else { + } else { return result; } } @@ -1266,8 +1291,6 @@ int resizePhysical(KernelLayer *layer, BlockCount physicalCount) // kvdoResizePhysical logs errors return result; } - - layer->blockCount = physicalCount; return VDO_SUCCESS; } diff --git a/vdo/kernel/kernelLayer.h b/vdo/kernel/kernelLayer.h index d8b380ee..3ffc7b71 100644 --- a/vdo/kernel/kernelLayer.h +++ b/vdo/kernel/kernelLayer.h @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/kernelLayer.h#4 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/kernelLayer.h#10 $ */ #ifndef KERNELLAYER_H @@ -104,6 +104,7 @@ struct kernelLayer { PhysicalLayer common; // Layer specific info DeviceConfig *deviceConfig; + unsigned int configReferences; char threadNamePrefix[MAX_QUEUE_NAME_LEN]; struct kobject kobj; struct kobject wqDirectory; @@ -116,11 +117,6 @@ struct kernelLayer { /** Contains the current KernelLayerState, which rarely changes */ Atomic32 state; bool allocationsAllowed; - /** The current dm target for this layer */ - struct dm_target *ti; - /** The previous dm target and device for this layer */ - struct dm_target *oldTI; - struct dm_dev *oldDev; AtomicBool processingMessage; /** Limit the number of requests that are being processed. */ @@ -139,7 +135,6 @@ struct kernelLayer { * during suspend. **/ struct completion flushWait; - bool mdRaid5ModeEnabled; /** * Bio submission manager used for sending bios to the storage * device. @@ -156,13 +151,8 @@ struct kernelLayer { /** Optional work queue for calling bio_endio. */ KvdoWorkQueue *bioAckQueue; /** Underlying block device info. */ - struct dm_dev *dev; - BlockCount blockCount; - BlockSize logicalBlockSize; uint64_t startingSectorOffset; VolumeGeometry geometry; - // Read cache - unsigned int readCacheBlocks; // Memory allocation BufferPool *dataKVIOPool; struct bio_set *bioset; @@ -256,10 +246,8 @@ typedef struct kvdoEnqueueable { /** * Creates a kernel specific physical layer to be used by VDO * - * @param blockCount The number of blocks supported by the layer * @param startingSector The sector offset of our table entry in the * DM device - * @param dev The underlying device * @param instance Device instantiation counter * @param parentKobject The parent sysfs node * @param config The device configuration @@ -269,9 +257,7 @@ typedef struct kvdoEnqueueable { * * @return VDO_SUCCESS or an error **/ -int makeKernelLayer(BlockCount blockCount, - uint64_t startingSector, - struct dm_dev *dev, +int makeKernelLayer(uint64_t startingSector, unsigned int instance, DeviceConfig *config, struct kobject *parentKobject, @@ -281,19 +267,29 @@ int makeKernelLayer(BlockCount blockCount, __attribute__((warn_unused_result)); /** - * Modify a kernel physical layer. 
+ * Prepare to modify a kernel layer. * * @param layer The layer to modify - * @param ti The new dm_target structure - * @param config The device configuration - * @param why The reason for any failure during this call + * @param config The new device configuration + * @param errorPtr A pointer to store the reason for any failure + * + * @return VDO_SUCCESS or an error + **/ +int prepareToModifyKernelLayer(KernelLayer *layer, + DeviceConfig *config, + char **errorPtr) + __attribute__((warn_unused_result)); + +/** + * Modify a kernel physical layer. + * + * @param layer The layer to modify + * @param config The new device configuration * * @return VDO_SUCCESS or an error **/ int modifyKernelLayer(KernelLayer *layer, - struct dm_target *ti, - DeviceConfig *config, - char **why) + DeviceConfig *config) __attribute__((warn_unused_result)); /** @@ -431,6 +427,54 @@ static inline BlockSize sectorToBlockOffset(KernelLayer *layer, return to_bytes(sectorNumber & sectorsPerBlockMask); } +/** + * Get the block device object currently underlying a kernel layer. + * + * @param layer The kernel layer in question + * + * @return The block device object under the layer + **/ +struct block_device *getKernelLayerBdev(const KernelLayer *layer) + __attribute__((warn_unused_result)); + +/** + * Acquire a reference from the config to the kernel layer. + * + * @param layer The kernel layer in question + * @param config The config in question + **/ +static inline void acquireKernelLayerReference(KernelLayer *layer, + DeviceConfig *config) +{ + layer->configReferences++; + config->layer = layer; +} + +/** + * Release a reference from the config to its kernel layer. + * + * @param layer The kernel layer in question + * @param config The config in question + **/ +static inline void releaseKernelLayerReference(KernelLayer *layer, + DeviceConfig *config) +{ + config->layer = NULL; + layer->configReferences--; +} + +/** + * Set the layer's active config. + * + * @param layer The kernel layer in question + * @param config The config in question + **/ +static inline void setKernelLayerActiveConfig(KernelLayer *layer, + DeviceConfig *config) +{ + layer->deviceConfig = config; +} + /** * Given an error code, return a value we can return to the OS. The * input error code may be a system-generated value (such as -EIO), an diff --git a/vdo/kernel/kvdoFlush.c b/vdo/kernel/kvdoFlush.c index 13684d87..1a76c2c1 100644 --- a/vdo/kernel/kvdoFlush.c +++ b/vdo/kernel/kvdoFlush.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/kvdoFlush.c#2 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/kvdoFlush.c#3 $ */ #include "kvdoFlush.h" @@ -195,7 +195,8 @@ static void kvdoCompleteFlushWork(KvdoWorkItem *item) countBios(&layer->biosAcknowledged, bio); // Make sure the bio is a empty flush bio. 
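(Editor's illustration, not taken from the patch.) getKernelLayerBdev(), declared above and used in the kvdoFlush.c hunks that follow, replaces cached layer->dev->bdev lookups with a read through the layer's active DeviceConfig, so swapping in a new config at resume time retargets every I/O path at once. A simplified user-space sketch of that accessor pattern, using hypothetical stand-in types:

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures involved. */
struct block_device  { const char *name; };
struct device_config { struct block_device *owned_device; };
struct kernel_layer  { struct device_config *device_config; };

/* Roughly what getKernelLayerBdev() does: dereference through the currently
 * active config on every call instead of caching the block device pointer. */
static struct block_device *layer_bdev(const struct kernel_layer *layer)
{
  return layer->device_config->owned_device;
}

int main(void)
{
  struct block_device sda = { "sda" };
  struct block_device sdb = { "sdb" };
  struct device_config old_config = { &sda };
  struct device_config new_config = { &sdb };
  struct kernel_layer layer = { &old_config };

  printf("flushes and metadata I/O go to %s\n", layer_bdev(&layer)->name);

  /* What setKernelLayerActiveConfig() amounts to: swap the active config,
   * and every later caller of the accessor follows the new device. */
  layer.device_config = &new_config;
  printf("after the swap they go to %s\n", layer_bdev(&layer)->name);
  return 0;
}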
- prepareFlushBIO(bio, bio->bi_private, layer->dev->bdev, bio->bi_end_io); + prepareFlushBIO(bio, bio->bi_private, getKernelLayerBdev(layer), + bio->bi_end_io); atomic64_inc(&layer->flushOut); generic_make_request(bio); } @@ -262,7 +263,7 @@ int synchronousFlush(KernelLayer *layer) } init_completion(&layer->flushWait); - prepareFlushBIO(bio, layer, layer->dev->bdev, endSynchronousFlush); + prepareFlushBIO(bio, layer, getKernelLayerBdev(layer), endSynchronousFlush); bio->bi_next = NULL; generic_make_request(bio); wait_for_completion(&layer->flushWait); diff --git a/vdo/kernel/kvio.c b/vdo/kernel/kvio.c index f51a6a47..289e705d 100644 --- a/vdo/kernel/kvio.c +++ b/vdo/kernel/kvio.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/kvio.c#2 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/kvio.c#3 $ */ #include "kvio.h" @@ -199,7 +199,7 @@ void kvdoFlushVIO(VIO *vio) BIO *bio = kvio->bio; KernelLayer *layer = kvio->layer; resetBio(bio, layer); - prepareFlushBIO(bio, kvio, layer->dev->bdev, completeFlushBio); + prepareFlushBIO(bio, kvio, getKernelLayerBdev(layer), completeFlushBio); submitBio(bio, getMetadataAction(vio)); } diff --git a/vdo/kernel/statusProcfs.c b/vdo/kernel/statusProcfs.c index d3979c78..6aaebb8e 100644 --- a/vdo/kernel/statusProcfs.c +++ b/vdo/kernel/statusProcfs.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/statusProcfs.c#2 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/statusProcfs.c#3 $ * * Proc filesystem interface to the old GET_DEDUPE_STATS and * GET_KERNEL_STATS ioctls, which can no longer be supported in 4.4 @@ -124,7 +124,7 @@ void getKernelStats(KernelLayer *layer, KernelStatistics *stats) stats->dedupeAdviceTimeouts = (getEventCount(&layer->albireoTimeoutReporter) + atomic64_read(&layer->dedupeContextBusy)); stats->flushOut = atomic64_read(&layer->flushOut); - stats->logicalBlockSize = layer->logicalBlockSize; + stats->logicalBlockSize = layer->deviceConfig->logicalBlockSize; copyBioStat(&stats->biosIn, &layer->biosIn); copyBioStat(&stats->biosInPartial, &layer->biosInPartial); copyBioStat(&stats->biosOut, &layer->biosOut); diff --git a/vdo/kernel/udsIndex.c b/vdo/kernel/udsIndex.c index 8f9932a0..8223824c 100644 --- a/vdo/kernel/udsIndex.c +++ b/vdo/kernel/udsIndex.c @@ -31,7 +31,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/udsIndex.c#7 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/udsIndex.c#9 $ */ #include "udsIndex.h" @@ -378,7 +378,7 @@ static void closeSession(UDSIndex *index) } spin_lock(&index->stateLock); index->indexState = IS_CLOSED; - index->errorFlag = result != UDS_SUCCESS; + index->errorFlag |= result != UDS_SUCCESS; // ASSERTION: We leave in IS_CLOSED state. } @@ -401,7 +401,7 @@ static void openContext(UDSIndex *index) } else { index->indexTarget = IS_CLOSED; index->errorFlag = true; - // ASSERTION: On failure, we leave in IS_CLOSED state. + // ASSERTION: On failure, we leave in IS_INDEXSESSION state. 
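(Editor's illustration, not taken from the patch.) The closeSession() change above switches errorFlag from plain assignment to |=, making the flag sticky: a close that succeeds can no longer hide an error recorded earlier in the open/close cycle. A tiny sketch of the difference, using hypothetical variable names:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
  /* Both flags were set by an earlier failure in the session. */
  bool old_style_flag = true;
  bool new_style_flag = true;

  /* This particular close succeeded (0 standing in for UDS_SUCCESS). */
  int close_result = 0;

  old_style_flag  = (close_result != 0);  /* old code: the earlier error is lost    */
  new_style_flag |= (close_result != 0);  /* patched code: the error stays recorded */

  printf("plain assignment: %d, sticky |= flag: %d\n",
         old_style_flag, new_style_flag);
  return 0;
}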
} } @@ -437,6 +437,11 @@ static void openSession(UDSIndex *index) if (result != UDS_SUCCESS) { logErrorWithStringError(result, "Error reading configuration for %s", index->indexName); + int closeResult = udsCloseIndexSession(index->indexSession); + if (closeResult != UDS_SUCCESS) { + logErrorWithStringError(closeResult, "Error closing index %s", + index->indexName); + } } else { if (udsConfigurationGetNonce(index->configuration) != udsConfigurationGetNonce(configuration)) { @@ -758,7 +763,7 @@ int makeUDSIndex(KernelLayer *layer, DedupeIndex **indexPtr) { UDSIndex *index; int result = ALLOCATE(1, UDSIndex, "UDS index data", &index); - if (result != VDO_SUCCESS) { + if (result != UDS_SUCCESS) { return result; } @@ -766,12 +771,22 @@ int makeUDSIndex(KernelLayer *layer, DedupeIndex **indexPtr) "dev=%s offset=4096 size=%" PRIu64, layer->deviceConfig->parentDeviceName, getIndexRegionSize(layer->geometry) * VDO_BLOCK_SIZE); - if (result < 0) { + if (result != UDS_SUCCESS) { logError("Creating index name failed (%d)", result); FREE(index); return result; } + result = indexConfigToUdsConfiguration(&layer->geometry.indexConfig, + &index->configuration); + if (result != VDO_SUCCESS) { + FREE(index->indexName); + FREE(index); + return result; + } + udsConfigurationSetNonce(index->configuration, + (UdsNonce) layer->geometry.nonce); + static const KvdoWorkQueueType udsQueueType = { .start = startUDSQueue, .finish = finishUDSQueue, @@ -782,29 +797,19 @@ int makeUDSIndex(KernelLayer *layer, DedupeIndex **indexPtr) result = makeWorkQueue(layer->threadNamePrefix, "dedupeQ", &layer->wqDirectory, layer, index, &udsQueueType, 1, &index->udsQueue); - if (result < 0) { - logError("UDS index queue initialization failed (%d)", result); - FREE(index->indexName); - FREE(index); - return result; - } - - result = indexConfigToUdsConfiguration(&layer->geometry.indexConfig, - &index->configuration); if (result != VDO_SUCCESS) { - freeWorkQueue(&index->udsQueue); + logError("UDS index queue initialization failed (%d)", result); + udsFreeConfiguration(index->configuration); FREE(index->indexName); FREE(index); return result; } - udsConfigurationSetNonce(index->configuration, - (UdsNonce) layer->geometry.nonce); kobject_init(&index->dedupeObject, &dedupeKobjType); result = kobject_add(&index->dedupeObject, &layer->kobj, "dedupe"); if (result != VDO_SUCCESS) { - udsFreeConfiguration(index->configuration); freeWorkQueue(&index->udsQueue); + udsFreeConfiguration(index->configuration); FREE(index->indexName); FREE(index); return result; diff --git a/vdo/kernel/workQueue.c b/vdo/kernel/workQueue.c index 5b527cb4..1963eac6 100644 --- a/vdo/kernel/workQueue.c +++ b/vdo/kernel/workQueue.c @@ -16,7 +16,7 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * - * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/workQueue.c#8 $ + * $Id: //eng/vdo-releases/aluminum/src/c++/vdo/kernel/workQueue.c#9 $ */ #include "workQueue.h" @@ -975,6 +975,8 @@ void freeWorkQueue(KvdoWorkQueue **queuePtr) } *queuePtr = NULL; + finishWorkQueue(queue); + if (queue->roundRobinMode) { freeRoundRobinWorkQueue(asRoundRobinWorkQueue(queue)); } else {