SERVER-114536 Remove writeConflictRetry in getTagsFromStorage (#45213)

GitOrigin-RevId: 4e9dbddadb41b926de7bb5b30d95e2d5f812f97f
Gabriel Marks 2025-12-16 10:34:49 -05:00 committed by MongoDB Bot
parent e62f34cd30
commit 627ca12e79
1 changed file with 46 additions and 66 deletions


@@ -529,86 +529,66 @@ std::vector<std::vector<FLEEdgeCountInfo>> getTagsFromStorage(
     const NamespaceStringOrUUID& nsOrUUID,
     const std::vector<std::vector<FLEEdgePrfBlock>>& escDerivedFromDataTokens,
     FLETagQueryInterface::TagQueryType type) {
-    auto opStr = "getTagsFromStorage"_sd;
-#ifdef MONGO_CONFIG_DEBUG_BUILD
-    // TODO SERVER-114536: If a lock-free read is active and we enter this function the operation
-    // could force us to abandon the snapshot opened. This could cause the acquisition held above
-    // this call to become invalid by accident. We disable the checks temporarily until a more
-    // permanent fix is in place.
-    DisableCollectionConsistencyChecks disableChecks{opCtx};
-#endif
-    return writeConflictRetry(
-        opCtx, opStr, nsOrUUID, [&]() -> std::vector<std::vector<FLEEdgeCountInfo>> {
-            const auto collectionAcquisition = acquireCollectionMaybeLockFree(
-                opCtx,
-                CollectionAcquisitionRequest::fromOpCtx(
-                    opCtx, nsOrUUID, AcquisitionPrerequisites::kRead));
-            const auto& collectionPtr = collectionAcquisition.getCollectionPtr();
-
-            // If there is no collection, run through the algorithm with a special reader that only
-            // returns empty documents. This simplifies the implementation of other readers.
-            if (!collectionAcquisition.exists()) {
-                MissingCollectionReader reader;
-                return ESCCollection::getTags(reader, escDerivedFromDataTokens, type);
-            }
-
-            // numRecords is signed so guard against negative numbers
-            auto docCountSigned = collectionPtr->numRecords(opCtx);
-            uint64_t docCount = docCountSigned < 0 ? 0 : static_cast<uint64_t>(docCountSigned);
-
-            std::unique_ptr<SeekableRecordCursor> cursor = collectionPtr->getCursor(opCtx, true);
-
-            // If clustered collection, we have simpler searches
-            if (collectionPtr->isClustered() &&
-                collectionPtr->getClusteredInfo()
-                        ->getIndexSpec()
-                        .getKey()
-                        .firstElement()
-                        .fieldNameStringData() == "_id"_sd) {
-                StorageEngineClusteredCollectionReader reader(
-                    opCtx, docCount, nsOrUUID, cursor.get());
-
-                return ESCCollection::getTags(reader, escDerivedFromDataTokens, type);
-            }
-
-            // Non-clustered case, we need to look a index entry in _id index and then the
-            // collection
-            auto indexCatalog = collectionPtr->getIndexCatalog();
-
-            const auto indexEntry = indexCatalog->findIndexByName(
-                opCtx, IndexConstants::kIdIndexName, IndexCatalog::InclusionPolicy::kReady);
-            if (!indexEntry) {
-                uasserted(ErrorCodes::IndexNotFound,
-                          str::stream() << "Index not found, ns:" << toStringForLogging(nsOrUUID)
-                                        << ", index: " << IndexConstants::kIdIndexName);
-            }
-
-            if (indexEntry->descriptor()->isPartial()) {
-                uasserted(ErrorCodes::IndexOptionsConflict,
-                          str::stream() << "Partial index is not allowed for this operation, ns:"
-                                        << toStringForLogging(nsOrUUID)
-                                        << ", index: " << IndexConstants::kIdIndexName);
-            }
-
-            auto indexCatalogEntry = indexEntry->shared_from_this();
-
-            auto sdi = indexCatalogEntry->accessMethod()->asSortedData();
-            auto indexCursor =
-                sdi->newCursor(opCtx, *shard_role_details::getRecoveryUnit(opCtx), true);
-
-            StorageEngineIndexCollectionReader reader(opCtx,
-                                                      docCount,
-                                                      nsOrUUID,
-                                                      cursor.get(),
-                                                      sdi->getSortedDataInterface(),
-                                                      indexCursor.get());
-
-            return ESCCollection::getTags(reader, escDerivedFromDataTokens, type);
-        });
+    const auto collectionAcquisition = acquireCollectionMaybeLockFree(
+        opCtx,
+        CollectionAcquisitionRequest::fromOpCtx(opCtx, nsOrUUID, AcquisitionPrerequisites::kRead));
+    const auto& collectionPtr = collectionAcquisition.getCollectionPtr();
+
+    // If there is no collection, run through the algorithm with a special reader that only
+    // returns empty documents. This simplifies the implementation of other readers.
+    if (!collectionAcquisition.exists()) {
+        MissingCollectionReader reader;
+        return ESCCollection::getTags(reader, escDerivedFromDataTokens, type);
+    }
+
+    // numRecords is signed so guard against negative numbers
+    auto docCountSigned = collectionPtr->numRecords(opCtx);
+    uint64_t docCount = docCountSigned < 0 ? 0 : static_cast<uint64_t>(docCountSigned);
+
+    std::unique_ptr<SeekableRecordCursor> cursor = collectionPtr->getCursor(opCtx, true);
+
+    // If clustered collection, we have simpler searches
+    if (collectionPtr->isClustered() &&
+        collectionPtr->getClusteredInfo()
+                ->getIndexSpec()
+                .getKey()
+                .firstElement()
+                .fieldNameStringData() == "_id"_sd) {
+        StorageEngineClusteredCollectionReader reader(opCtx, docCount, nsOrUUID, cursor.get());
+        return ESCCollection::getTags(reader, escDerivedFromDataTokens, type);
+    }
+
+    // Non-clustered case, we need to look a index entry in _id index and then the
+    // collection
+    auto indexCatalog = collectionPtr->getIndexCatalog();
+
+    const auto indexEntry = indexCatalog->findIndexByName(
+        opCtx, IndexConstants::kIdIndexName, IndexCatalog::InclusionPolicy::kReady);
+    if (!indexEntry) {
+        uasserted(ErrorCodes::IndexNotFound,
+                  str::stream() << "Index not found, ns:" << toStringForLogging(nsOrUUID)
+                                << ", index: " << IndexConstants::kIdIndexName);
+    }
+
+    if (indexEntry->descriptor()->isPartial()) {
+        uasserted(ErrorCodes::IndexOptionsConflict,
+                  str::stream() << "Partial index is not allowed for this operation, ns:"
+                                << toStringForLogging(nsOrUUID)
+                                << ", index: " << IndexConstants::kIdIndexName);
+    }
+
+    auto indexCatalogEntry = indexEntry->shared_from_this();
+
+    auto sdi = indexCatalogEntry->accessMethod()->asSortedData();
+    auto indexCursor = sdi->newCursor(opCtx, *shard_role_details::getRecoveryUnit(opCtx), true);
+
+    StorageEngineIndexCollectionReader reader(
+        opCtx, docCount, nsOrUUID, cursor.get(), sdi->getSortedDataInterface(), indexCursor.get());
+
+    return ESCCollection::getTags(reader, escDerivedFromDataTokens, type);
 }
 }  // namespace mongo
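
For context: writeConflictRetry is the server helper that re-runs a callback whenever it throws a write-conflict exception, and the hunk above drops that wrapper from getTagsFromStorage, acquiring the collection directly instead. The sketch below only illustrates the general retry-on-conflict shape being removed; writeConflictRetrySketch, WriteConflictExceptionStub, and the sample body are hypothetical stand-ins, not the server's actual implementation.

// Minimal sketch of the retry-on-conflict pattern, for illustration only.
// Everything here is a stand-in; it is not MongoDB's writeConflictRetry.
#include <exception>
#include <iostream>
#include <string>

struct WriteConflictExceptionStub : std::exception {};  // hypothetical stand-in type

// Re-run `body` until it finishes without throwing the (stubbed) write conflict.
template <typename F>
auto writeConflictRetrySketch(const std::string& opName, F&& body) {
    while (true) {
        try {
            return body();
        } catch (const WriteConflictExceptionStub&) {
            std::cerr << "retrying " << opName << " after write conflict\n";
        }
    }
}

int main() {
    // A body that never throws the stub exception runs exactly once, so the
    // wrapper is just an extra layer around this read-only call.
    int tags = writeConflictRetrySketch("getTagsFromStorage", [] { return 3; });
    std::cout << "tag count: " << tags << '\n';
    return 0;
}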