From e62f34cd30bb8346ccbf8304d36bd290821800b1 Mon Sep 17 00:00:00 2001 From: Igor Praznik <95708172+igpraznik@users.noreply.github.com> Date: Tue, 16 Dec 2025 16:08:05 +0100 Subject: [PATCH] SERVER-90141 Improve ChunkManager API to prevent unsafe method calls on point-in-time instances (#44238) GitOrigin-RevId: 256d62978cc39e7836d01653e459d88f8ad48eaf --- src/mongo/db/bulk_write_shard_test.cpp | 3 +- .../query_cmd/aggregation_execution_state.cpp | 25 ++- .../aggregation_execution_state_test.cpp | 6 +- .../query_shard_server_test_fixture.cpp | 2 +- src/mongo/db/global_catalog/chunk_manager.cpp | 10 +- src/mongo/db/global_catalog/chunk_manager.h | 157 ++++++++++++------ .../chunk_manager_query_test.cpp | 20 +-- .../chunk_manager_refresh_bm.cpp | 11 +- .../ddl/create_collection_coordinator.cpp | 2 +- .../ddl/drop_indexes_coordinator.cpp | 2 +- .../metadata_consistency_util_test.cpp | 3 +- src/mongo/db/pipeline/pipeline_test.cpp | 10 +- .../multiple_collection_accessor_test.cpp | 3 +- .../timeseries/timeseries_translation.cpp | 3 +- .../collection_routing_info_targeter_test.cpp | 11 +- src/mongo/db/router_role/router_role.cpp | 25 +-- .../routing_cache/catalog_cache.cpp | 48 ++++-- .../router_role/routing_cache/catalog_cache.h | 139 +++++++++++++++- .../routing_cache/catalog_cache_mock.cpp | 11 +- .../routing_cache/catalog_cache_mock.h | 2 +- .../db/router_role/sharding_write_router.cpp | 2 +- .../router_role/sharding_write_router_bm.cpp | 29 ++-- ...hard_key_read_write_distribution_stage.cpp | 5 +- ...shard_key_read_write_distribution_test.cpp | 5 +- .../db/s/balancer/balance_stats_test.cpp | 5 +- .../db/s/balancer/balancer_policy_test.cpp | 5 +- ...ration_chunk_cloner_source_op_observer.cpp | 2 +- .../s/migration_chunk_cloner_source_test.cpp | 2 +- src/mongo/db/s/range_deleter_service_test.cpp | 28 ++-- src/mongo/db/s/range_deletion_util_test.cpp | 2 +- .../resharding_collection_cloner_test.cpp | 4 +- .../resharding_data_replication_test.cpp | 3 +- ...resharding_donor_recipient_common_test.cpp | 27 ++- .../resharding_oplog_applier_test.cpp | 2 +- .../resharding_oplog_batch_applier_test.cpp | 3 +- ...resharding_oplog_crud_application_test.cpp | 17 +- .../resharding_recipient_service_test.cpp | 5 +- .../shard_role/post_resharding_placement.cpp | 3 +- .../shard_catalog/collection_metadata.cpp | 21 ++- .../shard_catalog/collection_metadata.h | 79 ++++++--- .../collection_metadata_filtering_test.cpp | 2 +- .../collection_metadata_test.cpp | 37 +++-- .../collection_sharding_runtime_test.cpp | 27 ++- .../shard_catalog/metadata_manager.cpp | 7 +- .../shard_catalog/metadata_manager_test.cpp | 9 +- .../op_observer_sharding_test.cpp | 3 +- src/mongo/db/shard_role/shard_role_test.cpp | 3 +- .../cluster_analyze_shard_key_cmd.cpp | 2 +- .../s/query/exec/establish_cursors_test.cpp | 7 +- .../s/query/shard_key_pattern_query_util.cpp | 7 +- .../write_op_analyzer_test.cpp | 7 +- 51 files changed, 548 insertions(+), 305 deletions(-) diff --git a/src/mongo/db/bulk_write_shard_test.cpp b/src/mongo/db/bulk_write_shard_test.cpp index ed399e52771..b51ddc3accc 100644 --- a/src/mongo/db/bulk_write_shard_test.cpp +++ b/src/mongo/db/bulk_write_shard_test.cpp @@ -176,8 +176,7 @@ void installShardedCollectionMetadata(OperationContext* opCtx, RoutingTableHistoryValueHandle(std::make_shared(std::move(rt)), ComparableChunkVersion::makeComparableChunkVersion(version)); - const auto collectionMetadata = - CollectionMetadata(ChunkManager(rtHandle, boost::none), thisShardId); + const auto collectionMetadata = 
CollectionMetadata(CurrentChunkManager(rtHandle), thisShardId); AutoGetCollection coll(opCtx, nss, MODE_IX); CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss) diff --git a/src/mongo/db/commands/query_cmd/aggregation_execution_state.cpp b/src/mongo/db/commands/query_cmd/aggregation_execution_state.cpp index c73d7ff8653..dad26937968 100644 --- a/src/mongo/db/commands/query_cmd/aggregation_execution_state.cpp +++ b/src/mongo/db/commands/query_cmd/aggregation_execution_state.cpp @@ -485,17 +485,26 @@ bool AggExState::canReadUnderlyingCollectionLocally(const CollectionRoutingInfo& const auto myShardId = ShardingState::get(_opCtx)->shardId(); const auto atClusterTime = repl::ReadConcernArgs::get(_opCtx).getArgsAtClusterTime(); - const auto chunkManagerMaybeAtClusterTime = atClusterTime - ? ChunkManager::makeAtTime(cri.getChunkManager(), atClusterTime->asTimestamp()) - : cri.getChunkManager(); + auto isNssLocalFunc = [&](const auto& cm) { + if (cm.isSharded()) { + return false; + } else if (cm.isUnsplittable()) { + return cm.getMinKeyShardIdWithSimpleCollation() == myShardId; + } else { + return cri.getDbPrimaryShardId() == myShardId; + } + }; - if (chunkManagerMaybeAtClusterTime.isSharded()) { - return false; - } else if (chunkManagerMaybeAtClusterTime.isUnsplittable()) { - return chunkManagerMaybeAtClusterTime.getMinKeyShardIdWithSimpleCollation() == myShardId; + bool isNssLocal; + if (atClusterTime) { + auto pitChunkManager = + PointInTimeChunkManager::make(cri.getChunkManager(), atClusterTime->asTimestamp()); + isNssLocal = isNssLocalFunc(pitChunkManager); } else { - return cri.getDbPrimaryShardId() == myShardId; + isNssLocal = isNssLocalFunc(cri.getChunkManager()); } + + return isNssLocal; } Status AggExState::collatorCompatibleWithPipeline(const CollatorInterface* collator) const { diff --git a/src/mongo/db/commands/query_cmd/aggregation_execution_state_test.cpp b/src/mongo/db/commands/query_cmd/aggregation_execution_state_test.cpp index 75c6bd0bc42..172d0048c05 100644 --- a/src/mongo/db/commands/query_cmd/aggregation_execution_state_test.cpp +++ b/src/mongo/db/commands/query_cmd/aggregation_execution_state_test.cpp @@ -79,8 +79,8 @@ protected: CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss) ->setFilteringMetadata(opCtx, CollectionMetadata::UNTRACKED()); - auto cm = ChunkManager(RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}}, - _dbVersion.getTimestamp()); + PointInTimeChunkManager cm(RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}}, + _dbVersion.getTimestamp()); getCatalogCacheMock()->setCollectionReturnValue( nss, CollectionRoutingInfo( @@ -132,7 +132,7 @@ protected: std::make_shared(std::move(rt)), ComparableChunkVersion::makeComparableChunkVersion(version)); - auto cm = ChunkManager(rtHandle, boost::none); + CurrentChunkManager cm(rtHandle); const auto collectionMetadata = CollectionMetadata(cm, shardName); AutoGetCollection coll(opCtx, NamespaceStringOrUUID(nss), MODE_IX); diff --git a/src/mongo/db/exec/classic/query_shard_server_test_fixture.cpp b/src/mongo/db/exec/classic/query_shard_server_test_fixture.cpp index d3492b65191..e33f0137c19 100644 --- a/src/mongo/db/exec/classic/query_shard_server_test_fixture.cpp +++ b/src/mongo/db/exec/classic/query_shard_server_test_fixture.cpp @@ -146,7 +146,7 @@ CollectionMetadata QueryShardServerTestFixture::prepareTestData( true, _chunks); - ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none); + CurrentChunkManager 
cm(makeStandaloneRoutingTableHistory(std::move(rt))); ASSERT_EQ(_chunks.size(), cm.numChunks()); { diff --git a/src/mongo/db/global_catalog/chunk_manager.cpp b/src/mongo/db/global_catalog/chunk_manager.cpp index b88f55570b0..6e5da6d76d8 100644 --- a/src/mongo/db/global_catalog/chunk_manager.cpp +++ b/src/mongo/db/global_catalog/chunk_manager.cpp @@ -826,7 +826,7 @@ void ChunkManager::getShardIdsForRange(const BSONObj& min, // owns chunks when it used to at _clusterTime. if (!_clusterTime && ChunkMap::allElementsAreOfType(BSONType::minKey, min) && ChunkMap::allElementsAreOfType(BSONType::maxKey, max)) { - getAllShardIds(shardIds); + _rt->optRt->getAllShardIds(shardIds); if (chunkRanges) { getAllChunkRanges(chunkRanges); } @@ -871,8 +871,8 @@ bool ChunkManager::rangeOverlapsShard(const ChunkRange& range, const ShardId& sh return overlapFound; } -boost::optional ChunkManager::getNextChunkOnShard(const BSONObj& shardKey, - const ShardId& shardId) const { +boost::optional CurrentChunkManager::getNextChunkOnShard(const BSONObj& shardKey, + const ShardId& shardId) const { tassert(7626422, "Expected routing table to be initialized", _rt->optRt); tassert(8719704, "Should never call getNextChunkOnShard when ChunkManager is at point-in-time", @@ -916,10 +916,6 @@ void RoutingTableHistory::getAllChunkRanges(std::set* all) const { }); } -ChunkManager ChunkManager::makeAtTime(const ChunkManager& cm, Timestamp clusterTime) { - return ChunkManager(cm._rt, clusterTime); -} - bool ChunkManager::allowMigrations() const { if (!_rt->optRt) return true; diff --git a/src/mongo/db/global_catalog/chunk_manager.h b/src/mongo/db/global_catalog/chunk_manager.h index 1625d4c9d3a..0a3b67335bb 100644 --- a/src/mongo/db/global_catalog/chunk_manager.h +++ b/src/mongo/db/global_catalog/chunk_manager.h @@ -67,7 +67,7 @@ namespace mongo { -class ChunkManager; +class CurrentChunkManager; struct MONGO_MOD_NEEDS_REPLACEMENT PlacementVersionTargetingInfo { /** @@ -705,12 +705,59 @@ struct MONGO_MOD_NEEDS_REPLACEMENT EndpointComp { /** * Wrapper around a RoutingTableHistory, which pins it to a particular point in time. + * + * The ChunkManager class hierarchy represents routing information for MongoDB sharded collections. + * This implementation uses a non-virtual inheritance approach where state is stored in + * the base class and behavior is differentiated through derived class method availability. + * ChunkManager (Base Class - Stores ALL state) + * Derived classes: + * 1. CurrentChunkManager(_clusterTime: none) + * Additional Methods: + * - getAllShardIds() + * - getNShardsOwningChunks() + * - getNextChunkOnShard() + * 2. PointInTimeChunkManager(_clusterTime: Timestamp) + * + * CRITICAL DESIGN CONSTRAINT: ALL STATE MUST BE AT ChunkManager LEVEL + * + * The _clusterTime field MUST remain in the base ChunkManager class because both + * CurrentChunkManager and PointInTimeChunkManager share the same RoutingTableHistory. + * + * The routing table itself doesn't change based on point-in-time vs current semantics. + * Only the _clusterTime affects how operations interpret that shared routing table: + * - CurrentChunkManager uses the latest cluster time + * - PointInTimeChunkManager uses a specific historical cluster time + + * Usage: + * 1. CurrentChunkManager (Latest Routing State) + * ------------------------------------------ + * Represents the most up-to-date routing information from the catalog cache. 
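+ *
+ *    A minimal construction sketch (illustrative only; 'rtHandle' stands for a
+ *    RoutingTableHistoryValueHandle obtained from the catalog cache and is not a name
+ *    introduced by this change):
+ *
+ *        CurrentChunkManager currentCm(rtHandle);     // latest routing state
+ *        std::set<ShardId> shardIds;
+ *        currentCm.getAllShardIds(&shardIds);         // only legal on the current view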
+ * + * Characteristics: + * - _clusterTime is boost::none (no specific point in time) + * - Provides access to current cluster topology + * - Supports additional operations that query current state + * - Used for normal CRUD operations and administrative commands + * + * Exclusive Methods (NOT available on PointInTimeChunkManager): + * - getAllShardIds(): Get exact current set of shards owning chunks + * - getNShardsOwningChunks(): Get exact current count of shards + * - getNextChunkOnShard(): Find next chunk on shard (for migrations) + * + * 2. PointInTimeChunkManager (Historical Routing State) + * --------------------------------------------------- + * Represents routing information as it existed at a specific cluster timestamp. + * + * Characteristics: + * - _clusterTime contains a specific Timestamp + * - Provides snapshot-consistent view of routing + * - Respects atClusterTime for all chunk operations + * - Used for snapshot reads and multi-document transactions */ class MONGO_MOD_NEEDS_REPLACEMENT ChunkManager { -public: - ChunkManager(RoutingTableHistoryValueHandle rt, boost::optional clusterTime) - : _rt(std::move(rt)), _clusterTime(std::move(clusterTime)) {} + friend class PointInTimeChunkManager; +public: // Methods supported on both sharded and unsharded collections /* @@ -863,15 +910,6 @@ public: */ bool rangeOverlapsShard(const ChunkRange& range, const ShardId& shardId) const; - /** - * Given a shardKey, returns the first chunk which is owned by shardId and overlaps or sorts - * after that shardKey. If the return value is empty, this means no such chunk exists. - * - * Can only be used when this ChunkManager is not at point-in-time. - */ - boost::optional getNextChunkOnShard(const BSONObj& shardKey, - const ShardId& shardId) const; - /** * Given a shard key (or a prefix) that has been extracted from a document, returns the chunk * that contains that key. @@ -914,22 +952,12 @@ public: std::set* chunkRanges = nullptr, bool includeMaxBound = true) const; - /** - * Returns the ids of all shards on which the collection has any chunks. - * Can only be used when this ChunkManager is not at point-in-time. - */ - void getAllShardIds(std::set* all) const { - tassert(7626409, "Expected routing table to be initialized", _rt->optRt); - tassert(8719700, - "Should never call getAllShardIds when ChunkManager is at point-in-time", - !_clusterTime); - _rt->optRt->getAllShardIds(all); - } - /** * Returns the ids of all shards on which the collection has any chunks. * Can be used when this ChunkManager is at point-in-time, but it returns the shardIds as of the * latest known placement (instead of the ones at the point-in-time). + * + * TODO SERVER-114823: Remove all usages getAllShardIds_UNSAFE_NotPointInTime */ void getAllShardIds_UNSAFE_NotPointInTime(std::set* all) const { tassert(8719701, "Expected routing table to be initialized", _rt->optRt); @@ -944,36 +972,20 @@ public: _rt->optRt->getAllChunkRanges(all); } - /** - * Returns the number of shards on which the collection has any chunks. - * Can only be used when this ChunkManager is not at point-in-time. - */ - size_t getNShardsOwningChunks() const { - tassert(8719702, "Expected routing table to be initialized", _rt->optRt); - tassert(8719703, - "Should never call getNShardsOwningChunks when ChunkManager is at point-in-time", - !_clusterTime); - return _rt->optRt->getNShardsOwningChunks(); - } - /** * Returns the approximate number of shards on which the collection has any chunks. 
* * To be only used for logging/metrics which do not need to be always correct. The returned * value may be incorrect when this ChunkManager is at point-in-time (it will reflect the * 'latest' number of shards, rather than the one at the point-in-time). + * + * TODO SERVER-114823: Remove all usages getAproxNShardsOwningChunks */ size_t getAproxNShardsOwningChunks() const { tassert(7626411, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getNShardsOwningChunks(); } - /** - * Constructs a new ChunkManager, which is a view of the underlying routing table at a different - * `clusterTime`. - */ - static ChunkManager makeAtTime(const ChunkManager& cm, Timestamp clusterTime); - bool uuidMatches(const UUID& uuid) const { tassert(7626412, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->uuidMatches(uuid); @@ -1014,15 +1026,66 @@ public: return _rt->optRt->isNewTimeseriesWithoutView(); } -private: - RoutingTableHistoryValueHandle _rt; +protected: + ChunkManager(RoutingTableHistoryValueHandle rt, boost::optional clusterTime) + : _rt(std::move(rt)), _clusterTime(std::move(clusterTime)) {} + RoutingTableHistoryValueHandle _rt; boost::optional _clusterTime; }; +class MONGO_MOD_NEEDS_REPLACEMENT CurrentChunkManager : public ChunkManager { +public: + explicit CurrentChunkManager(RoutingTableHistoryValueHandle rt) + : ChunkManager(std::move(rt), boost::none) {} + + /** + * Given a shardKey, returns the first chunk which is owned by shardId and overlaps or sorts + * after that shardKey. If the return value is empty, this means no such chunk exists. + * + * Can only be used when this ChunkManager is not at point-in-time. + */ + boost::optional getNextChunkOnShard(const BSONObj& shardKey, + const ShardId& shardId) const; + + /** + * Returns the ids of all shards on which the collection has any chunks. + * Can only be used when this ChunkManager is not at point-in-time. + */ + void getAllShardIds(std::set* all) const { + tassert(7626409, "Expected routing table to be initialized", _rt->optRt); + tassert(8719700, + "Should never call getAllShardIds when ChunkManager is at point-in-time", + !_clusterTime); + _rt->optRt->getAllShardIds(all); + } + + /** + * Returns the number of shards on which the collection has any chunks. + * Can only be used when this ChunkManager is not at point-in-time. + */ + size_t getNShardsOwningChunks() const { + tassert(8719702, "Expected routing table to be initialized", _rt->optRt); + tassert(8719703, + "Should never call getNShardsOwningChunks when ChunkManager is at point-in-time", + !_clusterTime); + return _rt->optRt->getNShardsOwningChunks(); + } +}; + +class MONGO_MOD_NEEDS_REPLACEMENT PointInTimeChunkManager : public ChunkManager { +public: + PointInTimeChunkManager(RoutingTableHistoryValueHandle rt, Timestamp clusterTime) + : ChunkManager(std::move(rt), clusterTime) {} + + static PointInTimeChunkManager make(const ChunkManager& cm, Timestamp clusterTime) { + return PointInTimeChunkManager(cm._rt, clusterTime); + } +}; + /** - * If `max` is the max bound of some chunk, returns that chunk. Otherwise, returns the chunk that - * contains the key `max`. + * If `max` is the max bound of some chunk, returns that chunk. + * Otherwise, returns the chunk that contains the key `max`. 
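+ *
+ * For example (illustrative only), given chunks [{x: MinKey}, {x: 10}) and [{x: 10}, {x: MaxKey}):
+ * a `max` of {x: 10} yields the first chunk, because 10 is that chunk's max bound, whereas a
+ * `max` of {x: 15} yields the second chunk, since it contains the key.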
*/ MONGO_MOD_NEEDS_REPLACEMENT Chunk getChunkForMaxBound(const ChunkManager& cm, const BSONObj& max); diff --git a/src/mongo/db/global_catalog/chunk_manager_query_test.cpp b/src/mongo/db/global_catalog/chunk_manager_query_test.cpp index 062cba31e6b..9f3e20e712d 100644 --- a/src/mongo/db/global_catalog/chunk_manager_query_test.cpp +++ b/src/mongo/db/global_catalog/chunk_manager_query_test.cpp @@ -518,21 +518,21 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) { chunk1.setHistory({ChunkHistory(*chunk1.getOnCurrentShardSince(), ShardId("0")), ChunkHistory(Timestamp(1, 0), ShardId("1"))}); - ChunkManager chunkManager(makeStandaloneRoutingTableHistory( - oldRoutingTable.makeUpdated(boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - false, /* unsplittable */ - {chunk1})), - Timestamp(5, 0)); + PointInTimeChunkManager cm(makeStandaloneRoutingTableHistory( + oldRoutingTable.makeUpdated(boost::none /* timeseriesFields */, + boost::none /* reshardingFields */, + true, + false, /* unsplittable */ + {chunk1})), + Timestamp(5, 0)); std::set shardIds; - chunkManager.getShardIdsForRange(BSON("x" << MINKEY), BSON("x" << MAXKEY), &shardIds); + cm.getShardIdsForRange(BSON("x" << MINKEY), BSON("x" << MAXKEY), &shardIds); ASSERT_EQ(2, shardIds.size()); const auto expCtx = make_intrusive(); shardIds.clear(); - getShardIdsForQuery(expCtx, BSON("x" << BSON("$gt" << -20)), {}, chunkManager, &shardIds); + getShardIdsForQuery(expCtx, BSON("x" << BSON("$gt" << -20)), {}, cm, &shardIds); ASSERT_EQ(2, shardIds.size()); } @@ -573,7 +573,7 @@ TEST_F(ChunkManagerQueryTest, TestKeyBelongsToShard) { boost::none /* reshardingFields */, true, chunkVec); - ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), clusterTime); + PointInTimeChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), clusterTime); auto chunkIt = chunks.begin(); while (chunkIt != chunks.end()) { diff --git a/src/mongo/db/global_catalog/chunk_manager_refresh_bm.cpp b/src/mongo/db/global_catalog/chunk_manager_refresh_bm.cpp index 4d2bef7eecf..707dae8dde2 100644 --- a/src/mongo/db/global_catalog/chunk_manager_refresh_bm.cpp +++ b/src/mongo/db/global_catalog/chunk_manager_refresh_bm.cpp @@ -125,8 +125,8 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards, boost::none /* reshardingFields */, true, chunks); - return CollectionMetadata( - ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), getShardId(0)); + return CollectionMetadata(CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), + getShardId(0)); } ShardId pessimalShardSelector(int i, int nShards, int nChunks) { @@ -157,8 +157,8 @@ MONGO_COMPILER_NOINLINE auto runIncrementalUpdate(const CollectionMetadata& cm, true /* allowMigration */, false /* unsplittable */, newChunks); - return CollectionMetadata( - ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), getShardId(0)); + return CollectionMetadata(CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), + getShardId(0)); } /* @@ -315,8 +315,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS true, chunks); benchmark::DoNotOptimize(CollectionMetadata( - ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), - getShardId(0))); + CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), getShardId(0))); } } diff --git a/src/mongo/db/global_catalog/ddl/create_collection_coordinator.cpp 
b/src/mongo/db/global_catalog/ddl/create_collection_coordinator.cpp index c5ca06181a6..b9305af76c2 100644 --- a/src/mongo/db/global_catalog/ddl/create_collection_coordinator.cpp +++ b/src/mongo/db/global_catalog/ddl/create_collection_coordinator.cpp @@ -741,7 +741,7 @@ void checkLocalCatalogCollectionOptions(OperationContext* opCtx, void checkShardingCatalogCollectionOptions(OperationContext* opCtx, const NamespaceString& targetNss, const ShardsvrCreateCollectionRequest& request, - const ChunkManager& cm) { + const CurrentChunkManager& cm) { if (request.getRegisterExistingCollectionInGlobalCatalog()) { // No need for checking the sharding catalog when tracking a collection for the first time return; diff --git a/src/mongo/db/global_catalog/ddl/drop_indexes_coordinator.cpp b/src/mongo/db/global_catalog/ddl/drop_indexes_coordinator.cpp index be21c25df91..521da1b49ae 100644 --- a/src/mongo/db/global_catalog/ddl/drop_indexes_coordinator.cpp +++ b/src/mongo/db/global_catalog/ddl/drop_indexes_coordinator.cpp @@ -85,7 +85,7 @@ void DropIndexesCoordinator::_dropIndexes(OperationContext* opCtx, router.route( "DropIndexesCoordinator::_dropIndexesPhase", [&](OperationContext* opCtx, const CollectionRoutingInfo& cri) { - const auto chunkManager = cri.getChunkManager(); + const auto chunkManager = cri.getCurrentChunkManager(); std::map shardIdsToShardVersions; if (chunkManager.hasRoutingTable()) { diff --git a/src/mongo/db/global_catalog/metadata_consistency_validation/metadata_consistency_util_test.cpp b/src/mongo/db/global_catalog/metadata_consistency_validation/metadata_consistency_util_test.cpp index 720d3176cf9..78a76cd45f1 100644 --- a/src/mongo/db/global_catalog/metadata_consistency_validation/metadata_consistency_util_test.cpp +++ b/src/mongo/db/global_catalog/metadata_consistency_validation/metadata_consistency_util_test.cpp @@ -880,8 +880,7 @@ TEST_F(MetadataConsistencyTest, ShardTrackedCollectionInconsistencyTest) { std::make_shared(std::move(rt)), ComparableChunkVersion::makeComparableChunkVersion(version)); - const auto collectionMetadata = - CollectionMetadata(ChunkManager(rtHandle, boost::none), _shardId); + const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), _shardId); auto scopedCSR = CollectionShardingRuntime::acquireExclusive(opCtx, _nss); scopedCSR->setFilteringMetadata(opCtx, collectionMetadata); diff --git a/src/mongo/db/pipeline/pipeline_test.cpp b/src/mongo/db/pipeline/pipeline_test.cpp index 612be9c6962..9890632bec0 100644 --- a/src/mongo/db/pipeline/pipeline_test.cpp +++ b/src/mongo/db/pipeline/pipeline_test.cpp @@ -5516,7 +5516,8 @@ public: getCatalogCacheMock()->setCollectionReturnValue( NamespaceString::createNamespaceString_forTest(kDBName, "outColl"), CollectionRoutingInfo{ - ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp}, + PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), + timestamp}, DatabaseTypeValueHandle( DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName), kMyShardName, @@ -5561,7 +5562,8 @@ TEST_F(PipelineOptimizationsShardMerger, MergeWithUntrackedCollection) { getCatalogCacheMock()->setCollectionReturnValue( NamespaceString::createNamespaceString_forTest(kDBName, "outColl"), CollectionRoutingInfo{ - ChunkManager{RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}}, timestamp}, + PointInTimeChunkManager{RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}}, + timestamp}, DatabaseTypeValueHandle( 
DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName), kMyShardName, @@ -5616,7 +5618,7 @@ TEST_F(PipelineOptimizationsShardMerger, LookUpUnsplittableFromCollection) { getCatalogCacheMock()->setCollectionReturnValue( fromCollNs, CollectionRoutingInfo{ - ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp}, + PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp}, DatabaseTypeValueHandle( DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName), kMyShardName, @@ -5653,7 +5655,7 @@ TEST_F(PipelineOptimizationsShardMerger, LookUpShardedFromCollection) { getCatalogCacheMock()->setCollectionReturnValue( fromCollNs, CollectionRoutingInfo{ - ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp}, + PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp}, DatabaseTypeValueHandle( DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName), kMyShardName, diff --git a/src/mongo/db/query/multiple_collection_accessor_test.cpp b/src/mongo/db/query/multiple_collection_accessor_test.cpp index 460687bf526..dac29bcd1c4 100644 --- a/src/mongo/db/query/multiple_collection_accessor_test.cpp +++ b/src/mongo/db/query/multiple_collection_accessor_test.cpp @@ -181,8 +181,7 @@ void MultipleCollectionAccessorTest::installShardedCollectionMetadata( RoutingTableHistoryValueHandle(std::make_shared(std::move(rt)), ComparableChunkVersion::makeComparableChunkVersion(version)); - const auto collectionMetadata = - CollectionMetadata(ChunkManager(rtHandle, boost::none), kMyShardName); + const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), kMyShardName); auto coll = acquireCollection( operationContext(), diff --git a/src/mongo/db/query/timeseries/timeseries_translation.cpp b/src/mongo/db/query/timeseries/timeseries_translation.cpp index e0984db75b5..9cb1ecda7ba 100644 --- a/src/mongo/db/query/timeseries/timeseries_translation.cpp +++ b/src/mongo/db/query/timeseries/timeseries_translation.cpp @@ -177,8 +177,7 @@ boost::optional getTimeseriesTranslationParamsIfReq return boost::none; } - const ChunkManager& chunkManager = cri.getChunkManager(); - const auto& timeseriesFields = chunkManager.getTimeseriesFields(); + const auto& timeseriesFields = cri.getChunkManager().getTimeseriesFields(); tassert(10601101, "Timeseries collections must have timeseries options", timeseriesFields.has_value()); diff --git a/src/mongo/db/router_role/collection_routing_info_targeter_test.cpp b/src/mongo/db/router_role/collection_routing_info_targeter_test.cpp index 0376dbfff8f..2887e32948c 100644 --- a/src/mongo/db/router_role/collection_routing_info_targeter_test.cpp +++ b/src/mongo/db/router_role/collection_routing_info_targeter_test.cpp @@ -190,8 +190,8 @@ void CollectionRoutingInfoTargeterTest::testTargetInsertWithRangePrefixHashedSha * with the distinction that it simply creates and returns a ChunkManager object * and does not assign it to the Global Catalog Cache ChunkManager. 
*/ -ChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern, - const std::vector& splitPoints) { +CurrentChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern, + const std::vector& splitPoints) { std::vector chunks; auto splitPointsIncludingEnds(splitPoints); splitPointsIncludingEnds.insert(splitPointsIncludingEnds.begin(), @@ -228,9 +228,8 @@ ChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern, true, // allowMigration chunks); - return ChunkManager(RoutingTableHistoryValueHandle( - std::make_shared(std::move(routingTableHistory))), - boost::none); + return CurrentChunkManager(RoutingTableHistoryValueHandle( + std::make_shared(std::move(routingTableHistory)))); } @@ -711,7 +710,7 @@ public: const auto cri = makeUnshardedCollectionRoutingInfo(kNss); std::set shards; - cri.getChunkManager().getAllShardIds(&shards); + cri.getCurrentChunkManager().getAllShardIds(&shards); ASSERT_EQ(1, shards.size()); owningShard = *shards.begin(); diff --git a/src/mongo/db/router_role/router_role.cpp b/src/mongo/db/router_role/router_role.cpp index 73b1c84b614..1aa6d560d0e 100644 --- a/src/mongo/db/router_role/router_role.cpp +++ b/src/mongo/db/router_role/router_role.cpp @@ -411,23 +411,26 @@ bool MultiCollectionRouter::isAnyCollectionNotLocal( "Must be an entry in criMap for namespace " + nss.toStringForErrorMsg(), nssCri != criMap.end()); - const auto chunkManagerMaybeAtClusterTime = atClusterTime - ? ChunkManager::makeAtTime(nssCri->second.getChunkManager(), - atClusterTime->asTimestamp()) - : nssCri->second.getChunkManager(); - - bool isNssLocal = [&]() { - if (chunkManagerMaybeAtClusterTime.isSharded()) { + auto isNssLocalFunc = [&](const auto& cm) { + if (cm.isSharded()) { return false; - } else if (chunkManagerMaybeAtClusterTime.isUnsplittable()) { - return chunkManagerMaybeAtClusterTime.getMinKeyShardIdWithSimpleCollation() == - myShardId; + } else if (cm.isUnsplittable()) { + return cm.getMinKeyShardIdWithSimpleCollation() == myShardId; } else { // If collection is untracked, it is only local if this shard is the dbPrimary // shard. 
return nssCri->second.getDbPrimaryShardId() == myShardId; } - }(); + }; + + bool isNssLocal; + if (atClusterTime) { + auto pitChunkManager = PointInTimeChunkManager::make(nssCri->second.getChunkManager(), + atClusterTime->asTimestamp()); + isNssLocal = isNssLocalFunc(pitChunkManager); + } else { + isNssLocal = isNssLocalFunc(nssCri->second.getChunkManager()); + } if (!isNssLocal) { anyCollectionNotLocal = true; diff --git a/src/mongo/db/router_role/routing_cache/catalog_cache.cpp b/src/mongo/db/router_role/routing_cache/catalog_cache.cpp index c5d3a05de92..9ce3701a2e8 100644 --- a/src/mongo/db/router_role/routing_cache/catalog_cache.cpp +++ b/src/mongo/db/router_role/routing_cache/catalog_cache.cpp @@ -34,6 +34,7 @@ #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/curop.h" +#include "mongo/db/global_catalog/chunk_manager.h" #include "mongo/db/global_catalog/sharding_catalog_client.h" #include "mongo/db/global_catalog/type_database_gen.h" #include "mongo/db/keypattern.h" @@ -168,7 +169,7 @@ const OperationContext::Decoration routerShouldRelaxCollectionUUIDConsiste } // namespace bool CollectionRoutingInfo::hasRoutingTable() const { - return _cm.hasRoutingTable(); + return getChunkManager().hasRoutingTable(); } const ShardId& CollectionRoutingInfo::getDbPrimaryShardId() const { @@ -180,7 +181,7 @@ const DatabaseVersion& CollectionRoutingInfo::getDbVersion() const { } ShardVersion CollectionRoutingInfo::getCollectionVersion() const { - ShardVersion sv = ShardVersionFactory::make(_cm); + auto sv = ShardVersionFactory::make(getChunkManager()); if (MONGO_unlikely(shouldIgnoreUuidMismatch)) { sv.setIgnoreShardingCatalogUuidMismatch(); } @@ -188,7 +189,7 @@ ShardVersion CollectionRoutingInfo::getCollectionVersion() const { } ShardVersion CollectionRoutingInfo::getShardVersion(const ShardId& shardId) const { - auto sv = ShardVersionFactory::make(_cm, shardId); + auto sv = ShardVersionFactory::make(getChunkManager(), shardId); if (MONGO_unlikely(shouldIgnoreUuidMismatch)) { sv.setIgnoreShardingCatalogUuidMismatch(); } @@ -419,11 +420,8 @@ StatusWith CatalogCache::_getDatabaseForCollectionRoutingInf return swDbInfo; } -StatusWith CatalogCache::_getCollectionPlacementInfoAt( - OperationContext* opCtx, - const NamespaceString& nss, - boost::optional atClusterTime, - bool allowLocks) { +StatusWith CatalogCache::_getCollectionRoutingTable( + OperationContext* opCtx, const NamespaceString& nss, bool allowLocks) { tassert(7032314, "Do not hold a lock while refreshing the catalog cache. Doing so would potentially " "hold the lock during a network call, and can lead to a deadlock as described in " @@ -440,7 +438,7 @@ StatusWith CatalogCache::_getCollectionPlacementInfoAt( if (nss.isNamespaceAlwaysUntracked()) { // If the collection is known to always be untracked, there is no need to request it to // the CollectionCache. 
- return ChunkManager(OptionalRoutingTableHistory(), atClusterTime); + return OptionalRoutingTableHistory(); } auto collEntryFuture = @@ -452,7 +450,7 @@ StatusWith CatalogCache::_getCollectionPlacementInfoAt( // use it, otherwise return an error if (collEntryFuture.isReady()) { - return ChunkManager(collEntryFuture.get(opCtx), atClusterTime); + return collEntryFuture.get(opCtx); } else { return Status{ShardCannotRefreshDueToLocksHeldInfo(nss), "Routing info refresh did not complete"}; @@ -468,7 +466,7 @@ StatusWith CatalogCache::_getCollectionPlacementInfoAt( auto collEntry = collEntryFuture.get(opCtx); _stats.totalRefreshWaitTimeMicros.addAndFetch(t.micros()); - return ChunkManager(std::move(collEntry), atClusterTime); + return std::move(collEntry); } catch (const DBException& ex) { _stats.totalRefreshWaitTimeMicros.addAndFetch(t.micros()); bool isCatalogCacheRetriableError = ex.isA() || @@ -509,13 +507,20 @@ StatusWith CatalogCache::_getCollectionRoutingInfoAt( return swDbInfo.getStatus(); } - auto swChunkManager = _getCollectionPlacementInfoAt(opCtx, nss, optAtClusterTime, allowLocks); - if (!swChunkManager.isOK()) { - return swChunkManager.getStatus(); + auto swRoutingTable = _getCollectionRoutingTable(opCtx, nss, allowLocks); + if (!swRoutingTable.isOK()) { + return swRoutingTable.getStatus(); } - auto cri = - CollectionRoutingInfo{std::move(swChunkManager.getValue()), std::move(swDbInfo.getValue())}; + auto cri = [&]() -> CollectionRoutingInfo { + if (optAtClusterTime) { + PointInTimeChunkManager chunkManager(swRoutingTable.getValue(), optAtClusterTime.get()); + return CollectionRoutingInfo{std::move(chunkManager), std::move(swDbInfo.getValue())}; + } + CurrentChunkManager chunkManager(swRoutingTable.getValue()); + return CollectionRoutingInfo{std::move(chunkManager), std::move(swDbInfo.getValue())}; + }(); + if (MONGO_unlikely(routerShouldRelaxCollectionUUIDConsistencyCheck(opCtx))) { cri.shouldIgnoreUuidMismatch = true; } @@ -538,11 +543,18 @@ void CatalogCache::_triggerPlacementVersionRefresh(const NamespaceString& nss) { nss, ComparableChunkVersion::makeComparableChunkVersionForForcedRefresh()); } -StatusWith CatalogCache::getCollectionPlacementInfoWithRefresh( +StatusWith CatalogCache::getCollectionPlacementInfoWithRefresh( OperationContext* opCtx, const NamespaceString& nss) { try { _triggerPlacementVersionRefresh(nss); - return _getCollectionPlacementInfoAt(opCtx, nss, boost::none /* atClusterTime */); + + auto swRoutingTable = _getCollectionRoutingTable(opCtx, nss); + + if (!swRoutingTable.isOK()) { + return swRoutingTable.getStatus(); + } + + return CurrentChunkManager(swRoutingTable.getValue()); } catch (const DBException& ex) { return ex.toStatus(); } diff --git a/src/mongo/db/router_role/routing_cache/catalog_cache.h b/src/mongo/db/router_role/routing_cache/catalog_cache.h index b07ca486b63..2cefc842140 100644 --- a/src/mongo/db/router_role/routing_cache/catalog_cache.h +++ b/src/mongo/db/router_role/routing_cache/catalog_cache.h @@ -73,9 +73,40 @@ using CachedDatabaseInfo MONGO_MOD_PUBLIC = DatabaseTypeValueHandle; class MONGO_MOD_PUBLIC CollectionRoutingInfo { public: - CollectionRoutingInfo(ChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo) + CollectionRoutingInfo(CurrentChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo) : _dbInfo(std::move(dbInfo)), _cm(std::move(chunkManager)) {} + CollectionRoutingInfo(PointInTimeChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo) + : _dbInfo(std::move(dbInfo)), _cm(std::move(chunkManager)) {} + + /** + * 
Variant type that can hold either a CurrentChunkManager or a PointInTimeChunkManager. + * + * This allows CollectionRoutingInfo to represent routing information in two different modes: + * - CurrentChunkManager: Represents the latest known routing state for a collection + * - PointInTimeChunkManager: Represents routing state at a specific point in time (cluster + * time) + * + * Usage guidelines: + * - Use std::visit() to access the held ChunkManager polymorphically when the operation works + * identically for both types + * - Use std::get_if() when you need to conditionally access features only + * available on CurrentChunkManager (e.g., getNShardsOwningChunks()) + * - Use std::holds_alternative() to check which type is currently held + * + * Example: + * // Polymorphic access (works for both types): + * std::visit([](const auto& cm) { return cm.isSharded(); }, variant); + * + * // Type-specific access: + * if (auto* currentCm = std::get_if(&variant)) { + * currentCm->getAllShardIds(&shards); + * } + * + * TODO SERVER-114825: Investigate if it's possible to implement without this variant. + */ + using ChunkManagerVariant = std::variant; + /** * Returns true if the collection is tracked in the global catalog. * @@ -90,11 +121,103 @@ public: * shards. */ bool isSharded() const { - return _cm.isSharded(); + return getChunkManager().isSharded(); } + /** + * Returns a const reference to the ChunkManager held in this CollectionRoutingInfo. + * + * This method provides polymorphic access to common ChunkManager functionality that works + * identically for both CurrentChunkManager and PointInTimeChunkManager. + * + * Use this method when: + * - You need to call methods that are defined on the base ChunkManager class + * - The operation doesn't require type-specific behavior + * - You're working with shared functionality like: + * - isSharded() + * - isUnsplittable() + * - getShardKeyPattern() + * - getVersion() + * - forEachChunk() (respects point-in-time semantics automatically) + * + * Do NOT use this method when: + * - You need to call CurrentChunkManager-specific methods like getAllShardIds() or + * getNShardsOwningChunks() - use getCurrentChunkManager() instead + * - You need to know whether you have a current or point-in-time view - check the variant + * directly with std::holds_alternative<>() + * + * Example usage: + * const auto& cm = cri.getChunkManager(); + * if (cm.isSharded()) { + * cm.forEachChunk([](const Chunk& chunk) { ... }); + * } + * @return A const reference to the base ChunkManager interface + */ const ChunkManager& getChunkManager() const { - return _cm; + return std::visit([](const auto& cm) -> const ChunkManager& { return cm; }, _cm); + } + + /** + * Returns a const reference to the CurrentChunkManager held in this CollectionRoutingInfo. + * + * This method provides direct access to CurrentChunkManager-specific functionality that is + * NOT available when using a PointInTimeChunkManager. 
+ * + * Use this method when: + * - You need to access the CURRENT (latest) state of the routing information + * - You need to call methods that are ONLY available on CurrentChunkManager: + * - getAllShardIds(): Gets the exact current set of shards owning chunks + * - getNShardsOwningChunks(): Gets the exact current count of shards + * - getNextChunkOnShard(): Finds the next chunk on a specific shard + * - You're performing operations that explicitly require non-point-in-time semantics: + * - Checking current cluster topology + * - Making routing decisions based on latest metadata + * - Administrative operations that need up-to-date information + * + * Do NOT use this method when: + * - You're working with point-in-time reads (e.g., transactions with atClusterTime) + * - You don't know whether the CollectionRoutingInfo contains a CurrentChunkManager or + * PointInTimeChunkManager - this will throw an exception if it's the wrong type + * - You only need common ChunkManager functionality - use getChunkManager() instead + * + * Common usage patterns: + * + * // Pattern 1: Direct access when you know it's current + * const auto& currentCm = cri.getCurrentChunkManager(); + * currentCm.getAllShardIds(&allShards); + * + * // Pattern 2: Conditional access (safer) + * if (std::holds_alternative(cri._cm)) { + * const auto& currentCm = cri.getCurrentChunkManager(); + * size_t nShards = currentCm.getNShardsOwningChunks(); + * } + * + * // Pattern 3: Using std::visit for type-specific behavior + * std::visit(OverloadedVisitor{ + * [](const CurrentChunkManager& cm) { + * // Can call getCurrentChunkManager-only methods + * cm.getAllShardIds(&shards); + * }, + * [](const PointInTimeChunkManager& cm) { + * // Different behavior for point-in-time + * } + * }, cri._cm); + * + * When this is created: + * - getCollectionRoutingInfo() without atClusterTime returns CurrentChunkManager + * - getCollectionRoutingInfoAt() with atClusterTime returns PointInTimeChunkManager + * - getCollectionPlacementInfoWithRefresh() returns CurrentChunkManager + * + * @throws TAssertionException if this CollectionRoutingInfo contains a PointInTimeChunkManager + * instead of a CurrentChunkManager + * + * @return A const reference to the CurrentChunkManager + */ + const CurrentChunkManager& getCurrentChunkManager() const { + tassert(10271001, + "Expected current ChunkManager but have PointInTimeChunkManager", + std::holds_alternative(_cm)); + return std::get(_cm); } ShardVersion getCollectionVersion() const; @@ -113,7 +236,7 @@ public: private: CachedDatabaseInfo _dbInfo; - ChunkManager _cm; + ChunkManagerVariant _cm; }; /** @@ -272,7 +395,7 @@ public: /** * Blocking method to retrieve refreshed collection placement information (ChunkManager). 
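+     *
+     * The result reflects the current (non point-in-time) routing state. A minimal call-site
+     * sketch (illustrative; 'opCtx' and 'nss' are assumed to come from the caller):
+     *
+     *     auto swCm = catalogCache->getCollectionPlacementInfoWithRefresh(opCtx, nss);
+     *     if (swCm.isOK()) {
+     *         CurrentChunkManager cm = std::move(swCm.getValue());
+     *         // Current-only methods such as getNShardsOwningChunks() are safe to call here.
+     *     }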
*/ - virtual StatusWith getCollectionPlacementInfoWithRefresh( + virtual StatusWith getCollectionPlacementInfoWithRefresh( OperationContext* opCtx, const NamespaceString& nss); /** @@ -433,10 +556,8 @@ private: boost::optional optAtClusterTime, bool allowLocks = false); - StatusWith _getCollectionPlacementInfoAt(OperationContext* opCtx, - const NamespaceString& nss, - boost::optional atClusterTime, - bool allowLocks = false); + StatusWith _getCollectionRoutingTable( + OperationContext* opCtx, const NamespaceString& nss, bool allowLocks = false); void _triggerPlacementVersionRefresh(const NamespaceString& nss); diff --git a/src/mongo/db/router_role/routing_cache/catalog_cache_mock.cpp b/src/mongo/db/router_role/routing_cache/catalog_cache_mock.cpp index 848c2ed6002..901ba1776d3 100644 --- a/src/mongo/db/router_role/routing_cache/catalog_cache_mock.cpp +++ b/src/mongo/db/router_role/routing_cache/catalog_cache_mock.cpp @@ -30,6 +30,7 @@ #include "mongo/db/router_role/routing_cache/catalog_cache_mock.h" #include "mongo/base/error_codes.h" +#include "mongo/db/global_catalog/chunk_manager.h" #include "mongo/db/router_role/routing_cache/config_server_catalog_cache_loader_mock.h" #include "mongo/db/service_context.h" #include "mongo/db/sharding_environment/sharding_test_fixture_common.h" @@ -70,11 +71,11 @@ StatusWith CatalogCacheMock::getCollectionRoutingInfo( nss.toStringForErrorMsg())); } } -StatusWith CatalogCacheMock::getCollectionPlacementInfoWithRefresh( +StatusWith CatalogCacheMock::getCollectionPlacementInfoWithRefresh( OperationContext* opCtx, const NamespaceString& nss) { const auto it = _collectionCache.find(nss); if (it != _collectionCache.end()) { - return it->second.getChunkManager(); + return it->second.getCurrentChunkManager(); } else { return Status( ErrorCodes::InternalError, @@ -107,7 +108,7 @@ std::unique_ptr CatalogCacheMock::make() { CollectionRoutingInfo CatalogCacheMock::makeCollectionRoutingInfoUntracked( const NamespaceString& nss, const ShardId& dbPrimaryShard, DatabaseVersion dbVersion) { - ChunkManager cm(OptionalRoutingTableHistory(), boost::none); + CurrentChunkManager cm(OptionalRoutingTableHistory{}); return CollectionRoutingInfo( std::move(cm), DatabaseTypeValueHandle(DatabaseType{nss.dbName(), dbPrimaryShard, dbVersion})); @@ -197,8 +198,8 @@ CollectionRoutingInfo CatalogCacheMock::_makeCollectionRoutingInfoTracked( true /*allowMigrations*/, chunkTypes); - ChunkManager cm(ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rth)), - boost::none /*clusterTime*/); + CurrentChunkManager cm( + ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rth))); return CollectionRoutingInfo( std::move(cm), DatabaseTypeValueHandle(DatabaseType{nss.dbName(), dbPrimaryShard, dbVersion})); diff --git a/src/mongo/db/router_role/routing_cache/catalog_cache_mock.h b/src/mongo/db/router_role/routing_cache/catalog_cache_mock.h index e285179cf05..335c2486ff2 100644 --- a/src/mongo/db/router_role/routing_cache/catalog_cache_mock.h +++ b/src/mongo/db/router_role/routing_cache/catalog_cache_mock.h @@ -62,7 +62,7 @@ public: const NamespaceString& nss, bool allowLocks) override; - StatusWith getCollectionPlacementInfoWithRefresh( + StatusWith getCollectionPlacementInfoWithRefresh( OperationContext* opCtx, const NamespaceString& nss) override; void setDatabaseReturnValue(const DatabaseName& dbName, CachedDatabaseInfo databaseInfo); diff --git a/src/mongo/db/router_role/sharding_write_router.cpp b/src/mongo/db/router_role/sharding_write_router.cpp index 
18eaffd7fb7..b771174088f 100644 --- a/src/mongo/db/router_role/sharding_write_router.cpp +++ b/src/mongo/db/router_role/sharding_write_router.cpp @@ -71,7 +71,7 @@ ShardingWriteRouter::ShardingWriteRouter(OperationContext* opCtx, const Namespac tassert(6862800, "Routing information for the temporary resharding collection is stale", cri.hasRoutingTable()); - _reshardingChunkMgr = cri.getChunkManager(); + _reshardingChunkMgr = cri.getCurrentChunkManager(); } } } diff --git a/src/mongo/db/router_role/sharding_write_router_bm.cpp b/src/mongo/db/router_role/sharding_write_router_bm.cpp index 6c8c3bd6053..6da4ad8225e 100644 --- a/src/mongo/db/router_role/sharding_write_router_bm.cpp +++ b/src/mongo/db/router_role/sharding_write_router_bm.cpp @@ -224,7 +224,7 @@ protected: ComparableChunkVersion::makeComparableChunkVersion(version)); } - std::pair, mongo::ChunkManager> createChunks( + std::pair, mongo::CurrentChunkManager> createChunks( size_t nShards, uint32_t nChunks, std::vector shards) { invariant(shards.size() == nShards); @@ -255,20 +255,19 @@ protected: reshardingFields.setDonorFields( TypeCollectionDonorFields{tempNss, reshardKeyPattern, shards}); - ChunkManager cm(makeStandaloneRoutingTableHistory( - RoutingTableHistory::makeNew(kNss, - collIdentifier, - shardKeyPattern, - false, /* unsplittable */ - nullptr, - false, - collEpoch, - collTimestamp, - boost::none /* timeseriesFields */, - reshardingFields, /* reshardingFields */ - true, - chunks)), - boost::none); + CurrentChunkManager cm(makeStandaloneRoutingTableHistory( + RoutingTableHistory::makeNew(kNss, + collIdentifier, + shardKeyPattern, + false, /* unsplittable */ + nullptr, + false, + collEpoch, + collTimestamp, + boost::none /* timeseriesFields */, + reshardingFields, /* reshardingFields */ + true, + chunks))); return std::make_pair(chunks, cm); } diff --git a/src/mongo/db/s/analyze_shard_key_read_write_distribution_stage.cpp b/src/mongo/db/s/analyze_shard_key_read_write_distribution_stage.cpp index d4df236a10b..f59b29c1bf1 100644 --- a/src/mongo/db/s/analyze_shard_key_read_write_distribution_stage.cpp +++ b/src/mongo/db/s/analyze_shard_key_read_write_distribution_stage.cpp @@ -193,9 +193,8 @@ CollectionRoutingInfoTargeter makeCollectionRoutingInfoTargeter( true /* allowMigrations */, chunks); - auto cm = ChunkManager(RoutingTableHistoryValueHandle(std::make_shared( - std::move(routingTableHistory))), - boost::none); + CurrentChunkManager cm(RoutingTableHistoryValueHandle( + std::make_shared(std::move(routingTableHistory)))); auto routingCtx = RoutingContext::createSynthetic( {{nss, diff --git a/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp b/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp index 7d9fd357941..f7a2353f260 100644 --- a/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp +++ b/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp @@ -152,9 +152,8 @@ protected: true /* allowMigrations */, chunks); - auto cm = ChunkManager(RoutingTableHistoryValueHandle(std::make_shared( - std::move(routingTableHistory))), - boost::none); + CurrentChunkManager cm(RoutingTableHistoryValueHandle( + std::make_shared(std::move(routingTableHistory)))); auto routingCtx = RoutingContext::createSynthetic( {{nss, CollectionRoutingInfo{std::move(cm), diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp index 49ea8596708..e001353bff8 100644 --- a/src/mongo/db/s/balancer/balance_stats_test.cpp +++ 
b/src/mongo/db/s/balancer/balance_stats_test.cpp @@ -87,9 +87,8 @@ public: true, // allowMigration chunks); - return ChunkManager(RoutingTableHistoryValueHandle(std::make_shared( - std::move(routingTableHistory))), - boost::none); + return CurrentChunkManager(RoutingTableHistoryValueHandle( + std::make_shared(std::move(routingTableHistory)))); } private: diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp index c9bf8368c0f..f98ecf409dd 100644 --- a/src/mongo/db/s/balancer/balancer_policy_test.cpp +++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp @@ -129,9 +129,10 @@ RoutingTableHistory makeRoutingTable(const std::vector& chunks) { chunks); } -ChunkManager makeChunkManager(const std::vector& chunks) { +CurrentChunkManager makeChunkManager(const std::vector& chunks) { auto rt = std::make_shared(makeRoutingTable(chunks)); - return {{std::move(rt)}, boost::none /* atClusterTime */}; + return CurrentChunkManager( + RoutingTableHistoryValueHandle(OptionalRoutingTableHistory(std::move(rt)))); } DistributionStatus makeDistStatus(const ChunkManager& cm, ZoneInfo zoneInfo = ZoneInfo()) { diff --git a/src/mongo/db/s/migration_chunk_cloner_source_op_observer.cpp b/src/mongo/db/s/migration_chunk_cloner_source_op_observer.cpp index f2b45408754..9612ff5cc70 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_op_observer.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_op_observer.cpp @@ -72,7 +72,7 @@ void MigrationChunkClonerSourceOpObserver::assertIntersectingChunkHasNotMoved( const LogicalTime& atClusterTime) { // We can assume the simple collation because shard keys do not support non-simple collations. auto cmAtTimeOfWrite = - ChunkManager::makeAtTime(*metadata.getChunkManager(), atClusterTime.asTimestamp()); + PointInTimeChunkManager::make(*metadata.getChunkManager(), atClusterTime.asTimestamp()); auto chunk = cmAtTimeOfWrite.findIntersectingChunkWithSimpleCollation(shardKey); // Throws if the chunk has moved since the timestamp of the running transaction's atClusterTime diff --git a/src/mongo/db/s/migration_chunk_cloner_source_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_test.cpp index bcfb501bee2..4bc073e6dbb 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_test.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_test.cpp @@ -730,7 +730,7 @@ protected: ->setFilteringMetadata( operationContext(), CollectionMetadata( - ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), + CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), ShardId("dummyShardId"))); }(); diff --git a/src/mongo/db/s/range_deleter_service_test.cpp b/src/mongo/db/s/range_deleter_service_test.cpp index c65955c3860..e4da786e954 100644 --- a/src/mongo/db/s/range_deleter_service_test.cpp +++ b/src/mongo/db/s/range_deleter_service_test.cpp @@ -129,21 +129,19 @@ void RangeDeleterServiceTest::_setFilteringMetadataByUUID(OperationContext* opCt ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)}, ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}), ShardId("this")); - ChunkManager cm(makeStandaloneRoutingTableHistory( - RoutingTableHistory::makeNew(nss, - uuid, - kShardKeyPattern, - false, /* unsplittable */ - nullptr, - false, - epoch, - Timestamp(1, 1), - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - - true, - {std::move(chunk)})), - boost::none); + CurrentChunkManager cm(makeStandaloneRoutingTableHistory( + RoutingTableHistory::makeNew(nss, + uuid, + 
                                     kShardKeyPattern,
+                                    false, /* unsplittable */
+                                    nullptr,
+                                    false,
+                                    epoch,
+                                    Timestamp(1, 1),
+                                    boost::none /* timeseriesFields */,
+                                    boost::none /* reshardingFields */,
+                                    true,
+                                    {std::move(chunk)})));
         return CollectionMetadata(std::move(cm), ShardId("this"));
     }();
diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp
index 849266ee760..f14aad3684b 100644
--- a/src/mongo/db/s/range_deletion_util_test.cpp
+++ b/src/mongo/db/s/range_deletion_util_test.cpp
@@ -134,7 +134,7 @@ public:
                        ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
                        ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
                        ShardId("dummyShardId")}});
-        ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)));
         AutoGetDb autoDb(_opCtx, kNss.dbName(), MODE_IX);
         Lock::CollectionLock collLock(_opCtx, kNss, MODE_IX);
         CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(_opCtx, kNss)
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp
index d7a022ce6a3..3875b18641c 100644
--- a/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp
@@ -226,7 +226,7 @@ protected:
         ShardServerTestFixtureWithCatalogCacheMock::tearDown();
     }
-    ChunkManager createChunkManager(
+    CurrentChunkManager createChunkManager(
         const ShardKeyPattern& shardKeyPattern, std::deque configCacheChunksData) {
         const OID epoch = OID::gen();
@@ -255,7 +255,7 @@ protected:
             false,
             chunks);
-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
     }
     /**
diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
index 99f083d1dc6..74b2944de9b 100644
--- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp
@@ -128,8 +128,7 @@ public:
             true /* allowMigrations */,
             chunks);
-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
-                            boost::none /* clusterTime */);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
     }
     DonorShardFetchTimestamp makeDonorShardFetchTimestamp(ShardId shardId,
diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
index c21d4c06bb0..862e724c713 100644
--- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp
@@ -165,20 +165,19 @@ protected:
         auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
         auto chunk = ChunkType(
             uuid, range, ChunkVersion({epoch, timestamp}, {1, 0}), shardThatChunkExistsOn);
-        ChunkManager cm(makeStandaloneRoutingTableHistory(
-            RoutingTableHistory::makeNew(nss,
-                                         uuid,
-                                         shardKeyPattern,
-                                         false, /* unsplittable */
-                                         nullptr,
-                                         false,
-                                         epoch,
-                                         timestamp,
-                                         boost::none /* timeseriesFields */,
-                                         boost::none /* reshardingFields */,
-                                         true,
-                                         {std::move(chunk)})),
-                        boost::none);
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
+            RoutingTableHistory::makeNew(nss,
+                                         uuid,
+                                         shardKeyPattern,
+                                         false, /* unsplittable */
+                                         nullptr,
+                                         false,
+                                         epoch,
+                                         timestamp,
+                                         boost::none /* timeseriesFields */,
+                                         boost::none /* reshardingFields */,
+                                         true,
+                                         {std::move(chunk)})));
         auto dbVersion = DatabaseVersion(uuid, timestamp);
         getCatalogCacheMock()->setDatabaseReturnValue(
             nss.dbName(),
diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
index d93277d56cc..e9c40db7b52 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp
@@ -310,7 +310,7 @@ public:
             false,
             chunks);
-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
     }
     void loadCatalogCacheValues() {
diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
index 5916d133ddc..31c781e868c 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp
@@ -520,8 +520,7 @@ private:
             true /* allowMigrations */,
             chunks);
-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
-                            boost::none /* clusterTime */);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
     }
     RoutingTableHistoryValueHandle makeStandaloneRoutingTableHistory(RoutingTableHistory rt) {
diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
index 8ccb07cba71..55a5abf240f 100644
--- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp
@@ -333,11 +333,11 @@ public:
     }
 private:
-    ChunkManager makeChunkManager(const OID& epoch,
-                                  const NamespaceString& nss,
-                                  const UUID& uuid,
-                                  const BSONObj& shardKey,
-                                  const std::vector& chunks) {
+    CurrentChunkManager makeChunkManager(const OID& epoch,
+                                         const NamespaceString& nss,
+                                         const UUID& uuid,
+                                         const BSONObj& shardKey,
+                                         const std::vector& chunks) {
         auto rt = RoutingTableHistory::makeNew(nss,
                                                uuid,
                                                shardKey,
@@ -350,11 +350,10 @@ private:
                                                boost::none /* reshardingFields */,
                                                true /* allowMigrations */,
                                                chunks);
-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
-                            boost::none /* clusterTime */);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
     }
-    ChunkManager makeChunkManagerForSourceCollection() {
+    CurrentChunkManager makeChunkManagerForSourceCollection() {
         // Create three chunks, two that are owned by this donor shard and one owned by some other
         // shard. The chunk for {sk: null} is owned by this donor shard to allow test cases to omit
         // the shard key field when it isn't relevant.
@@ -380,7 +379,7 @@ private:
             epoch, _sourceNss, _sourceUUID, BSON(_currentShardKey << 1), chunks);
     }
-    ChunkManager makeChunkManagerForOutputCollection() {
+    CurrentChunkManager makeChunkManagerForOutputCollection() {
         const OID epoch = OID::gen();
         const UUID outputUuid = UUID::gen();
         std::vector chunks = {
diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
index 88e6d49ba85..f9c6b0829df 100644
--- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
+++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp
@@ -227,9 +227,8 @@ public:
             true /* allowMigrations */,
             chunks);
         return CollectionRoutingInfo{
-            ChunkManager(
-                ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
-                boost::none /* clusterTime */),
+            CurrentChunkManager(
+                ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt))),
             DatabaseTypeValueHandle(DatabaseType{
                 nss.dbName(), _someDonorId, DatabaseVersion(UUID::gen(), Timestamp(1, 1))})};
     }
diff --git a/src/mongo/db/shard_role/post_resharding_placement.cpp b/src/mongo/db/shard_role/post_resharding_placement.cpp
index f4b4dc517ea..ae7f471e2a1 100644
--- a/src/mongo/db/shard_role/post_resharding_placement.cpp
+++ b/src/mongo/db/shard_role/post_resharding_placement.cpp
@@ -57,7 +57,8 @@ PostReshardingCollectionPlacement::PostReshardingCollectionPlacement(
     tassert(11178203,
             "Routing information for the temporary resharding collection is stale",
             tmpNssRoutingInfoWithStatus.getValue().hasRoutingTable());
-    _tmpReshardingCollectionChunkManager = tmpNssRoutingInfoWithStatus.getValue().getChunkManager();
+    _tmpReshardingCollectionChunkManager =
+        tmpNssRoutingInfoWithStatus.getValue().getCurrentChunkManager();
 }
 const ShardId& PostReshardingCollectionPlacement::getReshardingDestinedRecipient(
diff --git a/src/mongo/db/shard_role/shard_catalog/collection_metadata.cpp b/src/mongo/db/shard_role/shard_catalog/collection_metadata.cpp
index 0918348f23c..4620ec198f3 100644
--- a/src/mongo/db/shard_role/shard_catalog/collection_metadata.cpp
+++ b/src/mongo/db/shard_role/shard_catalog/collection_metadata.cpp
@@ -55,11 +55,14 @@
 namespace mongo {
-CollectionMetadata::CollectionMetadata(ChunkManager cm, const ShardId& thisShardId)
+CollectionMetadata::CollectionMetadata(CurrentChunkManager cm, const ShardId& thisShardId)
+    : _cm(std::move(cm)), _thisShardId(thisShardId) {}
+
+CollectionMetadata::CollectionMetadata(PointInTimeChunkManager cm, const ShardId& thisShardId)
     : _cm(std::move(cm)), _thisShardId(thisShardId) {}
 bool CollectionMetadata::allowMigrations() const {
-    return _cm ? _cm->allowMigrations() : true;
+    return _cm ? getChunkManagerBase().allowMigrations() : true;
 }
 boost::optional CollectionMetadata::getReshardingKeyIfShouldForwardOps() const {
@@ -139,12 +142,14 @@ BSONObj CollectionMetadata::extractDocumentKey(const ShardKeyPattern* shardKeyPa
 }
 BSONObj CollectionMetadata::extractDocumentKey(const BSONObj& doc) const {
-    return extractDocumentKey(isSharded() ? &_cm->getShardKeyPattern() : nullptr, doc);
+    return extractDocumentKey(isSharded() ? &getChunkManagerBase().getShardKeyPattern() : nullptr,
+                              doc);
 }
 std::string CollectionMetadata::toStringBasic() const {
     if (hasRoutingTable()) {
-        return str::stream() << "collection placement version: " << _cm->getVersion().toString()
+        return str::stream() << "collection placement version: "
+                             << getChunkManagerBase().getVersion().toString()
                              << ", shard placement version: "
                              << getShardPlacementVersionForLogging().toString();
     } else {
@@ -157,7 +162,7 @@ RangeMap CollectionMetadata::getChunks() const {
     RangeMap chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap());
-    _cm->forEachChunk([this, &chunksMap](const auto& chunk) {
+    getChunkManagerBase().forEachChunk([this, &chunksMap](const auto& chunk) {
         if (chunk.getShardId() == _thisShardId)
             chunksMap.emplace_hint(chunksMap.end(), chunk.getMin(), chunk.getMax());
@@ -170,7 +175,7 @@ RangeMap CollectionMetadata::getChunks() const {
 bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk) const {
     tassert(10016203, "Expected a routing table to be initialized", hasRoutingTable());
-    auto nextChunk = _cm->getNextChunkOnShard(lookupKey, _thisShardId);
+    auto nextChunk = getCurrentChunkManager()->getNextChunkOnShard(lookupKey, _thisShardId);
     if (!nextChunk)
         return false;
@@ -182,7 +187,7 @@ bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk
 bool CollectionMetadata::currentShardHasAnyChunks() const {
     tassert(10016204, "Expected a routing table to be initialized", hasRoutingTable());
     std::set shards;
-    _cm->getAllShardIds(&shards);
+    getCurrentChunkManager()->getAllShardIds(&shards);
     return shards.find(_thisShardId) != shards.end();
 }
@@ -259,7 +264,7 @@ void CollectionMetadata::toBSONChunks(BSONArrayBuilder* builder) const {
     if (!hasRoutingTable())
         return;
-    _cm->forEachChunk([this, &builder](const auto& chunk) {
+    getChunkManagerBase().forEachChunk([this, &builder](const auto& chunk) {
         if (chunk.getShardId() == _thisShardId) {
             BSONArrayBuilder chunkBB(builder->subarrayStart());
             chunkBB.append(chunk.getMin());
diff --git a/src/mongo/db/shard_role/shard_catalog/collection_metadata.h b/src/mongo/db/shard_role/shard_catalog/collection_metadata.h
index 928d7af9b83..54d3a34fb21 100644
--- a/src/mongo/db/shard_role/shard_catalog/collection_metadata.h
+++ b/src/mongo/db/shard_role/shard_catalog/collection_metadata.h
@@ -81,7 +81,9 @@ public:
      * "thisShardId" is the shard identity of this shard for purposes of answering questions like
      * "does this key belong to this shard"?
      */
-    CollectionMetadata(ChunkManager cm, const ShardId& thisShardId);
+    CollectionMetadata(CurrentChunkManager cm, const ShardId& thisShardId);
+
+    CollectionMetadata(PointInTimeChunkManager cm, const ShardId& thisShardId);
     /**
      * Returns a CollectionMetadata object for an untracked collection.
@@ -94,18 +96,18 @@ public:
      * Returns whether this metadata object represents a sharded or unsharded collection.
     */
    bool isSharded() const {
-        return _cm && _cm->isSharded();
+        return _cm && getChunkManagerBase().isSharded();
    }
    /**
     * Returns whether this metadata object represents an unsplittable collection.
    */
    bool isUnsplittable() const {
-        return _cm && _cm->isUnsplittable();
+        return _cm && getChunkManagerBase().isUnsplittable();
    }
    bool hasRoutingTable() const {
-        return _cm && _cm->hasRoutingTable();
+        return _cm && getChunkManagerBase().hasRoutingTable();
    }
    bool allowMigrations() const;
@@ -126,7 +128,8 @@ public:
     * have a routing table.
     */
    ChunkVersion getShardPlacementVersion() const {
-        return (hasRoutingTable() ? _cm->getVersion(_thisShardId) : ChunkVersion::UNTRACKED());
+        return (hasRoutingTable() ? getChunkManagerBase().getVersion(_thisShardId)
+                                  : ChunkVersion::UNTRACKED());
    }
    /**
@@ -136,7 +139,8 @@ public:
     * timestamp".
     */
    Timestamp getShardMaxValidAfter() const {
-        return (hasRoutingTable() ? _cm->getMaxValidAfter(_thisShardId) : Timestamp(0, 0));
+        return (hasRoutingTable() ? getChunkManagerBase().getMaxValidAfter(_thisShardId)
+                                  : Timestamp(0, 0));
    }
    /**
@@ -149,7 +153,7 @@ public:
     * instead.
     */
    ChunkVersion getShardPlacementVersionForLogging() const {
-        return (hasRoutingTable() ? _cm->getVersionForLogging(_thisShardId)
+        return (hasRoutingTable() ? getChunkManagerBase().getVersionForLogging(_thisShardId)
                                   : ChunkVersion::UNTRACKED());
    }
@@ -158,7 +162,7 @@ public:
     * table.
     */
    ChunkVersion getCollPlacementVersion() const {
-        return (hasRoutingTable() ? _cm->getVersion() : ChunkVersion::UNTRACKED());
+        return (hasRoutingTable() ? getChunkManagerBase().getVersion() : ChunkVersion::UNTRACKED());
    }
    /**
@@ -171,7 +175,7 @@ public:
    const ShardKeyPattern& getShardKeyPattern() const {
        tassert(10016206, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->getShardKeyPattern();
+        return getChunkManagerBase().getShardKeyPattern();
    }
    /**
@@ -199,12 +203,12 @@ public:
    bool uuidMatches(UUID uuid) const {
        tassert(10016215, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->uuidMatches(uuid);
+        return getChunkManagerBase().uuidMatches(uuid);
    }
    const UUID& getUUID() const {
        tassert(10016216, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->getUUID();
+        return getChunkManagerBase().getUUID();
    }
    /**
@@ -230,7 +234,32 @@ public:
    const ChunkManager* getChunkManager() const {
        tassert(10016207, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm.get_ptr();
+        return std::visit([](const auto& cm) -> const ChunkManager* { return &cm; }, *_cm);
+    }
+
+    /**
+     * Returns a pointer to ChunkManager if that's what's stored, nullptr otherwise.
+     * Use this only when you specifically need the current (non-point-in-time) ChunkManager.
+     */
+    const CurrentChunkManager* getCurrentChunkManager() const {
+        tassert(9014101, "Expected a routing table to be initialized", hasRoutingTable());
+        return std::get_if(&*_cm);
+    }
+
+    /**
+     * Returns a pointer to PointInTimeChunkManager if that's what's stored, nullptr otherwise.
+     * Use this only when you specifically need the point-in-time ChunkManager.
+     */
+    const PointInTimeChunkManager* getPointInTimeChunkManager() const {
+        tassert(9014102, "Expected a routing table to be initialized", hasRoutingTable());
+        return std::get_if(&*_cm);
+    }
+
+    /**
+     * Returns true if this metadata holds a point-in-time chunk manager.
+     */
+    bool isAtPointInTime() const {
+        return _cm && std::holds_alternative(*_cm);
    }
    /**
@@ -239,7 +268,7 @@ public:
     */
    bool keyBelongsToMe(const BSONObj& key) const {
        tassert(10016208, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->keyBelongsToShard(key, _thisShardId);
+        return getChunkManagerBase().keyBelongsToShard(key, _thisShardId);
    }
    /**
@@ -250,7 +279,7 @@ public:
    ChunkManager::ChunkOwnership nearestOwnedChunk(const BSONObj& key,
                                                   ChunkMap::Direction direction) const {
        tassert(9526301, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->nearestOwnedChunk(key, _thisShardId, direction);
+        return getChunkManagerBase().nearestOwnedChunk(key, _thisShardId, direction);
    }
    /**
@@ -266,7 +295,7 @@ public:
     */
    bool rangeOverlapsChunk(const ChunkRange& range) const {
        tassert(10016209, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->rangeOverlapsShard(range, _thisShardId);
+        return getChunkManagerBase().rangeOverlapsShard(range, _thisShardId);
    }
    /**
@@ -307,22 +336,34 @@ public:
    const boost::optional& getReshardingFields() const {
        tassert(10016210, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->getReshardingFields();
+        return getChunkManagerBase().getReshardingFields();
    }
    const boost::optional& getTimeseriesFields() const {
        tassert(10016211, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->getTimeseriesFields();
+        return getChunkManagerBase().getTimeseriesFields();
    }
    bool isUniqueShardKey() const {
        tassert(10016212, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->isUnique();
+        return getChunkManagerBase().isUnique();
    }
 private:
+    /**
+     * Helper to access the ChunkManager interface regardless of whether _cm holds
+     * a ChunkManager or PointInTimeChunkManager.
+     */
+    const ChunkManager& getChunkManagerBase() const {
+        tassert(9014100, "Expected _cm to be initialized", _cm.has_value());
+        return std::visit([](const auto& cm) -> const ChunkManager& { return cm; }, *_cm);
+    }
+
+    // Type alias for the variant holding either ChunkManager or PointInTimeChunkManager
+    using ChunkManagerVariant = std::variant;
+
    // The full routing table for the collection or boost::none if the collection is not tracked
-    boost::optional _cm;
+    boost::optional _cm;
    // The identity of this shard, for the purpose of answering "key belongs to me" queries. If the
    // collection is not tracked (_cm is boost::none), then this value will be empty.
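A minimal usage sketch of the API split above (illustrative only, not part of the patch): the helper name makeMetadataForShard and the rtHandle/atClusterTime parameters are hypothetical, while the constructor shapes are assumed from the test call sites updated below (CurrentChunkManager(rtHandle) and PointInTimeChunkManager(rtHandle, timestamp)).

    // Hedged sketch: choose the chunk manager flavor based on whether a point-in-time
    // read is requested. Names other than the MongoDB types are made up for illustration.
    CollectionMetadata makeMetadataForShard(RoutingTableHistoryValueHandle rtHandle,
                                            boost::optional<Timestamp> atClusterTime,
                                            const ShardId& thisShardId) {
        if (atClusterTime) {
            // Point-in-time metadata: shard-wide queries (key ownership, placement versions)
            // still work via the common ChunkManager interface, but getCurrentChunkManager()
            // returns nullptr, so current-only calls are unavailable.
            return CollectionMetadata(PointInTimeChunkManager(std::move(rtHandle), *atClusterTime),
                                      thisShardId);
        }
        // Latest metadata: getCurrentChunkManager() is non-null, so current-only calls such as
        // getNextChunkOnShard() remain available.
        return CollectionMetadata(CurrentChunkManager(std::move(rtHandle)), thisShardId);
    }

A caller that already holds a ChunkManager and needs a snapshot would instead go through PointInTimeChunkManager::make(), as MetadataManager::getActiveMetadata() does in a hunk further down.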
diff --git a/src/mongo/db/shard_role/shard_catalog/collection_metadata_filtering_test.cpp b/src/mongo/db/shard_role/shard_catalog/collection_metadata_filtering_test.cpp
index e38867c1863..4b9bf8e8ff3 100644
--- a/src/mongo/db/shard_role/shard_catalog/collection_metadata_filtering_test.cpp
+++ b/src/mongo/db/shard_role/shard_catalog/collection_metadata_filtering_test.cpp
@@ -148,7 +148,7 @@ protected:
             return std::vector{chunk1, chunk2, chunk3, chunk4};
         }());
-        ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)));
         ASSERT_EQ(4, cm.numChunks());
         {
diff --git a/src/mongo/db/shard_role/shard_catalog/collection_metadata_test.cpp b/src/mongo/db/shard_role/shard_catalog/collection_metadata_test.cpp
index 60d89904352..41fed96e24b 100644
--- a/src/mongo/db/shard_role/shard_catalog/collection_metadata_test.cpp
+++ b/src/mongo/db/shard_role/shard_catalog/collection_metadata_test.cpp
@@ -103,22 +103,27 @@ CollectionMetadata makeTrackedCollectionMetadataImpl(
         chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), chunk.getShard())});
     }
-    return CollectionMetadata(
-        ChunkManager(ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(
-                         RoutingTableHistory::makeNew(kNss,
-                                                      uuid,
-                                                      shardKeyPattern,
-                                                      false, /* unsplittable */
-                                                      nullptr,
-                                                      false,
-                                                      epoch,
-                                                      timestamp,
-                                                      boost::none /* timeseriesFields */,
-                                                      std::move(reshardingFields),
-                                                      true,
-                                                      allChunks)),
-                     kChunkManager),
-        kThisShard);
+    auto routingTableHistory = ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(
+        RoutingTableHistory::makeNew(kNss,
+                                     uuid,
+                                     shardKeyPattern,
+                                     false, /* unsplittable */
+                                     nullptr,
+                                     false,
+                                     epoch,
+                                     timestamp,
+                                     boost::none /* timeseriesFields */,
+                                     std::move(reshardingFields),
+                                     true,
+                                     allChunks));
+
+    if (kChunkManager) {
+        return CollectionMetadata(
+            PointInTimeChunkManager(std::move(routingTableHistory), kChunkManager.get()),
+            kThisShard);
+    } else {
+        return CollectionMetadata(CurrentChunkManager(std::move(routingTableHistory)), kThisShard);
+    }
 }
diff --git a/src/mongo/db/shard_role/shard_catalog/collection_sharding_runtime_test.cpp b/src/mongo/db/shard_role/shard_catalog/collection_sharding_runtime_test.cpp
index c7180413506..849b6d1532e 100644
--- a/src/mongo/db/shard_role/shard_catalog/collection_sharding_runtime_test.cpp
+++ b/src/mongo/db/shard_role/shard_catalog/collection_sharding_runtime_test.cpp
@@ -116,21 +116,20 @@ public:
         auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
         auto chunk = ChunkType(
             uuid, std::move(range), ChunkVersion({epoch, timestamp}, {1, 0}), chunkShardId);
-        ChunkManager cm(makeStandaloneRoutingTableHistory(
-            RoutingTableHistory::makeNew(kTestNss,
-                                         uuid,
-                                         kShardKeyPattern,
-                                         false, /* unsplittable */
-                                         nullptr,
-                                         false,
-                                         epoch,
-                                         timestamp,
-                                         boost::none /* timeseriesFields */,
-                                         boost::none /* reshardingFields */,
-                                         true,
-                                         {std::move(chunk)})),
-                        boost::none);
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
+            RoutingTableHistory::makeNew(kTestNss,
+                                         uuid,
+                                         kShardKeyPattern,
+                                         false, /* unsplittable */
+                                         nullptr,
+                                         false,
+                                         epoch,
+                                         timestamp,
+                                         boost::none /* timeseriesFields */,
+                                         boost::none /* reshardingFields */,
+                                         true,
+                                         {std::move(chunk)})));
         return CollectionMetadata(std::move(cm), collectionShardId);
     }
diff --git a/src/mongo/db/shard_role/shard_catalog/metadata_manager.cpp b/src/mongo/db/shard_role/shard_catalog/metadata_manager.cpp
index 2d18f1cc6b8..55dbf22de92 100644
--- a/src/mongo/db/shard_role/shard_catalog/metadata_manager.cpp
+++ b/src/mongo/db/shard_role/shard_catalog/metadata_manager.cpp
@@ -151,9 +151,10 @@ std::shared_ptr MetadataManager::getActiveMet
         }
     }
-    return std::make_shared(CollectionMetadata(
-        ChunkManager::makeAtTime(*activeMetadata->getChunkManager(), atClusterTime->asTimestamp()),
-        activeMetadata->shardId()));
+    return std::make_shared(
+        CollectionMetadata(PointInTimeChunkManager::make(*activeMetadata->getChunkManager(),
+                                                         atClusterTime->asTimestamp()),
+                           activeMetadata->shardId()));
 }
 boost::optional MetadataManager::getCollectionUuid() const {
diff --git a/src/mongo/db/shard_role/shard_catalog/metadata_manager_test.cpp b/src/mongo/db/shard_role/shard_catalog/metadata_manager_test.cpp
index 52390d2d864..41306fd83ea 100644
--- a/src/mongo/db/shard_role/shard_catalog/metadata_manager_test.cpp
+++ b/src/mongo/db/shard_role/shard_catalog/metadata_manager_test.cpp
@@ -109,8 +109,7 @@ protected:
             {ChunkType{uuid, range, ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}), kOtherShard}});
         return CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
-            kThisShard);
+            CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
     }
     /**
@@ -163,8 +162,7 @@ protected:
             splitChunks);
         return CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
-            kThisShard);
+            CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
     }
     static CollectionMetadata cloneMetadataMinusChunk(const CollectionMetadata& metadata,
@@ -190,8 +188,7 @@ protected:
             {ChunkType(metadata.getUUID(), ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)});
         return CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
-            kThisShard);
+            CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
     }
     std::shared_ptr _manager;
diff --git a/src/mongo/db/shard_role/shard_catalog/op_observer_sharding_test.cpp b/src/mongo/db/shard_role/shard_catalog/op_observer_sharding_test.cpp
index 2863f459480..eb1e96fcd53 100644
--- a/src/mongo/db/shard_role/shard_catalog/op_observer_sharding_test.cpp
+++ b/src/mongo/db/shard_role/shard_catalog/op_observer_sharding_test.cpp
@@ -141,7 +141,8 @@ protected:
             {std::move(chunk)});
         return CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), Timestamp(100, 0)),
+            PointInTimeChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
+                                    Timestamp(100, 0)),
             ShardId("this"));
     }
diff --git a/src/mongo/db/shard_role/shard_role_test.cpp b/src/mongo/db/shard_role/shard_role_test.cpp
index a0209968e1d..90c2f8aaf0b 100644
--- a/src/mongo/db/shard_role/shard_role_test.cpp
+++ b/src/mongo/db/shard_role/shard_role_test.cpp
@@ -234,8 +234,7 @@ void ShardRoleTest::installShardedCollectionMetadata(
         RoutingTableHistoryValueHandle(std::make_shared(std::move(rt)),
                                        ComparableChunkVersion::makeComparableChunkVersion(version));
-    const auto collectionMetadata =
-        CollectionMetadata(ChunkManager(rtHandle, boost::none), kMyShardName);
+    const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), kMyShardName);
     AutoGetCollection coll(opCtx, nss, MODE_IX);
     CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss)
diff --git a/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
index 5d8451aa289..19fd87f3f1b 100644
--- a/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
+++ b/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp
@@ -98,7 +98,7 @@ public:
             std::set candidateShardIds;
             if (cri.hasRoutingTable()) {
-                cri.getChunkManager().getAllShardIds(&candidateShardIds);
+                cri.getCurrentChunkManager().getAllShardIds(&candidateShardIds);
             } else {
                 candidateShardIds.insert(primaryShardId);
             }
diff --git a/src/mongo/s/query/exec/establish_cursors_test.cpp b/src/mongo/s/query/exec/establish_cursors_test.cpp
index 9a2da99afbe..4ded0e1967c 100644
--- a/src/mongo/s/query/exec/establish_cursors_test.cpp
+++ b/src/mongo/s/query/exec/establish_cursors_test.cpp
@@ -160,7 +160,7 @@ public:
         });
     }
-    ChunkManager createChunkManager(const UUID& uuid, const NamespaceString& nss) {
+    CurrentChunkManager createChunkManager(const UUID& uuid, const NamespaceString& nss) {
         ShardKeyPattern sk{fromjson("{x: 1, _id: 1}")};
         std::deque configData{
             Document(fromjson("{_id: {x: {$minKey: 1}, _id: {$minKey: 1}}, max: {x: 0.0, _id: "
@@ -193,9 +193,8 @@ public:
             false,
             chunks);
-        return ChunkManager(
-            ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
-            boost::none);
+        return CurrentChunkManager(
+            ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)));
     }
 protected:
diff --git a/src/mongo/s/query/shard_key_pattern_query_util.cpp b/src/mongo/s/query/shard_key_pattern_query_util.cpp
index b9e58010240..87760d0d550 100644
--- a/src/mongo/s/query/shard_key_pattern_query_util.cpp
+++ b/src/mongo/s/query/shard_key_pattern_query_util.cpp
@@ -587,7 +587,12 @@ void getShardIdsAndChunksForCanonicalQuery(const CanonicalQuery& query,
         // entries if a shard no longer owns chunks when it used to at _clusterTime. Similarly,
         // this optimization does not apply when it's necessary to fill chunkRanges, as the last
         // chunks can be lost.
-        if (!cm.isAtPointInTime() && shardIds->size() == cm.getNShardsOwningChunks() && !info) {
+        //
+        // Uses getAproxNShardsOwningChunks() as getNShardsOwningChunks() is only available on
+        // CurrentChunkManager, but both currently share the same implementation.
+        // TODO SERVER-114823 Review the usage of getAproxNShardsOwningChunks here.
+        if (!cm.isAtPointInTime() && shardIds->size() == cm.getAproxNShardsOwningChunks() &&
+            !info) {
             break;
         }
     }
diff --git a/src/mongo/s/write_ops/unified_write_executor/write_op_analyzer_test.cpp b/src/mongo/s/write_ops/unified_write_executor/write_op_analyzer_test.cpp
index 4f4e446f95c..15a2259d55c 100644
--- a/src/mongo/s/write_ops/unified_write_executor/write_op_analyzer_test.cpp
+++ b/src/mongo/s/write_ops/unified_write_executor/write_op_analyzer_test.cpp
@@ -110,7 +110,7 @@ struct WriteOpAnalyzerTestImpl : public ShardingTestFixture {
     const NamespaceString kUnsplittableNss =
         NamespaceString::createNamespaceString_forTest("test", "unsplittable");
-    ChunkManager createChunkManager(
+    CurrentChunkManager createChunkManager(
         const UUID& uuid,
         const NamespaceString& nss,
         boost::optional timeseriesFields = boost::none,
@@ -152,9 +152,8 @@ struct WriteOpAnalyzerTestImpl : public ShardingTestFixture {
             false,
             chunks);
-        return ChunkManager(
-            ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
-            boost::none);
+        return CurrentChunkManager(
+            ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)));
     }
     std::unique_ptr createRoutingContextSharded(