SERVER-90141 Improve ChunkManager API to prevent unsafe method calls on point-in-time instances (#44238)

GitOrigin-RevId: 256d62978cc39e7836d01653e459d88f8ad48eaf
This commit is contained in:
Igor Praznik 2025-12-16 16:08:05 +01:00 committed by MongoDB Bot
parent 0835ee99cf
commit e62f34cd30
51 changed files with 548 additions and 305 deletions

View File

@ -176,8 +176,7 @@ void installShardedCollectionMetadata(OperationContext* opCtx,
RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(std::move(rt)),
ComparableChunkVersion::makeComparableChunkVersion(version));
const auto collectionMetadata =
CollectionMetadata(ChunkManager(rtHandle, boost::none), thisShardId);
const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), thisShardId);
AutoGetCollection coll(opCtx, nss, MODE_IX);
CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss)

View File

@ -485,17 +485,26 @@ bool AggExState::canReadUnderlyingCollectionLocally(const CollectionRoutingInfo&
const auto myShardId = ShardingState::get(_opCtx)->shardId();
const auto atClusterTime = repl::ReadConcernArgs::get(_opCtx).getArgsAtClusterTime();
const auto chunkManagerMaybeAtClusterTime = atClusterTime
? ChunkManager::makeAtTime(cri.getChunkManager(), atClusterTime->asTimestamp())
: cri.getChunkManager();
auto isNssLocalFunc = [&](const auto& cm) {
if (cm.isSharded()) {
return false;
} else if (cm.isUnsplittable()) {
return cm.getMinKeyShardIdWithSimpleCollation() == myShardId;
} else {
return cri.getDbPrimaryShardId() == myShardId;
}
};
if (chunkManagerMaybeAtClusterTime.isSharded()) {
return false;
} else if (chunkManagerMaybeAtClusterTime.isUnsplittable()) {
return chunkManagerMaybeAtClusterTime.getMinKeyShardIdWithSimpleCollation() == myShardId;
bool isNssLocal;
if (atClusterTime) {
auto pitChunkManager =
PointInTimeChunkManager::make(cri.getChunkManager(), atClusterTime->asTimestamp());
isNssLocal = isNssLocalFunc(pitChunkManager);
} else {
return cri.getDbPrimaryShardId() == myShardId;
isNssLocal = isNssLocalFunc(cri.getChunkManager());
}
return isNssLocal;
}
Status AggExState::collatorCompatibleWithPipeline(const CollatorInterface* collator) const {

View File

@ -79,8 +79,8 @@ protected:
CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss)
->setFilteringMetadata(opCtx, CollectionMetadata::UNTRACKED());
auto cm = ChunkManager(RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}},
_dbVersion.getTimestamp());
PointInTimeChunkManager cm(RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}},
_dbVersion.getTimestamp());
getCatalogCacheMock()->setCollectionReturnValue(
nss,
CollectionRoutingInfo(
@ -132,7 +132,7 @@ protected:
std::make_shared<RoutingTableHistory>(std::move(rt)),
ComparableChunkVersion::makeComparableChunkVersion(version));
auto cm = ChunkManager(rtHandle, boost::none);
CurrentChunkManager cm(rtHandle);
const auto collectionMetadata = CollectionMetadata(cm, shardName);
AutoGetCollection coll(opCtx, NamespaceStringOrUUID(nss), MODE_IX);

View File

@ -146,7 +146,7 @@ CollectionMetadata QueryShardServerTestFixture::prepareTestData(
true,
_chunks);
ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
CurrentChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)));
ASSERT_EQ(_chunks.size(), cm.numChunks());
{

View File

@ -826,7 +826,7 @@ void ChunkManager::getShardIdsForRange(const BSONObj& min,
// owns chunks when it used to at _clusterTime.
if (!_clusterTime && ChunkMap::allElementsAreOfType(BSONType::minKey, min) &&
ChunkMap::allElementsAreOfType(BSONType::maxKey, max)) {
getAllShardIds(shardIds);
_rt->optRt->getAllShardIds(shardIds);
if (chunkRanges) {
getAllChunkRanges(chunkRanges);
}
@ -871,8 +871,8 @@ bool ChunkManager::rangeOverlapsShard(const ChunkRange& range, const ShardId& sh
return overlapFound;
}
boost::optional<Chunk> ChunkManager::getNextChunkOnShard(const BSONObj& shardKey,
const ShardId& shardId) const {
boost::optional<Chunk> CurrentChunkManager::getNextChunkOnShard(const BSONObj& shardKey,
const ShardId& shardId) const {
tassert(7626422, "Expected routing table to be initialized", _rt->optRt);
tassert(8719704,
"Should never call getNextChunkOnShard when ChunkManager is at point-in-time",
@ -916,10 +916,6 @@ void RoutingTableHistory::getAllChunkRanges(std::set<ChunkRange>* all) const {
});
}
ChunkManager ChunkManager::makeAtTime(const ChunkManager& cm, Timestamp clusterTime) {
return ChunkManager(cm._rt, clusterTime);
}
bool ChunkManager::allowMigrations() const {
if (!_rt->optRt)
return true;

View File

@ -67,7 +67,7 @@
namespace mongo {
class ChunkManager;
class CurrentChunkManager;
struct MONGO_MOD_NEEDS_REPLACEMENT PlacementVersionTargetingInfo {
/**
@ -705,12 +705,59 @@ struct MONGO_MOD_NEEDS_REPLACEMENT EndpointComp {
/**
* Wrapper around a RoutingTableHistory, which pins it to a particular point in time.
*
* The ChunkManager class hierarchy represents routing information for MongoDB sharded collections.
* This implementation uses a non-virtual inheritance approach where state is stored in
* the base class and behavior is differentiated through derived class method availability.
* ChunkManager (Base Class - Stores ALL state)
* Derived classes:
* 1. CurrentChunkManager(_clusterTime: none)
* Additional Methods:
* - getAllShardIds()
* - getNShardsOwningChunks()
* - getNextChunkOnShard()
* 2. PointInTimeChunkManager(_clusterTime: Timestamp)
*
* CRITICAL DESIGN CONSTRAINT: ALL STATE MUST BE AT ChunkManager LEVEL
*
* The _clusterTime field MUST remain in the base ChunkManager class because both
* CurrentChunkManager and PointInTimeChunkManager share the same RoutingTableHistory.
*
* The routing table itself doesn't change based on point-in-time vs current semantics.
* Only the _clusterTime affects how operations interpret that shared routing table:
* - CurrentChunkManager uses the latest cluster time
* - PointInTimeChunkManager uses a specific historical cluster time
* Usage:
* 1. CurrentChunkManager (Latest Routing State)
* ------------------------------------------
* Represents the most up-to-date routing information from the catalog cache.
*
* Characteristics:
* - _clusterTime is boost::none (no specific point in time)
* - Provides access to current cluster topology
* - Supports additional operations that query current state
* - Used for normal CRUD operations and administrative commands
*
* Exclusive Methods (NOT available on PointInTimeChunkManager):
* - getAllShardIds(): Get exact current set of shards owning chunks
* - getNShardsOwningChunks(): Get exact current count of shards
* - getNextChunkOnShard(): Find next chunk on shard (for migrations)
*
* 2. PointInTimeChunkManager (Historical Routing State)
* ---------------------------------------------------
* Represents routing information as it existed at a specific cluster timestamp.
*
* Characteristics:
* - _clusterTime contains a specific Timestamp
* - Provides snapshot-consistent view of routing
* - Respects atClusterTime for all chunk operations
* - Used for snapshot reads and multi-document transactions
*/
class MONGO_MOD_NEEDS_REPLACEMENT ChunkManager {
public:
ChunkManager(RoutingTableHistoryValueHandle rt, boost::optional<Timestamp> clusterTime)
: _rt(std::move(rt)), _clusterTime(std::move(clusterTime)) {}
friend class PointInTimeChunkManager;
public:
// Methods supported on both sharded and unsharded collections
/*
@ -863,15 +910,6 @@ public:
*/
bool rangeOverlapsShard(const ChunkRange& range, const ShardId& shardId) const;
/**
* Given a shardKey, returns the first chunk which is owned by shardId and overlaps or sorts
* after that shardKey. If the return value is empty, this means no such chunk exists.
*
* Can only be used when this ChunkManager is not at point-in-time.
*/
boost::optional<Chunk> getNextChunkOnShard(const BSONObj& shardKey,
const ShardId& shardId) const;
/**
* Given a shard key (or a prefix) that has been extracted from a document, returns the chunk
* that contains that key.
@ -914,22 +952,12 @@ public:
std::set<ChunkRange>* chunkRanges = nullptr,
bool includeMaxBound = true) const;
/**
* Returns the ids of all shards on which the collection has any chunks.
* Can only be used when this ChunkManager is not at point-in-time.
*/
void getAllShardIds(std::set<ShardId>* all) const {
tassert(7626409, "Expected routing table to be initialized", _rt->optRt);
tassert(8719700,
"Should never call getAllShardIds when ChunkManager is at point-in-time",
!_clusterTime);
_rt->optRt->getAllShardIds(all);
}
/**
* Returns the ids of all shards on which the collection has any chunks.
* Can be used when this ChunkManager is at point-in-time, but it returns the shardIds as of the
* latest known placement (instead of the ones at the point-in-time).
*
* TODO SERVER-114823: Remove all usages of getAllShardIds_UNSAFE_NotPointInTime
*/
void getAllShardIds_UNSAFE_NotPointInTime(std::set<ShardId>* all) const {
tassert(8719701, "Expected routing table to be initialized", _rt->optRt);
@ -944,36 +972,20 @@ public:
_rt->optRt->getAllChunkRanges(all);
}
/**
* Returns the number of shards on which the collection has any chunks.
* Can only be used when this ChunkManager is not at point-in-time.
*/
size_t getNShardsOwningChunks() const {
tassert(8719702, "Expected routing table to be initialized", _rt->optRt);
tassert(8719703,
"Should never call getNShardsOwningChunks when ChunkManager is at point-in-time",
!_clusterTime);
return _rt->optRt->getNShardsOwningChunks();
}
/**
* Returns the approximate number of shards on which the collection has any chunks.
*
* To be only used for logging/metrics which do not need to be always correct. The returned
* value may be incorrect when this ChunkManager is at point-in-time (it will reflect the
* 'latest' number of shards, rather than the one at the point-in-time).
*
* TODO SERVER-114823: Remove all usages of getAproxNShardsOwningChunks
*/
size_t getAproxNShardsOwningChunks() const {
tassert(7626411, "Expected routing table to be initialized", _rt->optRt);
return _rt->optRt->getNShardsOwningChunks();
}
/**
* Constructs a new ChunkManager, which is a view of the underlying routing table at a different
* `clusterTime`.
*/
static ChunkManager makeAtTime(const ChunkManager& cm, Timestamp clusterTime);
bool uuidMatches(const UUID& uuid) const {
tassert(7626412, "Expected routing table to be initialized", _rt->optRt);
return _rt->optRt->uuidMatches(uuid);
@ -1014,15 +1026,66 @@ public:
return _rt->optRt->isNewTimeseriesWithoutView();
}
private:
RoutingTableHistoryValueHandle _rt;
protected:
ChunkManager(RoutingTableHistoryValueHandle rt, boost::optional<Timestamp> clusterTime)
: _rt(std::move(rt)), _clusterTime(std::move(clusterTime)) {}
RoutingTableHistoryValueHandle _rt;
boost::optional<Timestamp> _clusterTime;
};
class MONGO_MOD_NEEDS_REPLACEMENT CurrentChunkManager : public ChunkManager {
public:
    /**
     * Constructs a ChunkManager view over the latest known routing state. The base-class
     * _clusterTime is always boost::none for this type, which is what makes the
     * current-only methods below safe to expose here (and not on PointInTimeChunkManager).
     */
    explicit CurrentChunkManager(RoutingTableHistoryValueHandle rt)
        : ChunkManager(std::move(rt), boost::none) {}
    /**
     * Given a shardKey, returns the first chunk which is owned by shardId and overlaps or sorts
     * after that shardKey. If the return value is empty, this means no such chunk exists.
     *
     * Only available on CurrentChunkManager: the result reflects the latest routing table, so
     * it would be meaningless on a point-in-time view.
     */
    boost::optional<Chunk> getNextChunkOnShard(const BSONObj& shardKey,
                                               const ShardId& shardId) const;
    /**
     * Returns the ids of all shards on which the collection has any chunks, as of the latest
     * known placement. Requires an initialized routing table.
     */
    void getAllShardIds(std::set<ShardId>* all) const {
        tassert(7626409, "Expected routing table to be initialized", _rt->optRt);
        // Defensive check: the constructor above always sets _clusterTime to boost::none, so
        // this tassert should be unreachable by construction; kept as a guard against future
        // refactors of the class hierarchy.
        tassert(8719700,
                "Should never call getAllShardIds when ChunkManager is at point-in-time",
                !_clusterTime);
        _rt->optRt->getAllShardIds(all);
    }
    /**
     * Returns the exact number of shards on which the collection has any chunks, as of the
     * latest known placement. Requires an initialized routing table.
     */
    size_t getNShardsOwningChunks() const {
        tassert(8719702, "Expected routing table to be initialized", _rt->optRt);
        // Defensive check: unreachable by construction (see constructor); kept as a guard.
        tassert(8719703,
                "Should never call getNShardsOwningChunks when ChunkManager is at point-in-time",
                !_clusterTime);
        return _rt->optRt->getNShardsOwningChunks();
    }
};
class MONGO_MOD_NEEDS_REPLACEMENT PointInTimeChunkManager : public ChunkManager {
public:
    /**
     * Constructs a ChunkManager view of 'rt' pinned to the given 'clusterTime'. All chunk
     * operations on this object interpret the shared routing table at that timestamp.
     */
    PointInTimeChunkManager(RoutingTableHistoryValueHandle rt, Timestamp clusterTime)
        : ChunkManager(std::move(rt), clusterTime) {}
    /**
     * Re-pins an existing ChunkManager's routing table to 'clusterTime', producing a
     * point-in-time view over the same underlying RoutingTableHistory. Accesses cm._rt
     * directly, which is why ChunkManager declares this class a friend.
     */
    static PointInTimeChunkManager make(const ChunkManager& cm, Timestamp clusterTime) {
        return PointInTimeChunkManager(cm._rt, clusterTime);
    }
};
/**
* If `max` is the max bound of some chunk, returns that chunk. Otherwise, returns the chunk that
* contains the key `max`.
* If `max` is the max bound of some chunk, returns that chunk.
* Otherwise, returns the chunk that contains the key `max`.
*/
MONGO_MOD_NEEDS_REPLACEMENT Chunk getChunkForMaxBound(const ChunkManager& cm, const BSONObj& max);

View File

@ -518,21 +518,21 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
chunk1.setHistory({ChunkHistory(*chunk1.getOnCurrentShardSince(), ShardId("0")),
ChunkHistory(Timestamp(1, 0), ShardId("1"))});
ChunkManager chunkManager(makeStandaloneRoutingTableHistory(
oldRoutingTable.makeUpdated(boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
true,
false, /* unsplittable */
{chunk1})),
Timestamp(5, 0));
PointInTimeChunkManager cm(makeStandaloneRoutingTableHistory(
oldRoutingTable.makeUpdated(boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
true,
false, /* unsplittable */
{chunk1})),
Timestamp(5, 0));
std::set<ShardId> shardIds;
chunkManager.getShardIdsForRange(BSON("x" << MINKEY), BSON("x" << MAXKEY), &shardIds);
cm.getShardIdsForRange(BSON("x" << MINKEY), BSON("x" << MAXKEY), &shardIds);
ASSERT_EQ(2, shardIds.size());
const auto expCtx = make_intrusive<ExpressionContextForTest>();
shardIds.clear();
getShardIdsForQuery(expCtx, BSON("x" << BSON("$gt" << -20)), {}, chunkManager, &shardIds);
getShardIdsForQuery(expCtx, BSON("x" << BSON("$gt" << -20)), {}, cm, &shardIds);
ASSERT_EQ(2, shardIds.size());
}
@ -573,7 +573,7 @@ TEST_F(ChunkManagerQueryTest, TestKeyBelongsToShard) {
boost::none /* reshardingFields */,
true,
chunkVec);
ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), clusterTime);
PointInTimeChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), clusterTime);
auto chunkIt = chunks.begin();
while (chunkIt != chunks.end()) {

View File

@ -125,8 +125,8 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards,
boost::none /* reshardingFields */,
true,
chunks);
return CollectionMetadata(
ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), getShardId(0));
return CollectionMetadata(CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))),
getShardId(0));
}
ShardId pessimalShardSelector(int i, int nShards, int nChunks) {
@ -157,8 +157,8 @@ MONGO_COMPILER_NOINLINE auto runIncrementalUpdate(const CollectionMetadata& cm,
true /* allowMigration */,
false /* unsplittable */,
newChunks);
return CollectionMetadata(
ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), getShardId(0));
return CollectionMetadata(CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))),
getShardId(0));
}
/*
@ -315,8 +315,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
true,
chunks);
benchmark::DoNotOptimize(CollectionMetadata(
ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
getShardId(0)));
CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), getShardId(0)));
}
}

View File

@ -741,7 +741,7 @@ void checkLocalCatalogCollectionOptions(OperationContext* opCtx,
void checkShardingCatalogCollectionOptions(OperationContext* opCtx,
const NamespaceString& targetNss,
const ShardsvrCreateCollectionRequest& request,
const ChunkManager& cm) {
const CurrentChunkManager& cm) {
if (request.getRegisterExistingCollectionInGlobalCatalog()) {
// No need for checking the sharding catalog when tracking a collection for the first time
return;

View File

@ -85,7 +85,7 @@ void DropIndexesCoordinator::_dropIndexes(OperationContext* opCtx,
router.route(
"DropIndexesCoordinator::_dropIndexesPhase",
[&](OperationContext* opCtx, const CollectionRoutingInfo& cri) {
const auto chunkManager = cri.getChunkManager();
const auto chunkManager = cri.getCurrentChunkManager();
std::map<ShardId, ShardVersion> shardIdsToShardVersions;
if (chunkManager.hasRoutingTable()) {

View File

@ -880,8 +880,7 @@ TEST_F(MetadataConsistencyTest, ShardTrackedCollectionInconsistencyTest) {
std::make_shared<RoutingTableHistory>(std::move(rt)),
ComparableChunkVersion::makeComparableChunkVersion(version));
const auto collectionMetadata =
CollectionMetadata(ChunkManager(rtHandle, boost::none), _shardId);
const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), _shardId);
auto scopedCSR = CollectionShardingRuntime::acquireExclusive(opCtx, _nss);
scopedCSR->setFilteringMetadata(opCtx, collectionMetadata);

View File

@ -5516,7 +5516,8 @@ public:
getCatalogCacheMock()->setCollectionReturnValue(
NamespaceString::createNamespaceString_forTest(kDBName, "outColl"),
CollectionRoutingInfo{
ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)),
timestamp},
DatabaseTypeValueHandle(
DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName),
kMyShardName,
@ -5561,7 +5562,8 @@ TEST_F(PipelineOptimizationsShardMerger, MergeWithUntrackedCollection) {
getCatalogCacheMock()->setCollectionReturnValue(
NamespaceString::createNamespaceString_forTest(kDBName, "outColl"),
CollectionRoutingInfo{
ChunkManager{RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}}, timestamp},
PointInTimeChunkManager{RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}},
timestamp},
DatabaseTypeValueHandle(
DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName),
kMyShardName,
@ -5616,7 +5618,7 @@ TEST_F(PipelineOptimizationsShardMerger, LookUpUnsplittableFromCollection) {
getCatalogCacheMock()->setCollectionReturnValue(
fromCollNs,
CollectionRoutingInfo{
ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
DatabaseTypeValueHandle(
DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName),
kMyShardName,
@ -5653,7 +5655,7 @@ TEST_F(PipelineOptimizationsShardMerger, LookUpShardedFromCollection) {
getCatalogCacheMock()->setCollectionReturnValue(
fromCollNs,
CollectionRoutingInfo{
ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
DatabaseTypeValueHandle(
DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName),
kMyShardName,

View File

@ -181,8 +181,7 @@ void MultipleCollectionAccessorTest::installShardedCollectionMetadata(
RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(std::move(rt)),
ComparableChunkVersion::makeComparableChunkVersion(version));
const auto collectionMetadata =
CollectionMetadata(ChunkManager(rtHandle, boost::none), kMyShardName);
const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), kMyShardName);
auto coll = acquireCollection(
operationContext(),

View File

@ -177,8 +177,7 @@ boost::optional<TimeseriesTranslationParams> getTimeseriesTranslationParamsIfReq
return boost::none;
}
const ChunkManager& chunkManager = cri.getChunkManager();
const auto& timeseriesFields = chunkManager.getTimeseriesFields();
const auto& timeseriesFields = cri.getChunkManager().getTimeseriesFields();
tassert(10601101,
"Timeseries collections must have timeseries options",
timeseriesFields.has_value());

View File

@ -190,8 +190,8 @@ void CollectionRoutingInfoTargeterTest::testTargetInsertWithRangePrefixHashedSha
* with the distinction that it simply creates and returns a ChunkManager object
* and does not assign it to the Global Catalog Cache ChunkManager.
*/
ChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern,
const std::vector<BSONObj>& splitPoints) {
CurrentChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern,
const std::vector<BSONObj>& splitPoints) {
std::vector<ChunkType> chunks;
auto splitPointsIncludingEnds(splitPoints);
splitPointsIncludingEnds.insert(splitPointsIncludingEnds.begin(),
@ -228,9 +228,8 @@ ChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern,
true, // allowMigration
chunks);
return ChunkManager(RoutingTableHistoryValueHandle(
std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))),
boost::none);
return CurrentChunkManager(RoutingTableHistoryValueHandle(
std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))));
}
@ -711,7 +710,7 @@ public:
const auto cri = makeUnshardedCollectionRoutingInfo(kNss);
std::set<ShardId> shards;
cri.getChunkManager().getAllShardIds(&shards);
cri.getCurrentChunkManager().getAllShardIds(&shards);
ASSERT_EQ(1, shards.size());
owningShard = *shards.begin();

View File

@ -411,23 +411,26 @@ bool MultiCollectionRouter::isAnyCollectionNotLocal(
"Must be an entry in criMap for namespace " + nss.toStringForErrorMsg(),
nssCri != criMap.end());
const auto chunkManagerMaybeAtClusterTime = atClusterTime
? ChunkManager::makeAtTime(nssCri->second.getChunkManager(),
atClusterTime->asTimestamp())
: nssCri->second.getChunkManager();
bool isNssLocal = [&]() {
if (chunkManagerMaybeAtClusterTime.isSharded()) {
auto isNssLocalFunc = [&](const auto& cm) {
if (cm.isSharded()) {
return false;
} else if (chunkManagerMaybeAtClusterTime.isUnsplittable()) {
return chunkManagerMaybeAtClusterTime.getMinKeyShardIdWithSimpleCollation() ==
myShardId;
} else if (cm.isUnsplittable()) {
return cm.getMinKeyShardIdWithSimpleCollation() == myShardId;
} else {
// If collection is untracked, it is only local if this shard is the dbPrimary
// shard.
return nssCri->second.getDbPrimaryShardId() == myShardId;
}
}();
};
bool isNssLocal;
if (atClusterTime) {
auto pitChunkManager = PointInTimeChunkManager::make(nssCri->second.getChunkManager(),
atClusterTime->asTimestamp());
isNssLocal = isNssLocalFunc(pitChunkManager);
} else {
isNssLocal = isNssLocalFunc(nssCri->second.getChunkManager());
}
if (!isNssLocal) {
anyCollectionNotLocal = true;

View File

@ -34,6 +34,7 @@
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/curop.h"
#include "mongo/db/global_catalog/chunk_manager.h"
#include "mongo/db/global_catalog/sharding_catalog_client.h"
#include "mongo/db/global_catalog/type_database_gen.h"
#include "mongo/db/keypattern.h"
@ -168,7 +169,7 @@ const OperationContext::Decoration<bool> routerShouldRelaxCollectionUUIDConsiste
} // namespace
bool CollectionRoutingInfo::hasRoutingTable() const {
return _cm.hasRoutingTable();
return getChunkManager().hasRoutingTable();
}
const ShardId& CollectionRoutingInfo::getDbPrimaryShardId() const {
@ -180,7 +181,7 @@ const DatabaseVersion& CollectionRoutingInfo::getDbVersion() const {
}
ShardVersion CollectionRoutingInfo::getCollectionVersion() const {
ShardVersion sv = ShardVersionFactory::make(_cm);
auto sv = ShardVersionFactory::make(getChunkManager());
if (MONGO_unlikely(shouldIgnoreUuidMismatch)) {
sv.setIgnoreShardingCatalogUuidMismatch();
}
@ -188,7 +189,7 @@ ShardVersion CollectionRoutingInfo::getCollectionVersion() const {
}
ShardVersion CollectionRoutingInfo::getShardVersion(const ShardId& shardId) const {
auto sv = ShardVersionFactory::make(_cm, shardId);
auto sv = ShardVersionFactory::make(getChunkManager(), shardId);
if (MONGO_unlikely(shouldIgnoreUuidMismatch)) {
sv.setIgnoreShardingCatalogUuidMismatch();
}
@ -419,11 +420,8 @@ StatusWith<CachedDatabaseInfo> CatalogCache::_getDatabaseForCollectionRoutingInf
return swDbInfo;
}
StatusWith<ChunkManager> CatalogCache::_getCollectionPlacementInfoAt(
OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<Timestamp> atClusterTime,
bool allowLocks) {
StatusWith<RoutingTableHistoryValueHandle> CatalogCache::_getCollectionRoutingTable(
OperationContext* opCtx, const NamespaceString& nss, bool allowLocks) {
tassert(7032314,
"Do not hold a lock while refreshing the catalog cache. Doing so would potentially "
"hold the lock during a network call, and can lead to a deadlock as described in "
@ -440,7 +438,7 @@ StatusWith<ChunkManager> CatalogCache::_getCollectionPlacementInfoAt(
if (nss.isNamespaceAlwaysUntracked()) {
// If the collection is known to always be untracked, there is no need to request it to
// the CollectionCache.
return ChunkManager(OptionalRoutingTableHistory(), atClusterTime);
return OptionalRoutingTableHistory();
}
auto collEntryFuture =
@ -452,7 +450,7 @@ StatusWith<ChunkManager> CatalogCache::_getCollectionPlacementInfoAt(
// use it, otherwise return an error
if (collEntryFuture.isReady()) {
return ChunkManager(collEntryFuture.get(opCtx), atClusterTime);
return collEntryFuture.get(opCtx);
} else {
return Status{ShardCannotRefreshDueToLocksHeldInfo(nss),
"Routing info refresh did not complete"};
@ -468,7 +466,7 @@ StatusWith<ChunkManager> CatalogCache::_getCollectionPlacementInfoAt(
auto collEntry = collEntryFuture.get(opCtx);
_stats.totalRefreshWaitTimeMicros.addAndFetch(t.micros());
return ChunkManager(std::move(collEntry), atClusterTime);
return std::move(collEntry);
} catch (const DBException& ex) {
_stats.totalRefreshWaitTimeMicros.addAndFetch(t.micros());
bool isCatalogCacheRetriableError = ex.isA<ErrorCategory::SnapshotError>() ||
@ -509,13 +507,20 @@ StatusWith<CollectionRoutingInfo> CatalogCache::_getCollectionRoutingInfoAt(
return swDbInfo.getStatus();
}
auto swChunkManager = _getCollectionPlacementInfoAt(opCtx, nss, optAtClusterTime, allowLocks);
if (!swChunkManager.isOK()) {
return swChunkManager.getStatus();
auto swRoutingTable = _getCollectionRoutingTable(opCtx, nss, allowLocks);
if (!swRoutingTable.isOK()) {
return swRoutingTable.getStatus();
}
auto cri =
CollectionRoutingInfo{std::move(swChunkManager.getValue()), std::move(swDbInfo.getValue())};
auto cri = [&]() -> CollectionRoutingInfo {
if (optAtClusterTime) {
PointInTimeChunkManager chunkManager(swRoutingTable.getValue(), optAtClusterTime.get());
return CollectionRoutingInfo{std::move(chunkManager), std::move(swDbInfo.getValue())};
}
CurrentChunkManager chunkManager(swRoutingTable.getValue());
return CollectionRoutingInfo{std::move(chunkManager), std::move(swDbInfo.getValue())};
}();
if (MONGO_unlikely(routerShouldRelaxCollectionUUIDConsistencyCheck(opCtx))) {
cri.shouldIgnoreUuidMismatch = true;
}
@ -538,11 +543,18 @@ void CatalogCache::_triggerPlacementVersionRefresh(const NamespaceString& nss) {
nss, ComparableChunkVersion::makeComparableChunkVersionForForcedRefresh());
}
StatusWith<ChunkManager> CatalogCache::getCollectionPlacementInfoWithRefresh(
StatusWith<CurrentChunkManager> CatalogCache::getCollectionPlacementInfoWithRefresh(
OperationContext* opCtx, const NamespaceString& nss) {
try {
_triggerPlacementVersionRefresh(nss);
return _getCollectionPlacementInfoAt(opCtx, nss, boost::none /* atClusterTime */);
auto swRoutingTable = _getCollectionRoutingTable(opCtx, nss);
if (!swRoutingTable.isOK()) {
return swRoutingTable.getStatus();
}
return CurrentChunkManager(swRoutingTable.getValue());
} catch (const DBException& ex) {
return ex.toStatus();
}

View File

@ -73,9 +73,40 @@ using CachedDatabaseInfo MONGO_MOD_PUBLIC = DatabaseTypeValueHandle;
class MONGO_MOD_PUBLIC CollectionRoutingInfo {
public:
CollectionRoutingInfo(ChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo)
CollectionRoutingInfo(CurrentChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo)
: _dbInfo(std::move(dbInfo)), _cm(std::move(chunkManager)) {}
CollectionRoutingInfo(PointInTimeChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo)
: _dbInfo(std::move(dbInfo)), _cm(std::move(chunkManager)) {}
/**
* Variant type that can hold either a CurrentChunkManager or a PointInTimeChunkManager.
*
* This allows CollectionRoutingInfo to represent routing information in two different modes:
* - CurrentChunkManager: Represents the latest known routing state for a collection
* - PointInTimeChunkManager: Represents routing state at a specific point in time (cluster
* time)
*
* Usage guidelines:
* - Use std::visit() to access the held ChunkManager polymorphically when the operation works
* identically for both types
* - Use std::get_if<CurrentChunkManager>() when you need to conditionally access features only
* available on CurrentChunkManager (e.g., getNShardsOwningChunks())
* - Use std::holds_alternative<CurrentChunkManager>() to check which type is currently held
*
* Example:
* // Polymorphic access (works for both types):
* std::visit([](const auto& cm) { return cm.isSharded(); }, variant);
*
* // Type-specific access:
* if (auto* currentCm = std::get_if<CurrentChunkManager>(&variant)) {
* currentCm->getAllShardIds(&shards);
* }
*
* TODO SERVER-114825: Investigate if it's possible to implement without this variant.
*/
using ChunkManagerVariant = std::variant<CurrentChunkManager, PointInTimeChunkManager>;
/**
* Returns true if the collection is tracked in the global catalog.
*
@ -90,11 +121,103 @@ public:
* shards.
*/
bool isSharded() const {
return _cm.isSharded();
return getChunkManager().isSharded();
}
/**
* Returns a const reference to the ChunkManager held in this CollectionRoutingInfo.
*
* This method provides polymorphic access to common ChunkManager functionality that works
* identically for both CurrentChunkManager and PointInTimeChunkManager.
*
* Use this method when:
* - You need to call methods that are defined on the base ChunkManager class
* - The operation doesn't require type-specific behavior
* - You're working with shared functionality like:
* - isSharded()
* - isUnsplittable()
* - getShardKeyPattern()
* - getVersion()
* - forEachChunk() (respects point-in-time semantics automatically)
*
* Do NOT use this method when:
* - You need to call CurrentChunkManager-specific methods like getAllShardIds() or
* getNShardsOwningChunks() - use getCurrentChunkManager() instead
* - You need to know whether you have a current or point-in-time view - check the variant
* directly with std::holds_alternative<>()
*
* Example usage:
* const auto& cm = cri.getChunkManager();
* if (cm.isSharded()) {
* cm.forEachChunk([](const Chunk& chunk) { ... });
* }
* @return A const reference to the base ChunkManager interface
*/
const ChunkManager& getChunkManager() const {
return _cm;
return std::visit([](const auto& cm) -> const ChunkManager& { return cm; }, _cm);
}
/**
* Returns a const reference to the CurrentChunkManager held in this CollectionRoutingInfo.
*
* This method provides direct access to CurrentChunkManager-specific functionality that is
* NOT available when using a PointInTimeChunkManager.
*
* Use this method when:
* - You need to access the CURRENT (latest) state of the routing information
* - You need to call methods that are ONLY available on CurrentChunkManager:
* - getAllShardIds(): Gets the exact current set of shards owning chunks
* - getNShardsOwningChunks(): Gets the exact current count of shards
* - getNextChunkOnShard(): Finds the next chunk on a specific shard
* - You're performing operations that explicitly require non-point-in-time semantics:
* - Checking current cluster topology
* - Making routing decisions based on latest metadata
* - Administrative operations that need up-to-date information
*
* Do NOT use this method when:
* - You're working with point-in-time reads (e.g., transactions with atClusterTime)
* - You don't know whether the CollectionRoutingInfo contains a CurrentChunkManager or
* PointInTimeChunkManager - this will throw an exception if it's the wrong type
* - You only need common ChunkManager functionality - use getChunkManager() instead
*
* Common usage patterns:
*
* // Pattern 1: Direct access when you know it's current
* const auto& currentCm = cri.getCurrentChunkManager();
* currentCm.getAllShardIds(&allShards);
*
* // Pattern 2: Conditional access (safer)
* if (std::holds_alternative<CurrentChunkManager>(cri._cm)) {
* const auto& currentCm = cri.getCurrentChunkManager();
* size_t nShards = currentCm.getNShardsOwningChunks();
* }
*
* // Pattern 3: Using std::visit for type-specific behavior
* std::visit(OverloadedVisitor{
* [](const CurrentChunkManager& cm) {
* // Can call getCurrentChunkManager-only methods
* cm.getAllShardIds(&shards);
* },
* [](const PointInTimeChunkManager& cm) {
* // Different behavior for point-in-time
* }
* }, cri._cm);
*
* When this is created:
* - getCollectionRoutingInfo() without atClusterTime returns CurrentChunkManager
* - getCollectionRoutingInfoAt() with atClusterTime returns PointInTimeChunkManager
* - getCollectionPlacementInfoWithRefresh() returns CurrentChunkManager
*
* @throws TAssertionException if this CollectionRoutingInfo contains a PointInTimeChunkManager
* instead of a CurrentChunkManager
*
* @return A const reference to the CurrentChunkManager
*/
const CurrentChunkManager& getCurrentChunkManager() const {
tassert(10271001,
"Expected current ChunkManager but have PointInTimeChunkManager",
std::holds_alternative<CurrentChunkManager>(_cm));
return std::get<CurrentChunkManager>(_cm);
}
ShardVersion getCollectionVersion() const;
@ -113,7 +236,7 @@ public:
private:
CachedDatabaseInfo _dbInfo;
ChunkManager _cm;
ChunkManagerVariant _cm;
};
/**
@ -272,7 +395,7 @@ public:
/**
* Blocking method to retrieve refreshed collection placement information (ChunkManager).
*/
virtual StatusWith<ChunkManager> getCollectionPlacementInfoWithRefresh(
virtual StatusWith<CurrentChunkManager> getCollectionPlacementInfoWithRefresh(
OperationContext* opCtx, const NamespaceString& nss);
/**
@ -433,10 +556,8 @@ private:
boost::optional<Timestamp> optAtClusterTime,
bool allowLocks = false);
StatusWith<ChunkManager> _getCollectionPlacementInfoAt(OperationContext* opCtx,
const NamespaceString& nss,
boost::optional<Timestamp> atClusterTime,
bool allowLocks = false);
StatusWith<RoutingTableHistoryValueHandle> _getCollectionRoutingTable(
OperationContext* opCtx, const NamespaceString& nss, bool allowLocks = false);
void _triggerPlacementVersionRefresh(const NamespaceString& nss);

View File

@ -30,6 +30,7 @@
#include "mongo/db/router_role/routing_cache/catalog_cache_mock.h"
#include "mongo/base/error_codes.h"
#include "mongo/db/global_catalog/chunk_manager.h"
#include "mongo/db/router_role/routing_cache/config_server_catalog_cache_loader_mock.h"
#include "mongo/db/service_context.h"
#include "mongo/db/sharding_environment/sharding_test_fixture_common.h"
@ -70,11 +71,11 @@ StatusWith<CollectionRoutingInfo> CatalogCacheMock::getCollectionRoutingInfo(
nss.toStringForErrorMsg()));
}
}
StatusWith<ChunkManager> CatalogCacheMock::getCollectionPlacementInfoWithRefresh(
StatusWith<CurrentChunkManager> CatalogCacheMock::getCollectionPlacementInfoWithRefresh(
OperationContext* opCtx, const NamespaceString& nss) {
const auto it = _collectionCache.find(nss);
if (it != _collectionCache.end()) {
return it->second.getChunkManager();
return it->second.getCurrentChunkManager();
} else {
return Status(
ErrorCodes::InternalError,
@ -107,7 +108,7 @@ std::unique_ptr<CatalogCacheMock> CatalogCacheMock::make() {
CollectionRoutingInfo CatalogCacheMock::makeCollectionRoutingInfoUntracked(
const NamespaceString& nss, const ShardId& dbPrimaryShard, DatabaseVersion dbVersion) {
ChunkManager cm(OptionalRoutingTableHistory(), boost::none);
CurrentChunkManager cm(OptionalRoutingTableHistory{});
return CollectionRoutingInfo(
std::move(cm),
DatabaseTypeValueHandle(DatabaseType{nss.dbName(), dbPrimaryShard, dbVersion}));
@ -197,8 +198,8 @@ CollectionRoutingInfo CatalogCacheMock::_makeCollectionRoutingInfoTracked(
true /*allowMigrations*/,
chunkTypes);
ChunkManager cm(ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rth)),
boost::none /*clusterTime*/);
CurrentChunkManager cm(
ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rth)));
return CollectionRoutingInfo(
std::move(cm),
DatabaseTypeValueHandle(DatabaseType{nss.dbName(), dbPrimaryShard, dbVersion}));

View File

@ -62,7 +62,7 @@ public:
const NamespaceString& nss,
bool allowLocks) override;
StatusWith<ChunkManager> getCollectionPlacementInfoWithRefresh(
StatusWith<CurrentChunkManager> getCollectionPlacementInfoWithRefresh(
OperationContext* opCtx, const NamespaceString& nss) override;
void setDatabaseReturnValue(const DatabaseName& dbName, CachedDatabaseInfo databaseInfo);

View File

@ -71,7 +71,7 @@ ShardingWriteRouter::ShardingWriteRouter(OperationContext* opCtx, const Namespac
tassert(6862800,
"Routing information for the temporary resharding collection is stale",
cri.hasRoutingTable());
_reshardingChunkMgr = cri.getChunkManager();
_reshardingChunkMgr = cri.getCurrentChunkManager();
}
}
}

View File

@ -224,7 +224,7 @@ protected:
ComparableChunkVersion::makeComparableChunkVersion(version));
}
std::pair<std::vector<mongo::ChunkType>, mongo::ChunkManager> createChunks(
std::pair<std::vector<mongo::ChunkType>, mongo::CurrentChunkManager> createChunks(
size_t nShards, uint32_t nChunks, std::vector<ShardId> shards) {
invariant(shards.size() == nShards);
@ -255,20 +255,19 @@ protected:
reshardingFields.setDonorFields(
TypeCollectionDonorFields{tempNss, reshardKeyPattern, shards});
ChunkManager cm(makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(kNss,
collIdentifier,
shardKeyPattern,
false, /* unsplittable */
nullptr,
false,
collEpoch,
collTimestamp,
boost::none /* timeseriesFields */,
reshardingFields, /* reshardingFields */
true,
chunks)),
boost::none);
CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(kNss,
collIdentifier,
shardKeyPattern,
false, /* unsplittable */
nullptr,
false,
collEpoch,
collTimestamp,
boost::none /* timeseriesFields */,
reshardingFields, /* reshardingFields */
true,
chunks)));
return std::make_pair(chunks, cm);
}

View File

@ -193,9 +193,8 @@ CollectionRoutingInfoTargeter makeCollectionRoutingInfoTargeter(
true /* allowMigrations */,
chunks);
auto cm = ChunkManager(RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(
std::move(routingTableHistory))),
boost::none);
CurrentChunkManager cm(RoutingTableHistoryValueHandle(
std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))));
auto routingCtx = RoutingContext::createSynthetic(
{{nss,

View File

@ -152,9 +152,8 @@ protected:
true /* allowMigrations */,
chunks);
auto cm = ChunkManager(RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(
std::move(routingTableHistory))),
boost::none);
CurrentChunkManager cm(RoutingTableHistoryValueHandle(
std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))));
auto routingCtx = RoutingContext::createSynthetic(
{{nss,
CollectionRoutingInfo{std::move(cm),

View File

@ -87,9 +87,8 @@ public:
true, // allowMigration
chunks);
return ChunkManager(RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(
std::move(routingTableHistory))),
boost::none);
return CurrentChunkManager(RoutingTableHistoryValueHandle(
std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))));
}
private:

View File

@ -129,9 +129,10 @@ RoutingTableHistory makeRoutingTable(const std::vector<ChunkType>& chunks) {
chunks);
}
ChunkManager makeChunkManager(const std::vector<ChunkType>& chunks) {
CurrentChunkManager makeChunkManager(const std::vector<ChunkType>& chunks) {
auto rt = std::make_shared<RoutingTableHistory>(makeRoutingTable(chunks));
return {{std::move(rt)}, boost::none /* atClusterTime */};
return CurrentChunkManager(
RoutingTableHistoryValueHandle(OptionalRoutingTableHistory(std::move(rt))));
}
DistributionStatus makeDistStatus(const ChunkManager& cm, ZoneInfo zoneInfo = ZoneInfo()) {

View File

@ -72,7 +72,7 @@ void MigrationChunkClonerSourceOpObserver::assertIntersectingChunkHasNotMoved(
const LogicalTime& atClusterTime) {
// We can assume the simple collation because shard keys do not support non-simple collations.
auto cmAtTimeOfWrite =
ChunkManager::makeAtTime(*metadata.getChunkManager(), atClusterTime.asTimestamp());
PointInTimeChunkManager::make(*metadata.getChunkManager(), atClusterTime.asTimestamp());
auto chunk = cmAtTimeOfWrite.findIntersectingChunkWithSimpleCollation(shardKey);
// Throws if the chunk has moved since the timestamp of the running transaction's atClusterTime

View File

@ -730,7 +730,7 @@ protected:
->setFilteringMetadata(
operationContext(),
CollectionMetadata(
ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))),
ShardId("dummyShardId")));
}();

View File

@ -129,21 +129,19 @@ void RangeDeleterServiceTest::_setFilteringMetadataByUUID(OperationContext* opCt
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
ShardId("this"));
ChunkManager cm(makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(nss,
uuid,
kShardKeyPattern,
false, /* unsplittable */
nullptr,
false,
epoch,
Timestamp(1, 1),
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
true,
{std::move(chunk)})),
boost::none);
CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(nss,
uuid,
kShardKeyPattern,
false, /* unsplittable */
nullptr,
false,
epoch,
Timestamp(1, 1),
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
true,
{std::move(chunk)})));
return CollectionMetadata(std::move(cm), ShardId("this"));
}();

View File

@ -134,7 +134,7 @@ public:
ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
ShardId("dummyShardId")}});
ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
CurrentChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)));
AutoGetDb autoDb(_opCtx, kNss.dbName(), MODE_IX);
Lock::CollectionLock collLock(_opCtx, kNss, MODE_IX);
CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(_opCtx, kNss)

View File

@ -226,7 +226,7 @@ protected:
ShardServerTestFixtureWithCatalogCacheMock::tearDown();
}
ChunkManager createChunkManager(
CurrentChunkManager createChunkManager(
const ShardKeyPattern& shardKeyPattern,
std::deque<DocumentSource::GetNextResult> configCacheChunksData) {
const OID epoch = OID::gen();
@ -255,7 +255,7 @@ protected:
false,
chunks);
return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
}
/**

View File

@ -128,8 +128,7 @@ public:
true /* allowMigrations */,
chunks);
return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
boost::none /* clusterTime */);
return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
}
DonorShardFetchTimestamp makeDonorShardFetchTimestamp(ShardId shardId,

View File

@ -165,20 +165,19 @@ protected:
auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
auto chunk = ChunkType(
uuid, range, ChunkVersion({epoch, timestamp}, {1, 0}), shardThatChunkExistsOn);
ChunkManager cm(makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(nss,
uuid,
shardKeyPattern,
false, /* unsplittable */
nullptr,
false,
epoch,
timestamp,
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
true,
{std::move(chunk)})),
boost::none);
CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(nss,
uuid,
shardKeyPattern,
false, /* unsplittable */
nullptr,
false,
epoch,
timestamp,
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
true,
{std::move(chunk)})));
auto dbVersion = DatabaseVersion(uuid, timestamp);
getCatalogCacheMock()->setDatabaseReturnValue(
nss.dbName(),

View File

@ -310,7 +310,7 @@ public:
false,
chunks);
return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
}
void loadCatalogCacheValues() {

View File

@ -520,8 +520,7 @@ private:
true /* allowMigrations */,
chunks);
return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
boost::none /* clusterTime */);
return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
}
RoutingTableHistoryValueHandle makeStandaloneRoutingTableHistory(RoutingTableHistory rt) {

View File

@ -333,11 +333,11 @@ public:
}
private:
ChunkManager makeChunkManager(const OID& epoch,
const NamespaceString& nss,
const UUID& uuid,
const BSONObj& shardKey,
const std::vector<ChunkType>& chunks) {
CurrentChunkManager makeChunkManager(const OID& epoch,
const NamespaceString& nss,
const UUID& uuid,
const BSONObj& shardKey,
const std::vector<ChunkType>& chunks) {
auto rt = RoutingTableHistory::makeNew(nss,
uuid,
shardKey,
@ -350,11 +350,10 @@ private:
boost::none /* reshardingFields */,
true /* allowMigrations */,
chunks);
return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
boost::none /* clusterTime */);
return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
}
ChunkManager makeChunkManagerForSourceCollection() {
CurrentChunkManager makeChunkManagerForSourceCollection() {
// Create three chunks, two that are owned by this donor shard and one owned by some other
// shard. The chunk for {sk: null} is owned by this donor shard to allow test cases to omit
// the shard key field when it isn't relevant.
@ -380,7 +379,7 @@ private:
epoch, _sourceNss, _sourceUUID, BSON(_currentShardKey << 1), chunks);
}
ChunkManager makeChunkManagerForOutputCollection() {
CurrentChunkManager makeChunkManagerForOutputCollection() {
const OID epoch = OID::gen();
const UUID outputUuid = UUID::gen();
std::vector<ChunkType> chunks = {

View File

@ -227,9 +227,8 @@ public:
true /* allowMigrations */,
chunks);
return CollectionRoutingInfo{
ChunkManager(
ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
boost::none /* clusterTime */),
CurrentChunkManager(
ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt))),
DatabaseTypeValueHandle(DatabaseType{
nss.dbName(), _someDonorId, DatabaseVersion(UUID::gen(), Timestamp(1, 1))})};
}

View File

@ -57,7 +57,8 @@ PostReshardingCollectionPlacement::PostReshardingCollectionPlacement(
tassert(11178203,
"Routing information for the temporary resharding collection is stale",
tmpNssRoutingInfoWithStatus.getValue().hasRoutingTable());
_tmpReshardingCollectionChunkManager = tmpNssRoutingInfoWithStatus.getValue().getChunkManager();
_tmpReshardingCollectionChunkManager =
tmpNssRoutingInfoWithStatus.getValue().getCurrentChunkManager();
}
const ShardId& PostReshardingCollectionPlacement::getReshardingDestinedRecipient(

View File

@ -55,11 +55,14 @@
namespace mongo {
CollectionMetadata::CollectionMetadata(ChunkManager cm, const ShardId& thisShardId)
CollectionMetadata::CollectionMetadata(CurrentChunkManager cm, const ShardId& thisShardId)
: _cm(std::move(cm)), _thisShardId(thisShardId) {}
CollectionMetadata::CollectionMetadata(PointInTimeChunkManager cm, const ShardId& thisShardId)
: _cm(std::move(cm)), _thisShardId(thisShardId) {}
bool CollectionMetadata::allowMigrations() const {
return _cm ? _cm->allowMigrations() : true;
return _cm ? getChunkManagerBase().allowMigrations() : true;
}
boost::optional<ShardKeyPattern> CollectionMetadata::getReshardingKeyIfShouldForwardOps() const {
@ -139,12 +142,14 @@ BSONObj CollectionMetadata::extractDocumentKey(const ShardKeyPattern* shardKeyPa
}
BSONObj CollectionMetadata::extractDocumentKey(const BSONObj& doc) const {
return extractDocumentKey(isSharded() ? &_cm->getShardKeyPattern() : nullptr, doc);
return extractDocumentKey(isSharded() ? &getChunkManagerBase().getShardKeyPattern() : nullptr,
doc);
}
std::string CollectionMetadata::toStringBasic() const {
if (hasRoutingTable()) {
return str::stream() << "collection placement version: " << _cm->getVersion().toString()
return str::stream() << "collection placement version: "
<< getChunkManagerBase().getVersion().toString()
<< ", shard placement version: "
<< getShardPlacementVersionForLogging().toString();
} else {
@ -157,7 +162,7 @@ RangeMap CollectionMetadata::getChunks() const {
RangeMap chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>());
_cm->forEachChunk([this, &chunksMap](const auto& chunk) {
getChunkManagerBase().forEachChunk([this, &chunksMap](const auto& chunk) {
if (chunk.getShardId() == _thisShardId)
chunksMap.emplace_hint(chunksMap.end(), chunk.getMin(), chunk.getMax());
@ -170,7 +175,7 @@ RangeMap CollectionMetadata::getChunks() const {
bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk) const {
tassert(10016203, "Expected a routing table to be initialized", hasRoutingTable());
auto nextChunk = _cm->getNextChunkOnShard(lookupKey, _thisShardId);
auto nextChunk = getCurrentChunkManager()->getNextChunkOnShard(lookupKey, _thisShardId);
if (!nextChunk)
return false;
@ -182,7 +187,7 @@ bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk
bool CollectionMetadata::currentShardHasAnyChunks() const {
tassert(10016204, "Expected a routing table to be initialized", hasRoutingTable());
std::set<ShardId> shards;
_cm->getAllShardIds(&shards);
getCurrentChunkManager()->getAllShardIds(&shards);
return shards.find(_thisShardId) != shards.end();
}
@ -259,7 +264,7 @@ void CollectionMetadata::toBSONChunks(BSONArrayBuilder* builder) const {
if (!hasRoutingTable())
return;
_cm->forEachChunk([this, &builder](const auto& chunk) {
getChunkManagerBase().forEachChunk([this, &builder](const auto& chunk) {
if (chunk.getShardId() == _thisShardId) {
BSONArrayBuilder chunkBB(builder->subarrayStart());
chunkBB.append(chunk.getMin());

View File

@ -81,7 +81,9 @@ public:
* "thisShardId" is the shard identity of this shard for purposes of answering questions like
* "does this key belong to this shard"?
*/
CollectionMetadata(ChunkManager cm, const ShardId& thisShardId);
CollectionMetadata(CurrentChunkManager cm, const ShardId& thisShardId);
CollectionMetadata(PointInTimeChunkManager cm, const ShardId& thisShardId);
/**
* Returns a CollectionMetadata object for an untracked collection.
@ -94,18 +96,18 @@ public:
* Returns whether this metadata object represents a sharded or unsharded collection.
*/
bool isSharded() const {
return _cm && _cm->isSharded();
return _cm && getChunkManagerBase().isSharded();
}
/**
* Returns whether this metadata object represents an unsplittable collection.
*/
bool isUnsplittable() const {
return _cm && _cm->isUnsplittable();
return _cm && getChunkManagerBase().isUnsplittable();
}
bool hasRoutingTable() const {
return _cm && _cm->hasRoutingTable();
return _cm && getChunkManagerBase().hasRoutingTable();
}
bool allowMigrations() const;
@ -126,7 +128,8 @@ public:
* have a routing table.
*/
ChunkVersion getShardPlacementVersion() const {
return (hasRoutingTable() ? _cm->getVersion(_thisShardId) : ChunkVersion::UNTRACKED());
return (hasRoutingTable() ? getChunkManagerBase().getVersion(_thisShardId)
: ChunkVersion::UNTRACKED());
}
/**
@ -136,7 +139,8 @@ public:
* timestamp".
*/
Timestamp getShardMaxValidAfter() const {
return (hasRoutingTable() ? _cm->getMaxValidAfter(_thisShardId) : Timestamp(0, 0));
return (hasRoutingTable() ? getChunkManagerBase().getMaxValidAfter(_thisShardId)
: Timestamp(0, 0));
}
/**
@ -149,7 +153,7 @@ public:
* instead.
*/
ChunkVersion getShardPlacementVersionForLogging() const {
return (hasRoutingTable() ? _cm->getVersionForLogging(_thisShardId)
return (hasRoutingTable() ? getChunkManagerBase().getVersionForLogging(_thisShardId)
: ChunkVersion::UNTRACKED());
}
@ -158,7 +162,7 @@ public:
* table.
*/
ChunkVersion getCollPlacementVersion() const {
return (hasRoutingTable() ? _cm->getVersion() : ChunkVersion::UNTRACKED());
return (hasRoutingTable() ? getChunkManagerBase().getVersion() : ChunkVersion::UNTRACKED());
}
/**
@ -171,7 +175,7 @@ public:
const ShardKeyPattern& getShardKeyPattern() const {
tassert(10016206, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->getShardKeyPattern();
return getChunkManagerBase().getShardKeyPattern();
}
/**
@ -199,12 +203,12 @@ public:
bool uuidMatches(UUID uuid) const {
tassert(10016215, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->uuidMatches(uuid);
return getChunkManagerBase().uuidMatches(uuid);
}
const UUID& getUUID() const {
tassert(10016216, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->getUUID();
return getChunkManagerBase().getUUID();
}
/**
@ -230,7 +234,32 @@ public:
const ChunkManager* getChunkManager() const {
tassert(10016207, "Expected a routing table to be initialized", hasRoutingTable());
return _cm.get_ptr();
return std::visit([](const auto& cm) -> const ChunkManager* { return &cm; }, *_cm);
}
/**
* Returns a pointer to ChunkManager if that's what's stored, nullptr otherwise.
* Use this only when you specifically need the current (non-point-in-time) ChunkManager.
*/
const CurrentChunkManager* getCurrentChunkManager() const {
tassert(9014101, "Expected a routing table to be initialized", hasRoutingTable());
return std::get_if<CurrentChunkManager>(&*_cm);
}
/**
* Returns a pointer to PointInTimeChunkManager if that's what's stored, nullptr otherwise.
* Use this only when you specifically need the point-in-time ChunkManager.
*/
const PointInTimeChunkManager* getPointInTimeChunkManager() const {
tassert(9014102, "Expected a routing table to be initialized", hasRoutingTable());
return std::get_if<PointInTimeChunkManager>(&*_cm);
}
/**
* Returns true if this metadata holds a point-in-time chunk manager.
*/
bool isAtPointInTime() const {
return _cm && std::holds_alternative<PointInTimeChunkManager>(*_cm);
}
/**
@ -239,7 +268,7 @@ public:
*/
bool keyBelongsToMe(const BSONObj& key) const {
tassert(10016208, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->keyBelongsToShard(key, _thisShardId);
return getChunkManagerBase().keyBelongsToShard(key, _thisShardId);
}
/**
@ -250,7 +279,7 @@ public:
ChunkManager::ChunkOwnership nearestOwnedChunk(const BSONObj& key,
ChunkMap::Direction direction) const {
tassert(9526301, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->nearestOwnedChunk(key, _thisShardId, direction);
return getChunkManagerBase().nearestOwnedChunk(key, _thisShardId, direction);
}
/**
@ -266,7 +295,7 @@ public:
*/
bool rangeOverlapsChunk(const ChunkRange& range) const {
tassert(10016209, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->rangeOverlapsShard(range, _thisShardId);
return getChunkManagerBase().rangeOverlapsShard(range, _thisShardId);
}
/**
@ -307,22 +336,34 @@ public:
const boost::optional<TypeCollectionReshardingFields>& getReshardingFields() const {
tassert(10016210, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->getReshardingFields();
return getChunkManagerBase().getReshardingFields();
}
const boost::optional<TypeCollectionTimeseriesFields>& getTimeseriesFields() const {
tassert(10016211, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->getTimeseriesFields();
return getChunkManagerBase().getTimeseriesFields();
}
bool isUniqueShardKey() const {
tassert(10016212, "Expected a routing table to be initialized", hasRoutingTable());
return _cm->isUnique();
return getChunkManagerBase().isUnique();
}
private:
/**
* Helper to access the ChunkManager interface regardless of whether _cm holds
* a ChunkManager or PointInTimeChunkManager.
*/
const ChunkManager& getChunkManagerBase() const {
tassert(9014100, "Expected _cm to be initialized", _cm.has_value());
return std::visit([](const auto& cm) -> const ChunkManager& { return cm; }, *_cm);
}
// Type alias for the variant holding either ChunkManager or PointInTimeChunkManager
using ChunkManagerVariant = std::variant<CurrentChunkManager, PointInTimeChunkManager>;
// The full routing table for the collection or boost::none if the collection is not tracked
boost::optional<ChunkManager> _cm;
boost::optional<ChunkManagerVariant> _cm;
// The identity of this shard, for the purpose of answering "key belongs to me" queries. If the
// collection is not tracked (_cm is boost::none), then this value will be empty.

View File

@ -148,7 +148,7 @@ protected:
return std::vector<ChunkType>{chunk1, chunk2, chunk3, chunk4};
}());
ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
CurrentChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)));
ASSERT_EQ(4, cm.numChunks());
{

View File

@ -103,22 +103,27 @@ CollectionMetadata makeTrackedCollectionMetadataImpl(
chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), chunk.getShard())});
}
return CollectionMetadata(
ChunkManager(ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(kNss,
uuid,
shardKeyPattern,
false, /* unsplittable */
nullptr,
false,
epoch,
timestamp,
boost::none /* timeseriesFields */,
std::move(reshardingFields),
true,
allChunks)),
kChunkManager),
kThisShard);
auto routingTableHistory = ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(kNss,
uuid,
shardKeyPattern,
false, /* unsplittable */
nullptr,
false,
epoch,
timestamp,
boost::none /* timeseriesFields */,
std::move(reshardingFields),
true,
allChunks));
if (kChunkManager) {
return CollectionMetadata(
PointInTimeChunkManager(std::move(routingTableHistory), kChunkManager.get()),
kThisShard);
} else {
return CollectionMetadata(CurrentChunkManager(std::move(routingTableHistory)), kThisShard);
}
}

View File

@ -116,21 +116,20 @@ public:
auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
auto chunk = ChunkType(
uuid, std::move(range), ChunkVersion({epoch, timestamp}, {1, 0}), chunkShardId);
ChunkManager cm(makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(kTestNss,
uuid,
kShardKeyPattern,
false, /* unsplittable */
nullptr,
false,
epoch,
timestamp,
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
RoutingTableHistory::makeNew(kTestNss,
uuid,
kShardKeyPattern,
false, /* unsplittable */
nullptr,
false,
epoch,
timestamp,
boost::none /* timeseriesFields */,
boost::none /* reshardingFields */,
true,
{std::move(chunk)})),
boost::none);
true,
{std::move(chunk)})));
return CollectionMetadata(std::move(cm), collectionShardId);
}

View File

@ -151,9 +151,10 @@ std::shared_ptr<ScopedCollectionDescription::Impl> MetadataManager::getActiveMet
}
}
return std::make_shared<MetadataAtTimestamp>(CollectionMetadata(
ChunkManager::makeAtTime(*activeMetadata->getChunkManager(), atClusterTime->asTimestamp()),
activeMetadata->shardId()));
return std::make_shared<MetadataAtTimestamp>(
CollectionMetadata(PointInTimeChunkManager::make(*activeMetadata->getChunkManager(),
atClusterTime->asTimestamp()),
activeMetadata->shardId()));
}
boost::optional<UUID> MetadataManager::getCollectionUuid() const {

View File

@ -109,8 +109,7 @@ protected:
{ChunkType{uuid, range, ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}), kOtherShard}});
return CollectionMetadata(
ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
kThisShard);
CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
}
/**
@ -163,8 +162,7 @@ protected:
splitChunks);
return CollectionMetadata(
ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
kThisShard);
CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
}
static CollectionMetadata cloneMetadataMinusChunk(const CollectionMetadata& metadata,
@ -190,8 +188,7 @@ protected:
{ChunkType(metadata.getUUID(), ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)});
return CollectionMetadata(
ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
kThisShard);
CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
}
std::shared_ptr<MetadataManager> _manager;

View File

@ -141,7 +141,8 @@ protected:
{std::move(chunk)});
return CollectionMetadata(
ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), Timestamp(100, 0)),
PointInTimeChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
Timestamp(100, 0)),
ShardId("this"));
}

View File

@ -234,8 +234,7 @@ void ShardRoleTest::installShardedCollectionMetadata(
RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(std::move(rt)),
ComparableChunkVersion::makeComparableChunkVersion(version));
const auto collectionMetadata =
CollectionMetadata(ChunkManager(rtHandle, boost::none), kMyShardName);
const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), kMyShardName);
AutoGetCollection coll(opCtx, nss, MODE_IX);
CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss)

View File

@ -98,7 +98,7 @@ public:
std::set<ShardId> candidateShardIds;
if (cri.hasRoutingTable()) {
cri.getChunkManager().getAllShardIds(&candidateShardIds);
cri.getCurrentChunkManager().getAllShardIds(&candidateShardIds);
} else {
candidateShardIds.insert(primaryShardId);
}

View File

@ -160,7 +160,7 @@ public:
});
}
ChunkManager createChunkManager(const UUID& uuid, const NamespaceString& nss) {
CurrentChunkManager createChunkManager(const UUID& uuid, const NamespaceString& nss) {
ShardKeyPattern sk{fromjson("{x: 1, _id: 1}")};
std::deque<DocumentSource::GetNextResult> configData{
Document(fromjson("{_id: {x: {$minKey: 1}, _id: {$minKey: 1}}, max: {x: 0.0, _id: "
@ -193,9 +193,8 @@ public:
false,
chunks);
return ChunkManager(
ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
boost::none);
return CurrentChunkManager(
ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)));
}
protected:

View File

@ -587,7 +587,12 @@ void getShardIdsAndChunksForCanonicalQuery(const CanonicalQuery& query,
// entries if a shard no longer owns chunks when it used to at _clusterTime. Similarly,
// this optimization does not apply when it's necessary to fill chunkRanges, as the last
// chunks can be lost.
if (!cm.isAtPointInTime() && shardIds->size() == cm.getNShardsOwningChunks() && !info) {
//
// Uses getAproxNShardsOwningChunks() as getNShardsOwningChunks() is only available on
// CurrentChunkManager, but both currently share the same implementation.
// TODO SERVER-114823 Review the usage of getAproxNShardsOwningChunks here.
if (!cm.isAtPointInTime() && shardIds->size() == cm.getAproxNShardsOwningChunks() &&
!info) {
break;
}
}

View File

@ -110,7 +110,7 @@ struct WriteOpAnalyzerTestImpl : public ShardingTestFixture {
const NamespaceString kUnsplittableNss =
NamespaceString::createNamespaceString_forTest("test", "unsplittable");
ChunkManager createChunkManager(
CurrentChunkManager createChunkManager(
const UUID& uuid,
const NamespaceString& nss,
boost::optional<TypeCollectionTimeseriesFields> timeseriesFields = boost::none,
@ -152,9 +152,8 @@ struct WriteOpAnalyzerTestImpl : public ShardingTestFixture {
false,
chunks);
return ChunkManager(
ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
boost::none);
return CurrentChunkManager(
ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)));
}
std::unique_ptr<RoutingContext> createRoutingContextSharded(