mirror of https://github.com/mongodb/mongo
SERVER-90141 Improve ChunkManager API to prevent unsafe method calls on point-in-time instances (#44238)
GitOrigin-RevId: 256d62978cc39e7836d01653e459d88f8ad48eaf
parent 0835ee99cf
commit e62f34cd30
@@ -176,8 +176,7 @@ void installShardedCollectionMetadata(OperationContext* opCtx,
         RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(std::move(rt)),
                                        ComparableChunkVersion::makeComparableChunkVersion(version));

-    const auto collectionMetadata =
-        CollectionMetadata(ChunkManager(rtHandle, boost::none), thisShardId);
+    const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), thisShardId);

     AutoGetCollection coll(opCtx, nss, MODE_IX);
     CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss)
@@ -485,17 +485,26 @@ bool AggExState::canReadUnderlyingCollectionLocally(const CollectionRoutingInfo&
     const auto myShardId = ShardingState::get(_opCtx)->shardId();
     const auto atClusterTime = repl::ReadConcernArgs::get(_opCtx).getArgsAtClusterTime();

-    const auto chunkManagerMaybeAtClusterTime = atClusterTime
-        ? ChunkManager::makeAtTime(cri.getChunkManager(), atClusterTime->asTimestamp())
-        : cri.getChunkManager();
-
-    if (chunkManagerMaybeAtClusterTime.isSharded()) {
-        return false;
-    } else if (chunkManagerMaybeAtClusterTime.isUnsplittable()) {
-        return chunkManagerMaybeAtClusterTime.getMinKeyShardIdWithSimpleCollation() == myShardId;
-    } else {
-        return cri.getDbPrimaryShardId() == myShardId;
-    }
+    auto isNssLocalFunc = [&](const auto& cm) {
+        if (cm.isSharded()) {
+            return false;
+        } else if (cm.isUnsplittable()) {
+            return cm.getMinKeyShardIdWithSimpleCollation() == myShardId;
+        } else {
+            return cri.getDbPrimaryShardId() == myShardId;
+        }
+    };
+
+    bool isNssLocal;
+    if (atClusterTime) {
+        auto pitChunkManager =
+            PointInTimeChunkManager::make(cri.getChunkManager(), atClusterTime->asTimestamp());
+        isNssLocal = isNssLocalFunc(pitChunkManager);
+    } else {
+        isNssLocal = isNssLocalFunc(cri.getChunkManager());
+    }
+
+    return isNssLocal;
 }

 Status AggExState::collatorCompatibleWithPipeline(const CollatorInterface* collator) const {
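The rewritten block above funnels both routing views through a single generic lambda instead of materializing a `chunkManagerMaybeAtClusterTime` value. A minimal sketch of that dispatch pattern follows; `isLocalOnThisShard` and `isLocal` are illustrative names, and the ShardingState/read-concern plumbing of the real function is elided.

    auto isLocalOnThisShard = [&](const auto& cm) {
        // Works for CurrentChunkManager, PointInTimeChunkManager, or the base ChunkManager,
        // because only methods from the shared base interface are used.
        if (cm.isSharded()) {
            return false;
        } else if (cm.isUnsplittable()) {
            return cm.getMinKeyShardIdWithSimpleCollation() == myShardId;
        }
        return cri.getDbPrimaryShardId() == myShardId;
    };

    bool isLocal = atClusterTime
        ? isLocalOnThisShard(
              PointInTimeChunkManager::make(cri.getChunkManager(), atClusterTime->asTimestamp()))
        : isLocalOnThisShard(cri.getChunkManager());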
@@ -79,7 +79,7 @@ protected:

         CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss)
             ->setFilteringMetadata(opCtx, CollectionMetadata::UNTRACKED());
-        auto cm = ChunkManager(RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}},
+        PointInTimeChunkManager cm(RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}},
                                    _dbVersion.getTimestamp());
         getCatalogCacheMock()->setCollectionReturnValue(
             nss,
@@ -132,7 +132,7 @@ protected:
             std::make_shared<RoutingTableHistory>(std::move(rt)),
             ComparableChunkVersion::makeComparableChunkVersion(version));

-        auto cm = ChunkManager(rtHandle, boost::none);
+        CurrentChunkManager cm(rtHandle);
         const auto collectionMetadata = CollectionMetadata(cm, shardName);

         AutoGetCollection coll(opCtx, NamespaceStringOrUUID(nss), MODE_IX);
@@ -146,7 +146,7 @@ CollectionMetadata QueryShardServerTestFixture::prepareTestData(
                                           true,
                                           _chunks);

-    ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+    CurrentChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)));
     ASSERT_EQ(_chunks.size(), cm.numChunks());

     {
@@ -826,7 +826,7 @@ void ChunkManager::getShardIdsForRange(const BSONObj& min,
     // owns chunks when it used to at _clusterTime.
     if (!_clusterTime && ChunkMap::allElementsAreOfType(BSONType::minKey, min) &&
         ChunkMap::allElementsAreOfType(BSONType::maxKey, max)) {
-        getAllShardIds(shardIds);
+        _rt->optRt->getAllShardIds(shardIds);
         if (chunkRanges) {
             getAllChunkRanges(chunkRanges);
         }
@@ -871,7 +871,7 @@ bool ChunkManager::rangeOverlapsShard(const ChunkRange& range, const ShardId& sh
     return overlapFound;
 }

-boost::optional<Chunk> ChunkManager::getNextChunkOnShard(const BSONObj& shardKey,
+boost::optional<Chunk> CurrentChunkManager::getNextChunkOnShard(const BSONObj& shardKey,
                                                          const ShardId& shardId) const {
     tassert(7626422, "Expected routing table to be initialized", _rt->optRt);
     tassert(8719704,
@@ -916,10 +916,6 @@ void RoutingTableHistory::getAllChunkRanges(std::set<ChunkRange>* all) const {
     });
 }

-ChunkManager ChunkManager::makeAtTime(const ChunkManager& cm, Timestamp clusterTime) {
-    return ChunkManager(cm._rt, clusterTime);
-}
-
 bool ChunkManager::allowMigrations() const {
     if (!_rt->optRt)
         return true;
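The ChunkManager::makeAtTime() definition deleted above has a direct replacement in the new hierarchy. A quick orientation for callers, where `cm` and `clusterTime` are placeholders for an existing manager and timestamp:

    // Previously: ChunkManager::makeAtTime(cm, clusterTime) returned a ChunkManager copy that
    // shared cm's routing-table handle but carried a pinned _clusterTime.
    // The same thing is now spelled through the derived type:
    PointInTimeChunkManager pitCm = PointInTimeChunkManager::make(cm, clusterTime);
    // make() simply copies cm's RoutingTableHistoryValueHandle and stores clusterTime, per the
    // static factory shown later in this diff.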
@@ -67,7 +67,7 @@

 namespace mongo {

-class ChunkManager;
+class CurrentChunkManager;

 struct MONGO_MOD_NEEDS_REPLACEMENT PlacementVersionTargetingInfo {
     /**
@@ -705,12 +705,59 @@ struct MONGO_MOD_NEEDS_REPLACEMENT EndpointComp {

 /**
  * Wrapper around a RoutingTableHistory, which pins it to a particular point in time.
+ *
+ * The ChunkManager class hierarchy represents routing information for MongoDB sharded collections.
+ * This implementation uses a non-virtual inheritance approach where state is stored in
+ * the base class and behavior is differentiated through derived class method availability.
+ * ChunkManager (Base Class - Stores ALL state)
+ * Derived classes:
+ *   1. CurrentChunkManager(_clusterTime: none)
+ *      Additional Methods:
+ *        - getAllShardIds()
+ *        - getNShardsOwningChunks()
+ *        - getNextChunkOnShard()
+ *   2. PointInTimeChunkManager(_clusterTime: Timestamp)
+ *
+ * CRITICAL DESIGN CONSTRAINT: ALL STATE MUST BE AT ChunkManager LEVEL
+ *
+ * The _clusterTime field MUST remain in the base ChunkManager class because both
+ * CurrentChunkManager and PointInTimeChunkManager share the same RoutingTableHistory.
+ *
+ * The routing table itself doesn't change based on point-in-time vs current semantics.
+ * Only the _clusterTime affects how operations interpret that shared routing table:
+ *   - CurrentChunkManager uses the latest cluster time
+ *   - PointInTimeChunkManager uses a specific historical cluster time
+ *
+ * Usage:
+ * 1. CurrentChunkManager (Latest Routing State)
+ * ------------------------------------------
+ * Represents the most up-to-date routing information from the catalog cache.
+ *
+ * Characteristics:
+ * - _clusterTime is boost::none (no specific point in time)
+ * - Provides access to current cluster topology
+ * - Supports additional operations that query current state
+ * - Used for normal CRUD operations and administrative commands
+ *
+ * Exclusive Methods (NOT available on PointInTimeChunkManager):
+ * - getAllShardIds(): Get exact current set of shards owning chunks
+ * - getNShardsOwningChunks(): Get exact current count of shards
+ * - getNextChunkOnShard(): Find next chunk on shard (for migrations)
+ *
+ * 2. PointInTimeChunkManager (Historical Routing State)
+ * ---------------------------------------------------
+ * Represents routing information as it existed at a specific cluster timestamp.
+ *
+ * Characteristics:
+ * - _clusterTime contains a specific Timestamp
+ * - Provides snapshot-consistent view of routing
+ * - Respects atClusterTime for all chunk operations
+ * - Used for snapshot reads and multi-document transactions
  */
 class MONGO_MOD_NEEDS_REPLACEMENT ChunkManager {
-public:
-    ChunkManager(RoutingTableHistoryValueHandle rt, boost::optional<Timestamp> clusterTime)
-        : _rt(std::move(rt)), _clusterTime(std::move(clusterTime)) {}
+    friend class PointInTimeChunkManager;

+public:
     // Methods supported on both sharded and unsharded collections

     /*
@@ -863,15 +910,6 @@ public:
      */
     bool rangeOverlapsShard(const ChunkRange& range, const ShardId& shardId) const;

-    /**
-     * Given a shardKey, returns the first chunk which is owned by shardId and overlaps or sorts
-     * after that shardKey. If the return value is empty, this means no such chunk exists.
-     *
-     * Can only be used when this ChunkManager is not at point-in-time.
-     */
-    boost::optional<Chunk> getNextChunkOnShard(const BSONObj& shardKey,
-                                               const ShardId& shardId) const;
-
     /**
      * Given a shard key (or a prefix) that has been extracted from a document, returns the chunk
      * that contains that key.
@@ -914,22 +952,12 @@ public:
                              std::set<ChunkRange>* chunkRanges = nullptr,
                              bool includeMaxBound = true) const;

-    /**
-     * Returns the ids of all shards on which the collection has any chunks.
-     * Can only be used when this ChunkManager is not at point-in-time.
-     */
-    void getAllShardIds(std::set<ShardId>* all) const {
-        tassert(7626409, "Expected routing table to be initialized", _rt->optRt);
-        tassert(8719700,
-                "Should never call getAllShardIds when ChunkManager is at point-in-time",
-                !_clusterTime);
-        _rt->optRt->getAllShardIds(all);
-    }
-
     /**
      * Returns the ids of all shards on which the collection has any chunks.
      * Can be used when this ChunkManager is at point-in-time, but it returns the shardIds as of the
      * latest known placement (instead of the ones at the point-in-time).
+     *
+     * TODO SERVER-114823: Remove all usages getAllShardIds_UNSAFE_NotPointInTime
      */
     void getAllShardIds_UNSAFE_NotPointInTime(std::set<ShardId>* all) const {
         tassert(8719701, "Expected routing table to be initialized", _rt->optRt);
@@ -944,36 +972,20 @@ public:
         _rt->optRt->getAllChunkRanges(all);
     }

-    /**
-     * Returns the number of shards on which the collection has any chunks.
-     * Can only be used when this ChunkManager is not at point-in-time.
-     */
-    size_t getNShardsOwningChunks() const {
-        tassert(8719702, "Expected routing table to be initialized", _rt->optRt);
-        tassert(8719703,
-                "Should never call getNShardsOwningChunks when ChunkManager is at point-in-time",
-                !_clusterTime);
-        return _rt->optRt->getNShardsOwningChunks();
-    }
-
     /**
      * Returns the approximate number of shards on which the collection has any chunks.
      *
      * To be only used for logging/metrics which do not need to be always correct. The returned
      * value may be incorrect when this ChunkManager is at point-in-time (it will reflect the
      * 'latest' number of shards, rather than the one at the point-in-time).
+     *
+     * TODO SERVER-114823: Remove all usages getAproxNShardsOwningChunks
      */
     size_t getAproxNShardsOwningChunks() const {
         tassert(7626411, "Expected routing table to be initialized", _rt->optRt);
         return _rt->optRt->getNShardsOwningChunks();
     }

-    /**
-     * Constructs a new ChunkManager, which is a view of the underlying routing table at a different
-     * `clusterTime`.
-     */
-    static ChunkManager makeAtTime(const ChunkManager& cm, Timestamp clusterTime);
-
     bool uuidMatches(const UUID& uuid) const {
         tassert(7626412, "Expected routing table to be initialized", _rt->optRt);
         return _rt->optRt->uuidMatches(uuid);
@@ -1014,15 +1026,66 @@ public:
         return _rt->optRt->isNewTimeseriesWithoutView();
     }

-private:
-    RoutingTableHistoryValueHandle _rt;
+protected:
+    ChunkManager(RoutingTableHistoryValueHandle rt, boost::optional<Timestamp> clusterTime)
+        : _rt(std::move(rt)), _clusterTime(std::move(clusterTime)) {}
+
+    RoutingTableHistoryValueHandle _rt;
     boost::optional<Timestamp> _clusterTime;
 };

+class MONGO_MOD_NEEDS_REPLACEMENT CurrentChunkManager : public ChunkManager {
+public:
+    explicit CurrentChunkManager(RoutingTableHistoryValueHandle rt)
+        : ChunkManager(std::move(rt), boost::none) {}
+
+    /**
+     * Given a shardKey, returns the first chunk which is owned by shardId and overlaps or sorts
+     * after that shardKey. If the return value is empty, this means no such chunk exists.
+     *
+     * Can only be used when this ChunkManager is not at point-in-time.
+     */
+    boost::optional<Chunk> getNextChunkOnShard(const BSONObj& shardKey,
+                                               const ShardId& shardId) const;
+
+    /**
+     * Returns the ids of all shards on which the collection has any chunks.
+     * Can only be used when this ChunkManager is not at point-in-time.
+     */
+    void getAllShardIds(std::set<ShardId>* all) const {
+        tassert(7626409, "Expected routing table to be initialized", _rt->optRt);
+        tassert(8719700,
+                "Should never call getAllShardIds when ChunkManager is at point-in-time",
+                !_clusterTime);
+        _rt->optRt->getAllShardIds(all);
+    }
+
+    /**
+     * Returns the number of shards on which the collection has any chunks.
+     * Can only be used when this ChunkManager is not at point-in-time.
+     */
+    size_t getNShardsOwningChunks() const {
+        tassert(8719702, "Expected routing table to be initialized", _rt->optRt);
+        tassert(8719703,
+                "Should never call getNShardsOwningChunks when ChunkManager is at point-in-time",
+                !_clusterTime);
+        return _rt->optRt->getNShardsOwningChunks();
+    }
+};
+
+class MONGO_MOD_NEEDS_REPLACEMENT PointInTimeChunkManager : public ChunkManager {
+public:
+    PointInTimeChunkManager(RoutingTableHistoryValueHandle rt, Timestamp clusterTime)
+        : ChunkManager(std::move(rt), clusterTime) {}
+
+    static PointInTimeChunkManager make(const ChunkManager& cm, Timestamp clusterTime) {
+        return PointInTimeChunkManager(cm._rt, clusterTime);
+    }
+};
+
 /**
- * If `max` is the max bound of some chunk, returns that chunk. Otherwise, returns the chunk that
- * contains the key `max`.
+ * If `max` is the max bound of some chunk, returns that chunk.
+ * Otherwise, returns the chunk that contains the key `max`.
  */
 MONGO_MOD_NEEDS_REPLACEMENT Chunk getChunkForMaxBound(const ChunkManager& cm, const BSONObj& max);
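Read together, the header changes above turn the old runtime tasserts ("Should never call ... when ChunkManager is at point-in-time") into a compile-time property: the exact-topology accessors simply do not exist on a point-in-time handle. A hedged usage sketch, with `rtHandle` standing in for whatever RoutingTableHistoryValueHandle the caller already holds and an arbitrary timestamp:

    CurrentChunkManager currentCm(rtHandle);
    std::set<ShardId> shards;
    currentCm.getAllShardIds(&shards);                     // OK: only on CurrentChunkManager.
    size_t exact = currentCm.getNShardsOwningChunks();     // OK: exact, current count.

    PointInTimeChunkManager pitCm(rtHandle, Timestamp(100, 1));
    // pitCm.getAllShardIds(&shards);                      // No longer compiles.
    pitCm.getAllShardIds_UNSAFE_NotPointInTime(&shards);   // Explicit latest-placement escape hatch.
    size_t approx = pitCm.getAproxNShardsOwningChunks();   // Approximate count, allowed on both.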
@@ -518,7 +518,7 @@ TEST_F(ChunkManagerQueryTest, SnapshotQueryWithMoreShardsThanLatestMetadata) {
     chunk1.setHistory({ChunkHistory(*chunk1.getOnCurrentShardSince(), ShardId("0")),
                        ChunkHistory(Timestamp(1, 0), ShardId("1"))});

-    ChunkManager chunkManager(makeStandaloneRoutingTableHistory(
+    PointInTimeChunkManager cm(makeStandaloneRoutingTableHistory(
         oldRoutingTable.makeUpdated(boost::none /* timeseriesFields */,
                                     boost::none /* reshardingFields */,
                                     true,
@@ -527,12 +527,12 @@
                                     Timestamp(5, 0));

     std::set<ShardId> shardIds;
-    chunkManager.getShardIdsForRange(BSON("x" << MINKEY), BSON("x" << MAXKEY), &shardIds);
+    cm.getShardIdsForRange(BSON("x" << MINKEY), BSON("x" << MAXKEY), &shardIds);
     ASSERT_EQ(2, shardIds.size());

     const auto expCtx = make_intrusive<ExpressionContextForTest>();
     shardIds.clear();
-    getShardIdsForQuery(expCtx, BSON("x" << BSON("$gt" << -20)), {}, chunkManager, &shardIds);
+    getShardIdsForQuery(expCtx, BSON("x" << BSON("$gt" << -20)), {}, cm, &shardIds);
     ASSERT_EQ(2, shardIds.size());
 }

@@ -573,7 +573,7 @@ TEST_F(ChunkManagerQueryTest, TestKeyBelongsToShard) {
                                     boost::none /* reshardingFields */,
                                     true,
                                     chunkVec);
-    ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), clusterTime);
+    PointInTimeChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), clusterTime);

     auto chunkIt = chunks.begin();
     while (chunkIt != chunks.end()) {
@@ -125,8 +125,8 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards,
                                              boost::none /* reshardingFields */,
                                              true,
                                              chunks);
-    return CollectionMetadata(
-        ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), getShardId(0));
+    return CollectionMetadata(CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))),
+                              getShardId(0));
 }

 ShardId pessimalShardSelector(int i, int nShards, int nChunks) {
@@ -157,8 +157,8 @@ MONGO_COMPILER_NOINLINE auto runIncrementalUpdate(const CollectionMetadata& cm,
                                       true /* allowMigration */,
                                       false /* unsplittable */,
                                       newChunks);
-    return CollectionMetadata(
-        ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), getShardId(0));
+    return CollectionMetadata(CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))),
+                              getShardId(0));
 }

 /*
@@ -315,8 +315,7 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS
                                                  true,
                                                  chunks);
         benchmark::DoNotOptimize(CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
-            getShardId(0)));
+            CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), getShardId(0)));
     }
 }
@@ -741,7 +741,7 @@ void checkLocalCatalogCollectionOptions(OperationContext* opCtx,
 void checkShardingCatalogCollectionOptions(OperationContext* opCtx,
                                            const NamespaceString& targetNss,
                                            const ShardsvrCreateCollectionRequest& request,
-                                           const ChunkManager& cm) {
+                                           const CurrentChunkManager& cm) {
     if (request.getRegisterExistingCollectionInGlobalCatalog()) {
         // No need for checking the sharding catalog when tracking a collection for the first time
         return;
@@ -85,7 +85,7 @@ void DropIndexesCoordinator::_dropIndexes(OperationContext* opCtx,
     router.route(
         "DropIndexesCoordinator::_dropIndexesPhase",
         [&](OperationContext* opCtx, const CollectionRoutingInfo& cri) {
-            const auto chunkManager = cri.getChunkManager();
+            const auto chunkManager = cri.getCurrentChunkManager();
             std::map<ShardId, ShardVersion> shardIdsToShardVersions;

             if (chunkManager.hasRoutingTable()) {
@@ -880,8 +880,7 @@ TEST_F(MetadataConsistencyTest, ShardTrackedCollectionInconsistencyTest) {
         std::make_shared<RoutingTableHistory>(std::move(rt)),
         ComparableChunkVersion::makeComparableChunkVersion(version));

-    const auto collectionMetadata =
-        CollectionMetadata(ChunkManager(rtHandle, boost::none), _shardId);
+    const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), _shardId);

     auto scopedCSR = CollectionShardingRuntime::acquireExclusive(opCtx, _nss);
     scopedCSR->setFilteringMetadata(opCtx, collectionMetadata);
@@ -5516,7 +5516,8 @@ public:
         getCatalogCacheMock()->setCollectionReturnValue(
             NamespaceString::createNamespaceString_forTest(kDBName, "outColl"),
             CollectionRoutingInfo{
-                ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
+                PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)),
+                                        timestamp},
                 DatabaseTypeValueHandle(
                     DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName),
                                  kMyShardName,
@@ -5561,7 +5562,8 @@ TEST_F(PipelineOptimizationsShardMerger, MergeWithUntrackedCollection) {
     getCatalogCacheMock()->setCollectionReturnValue(
         NamespaceString::createNamespaceString_forTest(kDBName, "outColl"),
         CollectionRoutingInfo{
-            ChunkManager{RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}}, timestamp},
+            PointInTimeChunkManager{RoutingTableHistoryValueHandle{OptionalRoutingTableHistory{}},
+                                    timestamp},
             DatabaseTypeValueHandle(
                 DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName),
                              kMyShardName,
@@ -5616,7 +5618,7 @@ TEST_F(PipelineOptimizationsShardMerger, LookUpUnsplittableFromCollection) {
     getCatalogCacheMock()->setCollectionReturnValue(
         fromCollNs,
         CollectionRoutingInfo{
-            ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
+            PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
             DatabaseTypeValueHandle(
                 DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName),
                              kMyShardName,
@@ -5653,7 +5655,7 @@ TEST_F(PipelineOptimizationsShardMerger, LookUpShardedFromCollection) {
     getCatalogCacheMock()->setCollectionReturnValue(
         fromCollNs,
         CollectionRoutingInfo{
-            ChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
+            PointInTimeChunkManager{makeStandaloneRoutingTableHistory(std::move(rt)), timestamp},
             DatabaseTypeValueHandle(
                 DatabaseType{DatabaseName::createDatabaseName_forTest(boost::none, kDBName),
                              kMyShardName,
@@ -181,8 +181,7 @@ void MultipleCollectionAccessorTest::installShardedCollectionMetadata(
         RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(std::move(rt)),
                                        ComparableChunkVersion::makeComparableChunkVersion(version));

-    const auto collectionMetadata =
-        CollectionMetadata(ChunkManager(rtHandle, boost::none), kMyShardName);
+    const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), kMyShardName);

     auto coll = acquireCollection(
         operationContext(),
@@ -177,8 +177,7 @@ boost::optional<TimeseriesTranslationParams> getTimeseriesTranslationParamsIfReq
         return boost::none;
     }

-    const ChunkManager& chunkManager = cri.getChunkManager();
-    const auto& timeseriesFields = chunkManager.getTimeseriesFields();
+    const auto& timeseriesFields = cri.getChunkManager().getTimeseriesFields();
     tassert(10601101,
             "Timeseries collections must have timeseries options",
             timeseriesFields.has_value());
@@ -190,7 +190,7 @@ void CollectionRoutingInfoTargeterTest::testTargetInsertWithRangePrefixHashedSha
  * with the distinction that it simply creates and returns a ChunkManager object
  * and does not assign it to the Global Catalog Cache ChunkManager.
  */
-ChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern,
+CurrentChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern,
                                     const std::vector<BSONObj>& splitPoints) {
     std::vector<ChunkType> chunks;
     auto splitPointsIncludingEnds(splitPoints);
@@ -228,9 +228,8 @@ ChunkManager makeCustomChunkManager(const ShardKeyPattern& shardKeyPattern,
                                                         true,  // allowMigration
                                                         chunks);

-    return ChunkManager(RoutingTableHistoryValueHandle(
-                            std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))),
-                        boost::none);
+    return CurrentChunkManager(RoutingTableHistoryValueHandle(
+        std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))));
 }

@@ -711,7 +710,7 @@ public:
         const auto cri = makeUnshardedCollectionRoutingInfo(kNss);

         std::set<ShardId> shards;
-        cri.getChunkManager().getAllShardIds(&shards);
+        cri.getCurrentChunkManager().getAllShardIds(&shards);
         ASSERT_EQ(1, shards.size());
         owningShard = *shards.begin();
@@ -411,23 +411,26 @@ bool MultiCollectionRouter::isAnyCollectionNotLocal(
                 "Must be an entry in criMap for namespace " + nss.toStringForErrorMsg(),
                 nssCri != criMap.end());

-        const auto chunkManagerMaybeAtClusterTime = atClusterTime
-            ? ChunkManager::makeAtTime(nssCri->second.getChunkManager(),
-                                       atClusterTime->asTimestamp())
-            : nssCri->second.getChunkManager();
-
-        bool isNssLocal = [&]() {
-            if (chunkManagerMaybeAtClusterTime.isSharded()) {
+        auto isNssLocalFunc = [&](const auto& cm) {
+            if (cm.isSharded()) {
                 return false;
-            } else if (chunkManagerMaybeAtClusterTime.isUnsplittable()) {
-                return chunkManagerMaybeAtClusterTime.getMinKeyShardIdWithSimpleCollation() ==
-                    myShardId;
+            } else if (cm.isUnsplittable()) {
+                return cm.getMinKeyShardIdWithSimpleCollation() == myShardId;
             } else {
                 // If collection is untracked, it is only local if this shard is the dbPrimary
                 // shard.
                 return nssCri->second.getDbPrimaryShardId() == myShardId;
             }
-        }();
+        };
+
+        bool isNssLocal;
+        if (atClusterTime) {
+            auto pitChunkManager = PointInTimeChunkManager::make(nssCri->second.getChunkManager(),
+                                                                 atClusterTime->asTimestamp());
+            isNssLocal = isNssLocalFunc(pitChunkManager);
+        } else {
+            isNssLocal = isNssLocalFunc(nssCri->second.getChunkManager());
+        }

         if (!isNssLocal) {
             anyCollectionNotLocal = true;
@@ -34,6 +34,7 @@
 #include "mongo/bson/bsonobj.h"
 #include "mongo/bson/bsonobjbuilder.h"
 #include "mongo/db/curop.h"
+#include "mongo/db/global_catalog/chunk_manager.h"
 #include "mongo/db/global_catalog/sharding_catalog_client.h"
 #include "mongo/db/global_catalog/type_database_gen.h"
 #include "mongo/db/keypattern.h"
@@ -168,7 +169,7 @@ const OperationContext::Decoration<bool> routerShouldRelaxCollectionUUIDConsiste
 }  // namespace

 bool CollectionRoutingInfo::hasRoutingTable() const {
-    return _cm.hasRoutingTable();
+    return getChunkManager().hasRoutingTable();
 }

 const ShardId& CollectionRoutingInfo::getDbPrimaryShardId() const {
@@ -180,7 +181,7 @@ const DatabaseVersion& CollectionRoutingInfo::getDbVersion() const {
 }

 ShardVersion CollectionRoutingInfo::getCollectionVersion() const {
-    ShardVersion sv = ShardVersionFactory::make(_cm);
+    auto sv = ShardVersionFactory::make(getChunkManager());
     if (MONGO_unlikely(shouldIgnoreUuidMismatch)) {
         sv.setIgnoreShardingCatalogUuidMismatch();
     }
@@ -188,7 +189,7 @@ ShardVersion CollectionRoutingInfo::getCollectionVersion() const {
 }

 ShardVersion CollectionRoutingInfo::getShardVersion(const ShardId& shardId) const {
-    auto sv = ShardVersionFactory::make(_cm, shardId);
+    auto sv = ShardVersionFactory::make(getChunkManager(), shardId);
     if (MONGO_unlikely(shouldIgnoreUuidMismatch)) {
         sv.setIgnoreShardingCatalogUuidMismatch();
     }
@@ -419,11 +420,8 @@ StatusWith<CachedDatabaseInfo> CatalogCache::_getDatabaseForCollectionRoutingInf
     return swDbInfo;
 }

-StatusWith<ChunkManager> CatalogCache::_getCollectionPlacementInfoAt(
-    OperationContext* opCtx,
-    const NamespaceString& nss,
-    boost::optional<Timestamp> atClusterTime,
-    bool allowLocks) {
+StatusWith<RoutingTableHistoryValueHandle> CatalogCache::_getCollectionRoutingTable(
+    OperationContext* opCtx, const NamespaceString& nss, bool allowLocks) {
     tassert(7032314,
             "Do not hold a lock while refreshing the catalog cache. Doing so would potentially "
             "hold the lock during a network call, and can lead to a deadlock as described in "
@@ -440,7 +438,7 @@ StatusWith<ChunkManager> CatalogCache::_getCollectionPlacementInfoAt(
     if (nss.isNamespaceAlwaysUntracked()) {
         // If the collection is known to always be untracked, there is no need to request it to
         // the CollectionCache.
-        return ChunkManager(OptionalRoutingTableHistory(), atClusterTime);
+        return OptionalRoutingTableHistory();
     }

     auto collEntryFuture =
@@ -452,7 +450,7 @@ StatusWith<ChunkManager> CatalogCache::_getCollectionPlacementInfoAt(
         // use it, otherwise return an error

         if (collEntryFuture.isReady()) {
-            return ChunkManager(collEntryFuture.get(opCtx), atClusterTime);
+            return collEntryFuture.get(opCtx);
         } else {
             return Status{ShardCannotRefreshDueToLocksHeldInfo(nss),
                           "Routing info refresh did not complete"};
@@ -468,7 +466,7 @@ StatusWith<ChunkManager> CatalogCache::_getCollectionPlacementInfoAt(
         auto collEntry = collEntryFuture.get(opCtx);
         _stats.totalRefreshWaitTimeMicros.addAndFetch(t.micros());

-        return ChunkManager(std::move(collEntry), atClusterTime);
+        return std::move(collEntry);
     } catch (const DBException& ex) {
         _stats.totalRefreshWaitTimeMicros.addAndFetch(t.micros());
         bool isCatalogCacheRetriableError = ex.isA<ErrorCategory::SnapshotError>() ||
@@ -509,13 +507,20 @@ StatusWith<CollectionRoutingInfo> CatalogCache::_getCollectionRoutingInfoAt(
         return swDbInfo.getStatus();
     }

-    auto swChunkManager = _getCollectionPlacementInfoAt(opCtx, nss, optAtClusterTime, allowLocks);
-    if (!swChunkManager.isOK()) {
-        return swChunkManager.getStatus();
+    auto swRoutingTable = _getCollectionRoutingTable(opCtx, nss, allowLocks);
+    if (!swRoutingTable.isOK()) {
+        return swRoutingTable.getStatus();
     }

-    auto cri =
-        CollectionRoutingInfo{std::move(swChunkManager.getValue()), std::move(swDbInfo.getValue())};
+    auto cri = [&]() -> CollectionRoutingInfo {
+        if (optAtClusterTime) {
+            PointInTimeChunkManager chunkManager(swRoutingTable.getValue(), optAtClusterTime.get());
+            return CollectionRoutingInfo{std::move(chunkManager), std::move(swDbInfo.getValue())};
+        }
+        CurrentChunkManager chunkManager(swRoutingTable.getValue());
+        return CollectionRoutingInfo{std::move(chunkManager), std::move(swDbInfo.getValue())};
+    }();

     if (MONGO_unlikely(routerShouldRelaxCollectionUUIDConsistencyCheck(opCtx))) {
         cri.shouldIgnoreUuidMismatch = true;
     }
@@ -538,11 +543,18 @@ void CatalogCache::_triggerPlacementVersionRefresh(const NamespaceString& nss) {
         nss, ComparableChunkVersion::makeComparableChunkVersionForForcedRefresh());
 }

-StatusWith<ChunkManager> CatalogCache::getCollectionPlacementInfoWithRefresh(
+StatusWith<CurrentChunkManager> CatalogCache::getCollectionPlacementInfoWithRefresh(
     OperationContext* opCtx, const NamespaceString& nss) {
     try {
         _triggerPlacementVersionRefresh(nss);
-        return _getCollectionPlacementInfoAt(opCtx, nss, boost::none /* atClusterTime */);
+        auto swRoutingTable = _getCollectionRoutingTable(opCtx, nss);
+
+        if (!swRoutingTable.isOK()) {
+            return swRoutingTable.getStatus();
+        }
+
+        return CurrentChunkManager(swRoutingTable.getValue());
     } catch (const DBException& ex) {
         return ex.toStatus();
     }
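Because getCollectionPlacementInfoWithRefresh() now returns StatusWith<CurrentChunkManager>, its callers get a handle on which the exact-topology methods are directly usable. A small, assumed call-site sketch, where `catalogCache`, `opCtx`, and `nss` are placeholders:

    StatusWith<CurrentChunkManager> swCm =
        catalogCache->getCollectionPlacementInfoWithRefresh(opCtx, nss);
    if (swCm.isOK()) {
        std::set<ShardId> shards;
        // Safe by construction: the refreshed manager is never a point-in-time view.
        swCm.getValue().getAllShardIds(&shards);
    }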
@@ -73,9 +73,40 @@ using CachedDatabaseInfo MONGO_MOD_PUBLIC = DatabaseTypeValueHandle;

 class MONGO_MOD_PUBLIC CollectionRoutingInfo {
 public:
-    CollectionRoutingInfo(ChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo)
+    CollectionRoutingInfo(CurrentChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo)
         : _dbInfo(std::move(dbInfo)), _cm(std::move(chunkManager)) {}

+    CollectionRoutingInfo(PointInTimeChunkManager&& chunkManager, CachedDatabaseInfo&& dbInfo)
+        : _dbInfo(std::move(dbInfo)), _cm(std::move(chunkManager)) {}
+
+    /**
+     * Variant type that can hold either a CurrentChunkManager or a PointInTimeChunkManager.
+     *
+     * This allows CollectionRoutingInfo to represent routing information in two different modes:
+     * - CurrentChunkManager: Represents the latest known routing state for a collection
+     * - PointInTimeChunkManager: Represents routing state at a specific point in time (cluster
+     *   time)
+     *
+     * Usage guidelines:
+     * - Use std::visit() to access the held ChunkManager polymorphically when the operation works
+     *   identically for both types
+     * - Use std::get_if<CurrentChunkManager>() when you need to conditionally access features only
+     *   available on CurrentChunkManager (e.g., getNShardsOwningChunks())
+     * - Use std::holds_alternative<CurrentChunkManager>() to check which type is currently held
+     *
+     * Example:
+     *   // Polymorphic access (works for both types):
+     *   std::visit([](const auto& cm) { return cm.isSharded(); }, variant);
+     *
+     *   // Type-specific access:
+     *   if (auto* currentCm = std::get_if<CurrentChunkManager>(&variant)) {
+     *       currentCm->getAllShardIds(&shards);
+     *   }
+     *
+     * TODO SERVER-114825: Investigate if it's possible to implement without this variant.
+     */
+    using ChunkManagerVariant = std::variant<CurrentChunkManager, PointInTimeChunkManager>;
+
     /**
      * Returns true if the collection is tracked in the global catalog.
      *
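The ChunkManagerVariant alias introduced above is a plain std::variant, so the standard utilities named in the comment apply directly. A compact sketch of the two access styles, with `rtHandle` and the local variable names assumed for illustration:

    CollectionRoutingInfo::ChunkManagerVariant cmVariant = CurrentChunkManager(rtHandle);

    // Polymorphic access: any base-ChunkManager method, regardless of which alternative is held.
    bool sharded = std::visit([](const auto& cm) { return cm.isSharded(); }, cmVariant);

    // Type-specific access: only when the held alternative really is CurrentChunkManager.
    if (auto* currentCm = std::get_if<CurrentChunkManager>(&cmVariant)) {
        std::set<ShardId> shards;
        currentCm->getAllShardIds(&shards);
    }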
@@ -90,11 +121,103 @@ public:
      * shards.
      */
     bool isSharded() const {
-        return _cm.isSharded();
+        return getChunkManager().isSharded();
     }

+    /**
+     * Returns a const reference to the ChunkManager held in this CollectionRoutingInfo.
+     *
+     * This method provides polymorphic access to common ChunkManager functionality that works
+     * identically for both CurrentChunkManager and PointInTimeChunkManager.
+     *
+     * Use this method when:
+     * - You need to call methods that are defined on the base ChunkManager class
+     * - The operation doesn't require type-specific behavior
+     * - You're working with shared functionality like:
+     *   - isSharded()
+     *   - isUnsplittable()
+     *   - getShardKeyPattern()
+     *   - getVersion()
+     *   - forEachChunk() (respects point-in-time semantics automatically)
+     *
+     * Do NOT use this method when:
+     * - You need to call CurrentChunkManager-specific methods like getAllShardIds() or
+     *   getNShardsOwningChunks() - use getCurrentChunkManager() instead
+     * - You need to know whether you have a current or point-in-time view - check the variant
+     *   directly with std::holds_alternative<>()
+     *
+     * Example usage:
+     *   const auto& cm = cri.getChunkManager();
+     *   if (cm.isSharded()) {
+     *       cm.forEachChunk([](const Chunk& chunk) { ... });
+     *   }
+     *
+     * @return A const reference to the base ChunkManager interface
+     */
     const ChunkManager& getChunkManager() const {
-        return _cm;
+        return std::visit([](const auto& cm) -> const ChunkManager& { return cm; }, _cm);
+    }
+
+    /**
+     * Returns a const reference to the CurrentChunkManager held in this CollectionRoutingInfo.
+     *
+     * This method provides direct access to CurrentChunkManager-specific functionality that is
+     * NOT available when using a PointInTimeChunkManager.
+     *
+     * Use this method when:
+     * - You need to access the CURRENT (latest) state of the routing information
+     * - You need to call methods that are ONLY available on CurrentChunkManager:
+     *   - getAllShardIds(): Gets the exact current set of shards owning chunks
+     *   - getNShardsOwningChunks(): Gets the exact current count of shards
+     *   - getNextChunkOnShard(): Finds the next chunk on a specific shard
+     * - You're performing operations that explicitly require non-point-in-time semantics:
+     *   - Checking current cluster topology
+     *   - Making routing decisions based on latest metadata
+     *   - Administrative operations that need up-to-date information
+     *
+     * Do NOT use this method when:
+     * - You're working with point-in-time reads (e.g., transactions with atClusterTime)
+     * - You don't know whether the CollectionRoutingInfo contains a CurrentChunkManager or
+     *   PointInTimeChunkManager - this will throw an exception if it's the wrong type
+     * - You only need common ChunkManager functionality - use getChunkManager() instead
+     *
+     * Common usage patterns:
+     *
+     *   // Pattern 1: Direct access when you know it's current
+     *   const auto& currentCm = cri.getCurrentChunkManager();
+     *   currentCm.getAllShardIds(&allShards);
+     *
+     *   // Pattern 2: Conditional access (safer)
+     *   if (std::holds_alternative<CurrentChunkManager>(cri._cm)) {
+     *       const auto& currentCm = cri.getCurrentChunkManager();
+     *       size_t nShards = currentCm.getNShardsOwningChunks();
+     *   }
+     *
+     *   // Pattern 3: Using std::visit for type-specific behavior
+     *   std::visit(OverloadedVisitor{
+     *       [](const CurrentChunkManager& cm) {
+     *           // Can call getCurrentChunkManager-only methods
+     *           cm.getAllShardIds(&shards);
+     *       },
+     *       [](const PointInTimeChunkManager& cm) {
+     *           // Different behavior for point-in-time
+     *       }
+     *   }, cri._cm);
+     *
+     * When this is created:
+     * - getCollectionRoutingInfo() without atClusterTime returns CurrentChunkManager
+     * - getCollectionRoutingInfoAt() with atClusterTime returns PointInTimeChunkManager
+     * - getCollectionPlacementInfoWithRefresh() returns CurrentChunkManager
+     *
+     * @throws TAssertionException if this CollectionRoutingInfo contains a PointInTimeChunkManager
+     *         instead of a CurrentChunkManager
+     *
+     * @return A const reference to the CurrentChunkManager
+     */
+    const CurrentChunkManager& getCurrentChunkManager() const {
+        tassert(10271001,
+                "Expected current ChunkManager but have PointInTimeChunkManager",
+                std::holds_alternative<CurrentChunkManager>(_cm));
+        return std::get<CurrentChunkManager>(_cm);
     }

     ShardVersion getCollectionVersion() const;
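At call sites the distinction introduced above looks roughly like the following; the first form is always valid, while the second tasserts (error 10271001) if the routing info was resolved at a cluster time. Names other than the two accessors are placeholders:

    const ChunkManager& anyCm = cri.getChunkManager();      // Shared interface, both modes.
    if (anyCm.hasRoutingTable() && anyCm.isSharded()) {
        // ... routing logic that is valid for current and point-in-time views alike ...
    }

    const CurrentChunkManager& currentCm = cri.getCurrentChunkManager();  // Tasserts on PIT.
    std::set<ShardId> owningShards;
    currentCm.getAllShardIds(&owningShards);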
@@ -113,7 +236,7 @@ public:

 private:
     CachedDatabaseInfo _dbInfo;
-    ChunkManager _cm;
+    ChunkManagerVariant _cm;
 };

 /**
@@ -272,7 +395,7 @@ public:
     /**
      * Blocking method to retrieve refreshed collection placement information (ChunkManager).
      */
-    virtual StatusWith<ChunkManager> getCollectionPlacementInfoWithRefresh(
+    virtual StatusWith<CurrentChunkManager> getCollectionPlacementInfoWithRefresh(
         OperationContext* opCtx, const NamespaceString& nss);

     /**
@@ -433,10 +556,8 @@ private:
                                                              boost::optional<Timestamp> optAtClusterTime,
                                                              bool allowLocks = false);

-    StatusWith<ChunkManager> _getCollectionPlacementInfoAt(OperationContext* opCtx,
-                                                           const NamespaceString& nss,
-                                                           boost::optional<Timestamp> atClusterTime,
-                                                           bool allowLocks = false);
+    StatusWith<RoutingTableHistoryValueHandle> _getCollectionRoutingTable(
+        OperationContext* opCtx, const NamespaceString& nss, bool allowLocks = false);

     void _triggerPlacementVersionRefresh(const NamespaceString& nss);

@@ -30,6 +30,7 @@
 #include "mongo/db/router_role/routing_cache/catalog_cache_mock.h"

 #include "mongo/base/error_codes.h"
+#include "mongo/db/global_catalog/chunk_manager.h"
 #include "mongo/db/router_role/routing_cache/config_server_catalog_cache_loader_mock.h"
 #include "mongo/db/service_context.h"
 #include "mongo/db/sharding_environment/sharding_test_fixture_common.h"
@@ -70,11 +71,11 @@ StatusWith<CollectionRoutingInfo> CatalogCacheMock::getCollectionRoutingInfo(
                           nss.toStringForErrorMsg()));
     }
 }
-StatusWith<ChunkManager> CatalogCacheMock::getCollectionPlacementInfoWithRefresh(
+StatusWith<CurrentChunkManager> CatalogCacheMock::getCollectionPlacementInfoWithRefresh(
     OperationContext* opCtx, const NamespaceString& nss) {
     const auto it = _collectionCache.find(nss);
     if (it != _collectionCache.end()) {
-        return it->second.getChunkManager();
+        return it->second.getCurrentChunkManager();
     } else {
         return Status(
             ErrorCodes::InternalError,
@@ -107,7 +108,7 @@ std::unique_ptr<CatalogCacheMock> CatalogCacheMock::make() {

 CollectionRoutingInfo CatalogCacheMock::makeCollectionRoutingInfoUntracked(
     const NamespaceString& nss, const ShardId& dbPrimaryShard, DatabaseVersion dbVersion) {
-    ChunkManager cm(OptionalRoutingTableHistory(), boost::none);
+    CurrentChunkManager cm(OptionalRoutingTableHistory{});
     return CollectionRoutingInfo(
         std::move(cm),
         DatabaseTypeValueHandle(DatabaseType{nss.dbName(), dbPrimaryShard, dbVersion}));
@@ -197,8 +198,8 @@ CollectionRoutingInfo CatalogCacheMock::_makeCollectionRoutingInfoTracked(
                                                    true /*allowMigrations*/,
                                                    chunkTypes);

-    ChunkManager cm(ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rth)),
-                    boost::none /*clusterTime*/);
+    CurrentChunkManager cm(
+        ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rth)));
     return CollectionRoutingInfo(
         std::move(cm),
         DatabaseTypeValueHandle(DatabaseType{nss.dbName(), dbPrimaryShard, dbVersion}));
@@ -62,7 +62,7 @@ public:
                                                          const NamespaceString& nss,
                                                          bool allowLocks) override;

-    StatusWith<ChunkManager> getCollectionPlacementInfoWithRefresh(
+    StatusWith<CurrentChunkManager> getCollectionPlacementInfoWithRefresh(
         OperationContext* opCtx, const NamespaceString& nss) override;

     void setDatabaseReturnValue(const DatabaseName& dbName, CachedDatabaseInfo databaseInfo);
@@ -71,7 +71,7 @@ ShardingWriteRouter::ShardingWriteRouter(OperationContext* opCtx, const Namespac
         tassert(6862800,
                 "Routing information for the temporary resharding collection is stale",
                 cri.hasRoutingTable());
-        _reshardingChunkMgr = cri.getChunkManager();
+        _reshardingChunkMgr = cri.getCurrentChunkManager();
     }
 }
 }
@@ -224,7 +224,7 @@ protected:
             ComparableChunkVersion::makeComparableChunkVersion(version));
     }

-    std::pair<std::vector<mongo::ChunkType>, mongo::ChunkManager> createChunks(
+    std::pair<std::vector<mongo::ChunkType>, mongo::CurrentChunkManager> createChunks(
         size_t nShards, uint32_t nChunks, std::vector<ShardId> shards) {
         invariant(shards.size() == nShards);

@@ -255,7 +255,7 @@ protected:
         reshardingFields.setDonorFields(
             TypeCollectionDonorFields{tempNss, reshardKeyPattern, shards});

-        ChunkManager cm(makeStandaloneRoutingTableHistory(
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
             RoutingTableHistory::makeNew(kNss,
                                          collIdentifier,
                                          shardKeyPattern,
@@ -267,8 +267,7 @@ protected:
                                          boost::none /* timeseriesFields */,
                                          reshardingFields, /* reshardingFields */
                                          true,
-                                         chunks)),
-                        boost::none);
+                                         chunks)));

         return std::make_pair(chunks, cm);
     }
@@ -193,9 +193,8 @@ CollectionRoutingInfoTargeter makeCollectionRoutingInfoTargeter(
        true /* allowMigrations */,
        chunks);

-    auto cm = ChunkManager(RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(
-                               std::move(routingTableHistory))),
-                           boost::none);
+    CurrentChunkManager cm(RoutingTableHistoryValueHandle(
+        std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))));

    auto routingCtx = RoutingContext::createSynthetic(
        {{nss,

@@ -152,9 +152,8 @@ protected:
        true /* allowMigrations */,
        chunks);

-    auto cm = ChunkManager(RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(
-                               std::move(routingTableHistory))),
-                           boost::none);
+    CurrentChunkManager cm(RoutingTableHistoryValueHandle(
+        std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))));
    auto routingCtx = RoutingContext::createSynthetic(
        {{nss,
          CollectionRoutingInfo{std::move(cm),
@@ -87,9 +87,8 @@ public:
            true, // allowMigration
            chunks);

-        return ChunkManager(RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(
-                                std::move(routingTableHistory))),
-                            boost::none);
+        return CurrentChunkManager(RoutingTableHistoryValueHandle(
+            std::make_shared<RoutingTableHistory>(std::move(routingTableHistory))));
    }

private:

@@ -129,9 +129,10 @@ RoutingTableHistory makeRoutingTable(const std::vector<ChunkType>& chunks) {
        chunks);
}

-ChunkManager makeChunkManager(const std::vector<ChunkType>& chunks) {
+CurrentChunkManager makeChunkManager(const std::vector<ChunkType>& chunks) {
    auto rt = std::make_shared<RoutingTableHistory>(makeRoutingTable(chunks));
-    return {{std::move(rt)}, boost::none /* atClusterTime */};
+    return CurrentChunkManager(
+        RoutingTableHistoryValueHandle(OptionalRoutingTableHistory(std::move(rt))));
}

DistributionStatus makeDistStatus(const ChunkManager& cm, ZoneInfo zoneInfo = ZoneInfo()) {
@@ -72,7 +72,7 @@ void MigrationChunkClonerSourceOpObserver::assertIntersectingChunkHasNotMoved(
    const LogicalTime& atClusterTime) {
    // We can assume the simple collation because shard keys do not support non-simple collations.
    auto cmAtTimeOfWrite =
-        ChunkManager::makeAtTime(*metadata.getChunkManager(), atClusterTime.asTimestamp());
+        PointInTimeChunkManager::make(*metadata.getChunkManager(), atClusterTime.asTimestamp());
    auto chunk = cmAtTimeOfWrite.findIntersectingChunkWithSimpleCollation(shardKey);

    // Throws if the chunk has moved since the timestamp of the running transaction's atClusterTime

@@ -730,7 +730,7 @@ protected:
            ->setFilteringMetadata(
                operationContext(),
                CollectionMetadata(
-                    ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
+                    CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))),
                    ShardId("dummyShardId")));
    }();
@@ -129,7 +129,7 @@ void RangeDeleterServiceTest::_setFilteringMetadataByUUID(OperationContext* opCt
            ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
            ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
            ShardId("this"));
-        ChunkManager cm(makeStandaloneRoutingTableHistory(
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
            RoutingTableHistory::makeNew(nss,
                                         uuid,
                                         kShardKeyPattern,

@@ -140,10 +140,8 @@ void RangeDeleterServiceTest::_setFilteringMetadataByUUID(OperationContext* opCt
                                         Timestamp(1, 1),
                                         boost::none /* timeseriesFields */,
                                         boost::none /* reshardingFields */,

                                         true,
-                                         {std::move(chunk)})),
-                        boost::none);
+                                         {std::move(chunk)})));

        return CollectionMetadata(std::move(cm), ShardId("this"));
    }();
@@ -134,7 +134,7 @@ public:
            ChunkRange{BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY)},
            ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}),
            ShardId("dummyShardId")}});
-        ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)));
        AutoGetDb autoDb(_opCtx, kNss.dbName(), MODE_IX);
        Lock::CollectionLock collLock(_opCtx, kNss, MODE_IX);
        CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(_opCtx, kNss)

@@ -226,7 +226,7 @@ protected:
        ShardServerTestFixtureWithCatalogCacheMock::tearDown();
    }

-    ChunkManager createChunkManager(
+    CurrentChunkManager createChunkManager(
        const ShardKeyPattern& shardKeyPattern,
        std::deque<DocumentSource::GetNextResult> configCacheChunksData) {
        const OID epoch = OID::gen();
@@ -255,7 +255,7 @@ protected:
            false,
            chunks);

-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
    }

    /**

@@ -128,8 +128,7 @@ public:
            true /* allowMigrations */,
            chunks);

-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
-                            boost::none /* clusterTime */);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
    }

    DonorShardFetchTimestamp makeDonorShardFetchTimestamp(ShardId shardId,
@@ -165,7 +165,7 @@ protected:
        auto range = ChunkRange(BSON(shardKey << MINKEY), BSON(shardKey << MAXKEY));
        auto chunk = ChunkType(
            uuid, range, ChunkVersion({epoch, timestamp}, {1, 0}), shardThatChunkExistsOn);
-        ChunkManager cm(makeStandaloneRoutingTableHistory(
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
            RoutingTableHistory::makeNew(nss,
                                         uuid,
                                         shardKeyPattern,

@@ -177,8 +177,7 @@ protected:
                                         boost::none /* timeseriesFields */,
                                         boost::none /* reshardingFields */,
                                         true,
-                                         {std::move(chunk)})),
-                        boost::none);
+                                         {std::move(chunk)})));
        auto dbVersion = DatabaseVersion(uuid, timestamp);
        getCatalogCacheMock()->setDatabaseReturnValue(
            nss.dbName(),
@@ -310,7 +310,7 @@ public:
            false,
            chunks);

-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
    }

    void loadCatalogCacheValues() {

@@ -520,8 +520,7 @@ private:
            true /* allowMigrations */,
            chunks);

-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
-                            boost::none /* clusterTime */);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
    }

    RoutingTableHistoryValueHandle makeStandaloneRoutingTableHistory(RoutingTableHistory rt) {
@@ -333,7 +333,7 @@ public:
    }

private:
-    ChunkManager makeChunkManager(const OID& epoch,
+    CurrentChunkManager makeChunkManager(const OID& epoch,
                                  const NamespaceString& nss,
                                  const UUID& uuid,
                                  const BSONObj& shardKey,

@@ -350,11 +350,10 @@ private:
            boost::none /* reshardingFields */,
            true /* allowMigrations */,
            chunks);
-        return ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
-                            boost::none /* clusterTime */);
+        return CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)));
    }

-    ChunkManager makeChunkManagerForSourceCollection() {
+    CurrentChunkManager makeChunkManagerForSourceCollection() {
        // Create three chunks, two that are owned by this donor shard and one owned by some other
        // shard. The chunk for {sk: null} is owned by this donor shard to allow test cases to omit
        // the shard key field when it isn't relevant.
@@ -380,7 +379,7 @@ private:
            epoch, _sourceNss, _sourceUUID, BSON(_currentShardKey << 1), chunks);
    }

-    ChunkManager makeChunkManagerForOutputCollection() {
+    CurrentChunkManager makeChunkManagerForOutputCollection() {
        const OID epoch = OID::gen();
        const UUID outputUuid = UUID::gen();
        std::vector<ChunkType> chunks = {

@@ -227,9 +227,8 @@ public:
            true /* allowMigrations */,
            chunks);
        return CollectionRoutingInfo{
-            ChunkManager(
-                ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
-                boost::none /* clusterTime */),
+            CurrentChunkManager(
+                ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt))),
            DatabaseTypeValueHandle(DatabaseType{
                nss.dbName(), _someDonorId, DatabaseVersion(UUID::gen(), Timestamp(1, 1))})};
    }
@@ -57,7 +57,8 @@ PostReshardingCollectionPlacement::PostReshardingCollectionPlacement(
    tassert(11178203,
            "Routing information for the temporary resharding collection is stale",
            tmpNssRoutingInfoWithStatus.getValue().hasRoutingTable());
-    _tmpReshardingCollectionChunkManager = tmpNssRoutingInfoWithStatus.getValue().getChunkManager();
+    _tmpReshardingCollectionChunkManager =
+        tmpNssRoutingInfoWithStatus.getValue().getCurrentChunkManager();
}

const ShardId& PostReshardingCollectionPlacement::getReshardingDestinedRecipient(
@@ -55,11 +55,14 @@

namespace mongo {

-CollectionMetadata::CollectionMetadata(ChunkManager cm, const ShardId& thisShardId)
+CollectionMetadata::CollectionMetadata(CurrentChunkManager cm, const ShardId& thisShardId)
+    : _cm(std::move(cm)), _thisShardId(thisShardId) {}
+
+CollectionMetadata::CollectionMetadata(PointInTimeChunkManager cm, const ShardId& thisShardId)
    : _cm(std::move(cm)), _thisShardId(thisShardId) {}

bool CollectionMetadata::allowMigrations() const {
-    return _cm ? _cm->allowMigrations() : true;
+    return _cm ? getChunkManagerBase().allowMigrations() : true;
}

boost::optional<ShardKeyPattern> CollectionMetadata::getReshardingKeyIfShouldForwardOps() const {
@@ -139,12 +142,14 @@ BSONObj CollectionMetadata::extractDocumentKey(const ShardKeyPattern* shardKeyPa
}

BSONObj CollectionMetadata::extractDocumentKey(const BSONObj& doc) const {
-    return extractDocumentKey(isSharded() ? &_cm->getShardKeyPattern() : nullptr, doc);
+    return extractDocumentKey(isSharded() ? &getChunkManagerBase().getShardKeyPattern() : nullptr,
+                              doc);
}

std::string CollectionMetadata::toStringBasic() const {
    if (hasRoutingTable()) {
-        return str::stream() << "collection placement version: " << _cm->getVersion().toString()
+        return str::stream() << "collection placement version: "
+                             << getChunkManagerBase().getVersion().toString()
                             << ", shard placement version: "
                             << getShardPlacementVersionForLogging().toString();
    } else {

@@ -157,7 +162,7 @@ RangeMap CollectionMetadata::getChunks() const {

    RangeMap chunksMap(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap<BSONObj>());

-    _cm->forEachChunk([this, &chunksMap](const auto& chunk) {
+    getChunkManagerBase().forEachChunk([this, &chunksMap](const auto& chunk) {
        if (chunk.getShardId() == _thisShardId)
            chunksMap.emplace_hint(chunksMap.end(), chunk.getMin(), chunk.getMax());
@@ -170,7 +175,7 @@ RangeMap CollectionMetadata::getChunks() const {
bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk) const {
    tassert(10016203, "Expected a routing table to be initialized", hasRoutingTable());

-    auto nextChunk = _cm->getNextChunkOnShard(lookupKey, _thisShardId);
+    auto nextChunk = getCurrentChunkManager()->getNextChunkOnShard(lookupKey, _thisShardId);
    if (!nextChunk)
        return false;

@@ -182,7 +187,7 @@ bool CollectionMetadata::getNextChunk(const BSONObj& lookupKey, ChunkType* chunk
bool CollectionMetadata::currentShardHasAnyChunks() const {
    tassert(10016204, "Expected a routing table to be initialized", hasRoutingTable());
    std::set<ShardId> shards;
-    _cm->getAllShardIds(&shards);
+    getCurrentChunkManager()->getAllShardIds(&shards);
    return shards.find(_thisShardId) != shards.end();
}
@@ -259,7 +264,7 @@ void CollectionMetadata::toBSONChunks(BSONArrayBuilder* builder) const {
    if (!hasRoutingTable())
        return;

-    _cm->forEachChunk([this, &builder](const auto& chunk) {
+    getChunkManagerBase().forEachChunk([this, &builder](const auto& chunk) {
        if (chunk.getShardId() == _thisShardId) {
            BSONArrayBuilder chunkBB(builder->subarrayStart());
            chunkBB.append(chunk.getMin());
@@ -81,7 +81,9 @@ public:
     * "thisShardId" is the shard identity of this shard for purposes of answering questions like
     * "does this key belong to this shard"?
     */
-    CollectionMetadata(ChunkManager cm, const ShardId& thisShardId);
+    CollectionMetadata(CurrentChunkManager cm, const ShardId& thisShardId);
+
+    CollectionMetadata(PointInTimeChunkManager cm, const ShardId& thisShardId);

    /**
     * Returns a CollectionMetadata object for an untracked collection.

@@ -94,18 +96,18 @@ public:
     * Returns whether this metadata object represents a sharded or unsharded collection.
     */
    bool isSharded() const {
-        return _cm && _cm->isSharded();
+        return _cm && getChunkManagerBase().isSharded();
    }

    /**
     * Returns whether this metadata object represents an unsplittable collection.
     */
    bool isUnsplittable() const {
-        return _cm && _cm->isUnsplittable();
+        return _cm && getChunkManagerBase().isUnsplittable();
    }

    bool hasRoutingTable() const {
-        return _cm && _cm->hasRoutingTable();
+        return _cm && getChunkManagerBase().hasRoutingTable();
    }

    bool allowMigrations() const;
@@ -126,7 +128,8 @@ public:
     * have a routing table.
     */
    ChunkVersion getShardPlacementVersion() const {
-        return (hasRoutingTable() ? _cm->getVersion(_thisShardId) : ChunkVersion::UNTRACKED());
+        return (hasRoutingTable() ? getChunkManagerBase().getVersion(_thisShardId)
+                                  : ChunkVersion::UNTRACKED());
    }

    /**

@@ -136,7 +139,8 @@ public:
     * timestamp".
     */
    Timestamp getShardMaxValidAfter() const {
-        return (hasRoutingTable() ? _cm->getMaxValidAfter(_thisShardId) : Timestamp(0, 0));
+        return (hasRoutingTable() ? getChunkManagerBase().getMaxValidAfter(_thisShardId)
+                                  : Timestamp(0, 0));
    }

    /**
@@ -149,7 +153,7 @@ public:
     * instead.
     */
    ChunkVersion getShardPlacementVersionForLogging() const {
-        return (hasRoutingTable() ? _cm->getVersionForLogging(_thisShardId)
+        return (hasRoutingTable() ? getChunkManagerBase().getVersionForLogging(_thisShardId)
                                  : ChunkVersion::UNTRACKED());
    }

@@ -158,7 +162,7 @@ public:
     * table.
     */
    ChunkVersion getCollPlacementVersion() const {
-        return (hasRoutingTable() ? _cm->getVersion() : ChunkVersion::UNTRACKED());
+        return (hasRoutingTable() ? getChunkManagerBase().getVersion() : ChunkVersion::UNTRACKED());
    }

    /**
@@ -171,7 +175,7 @@ public:

    const ShardKeyPattern& getShardKeyPattern() const {
        tassert(10016206, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->getShardKeyPattern();
+        return getChunkManagerBase().getShardKeyPattern();
    }

    /**

@@ -199,12 +203,12 @@ public:

    bool uuidMatches(UUID uuid) const {
        tassert(10016215, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->uuidMatches(uuid);
+        return getChunkManagerBase().uuidMatches(uuid);
    }

    const UUID& getUUID() const {
        tassert(10016216, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->getUUID();
+        return getChunkManagerBase().getUUID();
    }

    /**
@@ -230,7 +234,32 @@ public:

    const ChunkManager* getChunkManager() const {
        tassert(10016207, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm.get_ptr();
+        return std::visit([](const auto& cm) -> const ChunkManager* { return &cm; }, *_cm);
+    }
+
+    /**
+     * Returns a pointer to ChunkManager if that's what's stored, nullptr otherwise.
+     * Use this only when you specifically need the current (non-point-in-time) ChunkManager.
+     */
+    const CurrentChunkManager* getCurrentChunkManager() const {
+        tassert(9014101, "Expected a routing table to be initialized", hasRoutingTable());
+        return std::get_if<CurrentChunkManager>(&*_cm);
+    }
+
+    /**
+     * Returns a pointer to PointInTimeChunkManager if that's what's stored, nullptr otherwise.
+     * Use this only when you specifically need the point-in-time ChunkManager.
+     */
+    const PointInTimeChunkManager* getPointInTimeChunkManager() const {
+        tassert(9014102, "Expected a routing table to be initialized", hasRoutingTable());
+        return std::get_if<PointInTimeChunkManager>(&*_cm);
+    }
+
+    /**
+     * Returns true if this metadata holds a point-in-time chunk manager.
+     */
+    bool isAtPointInTime() const {
+        return _cm && std::holds_alternative<PointInTimeChunkManager>(*_cm);
    }

    /**
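The typed accessors added above hand back a pointer only when the variant currently holds that alternative. As a minimal, self-contained sketch of the std::get_if / std::holds_alternative behaviour they rely on (plain standard C++ with placeholder types, not the MongoDB classes):

#include <cassert>
#include <variant>

// Hypothetical stand-ins for the two chunk manager flavours.
struct Current { int version = 1; };
struct PointInTime { int version = 1; long long clusterTime = 42; };

using Variant = std::variant<Current, PointInTime>;

int main() {
    Variant v = PointInTime{};

    // std::get_if returns nullptr when the requested alternative is not active,
    // which is the "typed pointer or nullptr" contract of the accessors above.
    assert(std::get_if<Current>(&v) == nullptr);
    assert(std::get_if<PointInTime>(&v) != nullptr);

    // std::holds_alternative answers the "is this a point-in-time instance?" question.
    assert(std::holds_alternative<PointInTime>(v));
    return 0;
}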
@@ -239,7 +268,7 @@ public:
     */
    bool keyBelongsToMe(const BSONObj& key) const {
        tassert(10016208, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->keyBelongsToShard(key, _thisShardId);
+        return getChunkManagerBase().keyBelongsToShard(key, _thisShardId);
    }

    /**

@@ -250,7 +279,7 @@ public:
    ChunkManager::ChunkOwnership nearestOwnedChunk(const BSONObj& key,
                                                   ChunkMap::Direction direction) const {
        tassert(9526301, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->nearestOwnedChunk(key, _thisShardId, direction);
+        return getChunkManagerBase().nearestOwnedChunk(key, _thisShardId, direction);
    }

    /**

@@ -266,7 +295,7 @@ public:
     */
    bool rangeOverlapsChunk(const ChunkRange& range) const {
        tassert(10016209, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->rangeOverlapsShard(range, _thisShardId);
+        return getChunkManagerBase().rangeOverlapsShard(range, _thisShardId);
    }

    /**
@@ -307,22 +336,34 @@ public:

    const boost::optional<TypeCollectionReshardingFields>& getReshardingFields() const {
        tassert(10016210, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->getReshardingFields();
+        return getChunkManagerBase().getReshardingFields();
    }

    const boost::optional<TypeCollectionTimeseriesFields>& getTimeseriesFields() const {
        tassert(10016211, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->getTimeseriesFields();
+        return getChunkManagerBase().getTimeseriesFields();
    }

    bool isUniqueShardKey() const {
        tassert(10016212, "Expected a routing table to be initialized", hasRoutingTable());
-        return _cm->isUnique();
+        return getChunkManagerBase().isUnique();
    }

private:
+    /**
+     * Helper to access the ChunkManager interface regardless of whether _cm holds
+     * a ChunkManager or PointInTimeChunkManager.
+     */
+    const ChunkManager& getChunkManagerBase() const {
+        tassert(9014100, "Expected _cm to be initialized", _cm.has_value());
+        return std::visit([](const auto& cm) -> const ChunkManager& { return cm; }, *_cm);
+    }
+
+    // Type alias for the variant holding either ChunkManager or PointInTimeChunkManager
+    using ChunkManagerVariant = std::variant<CurrentChunkManager, PointInTimeChunkManager>;
+
    // The full routing table for the collection or boost::none if the collection is not tracked
-    boost::optional<ChunkManager> _cm;
+    boost::optional<ChunkManagerVariant> _cm;

    // The identity of this shard, for the purpose of answering "key belongs to me" queries. If the
    // collection is not tracked (_cm is boost::none), then this value will be empty.
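The private getChunkManagerBase() helper uses the fact that std::visit may return a reference to a base class shared by every variant alternative. A minimal sketch of that dispatch pattern, again with placeholder types rather than the real ChunkManager hierarchy:

#include <iostream>
#include <variant>

// Hypothetical hierarchy: both alternatives derive from a common read-only interface.
struct Base {
    virtual ~Base() = default;
    virtual const char* kind() const = 0;
};
struct CurrentLike : Base {
    const char* kind() const override { return "current"; }
};
struct PointInTimeLike : Base {
    const char* kind() const override { return "point-in-time"; }
};

using Variant = std::variant<CurrentLike, PointInTimeLike>;

// Analogue of getChunkManagerBase(): expose the common interface without
// caring which alternative is actually stored.
const Base& asBase(const Variant& v) {
    return std::visit([](const auto& cm) -> const Base& { return cm; }, v);
}

int main() {
    Variant v = PointInTimeLike{};
    std::cout << asBase(v).kind() << '\n';  // prints "point-in-time"
    return 0;
}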
@@ -148,7 +148,7 @@ protected:
            return std::vector<ChunkType>{chunk1, chunk2, chunk3, chunk4};
        }());

-        ChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none);
+        CurrentChunkManager cm(makeStandaloneRoutingTableHistory(std::move(rt)));
        ASSERT_EQ(4, cm.numChunks());

        {

@@ -103,8 +103,7 @@ CollectionMetadata makeTrackedCollectionMetadataImpl(
        chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), chunk.getShard())});
    }

-    return CollectionMetadata(
-        ChunkManager(ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(
+    auto routingTableHistory = ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(
            RoutingTableHistory::makeNew(kNss,
                                         uuid,
                                         shardKeyPattern,
@@ -116,9 +115,15 @@ CollectionMetadata makeTrackedCollectionMetadataImpl(
                                         boost::none /* timeseriesFields */,
                                         std::move(reshardingFields),
                                         true,
-                                         allChunks)),
-        kChunkManager),
+                                         allChunks));
+
+    if (kChunkManager) {
+        return CollectionMetadata(
+            PointInTimeChunkManager(std::move(routingTableHistory), kChunkManager.get()),
            kThisShard);
+    } else {
+        return CollectionMetadata(CurrentChunkManager(std::move(routingTableHistory)), kThisShard);
+    }
}
||||||
|
|
@ -116,7 +116,7 @@ public:
|
||||||
auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
|
auto range = ChunkRange(BSON(kShardKey << MINKEY), BSON(kShardKey << MAXKEY));
|
||||||
auto chunk = ChunkType(
|
auto chunk = ChunkType(
|
||||||
uuid, std::move(range), ChunkVersion({epoch, timestamp}, {1, 0}), chunkShardId);
|
uuid, std::move(range), ChunkVersion({epoch, timestamp}, {1, 0}), chunkShardId);
|
||||||
ChunkManager cm(makeStandaloneRoutingTableHistory(
|
CurrentChunkManager cm(makeStandaloneRoutingTableHistory(
|
||||||
RoutingTableHistory::makeNew(kTestNss,
|
RoutingTableHistory::makeNew(kTestNss,
|
||||||
uuid,
|
uuid,
|
||||||
kShardKeyPattern,
|
kShardKeyPattern,
|
||||||
|
|
@ -129,8 +129,7 @@ public:
|
||||||
boost::none /* reshardingFields */,
|
boost::none /* reshardingFields */,
|
||||||
|
|
||||||
true,
|
true,
|
||||||
{std::move(chunk)})),
|
{std::move(chunk)})));
|
||||||
boost::none);
|
|
||||||
|
|
||||||
return CollectionMetadata(std::move(cm), collectionShardId);
|
return CollectionMetadata(std::move(cm), collectionShardId);
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -151,8 +151,9 @@ std::shared_ptr<ScopedCollectionDescription::Impl> MetadataManager::getActiveMet
        }
    }

-    return std::make_shared<MetadataAtTimestamp>(CollectionMetadata(
-        ChunkManager::makeAtTime(*activeMetadata->getChunkManager(), atClusterTime->asTimestamp()),
+    return std::make_shared<MetadataAtTimestamp>(
+        CollectionMetadata(PointInTimeChunkManager::make(*activeMetadata->getChunkManager(),
+                                                         atClusterTime->asTimestamp()),
        activeMetadata->shardId()));
}
@@ -109,8 +109,7 @@ protected:
        {ChunkType{uuid, range, ChunkVersion({epoch, Timestamp(1, 1)}, {1, 0}), kOtherShard}});

        return CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
-            kThisShard);
+            CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
    }

    /**

@@ -163,8 +162,7 @@ protected:
            splitChunks);

        return CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
-            kThisShard);
+            CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
    }

    static CollectionMetadata cloneMetadataMinusChunk(const CollectionMetadata& metadata,

@@ -190,8 +188,7 @@ protected:
            {ChunkType(metadata.getUUID(), ChunkRange(minKey, maxKey), chunkVersion, kOtherShard)});

        return CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), boost::none),
-            kThisShard);
+            CurrentChunkManager(makeStandaloneRoutingTableHistory(std::move(rt))), kThisShard);
    }

    std::shared_ptr<MetadataManager> _manager;
@@ -141,7 +141,8 @@ protected:
            {std::move(chunk)});

        return CollectionMetadata(
-            ChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)), Timestamp(100, 0)),
+            PointInTimeChunkManager(makeStandaloneRoutingTableHistory(std::move(rt)),
+                                    Timestamp(100, 0)),
            ShardId("this"));
    }

@@ -234,8 +234,7 @@ void ShardRoleTest::installShardedCollectionMetadata(
        RoutingTableHistoryValueHandle(std::make_shared<RoutingTableHistory>(std::move(rt)),
                                       ComparableChunkVersion::makeComparableChunkVersion(version));

-    const auto collectionMetadata =
-        CollectionMetadata(ChunkManager(rtHandle, boost::none), kMyShardName);
+    const auto collectionMetadata = CollectionMetadata(CurrentChunkManager(rtHandle), kMyShardName);

    AutoGetCollection coll(opCtx, nss, MODE_IX);
    CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss)
@@ -98,7 +98,7 @@ public:

        std::set<ShardId> candidateShardIds;
        if (cri.hasRoutingTable()) {
-            cri.getChunkManager().getAllShardIds(&candidateShardIds);
+            cri.getCurrentChunkManager().getAllShardIds(&candidateShardIds);
        } else {
            candidateShardIds.insert(primaryShardId);
        }

@@ -160,7 +160,7 @@ public:
        });
    }

-    ChunkManager createChunkManager(const UUID& uuid, const NamespaceString& nss) {
+    CurrentChunkManager createChunkManager(const UUID& uuid, const NamespaceString& nss) {
        ShardKeyPattern sk{fromjson("{x: 1, _id: 1}")};
        std::deque<DocumentSource::GetNextResult> configData{
            Document(fromjson("{_id: {x: {$minKey: 1}, _id: {$minKey: 1}}, max: {x: 0.0, _id: "
@@ -193,9 +193,8 @@ public:
            false,
            chunks);

-        return ChunkManager(
-            ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
-            boost::none);
+        return CurrentChunkManager(
+            ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)));
    }

protected:
@@ -587,7 +587,12 @@ void getShardIdsAndChunksForCanonicalQuery(const CanonicalQuery& query,
        // entries if a shard no longer owns chunks when it used to at _clusterTime. Similarly,
        // this optimization does not apply when it's necessary to fill chunkRanges, as the last
        // chunks can be lost.
-        if (!cm.isAtPointInTime() && shardIds->size() == cm.getNShardsOwningChunks() && !info) {
+        //
+        // Uses getAproxNShardsOwningChunks() as getNShardsOwningChunks() is only available on
+        // CurrentChunkManager, but both currently share the same implementation.
+        // TODO SERVER-114823 Review the usage of getAproxNShardsOwningChunks here.
+        if (!cm.isAtPointInTime() && shardIds->size() == cm.getAproxNShardsOwningChunks() &&
+            !info) {
            break;
        }
    }
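The early break above stops scanning further chunk ranges once every shard that owns chunks is already targeted. A self-contained sketch of that optimization, with toy data and simplified types in place of the real targeting code:

#include <iostream>
#include <set>
#include <string>
#include <utility>
#include <vector>

int main() {
    // Each chunk range maps to the shard that owns it (illustrative data only).
    std::vector<std::pair<std::string, std::string>> chunks = {
        {"[MinKey, 0)", "shardA"}, {"[0, 10)", "shardB"}, {"[10, MaxKey)", "shardA"}};

    // Approximate number of distinct shards owning chunks, known up front.
    const size_t approxNShardsOwningChunks = 2;

    std::set<std::string> shardIds;
    for (const auto& [range, shard] : chunks) {
        shardIds.insert(shard);
        // Once every owning shard is targeted, scanning more chunks cannot add
        // new targets, so the loop can stop early, as in the hunk above.
        if (shardIds.size() == approxNShardsOwningChunks) {
            break;
        }
    }
    std::cout << "targeted " << shardIds.size() << " shards\n";
    return 0;
}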
@@ -110,7 +110,7 @@ struct WriteOpAnalyzerTestImpl : public ShardingTestFixture {
    const NamespaceString kUnsplittableNss =
        NamespaceString::createNamespaceString_forTest("test", "unsplittable");

-    ChunkManager createChunkManager(
+    CurrentChunkManager createChunkManager(
        const UUID& uuid,
        const NamespaceString& nss,
        boost::optional<TypeCollectionTimeseriesFields> timeseriesFields = boost::none,

@@ -152,9 +152,8 @@ struct WriteOpAnalyzerTestImpl : public ShardingTestFixture {
            false,
            chunks);

-        return ChunkManager(
-            ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)),
-            boost::none);
+        return CurrentChunkManager(
+            ShardingTestFixtureCommon::makeStandaloneRoutingTableHistory(std::move(rt)));
    }

    std::unique_ptr<RoutingContext> createRoutingContextSharded(