mirror of https://github.com/mongodb/mongo
SERVER-113473: Ensure proper management of sharding subsystems during the lifecycle of ReplicationCoordinatorDisagg (#41353)
Co-authored-by: Paolo Polato <paolo.polato@mongodb.com>
GitOrigin-RevId: 6f76fd6fd207944d3a65f92946e4484f16283ebf
parent 8c44a56167
commit 856b9e329b
@@ -2156,6 +2156,7 @@ WORKSPACE.bazel @10gen/devprod-build @svc-auto-approve-bot

 # The following patterns are parsed from ./src/mongo/db/modules/atlas/jstests/disagg_storage/OWNERS.yml
 /src/mongo/db/modules/atlas/jstests/disagg_storage/**/* @10gen/server-disagg-storage @svc-auto-approve-bot
+/src/mongo/db/modules/atlas/jstests/disagg_storage/**/sharding_basic.js @10gen/server-catalog-and-routing @svc-auto-approve-bot

 # The following patterns are parsed from ./src/mongo/db/modules/atlas/jstests/disagg_storage/encryption/OWNERS.yml
 /src/mongo/db/modules/atlas/jstests/disagg_storage/encryption/**/* @10gen/server-security @svc-auto-approve-bot
@@ -8,9 +8,14 @@ function getOplog(node) {
 export function getSortedCatalogEntries(node, sortField = "ident") {
     const adminDB = node.getDB("admin");
     const isSystemProfile = {"name": "system.profile"};
+    // The collections supporting the query analysis are asynchronously created upon onStepUpComplete() and may not be immediately available.
+    const isQueryAnalysisCollection = {
+        "db": "config",
+        "name": {$in: ["sampledQueries", "sampledQueriesDiff", "analyzeShardKeySplitPoints"]},
+    };
     const isLocal = {"db": "local"};
-    const match = {$nor: [isSystemProfile, isLocal]};
-    return adminDB.aggregate([{$listCatalog: {}}, {$match: match}, {$sort: {sortField: 1}}]).toArray();
+    const match = {$nor: [isSystemProfile, isLocal, isQueryAnalysisCollection]};
+    return adminDB.aggregate([{$listCatalog: {}}, {$match: match}, {$sort: {[sortField]: 1}}]).toArray();
 }

 /**
  * Given catalog entries for 2 nodes, where catalog entries for both nodes must be sorted by the
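One detail worth noting in the hunk above: {$sort: {sortField: 1}} builds a plain object literal, so the pipeline sorted on a literal field called "sortField" (which catalog entries do not have) rather than on the field named by the caller; the fix uses an ES2015 computed property name. A minimal shell sketch of the difference, assuming a running mongod reachable as db (variable names are illustrative):

// Minimal sketch: the literal key ignores the caller's sortField value, the computed key uses it.
const sortField = "ident";

const literalSpec = {sortField: 1};     // always {"sortField": 1}
const computedSpec = {[sortField]: 1};  // {"ident": 1}, built from the variable's value

const adminDB = db.getSiblingDB("admin");
// Only the computed spec actually orders the catalog entries by "ident".
const entries = adminDB.aggregate([{$listCatalog: {}}, {$sort: computedSpec}]).toArray();
printjson(entries.map(entry => entry.ident));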
@@ -25,7 +30,7 @@ export function assertMatchingCatalogIdents(node0CatalogIdents, node1CatalogIden
         node1CatalogIdents.length,
         `Expected nodes to have same number of entries. Entries for node0 ${tojson(
             node0CatalogIdents,
-        )}, entries for node1 ${node1CatalogIdents}`,
+        )}, entries for node1 ${tojson(node1CatalogIdents)}`,
     );

     const numCatalogEntries = node0CatalogIdents.length;
@@ -720,6 +720,11 @@ Milliseconds getMajorityReplicationLag(OperationContext* opCtx) {
     const auto& replCoord = repl::ReplicationCoordinator::get(opCtx);
     const auto lastAppliedWallTime = replCoord->getMyLastAppliedOpTimeAndWallTime().wallTime;
     const auto lastCommittedWallTime = replCoord->getLastCommittedOpTimeAndWallTime().wallTime;
+    // TODO SERVER-113571 Remove this if block and adjust the replication lag calculation (if
+    // needed).
+    if (lastCommittedWallTime == Date_t()) {
+        return Milliseconds(0);
+    }

     if (!lastAppliedWallTime.isFormattable() || !lastCommittedWallTime.isFormattable()) {
         return Milliseconds(0);
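The new early return treats a default-constructed Date_t (the Unix epoch) as "no majority commit point yet" and reports zero lag instead of an enormous bogus value. For intuition, a hedged shell-side analogue is sketched below; it assumes the lastAppliedWallTime / lastCommittedWallTime fields under replSetGetStatus's optimes, which may differ across server versions:

// Minimal sketch of the same guard from the shell (assumes a replica-set member connection).
function getApproxMajorityLagMillis(node) {
    const optimes = assert.commandWorked(node.adminCommand({replSetGetStatus: 1})).optimes;
    const lastApplied = optimes.lastAppliedWallTime;
    const lastCommitted = optimes.lastCommittedWallTime;
    // Mirror the new early return: an epoch (default) committed wall time means the majority
    // commit point is not meaningful yet, so report no lag rather than a huge value.
    if (lastCommitted.getTime() === 0) {
        return 0;
    }
    return lastApplied.getTime() - lastCommitted.getTime();
}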
@@ -834,6 +834,7 @@ void ShardingInitializationMongoD::onStepUpComplete(OperationContext* opCtx, lon
 }

 void ShardingInitializationMongoD::onStepDown() {
+    // TODO (SERVER-113612): remove cc() usage.
     auto opCtx = cc().getOperationContext();

     if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
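The last hunk touches the ShardingInitializationMongoD step-up / step-down hooks. As a purely illustrative, generic sketch (not part of this commit), a jstest can drive both hooks by forcing an election:

// Generic sketch: force a step-down on the old primary and a step-up on the new one,
// which exercises onStepDown() and onStepUpComplete() respectively.
const rst = new ReplSetTest({nodes: 2});
rst.startSet();
rst.initiate();

const oldPrimary = rst.getPrimary();
const newPrimary = rst.getSecondary();

rst.stepUp(newPrimary);
rst.awaitNodesAgreeOnPrimary();

assert.eq(rst.getPrimary().host, newPrimary.host);
rst.stopSet();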