mirror of https://github.com/mongodb/mongo
SERVER-113590 Test for availability canaries under several transitions (#43701)
GitOrigin-RevId: 80b4bceec0ae93de926225a7670585148f4755f4
parent 1b93592ccc
commit e3db61bf18

@@ -1591,6 +1591,7 @@ WORKSPACE.bazel @10gen/devprod-build @svc-auto-approve-bot
 /jstests/sharding/**/safe_secondary_reads_causal_consistency.js @10gen/server-cluster-scalability @svc-auto-approve-bot
 /jstests/sharding/**/remove_shard_*.js @10gen/server-catalog-and-routing-routing-and-topology @svc-auto-approve-bot
 /jstests/sharding/**/reporting_compound_wildcard_index_prefixed_by_shard_key.js @10gen/server-catalog-and-routing-ddl @svc-auto-approve-bot
+/jstests/sharding/**/availability_canaries_under_transitions.js @10gen/server-catalog-and-routing @svc-auto-approve-bot

 # The following patterns are parsed from ./jstests/sharding/analyze_shard_key/OWNERS.yml
 /jstests/sharding/analyze_shard_key/**/* @10gen/server-cluster-scalability @svc-auto-approve-bot

@@ -136,3 +136,6 @@ filters:
   - "reporting_compound_wildcard_index_prefixed_by_shard_key.js":
       approvers:
         - 10gen/server-catalog-and-routing-ddl
+  - "availability_canaries_under_transitions.js":
+      approvers:
+        - 10gen/server-catalog-and-routing

@@ -0,0 +1,106 @@
/**
 * Test that the availability canaries writing to the config database don't cause issues during various transitions:
 * to dedicated, to embedded, adding or removing shards.
 *
 * @tags: [
 *   requires_fcv_83
 * ]
 */

import {ReplSetTest} from "jstests/libs/replsettest.js";
import {ShardingTest} from "jstests/libs/shardingtest.js";

jsTest.log.info("Create sharded cluster 'st' with embedded config server");
let st = new ShardingTest({
    name: "st",
    shards: 2,
    useHostname: false,
    other: {
        enableBalancer: true,
        configShard: true,
        rsOptions: {setParameter: {orphanCleanupDelaySecs: 0}},
    },
});

// Use retryWrites when writing to the configsvr because mongos does not automatically retry those.
const mongosSession = st.s.startSession({retryWrites: true});
const configDB = mongosSession.getDatabase("config");
const adminDB = mongosSession.getDatabase("admin");

// Verifies that config.availability doesn't get registered in the global catalog, that its data is not moved
// between shards, and that the cluster has the expected number of shards.
const verify = (expectedNumberOfShards, dbsToCheck = []) => {
    const existingShards = configDB.shards.find({}).toArray();
    assert.eq(expectedNumberOfShards, existingShards.length, "Unexpected number of shards: " + tojson(existingShards));

    assert.eq(0, configDB.collections.find({_id: "config.availability"}).itcount());
    for (let [shardName, db] of dbsToCheck) {
        assert.eq([{_id: shardName}], db.availability.find().toArray());
    }
};

// Verify we have 2 shards initially, no data yet.
verify(2);

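// Seed availability canary documents directly on shard1 and on the config server primary before any
// transitions start.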
assert.commandWorked(st.shard1.getDB("config").availability.insert({_id: "shard1"}));
assert.commandWorked(st.configRS.getPrimary().getDB("config").availability.insert({_id: "config"}));

jsTest.log.info("Start transition to dedicated config server");
assert.commandWorked(adminDB.runCommand({startTransitionToDedicatedConfigServer: 1}));
st.configRS.awaitReplication();

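// Draining moves the config shard's user data to the remaining shards; each node's local config.availability
// is expected to stay where it was written (verify() checks this below).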
jsTest.log.info("Wait for draining to complete");
|
||||
assert.soon(() => {
|
||||
const drainingStatus = adminDB.runCommand({getTransitionToDedicatedConfigServerStatus: 1});
|
||||
assert.commandWorked(drainingStatus);
|
||||
return "drainingComplete" == drainingStatus.state;
|
||||
}, "getTransitionToDedicatedConfigServerStatus did not return 'drainingComplete' status within the timeout");
|
||||
|
||||
jsTest.log.info("Commit transition to dedicated config server");
|
||||
assert.commandWorked(adminDB.runCommand({commitTransitionToDedicatedConfigServer: 1}));
|
||||
|
||||
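// Each entry pairs a canary _id with the node whose local config.availability should still contain that document.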
let dbsToCheck = [
    ["shard1", st.shard1.getDB("config")],
    ["config", st.configRS.getPrimary().getDB("config")],
];

// Verify that only 1 shard remains after the transition to a dedicated config server.
verify(1, dbsToCheck);

// Create a new replica set and add it to the cluster as a shard.
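// The replica set is started with the shardsvr option so its node runs as a shard server and can be added
// via addShard.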
const rs1 = new ReplSetTest({name: "addShard", host: "localhost", nodes: 1});
rs1.startSet({shardsvr: ""});
rs1.initiate();

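// Write a canary document on the new replica set before it joins the cluster; addShard is expected to
// preserve it.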
const newShard = "myShard";
|
||||
assert.commandWorked(rs1.getPrimary().getDB("config").availability.insert({_id: newShard}));
|
||||
assert.eq(1, rs1.getPrimary().getDB("config").availability.find({_id: newShard}).itcount());
|
||||
|
||||
jsTest.log.info("Add new shard with existing data in config.availability");
|
||||
assert.commandWorked(adminDB.runCommand({addShard: rs1.getURL(), name: newShard}));
|
||||
|
||||
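// The new shard's document in config.shards should carry a topologyTime timestamp set when the shard was added.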
const newShardDoc = configDB.shards.findOne({_id: newShard});
assert(newShardDoc.topologyTime instanceof Timestamp);

// Reverify after the shard has been added.
dbsToCheck.push([newShard, rs1.getPrimary().getDB("config")]);
verify(2, dbsToCheck);

// Remove the original shard (st.shard1).
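// removeShard starts draining on the first call and reports state "completed" once draining finishes, so the
// command is retried until then.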
assert.soon(() => {
    let removeResult = assert.commandWorked(adminDB.runCommand({removeShard: st.shard1.shardName}));
    return "completed" == removeResult.state;
}, "Failed to remove shard in time");

// Reverify after the shard has been removed.
dbsToCheck = dbsToCheck.filter(([name]) => name !== "shard1");
verify(1, dbsToCheck);

// Transition to embedded config server
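// transitionFromDedicatedConfigServer makes the config server act as a shard again, bringing the shard count
// back to 2.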
assert.commandWorked(adminDB.runCommand({transitionFromDedicatedConfigServer: 1}));

// Reverify after transition to embedded
verify(2, dbsToCheck);

st.stop();
rs1.stopSet();