SERVER-84548: Re-add transition to catalog shard feature flag and use CSCCL.
Kshitij Gupta 2024-02-05 20:27:36 +00:00 committed by Evergreen Agent
parent 4c279dae59
commit 5f15dd43c6
37 changed files with 288 additions and 52 deletions
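The change below re-enables featureFlagTransitionToCatalogShard (default false, not FCV gated) and routes config servers back through the ConfigServerCatalogCacheLoader when the flag is off, so tests that exercise the transition commands must opt in. A minimal opt-in sketch in jstest style; the tag and options mirror patterns from the diffs below and are illustrative, not part of this commit:

/**
 * Hypothetical test skeleton (not in this commit): only runs on variants that enable the flag.
 * @tags: [
 *   featureFlagTransitionToCatalogShard,
 * ]
 */
const st = new ShardingTest({shards: 2, config: 1, configShard: true});
// In configShard mode, ShardingTest now injects featureFlagTransitionToCatalogShard itself
// (see the shardingtest.js hunk at the end of this commit), so no explicit setParameter is needed.
st.stop();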

View File

@ -367,6 +367,7 @@ class ShardedClusterFixture(interface.Fixture, interface._DockerComposeInterface
del mongod_options["shardsvr"]
mongod_options["configsvr"] = ""
replset_config_options["configsvr"] = True
mongod_options["set_parameters"]["featureFlagTransitionToCatalogShard"] = "true"
mongod_options["storageEngine"] = "wiredTiger"
configsvr_options = self.configsvr_options.copy()
@ -415,6 +416,8 @@ class ShardedClusterFixture(interface.Fixture, interface._DockerComposeInterface
"""Return options that may be passed to a mongos."""
mongos_options = self.mongos_options.copy()
mongos_options["configdb"] = self.configsvr.get_internal_connection_string()
if self.config_shard is not None:
mongos_options["set_parameters"]["featureFlagTransitionToCatalogShard"] = "true"
mongos_options["set_parameters"] = mongos_options.get("set_parameters",
self.fixturelib.make_historic(
{})).copy()

View File

@ -547,6 +547,12 @@ last-continuous:
ticket: SERVER-83119
- test_file: jstests/sharding/eof_plan.js
ticket: SERVER-83119
- test_file: jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster_config_shard.js
ticket: SERVER-84548
- test_file: src/mongo/db/modules/enterprise/jstests/hot_backups/multiVersion/sharded_last_cont_pit_backup_restore_simple_config_shard.js
ticket: SERVER-84548
- test_file: src/mongo/db/modules/enterprise/jstests/hot_backups/multiVersion/sharded_last_cont_backup_restore_simple_config_shard.js
ticket: SERVER-84548
suites: null
last-lts:
all:
@ -1134,4 +1140,10 @@ last-lts:
ticket: SERVER-83119
- test_file: jstests/sharding/eof_plan.js
ticket: SERVER-83119
- test_file: jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster_config_shard.js
ticket: SERVER-84548
- test_file: src/mongo/db/modules/enterprise/jstests/hot_backups/multiVersion/sharded_last_cont_pit_backup_restore_simple_config_shard.js
ticket: SERVER-84548
- test_file: src/mongo/db/modules/enterprise/jstests/hot_backups/multiVersion/sharded_last_cont_backup_restore_simple_config_shard.js
ticket: SERVER-84548
suites: null

View File

@ -60,7 +60,8 @@ include:
- filename: etc/evergreen_yml_components/variants/in_memory.yml
- filename: etc/evergreen_yml_components/variants/ninja.yml
- filename: etc/evergreen_yml_components/variants/compile_static_analysis.yml
- filename: etc/evergreen_yml_components/variants/config_shard.yml
# TODO: (SERVER-86211) Re-enable config shard evergreen variant.
# - filename: etc/evergreen_yml_components/variants/config_shard.yml
- filename: etc/evergreen_yml_components/perf_tasks.yml
variables:

View File

@ -17,7 +17,8 @@ include:
### Uncomment when using this file for a LTS or Rapid release branch. ###
- filename: etc/evergreen_yml_components/variants/classic_engine.yml
### Uncomment when using this file for a LTS or Rapid release branch. ###
- filename: etc/evergreen_yml_components/variants/config_shard.yml
# TODO: (SERVER-86211) Re-enable config shard evergreen variant.
# - filename: etc/evergreen_yml_components/variants/config_shard.yml
- filename: etc/evergreen_yml_components/variants/compile_static_analysis.yml

View File

@ -463,6 +463,9 @@ export const authCommandsLib = {
testname: 'transitionFromDedicatedConfigServer',
command: {transitionFromDedicatedConfigServer: 1},
skipUnlessSharded: true,
skipTest: (conn) => {
return !TestData.setParameters.featureFlagTransitionToCatalogShard;
},
testcases: [
{
runOnDb: adminDbName,
@ -477,6 +480,9 @@ export const authCommandsLib = {
testname: "_configsvrTransitionFromDedicatedConfigServer",
command: {_configsvrTransitionFromDedicatedConfigServer: 1},
skipSharded: true,
skipTest: (conn) => {
return !TestData.setParameters.featureFlagTransitionToCatalogShard;
},
testcases: [
{
runOnDb: adminDbName,
@ -492,6 +498,9 @@ export const authCommandsLib = {
testname: "transitionToDedicatedConfigServer",
command: { transitionToDedicatedConfigServer: 1 },
skipUnlessSharded: true,
skipTest: (conn) => {
return !TestData.setParameters.featureFlagTransitionToCatalogShard;
},
testcases: [
{
runOnDb: adminDbName,
@ -507,6 +516,9 @@ export const authCommandsLib = {
testname: "_configsvrTransitionToDedicatedConfigServer",
command: {_configsvrTransitionToDedicatedConfigServer: 1},
skipSharded: true,
skipTest: (conn) => {
return !TestData.setParameters.featureFlagTransitionToCatalogShard;
},
testcases: [
{
runOnDb: adminDbName,

View File

@ -4,6 +4,13 @@
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
export const ConfigShardUtil = (function() {
function isTransitionEnabledIgnoringFCV(st) {
return FeatureFlagUtil.isEnabled(st.configRS.getPrimary(),
"TransitionToCatalogShard",
undefined /* user */,
true /* ignoreFCV */);
}
function transitionToDedicatedConfigServer(st, timeout) {
if (timeout == undefined) {
timeout = 10 * 60 * 1000; // 10 minutes
@ -39,6 +46,7 @@ export const ConfigShardUtil = (function() {
}
return {
isTransitionEnabledIgnoringFCV,
transitionToDedicatedConfigServer,
waitForRangeDeletions,
};
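The new isTransitionEnabledIgnoringFCV helper is what other files in this commit (for example mongos_api_params_util.js) use to decide whether transition-dependent cases may run. A minimal usage sketch, assuming a dedicated config server cluster; this is illustrative and not part of the commit:

import {ConfigShardUtil} from "jstests/libs/config_shard_util.js";

const st = new ShardingTest({shards: 1});
if (!ConfigShardUtil.isTransitionEnabledIgnoringFCV(st)) {
    jsTestLog("featureFlagTransitionToCatalogShard is disabled; skipping transition coverage");
    st.stop();
    quit();
}
// With the flag enabled, the config server accepts the transition command.
assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1}));
st.stop();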

View File

@ -20,7 +20,7 @@ import {awaitRSClientHosts} from "jstests/replsets/rslib.js";
TestData.skipCheckingUUIDsConsistentAcrossCluster = true;
// Test upgrade/downgrade between "latest" and "last-lts"/"last-continuous".
for (let oldVersion of ["last-lts", "last-continuous"]) {
for (let oldVersion of ["last-lts"]) {
var st = new ShardingTest({
shards: 2,
mongos: 1,

View File

@ -10,6 +10,7 @@
* @tags: [
* multiversion_incompatible,
* requires_replication,
* featureFlagTransitionToCatalogShard,
* ]
* */

View File

@ -3,6 +3,7 @@
*
* @tags: [
* requires_fcv_70,
* featureFlagTransitionToCatalogShard,
* ]
*/

View File

@ -3,6 +3,7 @@
*
* @tags: [
* requires_fcv_70,
* featureFlagTransitionToCatalogShard,
* ]
*/
import {moveChunkParallel} from "jstests/libs/chunk_manipulation_util.js";

View File

@ -3,6 +3,7 @@
*
* @tags: [
* requires_fcv_70,
* featureFlagTransitionToCatalogShard,
* ]
*/

View File

@ -1,5 +1,7 @@
// Various tests of the ability to establish a cursor on each mongod in a sharded cluster.
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
function listMongodStats(db) {
return db.getSiblingDB("admin").aggregate([{$_internalShardServerInfo: {}}]).toArray();
}
@ -126,12 +128,15 @@ const st = new ShardingTest({
// This one has always worked fine.
let results = listMongodStats(st.s.getDB(jsTestName()));
assert.eq(4, results.length, results);
assert.commandWorked(st.s.getDB("admin").runCommand({transitionFromDedicatedConfigServer: 1}));
// After the above command, this once tripped an invariant failure: SERVER-79372.
results = listMongodStats(st.s.getDB(jsTestName()));
// Assert there are results from all hosts.
assert.eq(5, results.length, results);
if (FeatureFlagUtil.isPresentAndEnabled(st.s0, "TransitionToCatalogShard")) {
assert.commandWorked(st.s.getDB("admin").runCommand({transitionFromDedicatedConfigServer: 1}));
// After the above command, this once tripped an invariant failure: SERVER-79372.
results = listMongodStats(st.s.getDB(jsTestName()));
// Assert there are results from all hosts.
assert.eq(5, results.length, results);
}
st.stop();
}());

View File

@ -90,11 +90,13 @@ MongoRunner.stopMongod(shardSecondary);
shardSecondary = MongoRunner.runMongod({restart: shardSecondary});
assert.soon(() => isShardingReady(shardSecondary));
// Transition the config shard to a dedicated config server and ensure that ShardingReady is still
// set on the config server primary and secondary.
assert.commandWorked(mongos.adminCommand({transitionToDedicatedConfigServer: 1}));
assert.soon(() => isShardingReady(configPrimary));
assert.soon(() => isShardingReady(configSecondary));
if (TestData.configShard) {
// Transition the config shard to a dedicated config server and ensure that ShardingReady is
// still set on the config server primary and secondary.
assert.commandWorked(mongos.adminCommand({transitionToDedicatedConfigServer: 1}));
assert.soon(() => isShardingReady(configPrimary));
assert.soon(() => isShardingReady(configSecondary));
}
// Restart the dedicated config server primary and secondaries and ensure that they still have
// ShardingReady set.

View File

@ -0,0 +1,30 @@
/**
* Verifies the transition to catalog shard feature flag guards running the config shard
* transition commands.
*
* @tags: [requires_fcv_70]
*/
delete TestData.setParameters.featureFlagTransitionToCatalogShard;
delete TestData.setParametersMongos.featureFlagTransitionToCatalogShard;
const st = new ShardingTest({
mongos: 1,
mongosOptions: {setParameter: {featureFlagTransitionToCatalogShard: false}},
config: 1,
configOptions: {setParameter: {featureFlagTransitionToCatalogShard: false}},
shards: 1,
rs: {nodes: 1},
rsOptions: {setParameter: {featureFlagTransitionToCatalogShard: false}},
});
// None of the transition commands can be run on mongos or the config server.
assert.commandFailedWithCode(st.s.adminCommand({transitionFromDedicatedConfigServer: 1}), 8454804);
assert.commandFailedWithCode(st.s.adminCommand({transitionToDedicatedConfigServer: 1}), 7368401);
const configPrimary = st.configRS.getPrimary();
assert.commandFailedWithCode(
configPrimary.adminCommand({_configsvrTransitionFromDedicatedConfigServer: 1}), 8454803);
assert.commandFailedWithCode(
configPrimary.adminCommand({_configsvrTransitionToDedicatedConfigServer: 1}), 7368402);
st.stop();

View File

@ -4,7 +4,7 @@
* server is also a shard server, we want to make sure that we can't use the localhost exception to
* create two users.
*
* @tags: [requires_fcv_70]
* @tags: [requires_fcv_70, featureFlagTransitionToCatalogShard]
*/
// Test that we can't create a shard specific user on the config shard if we already created a
// cluster wide user using the localhost exception.

View File

@ -2,7 +2,7 @@
* Tests to make sure that the mongos does not allow certain commands on the config and admin
* databases when configShard is enabled.
*
* @tags: [requires_fcv_70]
* @tags: [requires_fcv_70, featureFlagTransitionToCatalogShard]
*/
var st = new ShardingTest({mongos: 1, shards: 1, config: 1, configShard: true});

View File

@ -111,8 +111,12 @@ if (isShardSvrRst) {
const cfg = rst.getReplSetConfigFromNode();
cfg["configsvr"] = true;
reconfig(rst, cfg);
rst.upgradeSet(Object.assign(
{configsvr: "", setParameter: {skipShardingConfigurationChecks: false}}, upgradeOpts));
rst.upgradeSet(Object.assign({
configsvr: "",
setParameter:
{skipShardingConfigurationChecks: false, featureFlagTransitionToCatalogShard: true}
},
upgradeOpts));
}, tmpTestData);
}
@ -165,7 +169,11 @@ if (isShardSvrRst) {
});
}
mongos = MongoRunner.runMongos({configdb: rst.getURL(), keyFile});
mongos = MongoRunner.runMongos({
configdb: rst.getURL(),
keyFile,
setParameter: "featureFlagTransitionToCatalogShard=true",
});
authutil.asCluster(mongos, keyFile, () => {
assert.commandWorked(mongos.adminCommand({transitionFromDedicatedConfigServer: 1}));
});

View File

@ -5,13 +5,11 @@
* uses_parallel_shell,
* ]
*/
import {ConfigShardUtil} from "jstests/libs/config_shard_util.js";
const dbName = "test";
const collName = "collTest";
const ns = dbName + "." + collName;
const st =
new ShardingTest({shards: 2, mongos: 1, config: 1, configShard: true, enableBalancer: true});
const st = new ShardingTest({shards: 2, mongos: 1, config: 1, enableBalancer: true});
const adminDB = st.s.getDB('admin');
const distributed_txn_insert_count = 10;
@ -115,10 +113,4 @@ assert.eq(ret.errmsg.includes(errmsg), true);
performFsyncLockUnlockWithReadWriteOperations();
// Make sure the lock and unlock commands still work as expected after transitioning to a dedicated
// config server.
st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName});
ConfigShardUtil.transitionToDedicatedConfigServer(st);
performFsyncLockUnlockWithReadWriteOperations();
st.stop();

View File

@ -4,6 +4,7 @@
*
* @tags: [
* requires_fcv_70,
* featureFlagTransitionToCatalogShard,
* ]
*/
const st = new ShardingTest({shards: 2, configShard: true});

View File

@ -1458,6 +1458,8 @@ export let MongosAPIParametersUtil = (function() {
assert.commandWorked(st.rs0.getPrimary().adminCommand({serverStatus: 1}))
.storageEngine.supportsCommittedReads;
const isConfigShardEnabled = ConfigShardUtil.isTransitionEnabledIgnoringFCV(st);
(() => {
// Validate test cases for all commands. Ensure there is at least one test case for every
// mongos command, and that the test cases are well formed.
@ -1572,6 +1574,9 @@ export let MongosAPIParametersUtil = (function() {
if (!supportsCommittedReads && runOrExplain.requiresCommittedReads)
continue;
if (!isConfigShardEnabled && runOrExplain.requiresCatalogShardEnabled)
continue;
if (apiParameters.apiStrict && !runOrExplain.inAPIVersion1)
continue;

View File

@ -11,8 +11,16 @@ import {ShardingStateTest} from "jstests/sharding/libs/sharding_state_test.js";
const st = new ShardingTest({config: 1, shards: {rs0: {nodes: 1}}});
const rs = st.rs0;
const serverTypeFlag = TestData.configShard ? "configsvr" : "shardsvr";
const newNode = ShardingStateTest.addReplSetNode({replSet: rs, serverTypeFlag});
let newNode;
if (TestData.configShard) {
newNode = ShardingStateTest.addReplSetNode({
replSet: rs,
serverTypeFlag: "configsvr",
newNodeParams: "featureFlagTransitionToCatalogShard=true"
});
} else {
newNode = ShardingStateTest.addReplSetNode({replSet: rs, serverTypeFlag: "shardsvr"});
}
jsTestLog("Checking sharding state before failover.");
ShardingStateTest.checkShardingState(st);

View File

@ -14,7 +14,16 @@ const st = new ShardingTest({
});
const configRS = st.configRS;
const newNode = ShardingStateTest.addReplSetNode({replSet: configRS, serverTypeFlag: "configsvr"});
let newNode;
if (TestData.configShard) {
newNode = ShardingStateTest.addReplSetNode({
replSet: configRS,
serverTypeFlag: "configsvr",
newNodeParams: "featureFlagTransitionToCatalogShard=true"
});
} else {
newNode = ShardingStateTest.addReplSetNode({replSet: configRS, serverTypeFlag: "configsvr"});
}
jsTestLog("Checking sharding state before failover.");
ShardingStateTest.checkShardingState(st);

View File

@ -355,8 +355,11 @@ if (!TestData.configShard) {
cfg.configsvr = true;
reconfig(configShard, cfg);
configShard.restart(
0, {configsvr: '', setParameter: {skipShardingConfigurationChecks: false}});
configShard.restart(0, {
configsvr: '',
setParameter:
{skipShardingConfigurationChecks: false, featureFlagTransitionToCatalogShard: true}
});
configShard.awaitNodesAgreeOnPrimary();
// Cluster params should still exist.
@ -371,7 +374,10 @@ if (!TestData.configShard) {
assert.commandWorked(configShard.getPrimary().adminCommand(
{setFeatureCompatibilityVersion: targetFCV, confirm: true}));
}
var mongos = MongoRunner.runMongos({configdb: configShard.getURL()});
var mongos = MongoRunner.runMongos({
configdb: configShard.getURL(),
setParameter: "featureFlagTransitionToCatalogShard=true",
});
assert.commandWorked(mongos.adminCommand({transitionFromDedicatedConfigServer: 1}));
checkClusterParameters(clusterParameter2Name,
clusterParameter2Value,

View File

@ -65,7 +65,7 @@ const rs = s.rs0;
if (!TestData.configShard) {
rs.add({'shardsvr': ""});
} else {
rs.add({'configsvr': ""});
rs.add({'configsvr': "", 'setParameter': "featureFlagTransitionToCatalogShard=true"});
}
try {

View File

@ -60,6 +60,7 @@ env.Library(
target='server_base',
source=[
'basic_types.idl',
'catalog_shard_feature_flag.idl',
'cluster_role.cpp',
'feature_compatibility_version_document.idl',
'feature_compatibility_version_parser.cpp',

View File

@ -0,0 +1,37 @@
# Copyright (C) 2022-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
# as published by MongoDB, Inc.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Server Side Public License for more details.
#
# You should have received a copy of the Server Side Public License
# along with this program. If not, see
# <http://www.mongodb.com/licensing/server-side-public-license>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the Server Side Public License in all respects for
# all of the code used other than as permitted herein. If you modify file(s)
# with this exception, you may extend this exception to your version of the
# file(s), but you are not obligated to do so. If you do not wish to do so,
# delete this exception statement from your version. If you delete this
# exception statement from all source files in the program, then also delete
# it in the license file.
#
global:
cpp_namespace: "mongo"
feature_flags:
featureFlagTransitionToCatalogShard:
description: "Feature flag for transitioning a config server in and out of config shard mode"
cpp_varname: gFeatureFlagTransitionToCatalogShard
default: false
shouldBeFCVGated: false
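Because IDL feature flags are registered as server parameters, the effective value of the re-added flag can be inspected at runtime with getParameter. A hedged shell sketch, where conn stands for any connection to a node built with this file; not part of this commit:

// Assumed shape, for illustration only.
const res = assert.commandWorked(
    conn.adminCommand({getParameter: 1, featureFlagTransitionToCatalogShard: 1}));
// With the defaults declared above (default: false, shouldBeFCVGated: false), the reply
// contains roughly {featureFlagTransitionToCatalogShard: {value: false}, ok: 1}.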

View File

@ -52,6 +52,7 @@
#include "mongo/db/catalog/drop_collection.h"
#include "mongo/db/catalog/local_oplog_info.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/catalog_shard_feature_flag_gen.h"
#include "mongo/db/change_stream_pre_images_collection_manager.h"
#include "mongo/db/change_stream_serverless_helpers.h"
#include "mongo/db/client.h"
@ -884,11 +885,13 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnStepDownHook() {
TransactionCoordinatorService::get(_service)->onStepDown();
}
if (ShardingState::get(_service)->enabled()) {
CatalogCacheLoader::get(_service).onStepDown();
if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
// Called earlier for config servers.
TransactionCoordinatorService::get(_service)->onStepDown();
CatalogCacheLoader::get(_service).onStepDown();
// (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here.
} else if (gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe()) {
CatalogCacheLoader::get(_service).onStepDown();
}
}
@ -989,17 +992,19 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook
PeriodicShardedIndexConsistencyChecker::get(_service).onStepUp(_service);
TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
CatalogCacheLoader::get(_service).onStepUp();
// (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here.
if (gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe()) {
CatalogCacheLoader::get(_service).onStepUp();
}
}
if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) {
if (ShardingState::get(opCtx)->enabled()) {
VectorClockMutable::get(opCtx)->recoverDirect(opCtx);
CatalogCacheLoader::get(_service).onStepUp();
if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
// Called earlier for config servers.
TransactionCoordinatorService::get(_service)->onStepUp(opCtx);
CatalogCacheLoader::get(_service).onStepUp();
}
const auto configsvrConnStr =

View File

@ -36,6 +36,7 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog_shard_feature_flag_gen.h"
#include "mongo/db/cluster_role.h"
#include "mongo/db/commands.h"
#include "mongo/db/database_name.h"
@ -87,6 +88,10 @@ public:
using InvocationBase::InvocationBase;
void typedRun(OperationContext* opCtx) {
// (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here.
uassert(8454803,
"The transition to config shard feature is disabled",
gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe());
uassert(
ErrorCodes::IllegalOperation,
"_configsvrTransitionFromDedicatedConfigServer can only be run on config servers",

View File

@ -39,6 +39,7 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog_shard_feature_flag_gen.h"
#include "mongo/db/cluster_role.h"
#include "mongo/db/commands.h"
#include "mongo/db/database_name.h"
@ -112,6 +113,11 @@ public:
const DatabaseName&,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
// (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here.
uassert(7368402,
"The transition to config shard feature is disabled",
gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe());
uassert(ErrorCodes::IllegalOperation,
"_configsvrTransitionToDedicatedConfigServer can only be run on config servers",
serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer));

View File

@ -43,6 +43,7 @@
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/catalog_shard_feature_flag_gen.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/lock_manager_defs.h"
@ -212,6 +213,18 @@ public:
uassertStatusOK(onDbVersionMismatchNoExcept(opCtx, dbName, boost::none));
}
// A config server could receive this command even if not in config shard mode if the CS
// secondary is on an older binary version running a ShardServerCatalogCacheLoader. In
// that case we don't want to hit the MONGO_UNREACHABLE in
// ConfigServerCatalogCacheLoader::waitForDatabaseFlush() but throw an error instead so
// that the secondaries know they don't have updated metadata yet.
// (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here.
if (!gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe() &&
serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
uasserted(8454801, "config server is not storing cached metadata");
}
CatalogCacheLoader::get(opCtx).waitForDatabaseFlush(opCtx, dbName);
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);
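From a test's point of view, the new guard means a config server started without the flag now reports a clean error instead of hitting MONGO_UNREACHABLE; the analogous collection-flush guard in the next file uses code 8454802. A hedged sketch of the assumed failure shape, where configPrimary is a connection to such a config server; this is not a test from this commit:

assert.commandFailedWithCode(
    configPrimary.adminCommand({_flushDatabaseCacheUpdates: "test"}), 8454801);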

View File

@ -38,6 +38,7 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog_shard_feature_flag_gen.h"
#include "mongo/db/client.h"
#include "mongo/db/commands.h"
#include "mongo/db/concurrency/d_concurrency.h"
@ -157,6 +158,18 @@ public:
onCollectionPlacementVersionMismatch(opCtx, ns(), boost::none);
}
// A config server could receive this command even if not in config shard mode if the CS
// secondary is on an older binary version running a ShardServerCatalogCacheLoader. In
// that case we don't want to hit the MONGO_UNREACHABLE in
// ConfigServerCatalogCacheLoader::waitForCollectionFlush() but throw an error instead
// so that the secondaries know they don't have updated metadata yet.
// (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here.
if (!gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe() &&
serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) {
uasserted(8454802, "config server is not storing cached metadata");
}
CatalogCacheLoader::get(opCtx).waitForCollectionFlush(opCtx, ns());
repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx);

View File

@ -53,6 +53,7 @@
#include "mongo/client/replica_set_monitor.h"
#include "mongo/db/audit.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/catalog_shard_feature_flag_gen.h"
#include "mongo/db/client.h"
#include "mongo/db/client_metadata_propagation_egress_hook.h"
#include "mongo/db/cluster_role.h"
@ -613,18 +614,23 @@ void initializeGlobalShardingStateForConfigServerIfNeeded(OperationContext* opCt
return boost::none;
}();
CatalogCacheLoader::set(service,
std::make_unique<ShardServerCatalogCacheLoader>(
std::make_unique<ConfigServerCatalogCacheLoader>()));
// This is only called in startup when there shouldn't be replication state changes, but to
// be safe we take the RSTL anyway.
repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX);
const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
bool isReplSet = replCoord->getSettings().isReplSet();
bool isStandaloneOrPrimary =
!isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY);
CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary);
if (gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafeAtStartup()) {
CatalogCacheLoader::set(service,
std::make_unique<ShardServerCatalogCacheLoader>(
std::make_unique<ConfigServerCatalogCacheLoader>()));
// This is only called in startup when there shouldn't be replication state changes, but to
// be safe we take the RSTL anyway.
repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX);
const auto replCoord = repl::ReplicationCoordinator::get(opCtx);
bool isReplSet = replCoord->getSettings().isReplSet();
bool isStandaloneOrPrimary =
!isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY);
CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary);
} else {
CatalogCacheLoader::set(service, std::make_unique<ConfigServerCatalogCacheLoader>());
}
initializeGlobalShardingStateForMongoD(opCtx, configCS);

View File

@ -35,6 +35,7 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog_shard_feature_flag_gen.h"
#include "mongo/db/commands.h"
#include "mongo/db/database_name.h"
#include "mongo/db/feature_flag.h"
@ -78,6 +79,10 @@ public:
using InvocationBase::InvocationBase;
void typedRun(OperationContext* opCtx) {
// (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here.
uassert(8454804,
"The transition to config shard feature is disabled",
gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe());
ConfigsvrTransitionFromDedicatedConfigServer cmdToSend;
cmdToSend.setDbName(DatabaseName::kAdmin);

View File

@ -40,6 +40,7 @@
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/catalog_shard_feature_flag_gen.h"
#include "mongo/db/commands.h"
#include "mongo/db/database_name.h"
#include "mongo/db/feature_flag.h"
@ -96,6 +97,11 @@ public:
const DatabaseName&,
const BSONObj& cmdObj,
BSONObjBuilder& result) override {
// (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here.
uassert(7368401,
"The transition to config shard feature is disabled",
gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe());
auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
ConfigsvrTransitionToDedicatedConfig transitionToDedicatedConfigServer;

View File

@ -118,7 +118,7 @@ void ConfigServerCatalogCacheLoader::onStepUp() {
}
void ConfigServerCatalogCacheLoader::onReplicationRollback() {
MONGO_UNREACHABLE;
// no-op
}
void ConfigServerCatalogCacheLoader::shutDown() {
@ -128,7 +128,7 @@ void ConfigServerCatalogCacheLoader::shutDown() {
void ConfigServerCatalogCacheLoader::notifyOfCollectionRefreshEndMarkerSeen(
const NamespaceString& nss, const Timestamp& commitTime) {
MONGO_UNREACHABLE;
// no-op
}
void ConfigServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opCtx,
@ -143,6 +143,13 @@ void ConfigServerCatalogCacheLoader::waitForDatabaseFlush(OperationContext* opCt
SemiFuture<CollectionAndChangedChunks> ConfigServerCatalogCacheLoader::getChunksSince(
const NamespaceString& nss, ChunkVersion version) {
// There's no need to refresh if a collection is always unsharded. Further, attempting to
// refresh config.collections or config.chunks would trigger recursive refreshes since a config
// shard can use the shard svr process interface.
if (nss.isNamespaceAlwaysUntracked()) {
return Status(ErrorCodes::NamespaceNotFound,
str::stream() << "Collection " << nss.toStringForErrorMsg() << " not found");
}
return ExecutorFuture<void>(_executor)
.then([=]() {

View File

@ -3277,6 +3277,12 @@ var ReplSetTest = function ReplSetTest(opts) {
// We need to recalculate the DWC after each reconfig until the full set is included.
options.setParameter.enableDefaultWriteConcernUpdatesForInitiate = true;
if (baseOptions.hasOwnProperty("setParameter") &&
baseOptions.setParameter.hasOwnProperty("featureFlagTransitionToCatalogShard") &&
baseOptions.setParameter.featureFlagTransitionToCatalogShard) {
options.setParameter.featureFlagTransitionToCatalogShard = true;
}
// Disable a check in reconfig that will prevent certain configs with arbiters from
// spinning up. We will re-enable this check after the replica set has finished initiating.
if (jsTestOptions().enableTestCommands) {

View File

@ -1392,9 +1392,24 @@ var ShardingTest = function ShardingTest(params) {
}
rsDefaults.setParameter = rsDefaults.setParameter || {};
if (typeof (rsDefaults.setParameter) === "string") {
var eqIdx = rsDefaults.setParameter.indexOf("=");
if (eqIdx != -1) {
var param = rsDefaults.setParameter.substring(0, eqIdx);
var value = rsDefaults.setParameter.substring(eqIdx + 1);
rsDefaults.setParameter = {};
rsDefaults.setParameter[param] = value;
}
}
rsDefaults.setParameter.migrationLockAcquisitionMaxWaitMS =
otherParams.migrationLockAcquisitionMaxWaitMS;
if (isConfigShardMode && i == 0) {
rsDefaults.setParameter.featureFlagTransitionToCatalogShard = true;
}
var rsSettings = rsDefaults.settings;
delete rsDefaults.settings;
@ -1701,6 +1716,10 @@ var ShardingTest = function ShardingTest(params) {
options.setParameter.mongosShutdownTimeoutMillisForSignaledShutdown =
options.setParameter.mongosShutdownTimeoutMillisForSignaledShutdown || 0;
if (isConfigShardMode) {
options.setParameter.featureFlagTransitionToCatalogShard = true;
}
options.port = options.port || _allocatePortForMongos();
mongosOptions.push(options);
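The shardingtest.js hunk above first normalizes a string-form setParameter ("name=value") into an object so the config shard flag can be merged in. A standalone sketch of that normalization, with normalizeSetParameter being a hypothetical helper name, not something this commit adds:

// Hypothetical helper mirroring the string -> object normalization above.
function normalizeSetParameter(setParameter) {
    if (typeof setParameter !== "string") {
        return setParameter || {};
    }
    const eqIdx = setParameter.indexOf("=");
    if (eqIdx === -1) {
        // The hunk above leaves such a string untouched; returning it unchanged keeps parity.
        return setParameter;
    }
    const normalized = {};
    normalized[setParameter.substring(0, eqIdx)] = setParameter.substring(eqIdx + 1);
    return normalized;
}

// normalizeSetParameter("featureFlagTransitionToCatalogShard=true")
//     returns {featureFlagTransitionToCatalogShard: "true"}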