mirror of https://github.com/mongodb/mongo
SERVER-84180 Translate logical refine shard keys to buckets form (#44709)
GitOrigin-RevId: f589d951f87e7ccac69f58ab58266bdb8e0e0bbc
This commit is contained in:
parent
c7df923af5
commit
6c6aa23b79
|
|
@ -0,0 +1,86 @@
|
||||||
|
/**
 * Tests refineCollectionShardKey for timeseries collections.
 * @tags: [
 *   assumes_balancer_off,
 *   does_not_support_stepdowns,
 *   requires_timeseries,
 *   # older fcv versions don't accept logical fields in shard key of refineCollectionShardKey
 *   requires_fcv_83,
 *   # This test performs explicit calls to shardCollection
 *   assumes_unsharded_collection,
 * ]
 */
|
||||||
|
|
||||||
|
import {
|
||||||
|
areViewlessTimeseriesEnabled,
|
||||||
|
getTimeseriesCollForDDLOps,
|
||||||
|
} from "jstests/core/timeseries/libs/viewless_timeseries_util.js";
|
||||||
|
|
||||||
|
// Connection and namespace under test.
const mongos = db.getMongo();
const dbName = db.getName();
const collName = jsTestName();

// Logical (user-facing) time-series field names.
const timeField = "time";
const metaField = "myMeta";

// Buckets-collection (internal) counterparts of the logical fields.
const bucketMetaField = "meta";
const controlTimeField = `control.min.${timeField}`;

// The collection starts sharded on the meta field only and is later refined
// to also include the time field.
const initialKey = {[metaField]: 1};
const refinedKey = {[metaField]: 1, [timeField]: 1};

// Number of documents seeded into the collection per test case.
const docsPerTest = 6;

assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
|
||||||
|
|
||||||
|
/**
 * (Re)creates a time-series collection named 'collName', shards it on the
 * initial (meta-only) key, and seeds it with 'docsPerTest' measurements.
 *
 * @param {string} collName - Name of the collection to create.
 * @returns The collection object.
 */
function createShardedTimeseriesCollection(collName) {
    const coll = db.getCollection(collName);

    // Best-effort drop: the collection may not exist yet, so the command's
    // result is deliberately not checked.
    db.runCommand({drop: collName});

    const shardCollectionCmd = {
        shardCollection: coll.getFullName(),
        key: initialKey,
        timeseries: {timeField: timeField, metaField: metaField},
    };
    assert.commandWorked(mongos.adminCommand(shardCollectionCmd));

    for (let i = 0; i < docsPerTest; ++i) {
        assert.commandWorked(coll.insert({[metaField]: i, [timeField]: ISODate()}));
    }

    return coll;
}
|
||||||
|
|
||||||
|
/**
 * Verifies that refineCollectionShardKey accepts a key expressed with the
 * logical (user-facing) time-series fields and that the cluster catalog ends
 * up with the translated buckets form ('meta' / 'control.min.time').
 */
function testAcceptsLogicalFields() {
    const coll = createShardedTimeseriesCollection(collName);
    const timeseriesNs = getTimeseriesCollForDDLOps(db, coll).getFullName();

    // Returns the shard key of 'timeseriesNs' as reported by $listClusterCatalog.
    // Asserts exactly one catalog entry matches so that a missing namespace fails
    // with a clear message instead of a TypeError on entries[0].
    function getCatalogShardKey() {
        const entries =
            db.aggregate([{$listClusterCatalog: {}}, {$match: {ns: timeseriesNs}}]).toArray();
        assert.eq(1,
                  entries.length,
                  `Expected exactly one catalog entry for ${timeseriesNs}, got ${tojson(entries)}`);
        return entries[0].shardKey;
    }

    // Verify initial shard key using $listClusterCatalog.
    assert.docEq({[bucketMetaField]: 1}, getCatalogShardKey(), "Initial shard key mismatch");

    assert.commandWorked(
        mongos.adminCommand({refineCollectionShardKey: coll.getFullName(), key: refinedKey}));

    // Verify refined shard key using $listClusterCatalog: the logical fields must
    // have been translated to buckets form.
    assert.docEq({[bucketMetaField]: 1, [controlTimeField]: 1},
                 getCatalogShardKey(),
                 "Refined shard key mismatch");

    // Writes must keep working after the refine.
    assert.commandWorked(coll.insert({[metaField]: "after", [timeField]: ISODate()}));
    assert.eq(docsPerTest + 1, coll.countDocuments({}));

    assert.commandWorked(db.runCommand({drop: collName}));
}
|
||||||
|
|
||||||
|
/**
 * Verifies that refineCollectionShardKey rejects a key expressed directly in
 * buckets form ('meta' / 'control.min.time') and that the failed refine leaves
 * the inserted measurements untouched.
 */
function testRejectsBucketFields() {
    const coll = createShardedTimeseriesCollection(collName);

    // Buckets-form field names must not be accepted in place of the logical ones.
    const invalidKey = {[bucketMetaField]: 1, [controlTimeField]: 1};
    const refineCmd = {refineCollectionShardKey: coll.getFullName(), key: invalidKey};

    // 5914001: error code expected for an invalid time-series shard key field
    // (presumably raised by the server's time-series shard key validation —
    // confirm against the server source).
    assert.commandFailedWithCode(mongos.adminCommand(refineCmd), 5914001);

    // The failed refine must not have affected the data.
    assert.eq(docsPerTest, coll.countDocuments({}));

    assert.commandWorked(db.runCommand({drop: collName}));
}
|
||||||
|
|
||||||
|
// Execute the test cases.
testAcceptsLogicalFields();
testRejectsBucketFields();
|
||||||
|
|
@ -12,13 +12,15 @@ import {
|
||||||
} from "jstests/core/timeseries/libs/viewless_timeseries_util.js";
|
} from "jstests/core/timeseries/libs/viewless_timeseries_util.js";
|
||||||
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
|
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
|
||||||
import {ShardingTest} from "jstests/libs/shardingtest.js";
|
import {ShardingTest} from "jstests/libs/shardingtest.js";
|
||||||
|
import {isFCVgte} from "jstests/libs/feature_compatibility_version.js";
|
||||||
|
|
||||||
// Connections.
|
// Connections.
|
||||||
const mongo = new ShardingTest({shards: 2, rs: {nodes: 3}});
|
const mongo = new ShardingTest({shards: 2, rs: {nodes: 3}});
|
||||||
const dbName = "testDB";
|
const dbName = "testDB";
|
||||||
const collName = "testColl";
|
const collName = "testColl";
|
||||||
const timeField = "time";
|
const timeField = "time";
|
||||||
const metaField = "meta";
|
const metaField = "myMeta";
|
||||||
|
const bucketMetaField = "meta";
|
||||||
const collNss = `${dbName}.${collName}`;
|
const collNss = `${dbName}.${collName}`;
|
||||||
const controlTimeField = `control.min.${timeField}`;
|
const controlTimeField = `control.min.${timeField}`;
|
||||||
const numDocsInserted = 20;
|
const numDocsInserted = 20;
|
||||||
|
|
@ -66,40 +68,40 @@ const zoneShardingTestCases = [
|
||||||
{
|
{
|
||||||
shardKey: {[metaField]: 1, [timeField]: 1},
|
shardKey: {[metaField]: 1, [timeField]: 1},
|
||||||
index: {[metaField]: 1, [timeField]: 1},
|
index: {[metaField]: 1, [timeField]: 1},
|
||||||
min: {[metaField]: 1, [controlTimeField]: 1},
|
min: {[bucketMetaField]: 1, [controlTimeField]: 1},
|
||||||
max: {[metaField]: 10, [controlTimeField]: 10},
|
max: {[bucketMetaField]: 10, [controlTimeField]: 10},
|
||||||
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
||||||
worksWhenUpdatingZoneKeyRangeAfterSharding: false,
|
worksWhenUpdatingZoneKeyRangeAfterSharding: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
shardKey: {[metaField]: 1, [timeField]: 1},
|
shardKey: {[metaField]: 1, [timeField]: 1},
|
||||||
index: {[metaField]: 1, [timeField]: 1},
|
index: {[metaField]: 1, [timeField]: 1},
|
||||||
min: {[metaField]: 1, [controlTimeField]: 1},
|
min: {[bucketMetaField]: 1, [controlTimeField]: 1},
|
||||||
max: {[metaField]: 10, [controlTimeField]: MaxKey},
|
max: {[bucketMetaField]: 10, [controlTimeField]: MaxKey},
|
||||||
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
||||||
worksWhenUpdatingZoneKeyRangeAfterSharding: false,
|
worksWhenUpdatingZoneKeyRangeAfterSharding: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
shardKey: {[metaField]: 1, [timeField]: 1},
|
shardKey: {[metaField]: 1, [timeField]: 1},
|
||||||
index: {[metaField]: 1, [timeField]: 1},
|
index: {[metaField]: 1, [timeField]: 1},
|
||||||
min: {[metaField]: 1, [controlTimeField]: MinKey},
|
min: {[bucketMetaField]: 1, [controlTimeField]: MinKey},
|
||||||
max: {[metaField]: 10, [controlTimeField]: 10},
|
max: {[bucketMetaField]: 10, [controlTimeField]: 10},
|
||||||
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
||||||
worksWhenUpdatingZoneKeyRangeAfterSharding: false,
|
worksWhenUpdatingZoneKeyRangeAfterSharding: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
shardKey: {[metaField]: 1, [timeField]: 1},
|
shardKey: {[metaField]: 1, [timeField]: 1},
|
||||||
index: {[metaField]: 1, [timeField]: 1},
|
index: {[metaField]: 1, [timeField]: 1},
|
||||||
min: {[metaField]: 1, [controlTimeField]: MinKey},
|
min: {[bucketMetaField]: 1, [controlTimeField]: MinKey},
|
||||||
max: {[metaField]: 10, [controlTimeField]: MaxKey},
|
max: {[bucketMetaField]: 10, [controlTimeField]: MaxKey},
|
||||||
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
||||||
worksWhenUpdatingZoneKeyRangeAfterSharding: false,
|
worksWhenUpdatingZoneKeyRangeAfterSharding: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
shardKey: {[metaField]: 1, [timeField]: 1},
|
shardKey: {[metaField]: 1, [timeField]: 1},
|
||||||
index: {[metaField]: 1, [timeField]: 1},
|
index: {[metaField]: 1, [timeField]: 1},
|
||||||
min: {[metaField]: 1, [controlTimeField]: MinKey},
|
min: {[bucketMetaField]: 1, [controlTimeField]: MinKey},
|
||||||
max: {[metaField]: 10, [controlTimeField]: MinKey},
|
max: {[bucketMetaField]: 10, [controlTimeField]: MinKey},
|
||||||
worksWhenUpdatingZoneKeyRangeBeforeSharding: true,
|
worksWhenUpdatingZoneKeyRangeBeforeSharding: true,
|
||||||
worksWhenUpdatingZoneKeyRangeAfterSharding: true,
|
worksWhenUpdatingZoneKeyRangeAfterSharding: true,
|
||||||
},
|
},
|
||||||
|
|
@ -115,8 +117,8 @@ const zoneShardingTestCases = [
|
||||||
{
|
{
|
||||||
shardKey: {[metaField]: 1, [timeField]: 1},
|
shardKey: {[metaField]: 1, [timeField]: 1},
|
||||||
index: {[metaField]: 1, [timeField]: 1},
|
index: {[metaField]: 1, [timeField]: 1},
|
||||||
min: {[metaField]: 1},
|
min: {[bucketMetaField]: 1},
|
||||||
max: {[metaField]: 10},
|
max: {[bucketMetaField]: 10},
|
||||||
// Sharding a collection fails if the predefined zones don't exactly match the shard key,
|
// Sharding a collection fails if the predefined zones don't exactly match the shard key,
|
||||||
// but not vice versa. Note this behavior applies to all collections, not just time-series.
|
// but not vice versa. Note this behavior applies to all collections, not just time-series.
|
||||||
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
worksWhenUpdatingZoneKeyRangeBeforeSharding: false,
|
||||||
|
|
@ -125,16 +127,16 @@ const zoneShardingTestCases = [
|
||||||
{
|
{
|
||||||
shardKey: {[metaField]: 1},
|
shardKey: {[metaField]: 1},
|
||||||
index: {[metaField]: 1},
|
index: {[metaField]: 1},
|
||||||
min: {[metaField]: 1},
|
min: {[bucketMetaField]: 1},
|
||||||
max: {[metaField]: 10},
|
max: {[bucketMetaField]: 10},
|
||||||
worksWhenUpdatingZoneKeyRangeBeforeSharding: true,
|
worksWhenUpdatingZoneKeyRangeBeforeSharding: true,
|
||||||
worksWhenUpdatingZoneKeyRangeAfterSharding: true,
|
worksWhenUpdatingZoneKeyRangeAfterSharding: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
shardKey: {[metaField + ".xyz"]: 1},
|
shardKey: {[metaField + ".xyz"]: 1},
|
||||||
index: {[metaField + ".xyz"]: 1},
|
index: {[metaField + ".xyz"]: 1},
|
||||||
min: {[metaField + ".xyz"]: 1},
|
min: {[bucketMetaField + ".xyz"]: 1},
|
||||||
max: {[metaField + ".xyz"]: 10},
|
max: {[bucketMetaField + ".xyz"]: 10},
|
||||||
worksWhenUpdatingZoneKeyRangeBeforeSharding: true,
|
worksWhenUpdatingZoneKeyRangeBeforeSharding: true,
|
||||||
worksWhenUpdatingZoneKeyRangeAfterSharding: true,
|
worksWhenUpdatingZoneKeyRangeAfterSharding: true,
|
||||||
},
|
},
|
||||||
|
|
@ -285,7 +287,7 @@ const zoneShardingTestCases = [
|
||||||
assert.commandWorked(
|
assert.commandWorked(
|
||||||
primaryShard.getDB(dbName).runCommand({
|
primaryShard.getDB(dbName).runCommand({
|
||||||
checkShardingIndex: getTimeseriesCollForDDLOps(db, coll).getFullName(),
|
checkShardingIndex: getTimeseriesCollForDDLOps(db, coll).getFullName(),
|
||||||
keyPattern: {[metaField]: 1, [controlTimeField]: 1},
|
keyPattern: {[bucketMetaField]: 1, [controlTimeField]: 1},
|
||||||
}),
|
}),
|
||||||
);
|
);
|
||||||
assert.commandFailed(
|
assert.commandFailed(
|
||||||
|
|
@ -310,9 +312,9 @@ const zoneShardingTestCases = [
|
||||||
createTimeSeriesColl({index: {[metaField]: 1, [timeField]: 1}, shardKey: {[metaField]: 1, [timeField]: 1}});
|
createTimeSeriesColl({index: {[metaField]: 1, [timeField]: 1}, shardKey: {[metaField]: 1, [timeField]: 1}});
|
||||||
const primaryShard = mongo.getPrimaryShard(dbName);
|
const primaryShard = mongo.getPrimaryShard(dbName);
|
||||||
const otherShard = mongo.getOther(primaryShard);
|
const otherShard = mongo.getOther(primaryShard);
|
||||||
const minChunk = {[metaField]: MinKey, [controlTimeField]: MinKey};
|
const minChunk = {[bucketMetaField]: MinKey, [controlTimeField]: MinKey};
|
||||||
const splitChunk = {[metaField]: numDocsInserted / 2, [controlTimeField]: MinKey};
|
const splitChunk = {[bucketMetaField]: numDocsInserted / 2, [controlTimeField]: MinKey};
|
||||||
const maxChunk = {[metaField]: MaxKey, [controlTimeField]: MaxKey};
|
const maxChunk = {[bucketMetaField]: MaxKey, [controlTimeField]: MaxKey};
|
||||||
function checkChunkCount(expectedCounts) {
|
function checkChunkCount(expectedCounts) {
|
||||||
const counts = mongo.chunkCounts(collName, dbName);
|
const counts = mongo.chunkCounts(collName, dbName);
|
||||||
assert.docEq(expectedCounts, counts);
|
assert.docEq(expectedCounts, counts);
|
||||||
|
|
@ -368,15 +370,20 @@ const zoneShardingTestCases = [
|
||||||
|
|
||||||
// Can add control.min.time as the last shard key component on the timeseries collection.
|
// Can add control.min.time as the last shard key component on the timeseries collection.
|
||||||
(function checkRefineCollectionShardKeyCommand() {
|
(function checkRefineCollectionShardKeyCommand() {
|
||||||
|
if (isFCVgte(db, "8.3")) {
|
||||||
createTimeSeriesColl({index: {[metaField]: 1, [timeField]: 1}, shardKey: {[metaField]: 1}});
|
createTimeSeriesColl({index: {[metaField]: 1, [timeField]: 1}, shardKey: {[metaField]: 1}});
|
||||||
assert.commandWorked(
|
assert.commandWorked(
|
||||||
mongo.s0.adminCommand({refineCollectionShardKey: collNss, key: {[metaField]: 1, [controlTimeField]: 1}}),
|
mongo.s0.adminCommand({refineCollectionShardKey: collNss, key: {[metaField]: 1, [timeField]: 1}}),
|
||||||
);
|
);
|
||||||
|
|
||||||
|
assert(coll.drop());
|
||||||
|
createTimeSeriesColl({index: {[metaField]: 1, [timeField]: 1}, shardKey: {[metaField]: 1}});
|
||||||
|
|
||||||
if (!areViewlessTimeseriesEnabled(mongo.s.getDB(dbName))) {
|
if (!areViewlessTimeseriesEnabled(mongo.s.getDB(dbName))) {
|
||||||
assert.commandWorked(
|
assert.commandWorked(
|
||||||
mongo.s0.adminCommand({
|
mongo.s0.adminCommand({
|
||||||
refineCollectionShardKey: getTimeseriesCollForDDLOps(db, coll).getFullName(),
|
refineCollectionShardKey: getTimeseriesCollForDDLOps(db, coll).getFullName(),
|
||||||
key: {[metaField]: 1, [controlTimeField]: 1},
|
key: {[metaField]: 1, [timeField]: 1},
|
||||||
}),
|
}),
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
@ -385,6 +392,7 @@ const zoneShardingTestCases = [
|
||||||
}
|
}
|
||||||
assert.eq(numDocsInserted * 2, coll.find({}).count());
|
assert.eq(numDocsInserted * 2, coll.find({}).count());
|
||||||
assert(coll.drop());
|
assert(coll.drop());
|
||||||
|
}
|
||||||
})();
|
})();
|
||||||
|
|
||||||
// Check clearJumboFlag command can clear chunk jumbo flag.
|
// Check clearJumboFlag command can clear chunk jumbo flag.
|
||||||
|
|
|
||||||
|
|
@ -1135,20 +1135,6 @@ void exitCriticalSectionsOnCoordinator(OperationContext* opCtx,
|
||||||
throwIfReasonDiffers);
|
throwIfReasonDiffers);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
|
||||||
* Check the requested shardKey is a timefield, then convert it to a shardKey compatible for the
|
|
||||||
* bucket collection.
|
|
||||||
*/
|
|
||||||
BSONObj validateAndTranslateShardKey(OperationContext* opCtx,
|
|
||||||
const TypeCollectionTimeseriesFields& timeseriesFields,
|
|
||||||
const BSONObj& shardKey) {
|
|
||||||
shardkeyutil::validateTimeseriesShardKey(
|
|
||||||
timeseriesFields.getTimeField(), timeseriesFields.getMetaField(), shardKey);
|
|
||||||
|
|
||||||
return uassertStatusOK(timeseries::createBucketsShardKeySpecFromTimeseriesShardKeySpec(
|
|
||||||
timeseriesFields.getTimeseriesOptions(), shardKey));
|
|
||||||
}
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Helper function to log the end of the shard collection event.
|
* Helper function to log the end of the shard collection event.
|
||||||
*/
|
*/
|
||||||
|
|
@ -1798,8 +1784,8 @@ void CreateCollectionCoordinator::_translateRequestParameters(OperationContext*
|
||||||
// Assign the correct shard key: in case of timeseries, the shard key must be converted.
|
// Assign the correct shard key: in case of timeseries, the shard key must be converted.
|
||||||
KeyPattern keyPattern;
|
KeyPattern keyPattern;
|
||||||
if (optExtendedTimeseriesFields && isSharded(_request)) {
|
if (optExtendedTimeseriesFields && isSharded(_request)) {
|
||||||
keyPattern = validateAndTranslateShardKey(
|
keyPattern = shardkeyutil::validateAndTranslateTimeseriesShardKey(
|
||||||
opCtx, *optExtendedTimeseriesFields, *_request.getShardKey());
|
optExtendedTimeseriesFields->getTimeseriesOptions(), *_request.getShardKey());
|
||||||
} else {
|
} else {
|
||||||
keyPattern = *_request.getShardKey();
|
keyPattern = *_request.getShardKey();
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -212,6 +212,12 @@ ExecutorFuture<void> RefineCollectionShardKeyCoordinator::_runImpl(
|
||||||
_doc.setOldKey(
|
_doc.setOldKey(
|
||||||
metadata->getChunkManager()->getShardKeyPattern().getKeyPattern());
|
metadata->getChunkManager()->getShardKeyPattern().getKeyPattern());
|
||||||
|
|
||||||
|
if (auto ts = metadata->getTimeseriesFields()) {
|
||||||
|
auto bucketsKey = shardkeyutil::validateAndTranslateTimeseriesShardKey(
|
||||||
|
ts->getTimeseriesOptions(), _doc.getNewShardKey().toBSON());
|
||||||
|
_doc.setNewShardKey(KeyPattern(bucketsKey));
|
||||||
|
}
|
||||||
|
|
||||||
// No need to keep going if the shard key is already refined.
|
// No need to keep going if the shard key is already refined.
|
||||||
if (SimpleBSONObjComparator::kInstance.evaluate(
|
if (SimpleBSONObjComparator::kInstance.evaluate(
|
||||||
_doc.getOldKey()->toBSON() == _doc.getNewShardKey().toBSON())) {
|
_doc.getOldKey()->toBSON() == _doc.getNewShardKey().toBSON())) {
|
||||||
|
|
|
||||||
|
|
@ -366,6 +366,15 @@ void validateTimeseriesShardKey(StringData timeFieldName,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
 * Validates that 'tsShardKey' only references the logical time-series fields
 * (timeField / metaField) declared in 'tsOptions', then translates it into the
 * equivalent buckets-collection shard key spec. Throws (via uassert) if
 * validation or translation fails.
 */
BSONObj validateAndTranslateTimeseriesShardKey(const TimeseriesOptions& tsOptions,
                                               const BSONObj& tsShardKey) {
    // Reject keys that use anything other than the declared logical fields.
    shardkeyutil::validateTimeseriesShardKey(
        tsOptions.getTimeField(), tsOptions.getMetaField(), tsShardKey);

    auto translatedKey =
        timeseries::createBucketsShardKeySpecFromTimeseriesShardKeySpec(tsOptions, tsShardKey);
    return uassertStatusOK(translatedKey);
}
|
||||||
|
|
||||||
// TODO: SERVER-64187 move calls to validateShardKeyIsNotEncrypted into
|
// TODO: SERVER-64187 move calls to validateShardKeyIsNotEncrypted into
|
||||||
// validateShardKeyIndexExistsOrCreateIfPossible
|
// validateShardKeyIndexExistsOrCreateIfPossible
|
||||||
void validateShardKeyIsNotEncrypted(OperationContext* opCtx,
|
void validateShardKeyIsNotEncrypted(OperationContext* opCtx,
|
||||||
|
|
|
||||||
|
|
@ -250,6 +250,14 @@ MONGO_MOD_NEEDS_REPLACEMENT void validateTimeseriesShardKey(
|
||||||
boost::optional<StringData> metaFieldName,
|
boost::optional<StringData> metaFieldName,
|
||||||
const BSONObj& shardKeyPattern);
|
const BSONObj& shardKeyPattern);
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Validates that 'tsShardKey' uses the user-facing time-series field names (timeField and
|
||||||
|
* metaField), then translates it to the internal buckets collection format.
|
||||||
|
* Throws if the shard key contains fields other than the defined timeField or metaField.
|
||||||
|
*/
|
||||||
|
MONGO_MOD_NEEDS_REPLACEMENT BSONObj validateAndTranslateTimeseriesShardKey(
|
||||||
|
const TimeseriesOptions& tsOptions, const BSONObj& tsShardKey);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns a chunk range with extended or truncated boundaries to match the number of fields in the
|
* Returns a chunk range with extended or truncated boundaries to match the number of fields in the
|
||||||
* given metadata's shard key pattern.
|
* given metadata's shard key pattern.
|
||||||
|
|
|
||||||
|
|
@ -568,11 +568,8 @@ ReshardingCoordinatorDocument createReshardingCoordinatorDoc(
|
||||||
(!setProvenance ||
|
(!setProvenance ||
|
||||||
(*request.getProvenance() == ReshardingProvenanceEnum::kReshardCollection))) {
|
(*request.getProvenance() == ReshardingProvenanceEnum::kReshardCollection))) {
|
||||||
auto tsOptions = collEntry.getTimeseriesFields().get().getTimeseriesOptions();
|
auto tsOptions = collEntry.getTimeseriesFields().get().getTimeseriesOptions();
|
||||||
shardkeyutil::validateTimeseriesShardKey(
|
|
||||||
tsOptions.getTimeField(), tsOptions.getMetaField(), request.getKey());
|
|
||||||
shardKeySpec =
|
shardKeySpec =
|
||||||
uassertStatusOK(timeseries::createBucketsShardKeySpecFromTimeseriesShardKeySpec(
|
shardkeyutil::validateAndTranslateTimeseriesShardKey(tsOptions, request.getKey());
|
||||||
tsOptions, request.getKey()));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
auto tempReshardingNss = resharding::constructTemporaryReshardingNss(nss, collEntry.getUuid());
|
auto tempReshardingNss = resharding::constructTemporaryReshardingNss(nss, collEntry.getUuid());
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue