SERVER-50027 Make CollectionType use IDL (Part 2)

This change gets rid of writing the 'dropped' field on the
collection entries in favor of directly deleting them from
config.collections.
This commit is contained in:
Kaloian Manassiev 2020-10-22 10:26:26 -04:00 committed by Evergreen Agent
parent f48a671a94
commit b9bd006064
18 changed files with 255 additions and 363 deletions

View File

@ -45,8 +45,6 @@ assert.neq(null, st.shard1.getDB('test').user.findOne({_id: 10}));
var configDB = st.s.getDB('config');
var collDoc = configDB.collections.findOne({_id: 'test.user'});
assert(!collDoc.dropped);
assert.eq(2, configDB.chunks.count({ns: 'test.user'}));
assert.eq(1, configDB.tags.count({ns: 'test.user'}));
@ -59,12 +57,20 @@ assert.eq(null, st.shard1.getDB('test').user.findOne());
assert.commandWorked(testDB.runCommand({drop: 'user'}));
// Check for the collection with majority RC to verify that the write to remove the collection
// document from the catalog has propagated to the majority snapshot.
var findColl = configDB.runCommand(
{find: 'collections', filter: {_id: 'test.user'}, readConcern: {'level': 'majority'}});
collDoc = findColl.cursor.firstBatch[0];
assert(collDoc.dropped);
// document from the catalog has propagated to the majority snapshot. Note that here we explicitly
// use a command instead of going through the driver's 'find' helper, in order to be able to specify
// a 'majority' read concern.
//
// TODO (SERVER-51881): Remove this check after 5.0 is released
var collEntry =
assert
.commandWorked(configDB.runCommand(
{find: 'collections', filter: {_id: 'test.user'}, readConcern: {'level': 'majority'}}))
.cursor.firstBatch;
if (collEntry.length > 0) {
assert.eq(1, collEntry.length);
assert.eq(true, collEntry[0].dropped);
}
assert.eq(0, configDB.chunks.count({ns: 'test.user'}));
assert.eq(0, configDB.tags.count({ns: 'test.user'}));

View File

@ -4,6 +4,7 @@
// operation on a mongos may be active when it happens. All operations should handle gracefully.
//
(function() {
var st = new ShardingTest({shards: 2, mongos: 5, verbose: 1});
// Balancer is by default stopped, thus it will not interfere
@ -72,12 +73,15 @@ jsTest.log("Rebuilding sharded collection with different split...");
coll.drop();
// TODO (SERVER-51881): Remove this check after 5.0 is released
var droppedCollDoc = config.collections.findOne({_id: coll.getFullName()});
assert(droppedCollDoc != null);
assert.eq(true, droppedCollDoc.dropped);
assert(droppedCollDoc.lastmodEpoch != null);
assert(droppedCollDoc.lastmodEpoch.equals(new ObjectId("000000000000000000000000")),
"epoch not zero: " + droppedCollDoc.lastmodEpoch);
if (droppedCollDoc) {
assert(droppedCollDoc != null);
assert.eq(true, droppedCollDoc.dropped);
assert(droppedCollDoc.lastmodEpoch != null);
assert(droppedCollDoc.lastmodEpoch.equals(new ObjectId("000000000000000000000000")),
"epoch not zero: " + droppedCollDoc.lastmodEpoch);
}
assert.commandWorked(admin.runCommand({enableSharding: coll.getDB() + ""}));
st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName);
@ -131,3 +135,4 @@ coll.drop();
jsTest.log("Done!");
st.stop();
})();

View File

@ -46,8 +46,14 @@ assert.eq(0, config.databases.count({_id: dbA.getName()}));
assert.eq(1, config.databases.count({_id: dbB.getName()}));
assert.eq(1, config.databases.count({_id: dbC.getName()}));
// 10 dropped collections
assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbA + "\\..*"), dropped: true}));
// 10 dropped collections (they either do not exist or all have the dropped field set)
//
// TODO (SERVER-51881): Remove this check after 5.0 is released
// Fetch the actual collection entries (not just a count): count() returns a number, which has
// no '.length' and no '.forEach', so the per-entry 'dropped' assertions below need the documents.
var droppedCollEntries = config.collections.find({_id: RegExp("^" + dbA + "\\..*")}).toArray();
if (droppedCollEntries.length > 0) {
    // Pre-5.0 binaries leave the entries behind with 'dropped: true'; 5.0+ deletes them outright.
    assert.eq(numColls, droppedCollEntries.length);
    droppedCollEntries.forEach((collEntry) => assert.eq(true, collEntry.dropped));
}
// 20 active (dropped is missing)
assert.eq(numColls, config.collections.count({_id: RegExp("^" + dbB + "\\..*")}));

View File

@ -141,16 +141,16 @@ var queryConfigCollections = function(st, testNamespaces) {
var cursor;
// Find query.
cursor = configDB.collections.find({"key.a": 1}, {dropped: 1, "key.a": 1, "key.c": 1})
cursor = configDB.collections.find({"key.a": 1}, {"key.a": 1, "key.c": 1})
.sort({"_id": 1})
.batchSize(2);
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: testNamespaces[1], dropped: false, key: {a: 1}});
assert.eq(cursor.next(), {_id: testNamespaces[3], dropped: false, key: {a: 1, c: 1}});
assert.eq(cursor.next(), {_id: testNamespaces[1], key: {a: 1}});
assert.eq(cursor.next(), {_id: testNamespaces[3], key: {a: 1, c: 1}});
assert(cursor.hasNext());
assert.eq(cursor.objsLeftInBatch(), 2);
assert.eq(cursor.next(), {_id: testNamespaces[2], dropped: false, key: {a: 1}});
assert.eq(cursor.next(), {_id: testNamespaces[0], dropped: false, key: {a: 1}});
assert.eq(cursor.next(), {_id: testNamespaces[2], key: {a: 1}});
assert.eq(cursor.next(), {_id: testNamespaces[0], key: {a: 1}});
assert(!cursor.hasNext());
// Aggregate query.

View File

@ -15,7 +15,7 @@ function testAndClenaupWithKeyNoIndexFailed(keyDoc) {
var ns = kDbName + '.foo';
assert.commandFailed(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
assert.eq(mongos.getDB('config').collections.count({_id: ns}), 0);
assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
}
@ -24,11 +24,11 @@ function testAndClenaupWithKeyOK(keyDoc) {
assert.commandWorked(mongos.getDB(kDbName).foo.createIndex(keyDoc));
var ns = kDbName + '.foo';
assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
assert.eq(mongos.getDB('config').collections.count({_id: ns}), 0);
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
assert.eq(mongos.getDB('config').collections.count({_id: ns}), 1);
assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
}
@ -36,11 +36,11 @@ function testAndClenaupWithKeyNoIndexOK(keyDoc) {
assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
var ns = kDbName + '.foo';
assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 0);
assert.eq(mongos.getDB('config').collections.count({_id: ns}), 0);
assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: keyDoc}));
assert.eq(mongos.getDB('config').collections.count({_id: ns, dropped: false}), 1);
assert.eq(mongos.getDB('config').collections.count({_id: ns}), 1);
assert.commandWorked(mongos.getDB(kDbName).dropDatabase());
}

View File

@ -20,9 +20,12 @@ assert.eq(exists, 1);
// Test that drop database properly cleans up config
s.getDB(specialDB).dropDatabase();
var cursor = s.getDB("config").collections.find({_id: specialNS});
assert(cursor.next()["dropped"]);
assert(!cursor.hasNext());
// TODO (SERVER-51881): Remove this check after 5.0 is released
var droppedColl = s.getDB("config").collections.find({_id: specialNS}).toArray();
if (droppedColl.length > 0) {
    // Pre-5.0 binaries keep the entry with 'dropped: true'; 5.0+ deletes it. 'droppedColl' is an
    // array, so the 'dropped' flag must be read off the single element, not the array itself.
    assert.eq(1, droppedColl.length);
    assert.eq(true, droppedColl[0].dropped);
}
s.stop();
})();

View File

@ -63,6 +63,7 @@ public:
CollectionType shardedCollection;
shardedCollection.setNss(shardedNS());
shardedCollection.setEpoch(OID::gen());
shardedCollection.setUpdatedAt(Date_t::now());
shardedCollection.setKeyPattern(BSON("x" << 1));
ASSERT_OK(insertToConfigCollection(
@ -146,23 +147,6 @@ TEST_F(AssignKeyRangeToZoneTestFixture, BasicAssignKeyRangeOnUnshardedColl) {
assertOnlyZone(unshardedNS(), newRange, zoneName());
}
TEST_F(AssignKeyRangeToZoneTestFixture, AssignKeyRangeOnDroppedShardedColl) {
CollectionType unshardedCollection;
unshardedCollection.setNss(unshardedNS());
unshardedCollection.setEpoch(OID::gen());
unshardedCollection.setKeyPattern(BSON("x" << 1));
unshardedCollection.setDropped(true);
const ChunkRange newRange(BSON("x" << 0), BSON("x" << 10));
ASSERT_OK(insertToConfigCollection(
operationContext(), CollectionType::ConfigNS, unshardedCollection.toBSON()));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->assignKeyRangeToZone(operationContext(), unshardedNS(), newRange, zoneName()));
assertOnlyZone(unshardedNS(), newRange, zoneName());
}
TEST_F(AssignKeyRangeToZoneTestFixture, AssignKeyRangeNonExistingZoneShouldFail) {
auto status = ShardingCatalogManager::get(operationContext())
->assignKeyRangeToZone(operationContext(),
@ -276,6 +260,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MinThatIsAShardKeyPrefixShouldConvertToF
CollectionType shardedCollection;
shardedCollection.setNss(ns);
shardedCollection.setEpoch(OID::gen());
shardedCollection.setUpdatedAt(Date_t::now());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
@ -295,6 +280,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MaxThatIsAShardKeyPrefixShouldConvertToF
CollectionType shardedCollection;
shardedCollection.setNss(ns);
shardedCollection.setEpoch(OID::gen());
shardedCollection.setUpdatedAt(Date_t::now());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
@ -349,6 +335,7 @@ TEST_F(AssignKeyRangeToZoneTestFixture, MinMaxThatIsAShardKeyPrefixShouldSucceed
CollectionType shardedCollection;
shardedCollection.setNss(ns);
shardedCollection.setEpoch(OID::gen());
shardedCollection.setUpdatedAt(Date_t::now());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
@ -488,6 +475,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, NewRangeOverlappingWithDifferentNSShou
CollectionType shardedCollection;
shardedCollection.setNss(NamespaceString("other.coll"));
shardedCollection.setEpoch(OID::gen());
shardedCollection.setUpdatedAt(Date_t::now());
shardedCollection.setKeyPattern(BSON("x" << 1));
ASSERT_OK(insertToConfigCollection(
@ -683,23 +671,6 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, BasicRemoveKeyRangeOnUnshardedColl) {
assertNoZoneDocWithNamespace(unshardedNS());
}
TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveKeyRangeOnDroppedShardedColl) {
CollectionType unshardedCollection;
unshardedCollection.setNss(unshardedNS());
unshardedCollection.setEpoch(OID::gen());
unshardedCollection.setKeyPattern(BSON("x" << 1));
unshardedCollection.setDropped(true);
ASSERT_OK(insertToConfigCollection(
operationContext(), CollectionType::ConfigNS, unshardedCollection.toBSON()));
ASSERT_OK(ShardingCatalogManager::get(operationContext())
->removeKeyRangeFromZone(operationContext(),
unshardedNS(),
ChunkRange(BSON("x" << 0), BSON("x" << 10))));
assertOnlyZone(shardedNS(), getExistingRange(), zoneName());
}
TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveWithInvalidMinShardKeyShouldFail) {
auto status = ShardingCatalogManager::get(operationContext())
->removeKeyRangeFromZone(operationContext(),
@ -725,6 +696,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveWithPartialMinPrefixShouldRemove
CollectionType shardedCollection;
shardedCollection.setNss(ns);
shardedCollection.setEpoch(OID::gen());
shardedCollection.setUpdatedAt(Date_t::now());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(
@ -751,6 +723,7 @@ TEST_F(AssignKeyRangeWithOneRangeFixture, RemoveWithPartialMaxPrefixShouldRemove
CollectionType shardedCollection;
shardedCollection.setNss(ns);
shardedCollection.setEpoch(OID::gen());
shardedCollection.setUpdatedAt(Date_t::now());
shardedCollection.setKeyPattern(BSON("x" << 1 << "y" << 1));
ASSERT_OK(insertToConfigCollection(

View File

@ -76,6 +76,7 @@ protected:
CollectionType collection;
collection.setNss(_namespace);
collection.setEpoch(_epoch);
collection.setUpdatedAt(Date_t::now());
collection.setKeyPattern(BSON("x" << 1));
ASSERT_OK(insertToConfigCollection(

View File

@ -401,21 +401,16 @@ void ShardingCatalogManager::dropCollection(OperationContext* opCtx, const Names
"dropCollection chunk and tag data deleted",
"namespace"_attr = nss.ns());
// Mark the collection as dropped
CollectionType coll;
coll.setNss(nss);
coll.setDropped(true);
coll.setEpoch(ChunkVersion::DROPPED().epoch());
coll.setUpdatedAt(Grid::get(opCtx)->getNetwork()->now());
const bool upsert = false;
uassertStatusOK(ShardingCatalogClientImpl::updateShardingCatalogEntryForCollection(
opCtx, nss, coll, upsert));
const auto catalogClient = Grid::get(opCtx)->catalogClient();
uassertStatusOK(
catalogClient->removeConfigDocuments(opCtx,
CollectionType::ConfigNS,
BSON(CollectionType::kNssFieldName << nss.ns()),
ShardingCatalogClient::kMajorityWriteConcern));
LOGV2_DEBUG(21927,
1,
"dropCollection {namespace} collection marked as dropped",
"dropCollection collection marked as dropped",
"dropCollection {namespace} collection entry deleted",
"dropCollection collection entry deleted",
"namespace"_attr = nss.ns());
sendSSVToAllShards(opCtx, nss);

View File

@ -83,6 +83,7 @@ public:
CollectionType shardedCollection;
shardedCollection.setNss(dropNS());
shardedCollection.setEpoch(OID::gen());
shardedCollection.setUpdatedAt(Date_t::now());
shardedCollection.setKeyPattern(BSON(_shardKey << 1));
ASSERT_OK(insertToConfigCollection(
operationContext(), CollectionType::ConfigNS, shardedCollection.toBSON()));
@ -151,11 +152,10 @@ public:
HostAndPort(shard.getHost()), shard, dropNS(), ChunkVersion::DROPPED());
}
void expectCollectionDocMarkedAsDropped() {
void expectNoCollectionDocs() {
auto findStatus =
findOneOnConfigCollection(operationContext(), CollectionType::ConfigNS, BSONObj());
ASSERT_OK(findStatus.getStatus());
ASSERT_TRUE(findStatus.getValue().getField("dropped"));
ASSERT_EQ(ErrorCodes::NoMatchingDocument, findStatus);
}
void expectNoChunkDocs() {
@ -214,7 +214,7 @@ TEST_F(DropColl2ShardTest, Basic) {
future.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
}
@ -261,7 +261,7 @@ TEST_F(DropColl2ShardTest, NSNotFound) {
future.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
}
@ -345,7 +345,7 @@ TEST_F(DropColl2ShardTest, CleanupChunkError) {
future.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
}
@ -364,7 +364,7 @@ TEST_F(DropColl2ShardTest, SSVCmdErrorOnShard1) {
future.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
}
@ -383,7 +383,7 @@ TEST_F(DropColl2ShardTest, SSVErrorOnShard1) {
future.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
}
@ -404,7 +404,7 @@ TEST_F(DropColl2ShardTest, SSVCmdErrorOnShard2) {
future.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
}
@ -425,7 +425,7 @@ TEST_F(DropColl2ShardTest, SSVErrorOnShard2) {
future.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
}
@ -445,7 +445,7 @@ TEST_F(DropColl2ShardTest, AfterSuccessRetryWillStillSendDropSSV) {
firstDropFuture.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
@ -459,7 +459,7 @@ TEST_F(DropColl2ShardTest, AfterSuccessRetryWillStillSendDropSSV) {
secondDropFuture.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
}
@ -484,7 +484,7 @@ TEST_F(DropColl2ShardTest, AfterFailedDropRetryWillStillSendDropSSV) {
secondDropFuture.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
expectNoTagDocs();
@ -513,7 +513,7 @@ TEST_F(DropColl2ShardTest, AfterFailedSSVRetryWillStillSendDropSSV) {
secondDropFuture.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
expectNoTagDocs();
@ -531,7 +531,7 @@ TEST_F(DropColl2ShardTest, SSVisRetried) {
dropFuture.default_timed_get();
expectCollectionDocMarkedAsDropped();
expectNoCollectionDocs();
expectNoChunkDocs();
expectNoTagDocs();
expectNoTagDocs();

View File

@ -139,6 +139,7 @@ public:
CollectionType coll;
coll.setNss(tempNss);
coll.setEpoch(epoch);
coll.setUpdatedAt(Date_t::now());
coll.setKeyPattern(skey.getKeyPattern());
coll.setUnique(false);
coll.setUUID(uuid);

View File

@ -84,15 +84,15 @@ structs:
type: namespacestring
description: "The full namespace (with the database prefix)."
optional: false
uuid:
type: uuid
description: "The UUID of the collection, if known."
optional: true
epoch:
type: objectid
description: "Uniquely identifies this instance of the collection, in case of
drop/create or shard key refine."
optional: false
uuid:
type: uuid
description: "The UUID of the collection, if known."
optional: true
key:
cpp_name: keyPattern
type: KeyPattern

View File

@ -42,15 +42,11 @@ namespace mongo {
namespace {
const BSONField<bool> kNoBalance("noBalance");
const BSONField<bool> kDropped("dropped");
} // namespace
const NamespaceString CollectionType::ConfigNS("config.collections");
const BSONField<OID> CollectionType::epoch("lastmodEpoch");
const BSONField<Date_t> CollectionType::updatedAt("lastmod");
const BSONField<BSONObj> CollectionType::keyPattern("key");
const BSONField<BSONObj> CollectionType::defaultCollation("defaultCollation");
const BSONField<bool> CollectionType::unique("unique");
const BSONField<UUID> CollectionType::uuid("uuid");
@ -60,8 +56,14 @@ const BSONField<ReshardingFields> CollectionType::reshardingFields("reshardingFi
CollectionType::CollectionType(const BSONObj& obj) {
CollectionType::parseProtected(IDLParserErrorContext("CollectionType"), obj);
uassert(ErrorCodes::BadValue,
str::stream() << "invalid namespace " << getNss(),
str::stream() << "Invalid namespace " << getNss(),
getNss().isValid());
if (!getPre22CompatibleEpoch()) {
setPre22CompatibleEpoch(OID());
}
uassert(ErrorCodes::NoSuchKey,
"Shard key is missing",
getPre50CompatibleKeyPattern() || getDropped());
}
StatusWith<CollectionType> CollectionType::fromBSON(const BSONObj& source) {
@ -78,24 +80,6 @@ StatusWith<CollectionType> CollectionType::fromBSON(const BSONObj& source) {
CollectionType coll = std::move(swColl.getValue());
{
OID collEpoch;
Status status = bsonExtractOIDFieldWithDefault(source, epoch.name(), OID(), &collEpoch);
if (!status.isOK())
return status;
coll._epoch = collEpoch;
}
{
BSONElement collUpdatedAt;
Status status = bsonExtractTypedField(source, updatedAt.name(), Date, &collUpdatedAt);
if (!status.isOK())
return status;
coll._updatedAt = collUpdatedAt.Date();
}
{
std::string collDistributionMode;
Status status =
@ -116,39 +100,6 @@ StatusWith<CollectionType> CollectionType::fromBSON(const BSONObj& source) {
}
}
{
bool collDropped;
Status status = bsonExtractBooleanField(source, kDropped.name(), &collDropped);
if (status.isOK()) {
coll._dropped = collDropped;
} else if (status == ErrorCodes::NoSuchKey) {
// Dropped can be missing in which case it is presumed false
} else {
return status;
}
}
{
BSONElement collKeyPattern;
Status status = bsonExtractTypedField(source, keyPattern.name(), Object, &collKeyPattern);
if (status.isOK()) {
BSONObj obj = collKeyPattern.Obj();
if (obj.isEmpty()) {
return Status(ErrorCodes::ShardKeyNotFound, "empty shard key");
}
coll._keyPattern = KeyPattern(obj.getOwned());
} else if (status == ErrorCodes::NoSuchKey) {
// Sharding key can only be missing if the collection is dropped
if (!coll.getDropped()) {
return {ErrorCodes::NoSuchKey,
str::stream() << "Empty shard key. Failed to parse: " << source.toString()};
}
} else {
return status;
}
}
{
BSONElement collDefaultCollation;
Status status =
@ -219,30 +170,6 @@ StatusWith<CollectionType> CollectionType::fromBSON(const BSONObj& source) {
}
Status CollectionType::validate() const {
if (!_epoch.is_initialized()) {
return Status(ErrorCodes::NoSuchKey, "missing epoch");
}
if (!_updatedAt.is_initialized()) {
return Status(ErrorCodes::NoSuchKey, "missing updated at timestamp");
}
if (!_dropped.get_value_or(false)) {
if (!_epoch->isSet()) {
return Status(ErrorCodes::BadValue, "invalid epoch");
}
if (Date_t() == _updatedAt.get()) {
return Status(ErrorCodes::BadValue, "invalid updated at timestamp");
}
if (!_keyPattern.is_initialized()) {
return Status(ErrorCodes::NoSuchKey, "missing key pattern");
} else {
invariant(!_keyPattern->toBSON().isEmpty());
}
}
return Status::OK();
}
@ -250,17 +177,6 @@ BSONObj CollectionType::toBSON() const {
BSONObjBuilder builder;
serialize(&builder);
builder.append(epoch.name(), _epoch.get_value_or(OID()));
builder.append(updatedAt.name(), _updatedAt.get_value_or(Date_t()));
builder.append(kDropped.name(), _dropped.get_value_or(false));
// These fields are optional, so do not include them in the metadata for the purposes of
// consuming less space on the config servers.
if (_keyPattern.is_initialized()) {
builder.append(keyPattern.name(), _keyPattern->toBSON());
}
if (!_defaultCollation.isEmpty()) {
builder.append(defaultCollation.name(), _defaultCollation);
}
@ -299,16 +215,11 @@ std::string CollectionType::toString() const {
}
void CollectionType::setEpoch(OID epoch) {
_epoch = epoch;
setPre22CompatibleEpoch(std::move(epoch));
}
void CollectionType::setUpdatedAt(Date_t updatedAt) {
_updatedAt = updatedAt;
}
void CollectionType::setKeyPattern(const KeyPattern& keyPattern) {
invariant(!keyPattern.toBSON().isEmpty());
_keyPattern = keyPattern;
void CollectionType::setKeyPattern(KeyPattern keyPattern) {
setPre50CompatibleKeyPattern(std::move(keyPattern));
}
void CollectionType::setReshardingFields(boost::optional<ReshardingFields> reshardingFields) {
@ -316,11 +227,8 @@ void CollectionType::setReshardingFields(boost::optional<ReshardingFields> resha
}
bool CollectionType::hasSameOptions(const CollectionType& other) const {
// The relevant options must have been set on this CollectionType.
invariant(_keyPattern && _unique);
return getNss() == other.getNss() &&
SimpleBSONObjComparator::kInstance.evaluate(_keyPattern->toBSON() ==
SimpleBSONObjComparator::kInstance.evaluate(getKeyPattern().toBSON() ==
other.getKeyPattern().toBSON()) &&
SimpleBSONObjComparator::kInstance.evaluate(_defaultCollation ==
other.getDefaultCollation()) &&

View File

@ -29,10 +29,6 @@
#pragma once
#include <boost/optional.hpp>
#include <string>
#include "mongo/db/jsobj.h"
#include "mongo/db/keypattern.h"
#include "mongo/db/namespace_string.h"
#include "mongo/s/catalog/type_collection_gen.h"
@ -40,10 +36,6 @@
namespace mongo {
class Status;
template <typename T>
class StatusWith;
using ReshardingFields = TypeCollectionReshardingFields;
/**
@ -88,24 +80,34 @@ using ReshardingFields = TypeCollectionReshardingFields;
* }
*
*/
class CollectionType : public CollectionTypeBase {
class CollectionType : private CollectionTypeBase {
public:
// Make field names accessible.
using CollectionTypeBase::kNssFieldName;
static constexpr auto kEpochFieldName = CollectionTypeBase::kPre22CompatibleEpochFieldName;
using CollectionTypeBase::kUpdatedAtFieldName;
static constexpr auto kKeyPatternFieldName =
CollectionTypeBase::kPre50CompatibleKeyPatternFieldName;
// Make getters and setters accessible.
using CollectionTypeBase::getNss;
using CollectionTypeBase::getUpdatedAt;
using CollectionTypeBase::setNss;
using CollectionTypeBase::setUpdatedAt;
// Name of the collections collection in the config server.
static const NamespaceString ConfigNS;
static const BSONField<OID> epoch;
static const BSONField<Date_t> updatedAt;
static const BSONField<BSONObj> keyPattern;
static const BSONField<BSONObj> defaultCollation;
static const BSONField<bool> unique;
static const BSONField<UUID> uuid;
static const BSONField<std::string> distributionMode;
static const BSONField<ReshardingFields> reshardingFields;
explicit CollectionType(const BSONObj& obj);
CollectionType() = default;
explicit CollectionType(const BSONObj& obj);
/**
* Constructs a new CollectionType object from BSON. Also does validation of the contents.
*
@ -136,27 +138,19 @@ public:
*/
std::string toString() const;
OID getEpoch() const {
return _epoch.get();
const OID& getEpoch() const {
return *getPre22CompatibleEpoch();
}
void setEpoch(OID epoch);
Date_t getUpdatedAt() const {
return _updatedAt.get();
}
void setUpdatedAt(Date_t updatedAt);
bool getDropped() const {
return _dropped.get_value_or(false);
}
void setDropped(bool dropped) {
_dropped = dropped;
return getPre50CompatibleDropped() ? *getPre50CompatibleDropped() : false;
}
const KeyPattern& getKeyPattern() const {
return _keyPattern.get();
return *getPre50CompatibleKeyPattern();
}
void setKeyPattern(const KeyPattern& keyPattern);
void setKeyPattern(KeyPattern keyPattern);
const BSONObj& getDefaultCollation() const {
return _defaultCollation;
@ -175,7 +169,6 @@ public:
boost::optional<UUID> getUUID() const {
return _uuid;
}
void setUUID(UUID uuid) {
_uuid = uuid;
}
@ -184,39 +177,25 @@ public:
return _allowBalance.get_value_or(true);
}
DistributionMode getDistributionMode() const {
return _distributionMode.get_value_or(DistributionMode::kSharded);
}
void setDistributionMode(DistributionMode distributionMode) {
_distributionMode = distributionMode;
}
DistributionMode getDistributionMode() const {
return _distributionMode.get_value_or(DistributionMode::kSharded);
}
void setReshardingFields(boost::optional<ReshardingFields> reshardingFields);
const boost::optional<ReshardingFields>& getReshardingFields() const {
return _reshardingFields;
}
void setReshardingFields(boost::optional<ReshardingFields> reshardingFields);
bool hasSameOptions(const CollectionType& other) const;
private:
// Required to disambiguate collection namespace incarnations.
boost::optional<OID> _epoch;
// Required last updated time.
boost::optional<Date_t> _updatedAt;
// New field in v4.4; optional in v4.4 for backwards compatibility with v4.2. Whether the
// collection is unsharded or sharded. If missing, implies sharded.
boost::optional<DistributionMode> _distributionMode;
// Optional, whether the collection has been dropped. If missing, implies false.
boost::optional<bool> _dropped;
// Sharding key. Required, if collection is not dropped.
boost::optional<KeyPattern> _keyPattern;
// Optional collection default collation. If empty, implies simple collation.
BSONObj _defaultCollation;

View File

@ -46,3 +46,35 @@ structs:
type: namespacestring
description: "The full namespace (with the database prefix)."
optional: false
lastmodEpoch:
cpp_name: pre22CompatibleEpoch
type: objectid
description: "Uniquely identifies this incarnation of the collection. Only changes
in case of drop and create, or shard key refine.
It is optional for parsing purposes, because in versions of MongoDB
prior to 2.2, this value wasn't being written. In such cases, it will
default to an all-zero OID."
optional: true
lastmod:
cpp_name: updatedAt
type: date
description: "Contains the time of when the collection was either created, or if
dropped = true, when it was dropped."
optional: false
dropped: # TODO (SERVER-51881): Remove this field after 5.0 is released
cpp_name: pre50CompatibleDropped
type: bool
description: "Legacy (pre 5.0 only) field, which indicates that the collection is
dropped"
optional: true
key:
cpp_name: pre50CompatibleKeyPattern
type: KeyPattern
description: "The shard key pattern for the collection.
It is optional for parsing purposes, because in versions of MongoDB
prior to 5.0, this value would be missing for entries where 'dropped'
is set to true, because dropped collections' entries were being
written as dropped with certain fields missing instead of deleted."
optional: true

View File

@ -29,8 +29,6 @@
#include "mongo/platform/basic.h"
#include "mongo/base/status_with.h"
#include "mongo/bson/oid.h"
#include "mongo/s/catalog/type_collection.h"
#include "mongo/unittest/unittest.h"
#include "mongo/util/time_support.h"
@ -47,13 +45,14 @@ TEST(CollectionType, Empty) {
TEST(CollectionType, Basic) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
CollectionType::kNssFieldName << "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1))
<< CollectionType::defaultCollation(BSON("locale"
<< "fr_CA"))
<< CollectionType::unique(true)));
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName << "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::defaultCollation(BSON("locale"
<< "fr_CA"))
<< CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@ -80,9 +79,9 @@ TEST(CollectionType, AllFieldsPresent) {
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1))
<< "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::defaultCollation(BSON("locale"
<< "fr_CA"))
<< CollectionType::unique(true) << CollectionType::uuid() << uuid
@ -111,20 +110,20 @@ TEST(CollectionType, EmptyDefaultCollationFailsToParse) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1))
<< "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::defaultCollation(BSONObj()) << CollectionType::unique(true)));
ASSERT_FALSE(status.isOK());
}
TEST(CollectionType, MissingDefaultCollationParses) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
CollectionType::kNssFieldName
<< "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@ -134,13 +133,14 @@ TEST(CollectionType, MissingDefaultCollationParses) {
TEST(CollectionType, DefaultCollationSerializesCorrectly) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
CollectionType::kNssFieldName << "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1))
<< CollectionType::defaultCollation(BSON("locale"
<< "fr_CA"))
<< CollectionType::unique(true)));
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName << "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::defaultCollation(BSON("locale"
<< "fr_CA"))
<< CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@ -153,11 +153,11 @@ TEST(CollectionType, DefaultCollationSerializesCorrectly) {
TEST(CollectionType, MissingDefaultCollationIsNotSerialized) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
CollectionType::kNssFieldName
<< "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@ -168,11 +168,11 @@ TEST(CollectionType, MissingDefaultCollationIsNotSerialized) {
TEST(CollectionType, MissingDistributionModeImpliesDistributionModeSharded) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
StatusWith<CollectionType> status = CollectionType::fromBSON(BSON(
CollectionType::kNssFieldName
<< "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1) << CollectionType::unique(true)));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@ -189,10 +189,10 @@ TEST(CollectionType, DistributionModeUnshardedParses) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("unsharded")));
<< "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("unsharded")));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@ -208,10 +208,10 @@ TEST(CollectionType, DistributionModeShardedParses) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("sharded")));
<< "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("sharded")));
ASSERT_TRUE(status.isOK());
CollectionType coll = status.getValue();
@ -227,27 +227,27 @@ TEST(CollectionType, UnknownDistributionModeFailsToParse) {
const OID oid = OID::gen();
StatusWith<CollectionType> status = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("badvalue")));
<< "db.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("badvalue")));
ASSERT_EQUALS(ErrorCodes::FailedToParse, status.getStatus());
}
TEST(CollectionType, HasSameOptionsReturnsTrueIfBothDistributionModesExplicitlySetToUnsharded) {
const auto collType1 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("unsharded"))));
<< "db.coll" << CollectionType::kEpochFieldName << OID::gen()
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("unsharded"))));
const auto collType2 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("unsharded"))));
<< "db.coll" << CollectionType::kEpochFieldName << OID::gen()
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("unsharded"))));
ASSERT(collType1.hasSameOptions(collType2));
ASSERT(collType2.hasSameOptions(collType1));
@ -256,17 +256,17 @@ TEST(CollectionType, HasSameOptionsReturnsTrueIfBothDistributionModesExplicitlyS
TEST(CollectionType, HasSameOptionsReturnsTrueIfBothDistributionModesExplicitlySetToSharded) {
const auto collType1 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("sharded"))));
<< "db.coll" << CollectionType::kEpochFieldName << OID::gen()
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("sharded"))));
const auto collType2 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("sharded"))));
<< "db.coll" << CollectionType::kEpochFieldName << OID::gen()
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("sharded"))));
ASSERT(collType1.hasSameOptions(collType2));
ASSERT(collType2.hasSameOptions(collType1));
@ -277,17 +277,17 @@ TEST(
HasSameOptionsReturnsFalseIfOneDistributionModeExplicitlySetToUnshardedAndOtherExplicitlySetToSharded) {
const auto collType1 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("unsharded"))));
<< "db.coll" << CollectionType::kEpochFieldName << OID::gen()
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("unsharded"))));
const auto collType2 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("sharded"))));
<< "db.coll" << CollectionType::kEpochFieldName << OID::gen()
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("sharded"))));
ASSERT(!collType1.hasSameOptions(collType2));
ASSERT(!collType2.hasSameOptions(collType1));
@ -297,16 +297,17 @@ TEST(CollectionType,
HasSameOptionsReturnsTrueIfOneDistributionModeExplicitlySetToShardedAndOtherIsNotSet) {
const auto collType1 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)
<< CollectionType::distributionMode("sharded"))));
<< "db.coll" << CollectionType::kEpochFieldName << OID::gen()
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true) << CollectionType::distributionMode("sharded"))));
const auto collType2 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true))));
BSON(CollectionType::kNssFieldName << "db.coll" << CollectionType::kEpochFieldName
<< OID::gen() << CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true))));
ASSERT(collType1.hasSameOptions(collType2));
ASSERT(collType2.hasSameOptions(collType1));
@ -314,44 +315,23 @@ TEST(CollectionType,
TEST(CollectionType, HasSameOptionsReturnsTrueIfNeitherDistributionModeExplicitlySet) {
const auto collType1 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true))));
BSON(CollectionType::kNssFieldName << "db.coll" << CollectionType::kEpochFieldName
<< OID::gen() << CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true))));
const auto collType2 = uassertStatusOK(CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "db.coll" << CollectionType::epoch(OID::gen())
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true))));
BSON(CollectionType::kNssFieldName << "db.coll" << CollectionType::kEpochFieldName
<< OID::gen() << CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1)
<< CollectionType::unique(true))));
ASSERT(collType1.hasSameOptions(collType2));
ASSERT(collType2.hasSameOptions(collType1));
}
TEST(CollectionType, EpochCorrectness) {
CollectionType coll;
coll.setNss(NamespaceString{"db.coll"});
coll.setUpdatedAt(Date_t::fromMillisSinceEpoch(1));
coll.setKeyPattern(KeyPattern{BSON("a" << 1)});
coll.setUnique(false);
coll.setDropped(false);
// Validation will fail because we don't have epoch set. This ensures that if we read a
// collection with no epoch, we will write back one with epoch.
ASSERT_NOT_OK(coll.validate());
// We should be allowed to set empty epoch for dropped collections
coll.setDropped(true);
coll.setEpoch(OID());
ASSERT_OK(coll.validate());
// We should be allowed to set normal epoch for non-dropped collections
coll.setDropped(false);
coll.setEpoch(OID::gen());
ASSERT_OK(coll.validate());
}
TEST(CollectionType, Pre22Format) {
CollectionType coll = assertGet(
CollectionType::fromBSON(BSON("_id"
@ -370,11 +350,11 @@ TEST(CollectionType, Pre22Format) {
TEST(CollectionType, InvalidCollectionNamespace) {
const OID oid = OID::gen();
StatusWith<CollectionType> result = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< "foo\\bar.coll" << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
StatusWith<CollectionType> result = CollectionType::fromBSON(BSON(
CollectionType::kNssFieldName
<< "foo\\bar.coll" << CollectionType::kEpochFieldName << oid
<< CollectionType::kUpdatedAtFieldName << Date_t::fromMillisSinceEpoch(1)
<< CollectionType::kKeyPatternFieldName << BSON("a" << 1) << CollectionType::unique(true)));
ASSERT_NOT_OK(result.getStatus());
}
@ -382,9 +362,9 @@ TEST(CollectionType, BadType) {
const OID oid = OID::gen();
StatusWith<CollectionType> result = CollectionType::fromBSON(
BSON(CollectionType::kNssFieldName
<< 1 << CollectionType::epoch(oid)
<< CollectionType::updatedAt(Date_t::fromMillisSinceEpoch(1))
<< CollectionType::keyPattern(BSON("a" << 1)) << CollectionType::unique(true)));
<< 1 << CollectionType::kEpochFieldName << oid << CollectionType::kUpdatedAtFieldName
<< Date_t::fromMillisSinceEpoch(1) << CollectionType::kKeyPatternFieldName
<< BSON("a" << 1) << CollectionType::unique(true)));
ASSERT_NOT_OK(result.getStatus());
}

View File

@ -89,6 +89,7 @@ protected:
collType.setNss(kNss);
collType.setEpoch(epoch);
collType.setUpdatedAt(Date_t::now());
collType.setKeyPattern(shardKeyPattern.toBSON());
collType.setUnique(false);

View File

@ -141,6 +141,7 @@ ChunkManager CatalogCacheTestFixture::makeChunkManager(
CollectionType coll;
coll.setNss(nss);
coll.setEpoch(version.epoch());
coll.setUpdatedAt(Date_t::now());
coll.setKeyPattern(shardKeyPattern.getKeyPattern());
coll.setUnique(unique);
@ -203,6 +204,7 @@ void CatalogCacheTestFixture::expectGetCollection(NamespaceString nss,
CollectionType collType;
collType.setNss(nss);
collType.setEpoch(epoch);
collType.setUpdatedAt(Date_t::now());
collType.setKeyPattern(shardKeyPattern.toBSON());
collType.setUnique(false);
if (uuid) {