mirror of https://github.com/mongodb/mongo
SERVER-82112 Test that buckets with mixed schema data cannot be reopened
This commit is contained in:
parent 4cf2ec98fc
commit 5303abb137
@@ -398,12 +398,15 @@ Bucket* useAlternateBucket(BucketCatalog& catalog,
 
 StatusWith<std::unique_ptr<Bucket>> rehydrateBucket(OperationContext* opCtx,
                                                     BucketStateRegistry& registry,
+                                                    ExecutionStatsController& stats,
                                                     const NamespaceString& ns,
                                                     const StringDataComparator* comparator,
                                                     const TimeseriesOptions& options,
                                                     const BucketToReopen& bucketToReopen,
                                                     const uint64_t catalogEra,
                                                     const BucketKey* expectedKey) {
+    ScopeGuard updateStatsOnError([&stats] { stats.incNumBucketReopeningsFailed(); });
+
     const auto& [bucketDoc, validator] = bucketToReopen;
     if (catalogEra < getCurrentEra(registry)) {
         return {ErrorCodes::WriteConflict, "Bucket is from an earlier era, may be outdated"};
@@ -508,6 +511,7 @@ StatusWith<std::unique_ptr<Bucket>> rehydrateBucket(OperationContext* opCtx,
         bucket->schema.calculateMemUsage() + key.metadata.toBSON().objsize() + sizeof(Bucket) +
         sizeof(std::unique_ptr<Bucket>) + (sizeof(Bucket*) * 2);
 
+    updateStatsOnError.dismiss();
     return {std::move(bucket)};
 }
 
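
The guard-and-dismiss idiom in the two hunks above centralizes the failure accounting: any early return out of rehydrateBucket now bumps numBucketReopeningsFailed, and only the success path reaches dismiss(). A minimal, self-contained sketch of the idiom (a hand-rolled stand-in, not MongoDB's ScopeGuard utility; rehydrate and failedReopenings are illustrative names):

#include <iostream>
#include <utility>

// Minimal stand-in for a dismissable scope guard.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F f) : _f(std::move(f)) {}
    ~ScopeGuard() {
        if (_armed) {
            _f();  // runs on every exit path unless dismissed
        }
    }
    void dismiss() {
        _armed = false;  // success path: skip the callback
    }

private:
    F _f;
    bool _armed = true;
};

int failedReopenings = 0;

bool rehydrate(bool valid) {
    // Armed up front, so any early return below counts as one failure.
    ScopeGuard onError([] { ++failedReopenings; });
    if (!valid) {
        return false;  // failure: guard fires in the destructor
    }
    onError.dismiss();  // success: guard disarmed
    return true;
}

int main() {
    rehydrate(false);
    rehydrate(true);
    std::cout << failedReopenings << "\n";  // prints 1
}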
@@ -743,6 +747,7 @@ StatusWith<InsertResult> insert(OperationContext* opCtx,
     auto rehydratedBucket = (reopeningContext && reopeningContext->bucketToReopen.has_value())
         ? rehydrateBucket(opCtx,
                           catalog.bucketStateRegistry,
+                          stats,
                           ns,
                           comparator,
                           options,
@@ -751,7 +756,6 @@ StatusWith<InsertResult> insert(OperationContext* opCtx,
                           &key)
         : StatusWith<std::unique_ptr<Bucket>>{ErrorCodes::BadValue, "No bucket to rehydrate"};
     if (rehydratedBucket.getStatus().code() == ErrorCodes::WriteConflict) {
-        stats.incNumBucketReopeningsFailed();
         return rehydratedBucket.getStatus();
     }
 
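
With the guard armed inside rehydrateBucket itself, the caller in insert no longer increments the counter by hand for the WriteConflict case; that is the line removed above. Every failed reopening attempt is now counted exactly once, regardless of which check rejects the bucket.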
@@ -172,6 +172,7 @@ Bucket* useAlternateBucket(Stripe& stripe, WithLock stripeLock, const CreationIn
  */
 StatusWith<std::unique_ptr<Bucket>> rehydrateBucket(OperationContext* opCtx,
                                                     BucketStateRegistry& registry,
+                                                    ExecutionStatsController& stats,
                                                     const NamespaceString& ns,
                                                     const StringDataComparator* comparator,
                                                     const TimeseriesOptions& options,
@@ -322,6 +322,7 @@ Status BucketCatalogTest::_reopenBucket(const CollectionPtr& coll, const BSONObj
 
     auto res = internal::rehydrateBucket(_opCtx,
                                          _bucketCatalog->bucketStateRegistry,
+                                         stats,
                                          ns,
                                          coll->getDefaultCollator(),
                                          *options,
@@ -1376,6 +1377,32 @@ TEST_F(BucketCatalogTest, ReopenMalformedBucket) {
     }
 }
 
+TEST_F(BucketCatalogTest, ReopenMixedSchemaDataBucket) {
+    BSONObj bucketDoc = ::mongo::fromjson(
+        R"({"_id":{"$oid":"02091c2c050b7495eaef4581"},
+            "control":{"version":1,
+                       "min":{"_id":{"$oid":"63091c30138e9261fd70a903"},
+                              "time":{"$date":"2022-08-26T19:19:00Z"},
+                              "x":1},
+                       "max":{"_id":{"$oid":"63091c30138e9261fd70a905"},
+                              "time":{"$date":"2022-08-26T19:19:30Z"},
+                              "x":{"y":"z"}}},
+            "data":{"_id":{"0":{"$oid":"63091c30138e9261fd70a903"},
+                           "1":{"$oid":"63091c30138e9261fd70a904"},
+                           "2":{"$oid":"63091c30138e9261fd70a905"}},
+                    "time":{"0":{"$date":"2022-08-26T19:19:30Z"},
+                            "1":{"$date":"2022-08-26T19:19:30Z"},
+                            "2":{"$date":"2022-08-26T19:19:30Z"}},
+                    "x":{"0":1,"1":{"y":"z"},"2":"abc"}}})");
+
+    AutoGetCollection autoColl(_opCtx, _ns1.makeTimeseriesBucketsNamespace(), MODE_IX);
+
+    ASSERT_NOT_OK(_reopenBucket(autoColl.getCollection(), bucketDoc));
+
+    auto stats = internal::getExecutionStats(*_bucketCatalog, _ns1);
+    ASSERT_EQ(1, stats->numBucketReopeningsFailed.load());
+}
+
 TEST_F(BucketCatalogTest, ReopenClosedBuckets) {
     AutoGetCollection autoColl(_opCtx, _ns1.makeTimeseriesBucketsNamespace(), MODE_IX);
 
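
The bucket document in the new test mixes BSON types for the user field "x": row 0 stores a number, row 1 an object, and row 2 a string, and control.min/max likewise disagree (1 versus {"y":"z"}). Reopening such a bucket must fail, and because the guard in rehydrateBucket is armed before any validation, that rejection is what drives numBucketReopeningsFailed to 1 in the assertion. A rough conceptual sketch of a per-field type-uniformity check (illustrative only, not the actual timeseries schema code; CanonType and hasUniformSchema are made-up names):

#include <map>
#include <optional>
#include <string>
#include <vector>

// Hypothetical canonical-type tag; real code compares canonical BSON types.
enum class CanonType { kNumber, kString, kObject };

// Returns true only when every field keeps one canonical type across all
// measurements; the "x" column above (number, object, string) would fail.
bool hasUniformSchema(const std::map<std::string, std::vector<CanonType>>& columns) {
    for (const auto& [field, types] : columns) {
        std::optional<CanonType> seen;
        for (CanonType t : types) {
            if (seen && *seen != t) {
                return false;  // mixed schema detected for this field
            }
            seen = t;
        }
    }
    return true;
}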