SERVER-111292 Reenable validation for geo indexes (#43162)

GitOrigin-RevId: 69f20c68a220ea68e94956475b633a04bff295ca
Author: henrikedin, 2025-10-28 08:26:51 -04:00, committed by MongoDB Bot
parent 837034821c
commit 952ed1f670
5 changed files with 3 additions and 82 deletions


@@ -53,7 +53,6 @@ suites:
   - src/mongo/db/modules/enterprise/jstests/live_restore/live_restore_server.js
   - src/mongo/db/modules/enterprise/jstests/live_restore/live_restore_no_server.js
   - jstests/noPassthrough/validate/validate_empty_collection.js
-  - jstests/noPassthrough/validate/skip_geo_hash_checks.js
   - jstests/noPassthrough/validate/validate_memory_limit.js
   - jstests/noPassthrough/validate/validate_with_long_index_name.js
   - src/mongo/db/modules/enterprise/jstests/live_restore/live_restore_block_fcv_change.js


@@ -65,9 +65,9 @@ const runTest = (indexDefinition) => {
         coll.insert({x: polygon, y: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]}),
     );
-    // We should also get errors when we try to validate.
+    // We should also get a warning when we try to validate.
     const validation = assert.commandWorked(coll.validate());
-    assert.gt(validation.errors.length, 1);
+    assert.eq(validation.warnings.length, 1);
     // We should be able to remove a problem document.
     assert.commandWorked(coll.deleteOne({_id: "problem1"}));
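
With the checks re-enabled, the test now expects the bad polygon to surface as exactly one validation warning rather than multiple hard errors. A minimal shell sketch of that pattern (the collection name geoColl is hypothetical):

// Sketch, assuming a collection whose geo index has a known inconsistency:
// validate() itself succeeds as a command, and the finding lands in res.warnings.
const res = assert.commandWorked(db.geoColl.validate());
jsTestLog(res);
assert.eq(res.warnings.length, 1); // one warning describing the geo key mismatch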


@@ -1,57 +0,0 @@
-/**
- * As of SERVER-98427 we are skipping the hash checks for missing/extra keys for geo
- * indexes as floating point errors can cause spurious failures. This test validates
- * that they are skipped while also ensuring other valid failures are caught.
- * @tags: [
- *   requires_persistence,
- *   requires_wiredtiger,
- * ]
- */
-import {getUriForIndex, truncateUriAndRestartMongod} from "jstests/disk/libs/wt_file_helper.js";
-
-const dbpath = MongoRunner.dataPath + "skip_geo_hash_checks";
-resetDbpath(dbpath);
-
-let conn = MongoRunner.runMongod();
-let db = conn.getCollection("test.skip_geo_hash");
-assert.commandWorked(db.getDB().createCollection(db.getName()));
-assert.commandWorked(db.createIndex({loc: "2dsphere"}));
-assert.commandWorked(db.createIndex({loc: "2d"}));
-assert.commandWorked(
-    db.insertMany([
-        {
-            loc: [Math.random(), Math.random()],
-        },
-        {
-            loc: [Math.random(), Math.random()],
-        },
-        {
-            loc: [Math.random(), Math.random()],
-        },
-    ]),
-);
-
-let result = assert.commandWorked(db.validate());
-jsTestLog(result);
-assert(result.valid);
-
-// Truncate the index, but this will pass validation because we are no longer performing this
-// specific check.
-const uri = getUriForIndex(db, "loc_2dsphere");
-conn = truncateUriAndRestartMongod(uri, conn);
-db = conn.getCollection("test.skip_geo_hash");
-
-result = assert.commandWorked(db.validate());
-jsTestLog(result);
-assert(result.valid);
-
-// Ensure that other index errors will fail for geo indexes.
-assert.commandWorked(conn.adminCommand({configureFailPoint: "failIndexKeyOrdering", mode: "alwaysOn"}));
-result = assert.commandWorked(db.validate());
-jsTestLog(result);
-// Check this index specifically because loc_2dsphere has no keys to compare ordering, and _id will
-// also cause result.valid to fail.
-assert(!result.indexDetails["loc_2d"].valid);
-assert.commandWorked(conn.adminCommand({configureFailPoint: "failIndexKeyOrdering", mode: "off"}));
-
-MongoRunner.stopMongod(conn, null, {skipValidation: true});
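
With SERVER-111292 the truncation scenario above should be caught again, which is why the test is deleted. A hedged sketch of the expected behavior, reusing the wt_file_helper.js helpers from the deleted test (the namespace and the exact reporting shape are assumptions):

import {getUriForIndex, truncateUriAndRestartMongod} from "jstests/disk/libs/wt_file_helper.js";

let conn = MongoRunner.runMongod();
let coll = conn.getCollection("test.geo_truncate"); // hypothetical namespace
assert.commandWorked(coll.createIndex({loc: "2dsphere"}));
assert.commandWorked(coll.insert({loc: [0.5, 0.5]}));
// Truncate the geo index out from under the collection and restart.
conn = truncateUriAndRestartMongod(getUriForIndex(coll, "loc_2dsphere"), conn);
coll = conn.getCollection("test.geo_truncate");
const res = assert.commandWorked(coll.validate());
jsTestLog(res);
// Assumption: the missing keys are surfaced again, as errors or as warnings.
assert(!res.valid || res.warnings.length > 0);
MongoRunner.stopMongod(conn, null, {skipValidation: true});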


@@ -122,8 +122,7 @@ IndexInfo::IndexInfo(const IndexDescriptor& descriptor)
       indexNameHash(hash(descriptor.indexName())),
       ord(Ordering::make(descriptor.keyPattern())),
       unique(descriptor.unique()),
-      accessMethod(descriptor.getEntry()->accessMethod()),
-      indexType(descriptor.getIndexType()) {}
+      accessMethod(descriptor.getEntry()->accessMethod()) {}

 IndexConsistency::IndexConsistency(OperationContext* opCtx,
                                    CollectionValidation::ValidateState* validateState,
@@ -314,9 +313,6 @@ void KeyStringIndexConsistency::addDocKey(OperationContext* opCtx,
                                           IndexInfo* indexInfo,
                                           const RecordId& recordId,
                                           ValidateResults* results) {
-    if (skipTrackingIndexKeyCount(*indexInfo)) {
-        return;
-    }
     auto rawHash = ks.hash(indexInfo->indexNameHash);
     auto hashLower = rawHash % kNumHashBuckets;
     auto hashUpper = (rawHash / kNumHashBuckets) % kNumHashBuckets;
@@ -356,9 +352,6 @@ void KeyStringIndexConsistency::addIndexKey(OperationContext* opCtx,
                                             IndexInfo* indexInfo,
                                             const RecordId& recordId,
                                             ValidateResults* results) {
-    if (skipTrackingIndexKeyCount(*indexInfo)) {
-        return;
-    }
     auto rawHash = ks.hash(indexInfo->indexNameHash);
     auto hashLower = rawHash % kNumHashBuckets;
     auto hashUpper = (rawHash / kNumHashBuckets) % kNumHashBuckets;
@@ -425,12 +418,6 @@
     }
 }

-bool KeyStringIndexConsistency::skipTrackingIndexKeyCount(const IndexInfo& indexInfo) {
-    return indexInfo.indexType == IndexType::INDEX_2D ||
-        indexInfo.indexType == IndexType::INDEX_2DSPHERE ||
-        indexInfo.indexType == IndexType::INDEX_2DSPHERE_BUCKET;
-}
-
 bool KeyStringIndexConsistency::limitMemoryUsageForSecondPhase(ValidateResults* result) {
     invariant(!_firstPhase);
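
For context on what the deleted early returns bypassed: addDocKey and addIndexKey hash every keystring into counting buckets (the hashLower/hashUpper pair above), and the document side and index side must cancel out. A toy sketch of that idea in shell JavaScript, not the server implementation (single bucket array, stand-in hash):

// Toy sketch of the bucket-counting technique: document-side keys increment
// a bucket, index-side keys decrement it; any nonzero bucket after the first
// phase flags a candidate missing/extra key for the slower second phase.
const kNumHashBuckets = 1 << 16;
const buckets = new Int32Array(kNumHashBuckets);
const seed = 0x9e3779b9; // stand-in for indexInfo->indexNameHash
const hash = (key) => { // stand-in for ks.hash(seed) over keystring bytes
    let h = seed;
    for (const ch of key) h = (h * 31 + ch.charCodeAt(0)) >>> 0;
    return h;
};
const docKeys = ["k1", "k2", "k3"]; // keys generated from the documents
const indexKeys = ["k1", "k3"]; // keys read from the index: "k2" is missing
for (const ks of docKeys) buckets[hash(ks) % kNumHashBuckets] += 1;
for (const ks of indexKeys) buckets[hash(ks) % kNumHashBuckets] -= 1;
print(buckets.some((c) => c !== 0)); // true: schedule the second phase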


@@ -85,7 +85,6 @@ struct IndexInfo {
     const bool unique;
     // Index access method pointer.
     const IndexAccessMethod* accessMethod;
-    IndexType indexType;
 };

 /**
@@ -277,13 +276,6 @@ private:
                       const RecordId& recordId,
                       ValidateResults* results);

-    /**
-     * Returns true if we should skip doing the hash bucket counting to detect extra/missing index
-     * keys. This is currently done for geo indexes as they can experience rounding errors in trig
-     * functions leading to false positives
-     */
-    bool skipTrackingIndexKeyCount(const IndexInfo& indexInfo);
-
     /**
      * During the first phase of validation, tracks the multikey paths for every observed document.
      */
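
The removed comment attributed the old skip to rounding in trig functions. A toy illustration of that error class (numbers purely illustrative):

// A degree/radian round trip through tan/atan is not exact; the same point
// can land in a different geo cell when keys are recomputed at validate time.
const lonDegrees = 12.345678901234567;
const roundTripped = Math.atan(Math.tan(lonDegrees * Math.PI / 180)) * 180 / Math.PI;
print(lonDegrees === roundTripped); // frequently false by a few ULPs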