SERVER-111511 Ensure lifetime of catalog-owned pointers stored in QueryPlannerParams (#41881)

GitOrigin-RevId: 96dce3da49b8d2e9e0d328048cb56930eb1bdb2b
This commit is contained in:
Ben Shteinfeld 2025-09-26 18:02:08 -04:00 committed by MongoDB Bot
parent d152f51d37
commit 39539342a1
6 changed files with 202 additions and 7 deletions

View File

@ -0,0 +1,170 @@
/**
* Test that the query engine is capable of running queries when the catalog's in-memory cache of
* objects is destroyed during a yield. This test case serves as a regression test for SERVER-105873,
* a bug in which the query optimizer stashed a raw pointer to a storage-owned object that was then
* destroyed during a yield with a concurrent collMod operation. The inline comments describe the
* state of the code before SERVER-105873 was fixed.
*/
(function() {
"use strict";
load("jstests/libs/fail_point_util.js"); // For configureFailPoint
// Populate 'coll' with one string-valued document followed by 1000 numeric documents
// (c: 0..999). The string document is the lone match for the collation test's predicate.
function insertDocs(coll) {
const numericDocs = Array.from({length: 1000}, (_, idx) => ({a: 1, b: 1, c: idx}));
assert.commandWorked(coll.insertMany([{a: "abc", b: "def"}, ...numericDocs]));
}
// Test a stale pointer to the partial filter expression of an index. The planner used to stash a
// raw pointer to the catalog-owned filter MatchExpression, which dangles once a yield plus a
// concurrent collMod rebuilds the index's in-memory catalog entry.
function testPartialIndex(conn) {
let db = conn.getDB("test");
const collName = jsTestName();
const coll = db[collName];
coll.drop();
insertDocs(coll);
// Ensure first branch of $or has multiple indexed plans and must run the multiplanner. This
// ensures we yield the collection acquisition.
assert.commandWorked(
coll.createIndex({a: 1}, {partialFilterExpression: {b: 1}, name: "a_partial"}));
assert.commandWorked(coll.createIndex({b: 1}));
// Yield after every unit of work so the query releases its collection acquisition promptly.
assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
// Configure the fail point to pause after the query planner has taken pointers to the catalog
// owned objects.
let pauseAfterFillingOutIndexEntries =
configureFailPoint(conn, "pauseAfterFillingOutIndexEntries");
const awaitQuery = startParallelShell(function() {
const coll = db[jsTestName()];
const res = coll.find({
$or: [
{a: 1, b: 1},
// Ensure one branch does not have an indexed plan. This forces the
// subplanner to fallback to `choosePlanWholeQuery()`.
{c: null},
],
})
.toArray();
// All 1000 numeric docs plus the single string doc satisfy the $or.
assert.eq(1001, res.length);
}, db.getMongo().port);
// At this point, the parallel shell's query is paused after filling out index entries in
// 'QueryPlannerParams' and no subplanning or multiplanning has begun.
pauseAfterFillingOutIndexEntries.wait();
// Perform a collMod which forces the catalog to destroy its in-memory cache of the 'a_partial'
// index.
assert.commandWorked(
db.runCommand({collMod: collName, index: {name: "a_partial", hidden: true}}));
assert.commandWorked(
db.runCommand({collMod: collName, index: {name: "a_partial", hidden: false}}));
// Begin subplanning and multiplanning of the first branch. This results in a yield which
// releases the one and only reference to the initial copy of the 'a_partial'
// IndexCatalogEntryImpl which owns the partial filter MatchExpression. On restore, the catalog
// constructs a new copy of the 'a_partial' IndexCatalogEntryImpl. However, before
// SERVER-105873, the query engine was saving a raw pointer to the partial filter expression
// owned by the first copy of the IndexCatalogEntryImpl, which at this point is now destroyed.
// When the subplanner tries to plan the second branch, it is only able to create a collscan
// plan, which results in falling back to `choosePlanWholeQuery()`, which tries to dereference
// the destroyed partial index pointer when determining index eligibility.
pauseAfterFillingOutIndexEntries.off();
awaitQuery();
}
// Test a stale pointer to the wildcard projection.
// The repro mirrors 'testPartialIndex()'; see that function for the detailed commentary.
function testWildcardIndex(conn) {
const testDB = conn.getDB("test");
const collName = jsTestName();
const coll = testDB[collName];
coll.drop();
insertDocs(coll);
assert.commandWorked(coll.createIndex({"a.$**": 1}, {name: "a_wildcard"}));
assert.commandWorked(coll.createIndex({b: 1}));
assert.commandWorked(
testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
const fp = configureFailPoint(conn, "pauseAfterFillingOutIndexEntries");
const awaitQuery = startParallelShell(function() {
const coll = db[jsTestName()];
const res = coll.find({$or: [{"a.foo": 1, b: 1}, {c: null}]}).toArray();
assert.eq(1, res.length);
}, testDB.getMongo().port);
fp.wait();
// Hide then unhide the index, forcing the catalog to rebuild its in-memory entry.
for (const hidden of [true, false]) {
assert.commandWorked(
testDB.runCommand({collMod: collName, index: {name: "a_wildcard", hidden: hidden}}));
}
fp.off();
awaitQuery();
}
// Test a stale pointer to the collator.
// The repro mirrors 'testPartialIndex()'; see that function for the detailed commentary.
function testCollation(conn) {
const testDB = conn.getDB("test");
const collName = jsTestName();
const coll = testDB[collName];
coll.drop();
// Create the collection with a collation so index keys and comparisons consult the collator.
const collation = {locale: "fr_CA", strength: 2};
assert.commandWorked(testDB.createCollection(collName, {collation: collation}));
insertDocs(coll);
assert.commandWorked(coll.createIndex({a: 1}, {name: "a_collation", collation: collation}));
assert.commandWorked(coll.createIndex({b: 1}, {collation: collation}));
assert.commandWorked(
testDB.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1}));
const fp = configureFailPoint(conn, "pauseAfterFillingOutIndexEntries");
const awaitQuery = startParallelShell(function() {
const coll = db[jsTestName()];
// Use string operands so the collator is consulted when determining index eligibility.
const res = coll.find({$or: [{a: "abc", b: "def"}, {c: null}]})
.collation({locale: "fr_CA", strength: 2})
.toArray();
assert.eq(1, res.length);
}, testDB.getMongo().port);
fp.wait();
// Hide then unhide the index, forcing the catalog to rebuild its in-memory entry.
for (const hidden of [true, false]) {
assert.commandWorked(
testDB.runCommand({collMod: collName, index: {name: "a_collation", hidden: hidden}}));
}
fp.off();
awaitQuery();
}
// Boot a fresh standalone mongod, hand it to 'testFn', and guarantee teardown even on failure.
function runTest(testFn) {
const mongod = MongoRunner.runMongod();
try {
testFn(mongod);
} finally {
MongoRunner.stopMongod(mongod);
}
}
// Each scenario gets its own mongod so server parameters and fail points cannot leak across runs.
[testPartialIndex, testWildcardIndex, testCollation].forEach(runTest);
})();

View File

@ -77,7 +77,8 @@ CoreIndexInfo indexInfoFromIndexCatalogEntry(const IndexCatalogEntry& ice) {
IndexEntry::Identifier{desc->indexName()},
ice.getFilterExpression(),
ice.getCollator(),
projExec};
projExec,
ice.shared_from_this()};
}
} // namespace

View File

@ -120,6 +120,8 @@
namespace mongo {
MONGO_FAIL_POINT_DEFINE(pauseAfterFillingOutIndexEntries);
boost::intrusive_ptr<ExpressionContext> makeExpressionContextForGetExecutor(
OperationContext* opCtx, const BSONObj& requestCollation, const NamespaceString& nss) {
invariant(opCtx);
@ -272,7 +274,8 @@ IndexEntry indexEntryFromIndexCatalogEntry(OperationContext* opCtx,
ice.getFilterExpression(),
desc->infoObj(),
ice.getCollator(),
wildcardProjection};
wildcardProjection,
ice.shared_from_this()};
}
ColumnIndexEntry columnIndexEntryFromIndexCatalogEntry(OperationContext* opCtx,
@ -462,6 +465,7 @@ void fillOutPlannerParams(OperationContext* opCtx,
plannerParams->availableMemoryBytes =
static_cast<long long>(ProcessInfo::getMemSizeMB()) * kMB;
}
pauseAfterFillingOutIndexEntries.pauseWhileSet();
}
std::map<NamespaceString, SecondaryCollectionInfo> fillOutSecondaryCollectionsInformation(

View File

@ -120,6 +120,7 @@ public:
{},
nullptr,
nullptr,
nullptr,
wildcardPos};
}

View File

@ -61,14 +61,16 @@ struct CoreIndexInfo {
Identifier ident,
const MatchExpression* fe = nullptr,
const CollatorInterface* ci = nullptr,
const IndexPathProjection* indexPathProj = nullptr)
const IndexPathProjection* indexPathProj = nullptr,
std::shared_ptr<const IndexCatalogEntry> iceStorage = nullptr)
: identifier(std::move(ident)),
keyPattern(kp),
filterExpr(fe),
type(type),
sparse(sp),
collator(ci),
indexPathProjection(indexPathProj) {
indexPathProjection(indexPathProj),
indexCatalogEntryStorage(std::move(iceStorage)) {
// If a projection executor exists, we always expect a $** index
if (indexPathProjection != nullptr)
invariant(type == IndexType::INDEX_WILDCARD || type == IndexType::INDEX_COLUMN);
@ -129,6 +131,10 @@ struct CoreIndexInfo {
// hashing.
BSONObj keyPattern;
// If this index is a partial index, 'filterExpr' is the MatchExpression representing the
// filter predicate. Otherwise, 'filterExpr' is null. It is the caller's responsibility to
// ensure that the pointer remains valid for the lifetime of this CoreIndexInfo.
// See 'indexCatalogEntryStorage' for more details.
const MatchExpression* filterExpr;
// What type of index is this? (What access method can we use on the index described by the
@ -139,12 +145,22 @@ struct CoreIndexInfo {
// Null if this index orders strings according to the simple binary compare. If non-null,
// represents the collator used to generate index keys for indexed strings.
// It is the caller's responsibility to ensure that the pointer remains valid for the lifetime
// of this CoreIndexInfo.
// See 'indexCatalogEntryStorage' for more details.
const CollatorInterface* collator = nullptr;
// For $** indexes, a pointer to the projection executor owned by the index access method. Null
// unless this IndexEntry represents a wildcard or column store index, in which case this is
// always non-null.
// always non-null. It is the caller's responsibility to ensure that the pointer remains valid
// for the lifetime of this CoreIndexInfo. See 'indexCatalogEntryStorage' for more details.
const IndexPathProjection* indexPathProjection = nullptr;
// Optional shared_ptr to the IndexCatalogEntry (storage's in-memory representation of an index
// in the catalog). This is used to keep the IndexCatalogEntry alive as long as this
// CoreIndexInfo. This is useful because the 'filterExpr', 'collator' and 'indexPathProjection'
// members are often pointers to data owned by IndexCatalogEntry.
std::shared_ptr<const IndexCatalogEntry> indexCatalogEntryStorage;
};
/**
@ -168,8 +184,10 @@ struct IndexEntry : CoreIndexInfo {
const BSONObj& io,
const CollatorInterface* ci,
const WildcardProjection* wildcardProjection,
std::shared_ptr<const IndexCatalogEntry> iceStorage = nullptr,
size_t wildcardPos = 0)
: CoreIndexInfo(kp, type, sp, std::move(ident), fe, ci, wildcardProjection),
: CoreIndexInfo(
kp, type, sp, std::move(ident), fe, ci, wildcardProjection, std::move(iceStorage)),
version(version),
multikey(mk),
unique(unq),

View File

@ -461,6 +461,7 @@ boost::optional<IndexEntry> createExpandedIndexEntry(const IndexEntry& wildcardI
wildcardIndex.infoObj,
wildcardIndex.collator,
wildcardIndex.indexPathProjection,
wildcardIndex.indexCatalogEntryStorage,
wildcardFieldPos);
return entry;
}