SERVER-66583 Re-enable SBE as the default execution engine and fall back to classic if no SBE compatible $group or $lookup exists

Mihai Andrei 2022-05-27 16:45:12 +00:00 committed by Evergreen Agent
parent 3e717f0b3e
commit aa40f5e7c0
63 changed files with 236 additions and 458 deletions
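
To see the effect of this change from the shell, the new testLookupGroupIsRequiredForPushdown test case added below checks explainVersion ("2" means SBE answered the query, "1" means the classic engine did). A minimal sketch of the same check, assuming 'coll' is a hypothetical unsharded collection and featureFlagSbeFull is left at its new default of false:

// Illustrative sketch (not part of this commit): an eligible $group is pushed down to SBE,
// while a query with no $group/$lookup now falls back to the classic engine.
const aggExplain = db.coll.explain().aggregate([{$group: {_id: "$a"}}]);
assert.eq(aggExplain.explainVersion, "2", aggExplain);
const findExplain = db.coll.find({a: 1}).explain();
assert.eq(findExplain.explainVersion, "1", findExplain);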

View File

@ -1619,16 +1619,16 @@ buildvariants:
# - rhel80-large
- name: generate_buildid_to_debug_symbols_mapping
- &enterprise-rhel-80-64-bit-dynamic-sbe-engine
name: enterprise-rhel-80-64-bit-dynamic-sbe-engine
display_name: "Shared Library Enterprise RHEL 8.0 (SBE Engine)"
- &enterprise-rhel-80-64-bit-dynamic-classic-engine
name: enterprise-rhel-80-64-bit-dynamic-classic-engine
display_name: "Shared Library Enterprise RHEL 8.0 (Classic Engine)"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
stepback: false
modules:
- enterprise
run_on:
- rhel80-small
expansions: &enterprise-rhel-80-64-bit-dynamic-sbe-engine-expansions
expansions: &enterprise-rhel-80-64-bit-dynamic-classic-engine-expansions
additional_package_targets: archive-mongocryptd archive-mongocryptd-debug archive-mh archive-mh-debug
compile_flags: --ssl MONGO_DISTMOD=rhel80 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars --link-model=dynamic
multiversion_platform: rhel80
@ -1644,7 +1644,7 @@ buildvariants:
burn_in_tag_buildvariants: enterprise-rhel-80-64-bit-inmem linux-64-duroff enterprise-rhel-80-64-bit-multiversion
num_scons_link_jobs_available: 0.99
test_flags: >-
--mongodSetParameters="{internalQueryForceClassicEngine: false}"
--mongodSetParameters="{internalQueryForceClassicEngine: true}"
tasks:
- name: .aggfuzzer
- name: .aggregation
@ -2338,8 +2338,8 @@ buildvariants:
expansions:
toolchain_version: v4
- name: rhel80-debug-asan-sbe-engine
display_name: ~ ASAN Enterprise RHEL 8.0 DEBUG (SBE Engine)
- name: rhel80-debug-asan-classic-engine
display_name: ~ ASAN Enterprise RHEL 8.0 DEBUG (Classic Engine)
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
modules:
- enterprise
@ -2354,7 +2354,7 @@ buildvariants:
san_options: LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1:external_symbolizer_path=/opt/mongodbtoolchain/v3/bin/llvm-symbolizer" ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:external_symbolizer_path=/opt/mongodbtoolchain/v3/bin/llvm-symbolizer"
compile_flags: --variables-files=etc/scons/mongodbtoolchain_v3_clang.vars --dbg=on --opt=on --allocator=system --sanitize=address --ssl --ocsp-stapling=off --enable-free-mon=on -j$(grep -c ^processor /proc/cpuinfo)
test_flags: >-
--mongodSetParameters="{internalQueryForceClassicEngine: false}"
--mongodSetParameters="{internalQueryForceClassicEngine: true}"
--excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling
multiversion_platform: rhel80
multiversion_edition: enterprise
@ -2493,8 +2493,8 @@ buildvariants:
expansions:
toolchain_version: v4
- name: rhel80-debug-ubsan-sbe-engine
display_name: "~ UBSAN Enterprise RHEL 8.0 DEBUG (SBE Engine)"
- name: rhel80-debug-ubsan-classic-engine
display_name: "~ UBSAN Enterprise RHEL 8.0 DEBUG (Classic Engine)"
cron: "0 4 * * *" # From the ${project_nightly_cron} parameter.
modules:
- enterprise
@ -2509,7 +2509,7 @@ buildvariants:
san_options: UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v3/bin/llvm-symbolizer"
compile_flags: --variables-files=etc/scons/mongodbtoolchain_v3_clang.vars --dbg=on --opt=on --sanitize=undefined --ssl --ocsp-stapling=off --enable-free-mon=on -j$(grep -c ^processor /proc/cpuinfo)
test_flags: >-
--mongodSetParameters="{internalQueryForceClassicEngine: false}"
--mongodSetParameters="{internalQueryForceClassicEngine: true}"
--excludeWithAnyTags=requires_ocsp_stapling
multiversion_platform: rhel80
multiversion_edition: enterprise
@ -3044,21 +3044,20 @@ buildvariants:
- windows-vsCurrent-large
### QO & QE Patch-Specific Build Variants ###
- <<: *enterprise-rhel-80-64-bit-dynamic-sbe-engine
name: enterprise-rhel-80-64-bit-dynamic-sbe-engine-query-patch-only
display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (SBE Engine)"
- <<: *enterprise-rhel-80-64-bit-dynamic-classic-engine
name: enterprise-rhel-80-64-bit-dynamic-classic-engine-query-patch-only
display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (Classic Engine)"
cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history.
expansions:
<<: *enterprise-rhel-80-64-bit-dynamic-sbe-engine-expansions
<<: *enterprise-rhel-80-64-bit-dynamic-classic-engine-expansions
jstestfuzz_num_generated_files: 20
jstestfuzz_concurrent_num_files: 5
target_resmoke_time: 30
max_sub_suites: 3
test_flags: >-
--mongodSetParameters="{internalQueryForceClassicEngine: false}"
--mongodSetParameters="{internalQueryForceClassicEngine: true}"
--excludeWithAnyTags=resource_intensive
# Intentionally derive from SBE to run the SBE tests with all feature flags.
- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template
name: enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-query-patch-only
display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (all feature flags)"

View File

@ -46,8 +46,8 @@ build_variant_large_distro_exceptions:
- ubuntu1604-debug
- ubuntu1804-debug-asan
- ubuntu1804-debug-asan-all-feature-flags
- ubuntu1804-debug-asan-sbe-engine
- ubuntu1804-debug-asan-classic-engine
- ubuntu1804-debug-aubsan-lite-required
- ubuntu1804-debug-ubsan
- ubuntu1804-debug-ubsan-all-feature-flags
- ubuntu1804-debug-ubsan-sbe-engine
- ubuntu1804-debug-ubsan-classic-engine

View File

@ -16,7 +16,7 @@ let coll = db.explain_limit;
const kCollSize = 105;
const kLimit = 10;
const isSBEEnabled = checkSBEEnabled(db);
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
// Return whether or not explain() was successful and contained the appropriate fields given the
// requested verbosity. Checks that the number of documents examined and returned are correct given
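
The checkSBEEnabled(db, ["featureFlagSbeFull"]) gate used here (and in many of the tests below) can be approximated with getParameter calls; this is only a rough sketch of the idea, not the actual helper, which lives in jstests/libs/sbe_util.js and also handles sharded and mixed-version topologies:

// Rough approximation (assumption): SBE is treated as fully enabled only if the classic
// engine is not forced and the featureFlagSbeFull flag exists and is turned on.
function approxIsSbeFullyEnabled(db) {
    const forceClassic = assert.commandWorked(
        db.adminCommand({getParameter: 1, internalQueryForceClassicEngine: 1}));
    if (forceClassic.internalQueryForceClassicEngine) {
        return false;
    }
    // getParameter fails on binaries that do not know the flag; treat that as "disabled".
    const flagRes = db.adminCommand({getParameter: 1, featureFlagSbeFull: 1});
    return flagRes.hasOwnProperty("featureFlagSbeFull") && flagRes.featureFlagSbeFull.value;
}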

View File

@ -130,7 +130,7 @@ runAndAssertThrows(["$dbl_arr", "$dbl_val"]);
// Confirm edge case where if invalid input precedes null or missing inputs, the command fails.
// Note that when the SBE engine is enabled, null will be returned before invalid input because
// we check if any values are null before checking whether all values are arrays.
let evalFn = checkSBEEnabled(db) ? runAndAssertNull : runAndAssertThrows;
let evalFn = checkSBEEnabled(db, ["featureFlagSbeFull"]) ? runAndAssertNull : runAndAssertThrows;
evalFn(["$int_arr", "$dbl_val", "$null_val"]);
evalFn(["$int_arr", "some_string_value", "$null_val"]);
evalFn(["$dbl_val", "$null_val"]);

View File

@ -13,7 +13,6 @@
* 2. 'collation' option overrides local collection's collation
*/
load("jstests/aggregation/extras/utils.js"); // For anyEq.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/analyze_plan.js"); // For getAggPlanStages, getWinningPlan.
(function() {

View File

@ -5,7 +5,6 @@
(function() {
"use strict";
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/analyze_plan.js"); // For getAggPlanStages.
const localColl = db.local;

View File

@ -129,6 +129,11 @@ assertResultsMatch({
expectProjectToCoalesce: true,
pipelineOptimizedAway: true
});
assertResultsMatch({
pipeline: [{$sort: {a: 1}}, {$group: {_id: "$_id", a: {$sum: "$a"}}}, {$project: {arr: 1}}],
expectProjectToCoalesce:
!groupPushdownEnabled, // lowering $group into SBE prevents coalescing of projects
});
// Test that projections with computed fields are removed from the pipeline.
assertResultsMatch({

View File

@ -7,7 +7,6 @@
load("jstests/aggregation/extras/utils.js"); // For arrayEq.
load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection.
load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs.
const testDB = db.getSiblingDB(jsTestName());

View File

@ -1,7 +1,6 @@
'use strict';
load("jstests/libs/fixture_helpers.js"); // For isSharded.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
/**
* view_catalog_cycle_lookup.js

View File

@ -19,7 +19,7 @@
load("jstests/libs/profiler.js"); // getLatestProfileEntry.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
if (!checkSBEEnabled(db)) {
if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
jsTestLog("Skipping test because SBE is disabled");
return;
}

View File

@ -14,7 +14,8 @@
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromExplain.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
const isSbePlanCacheEnabled =
checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
var coll = db.collation_plan_cache;
coll.drop();

View File

@ -18,7 +18,8 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"]);
const columnstoreEnabled =
checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"]);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index validation test since the feature flag is not enabled.");
return;

View File

@ -18,7 +18,8 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"]);
const columnstoreEnabled =
checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"]);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index validation test since the feature flag is not enabled.");
return;

View File

@ -17,7 +17,8 @@ load("jstests/libs/fail_point_util.js");
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"]);
const columnstoreEnabled =
checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"]);
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore index validation test since the feature flag is not enabled.");
return;

View File

@ -2,7 +2,7 @@
"use strict";
load("jstests/aggregation/extras/utils.js"); // For arrayEq and orderedArrayEq.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabledOnSomeNode.
const isSBEEnabled = checkSBEEnabledOnSomeNode(db);
if (isSBEEnabled) {

View File

@ -62,7 +62,8 @@ winningPlan = getWinningPlan(explain.queryPlanner);
engineSpecificAssertion(!isIdhack(db, winningPlan), isIxscan(db, winningPlan), db, winningPlan);
// Covered query returning _id field only can be handled by ID hack.
const isSBEPlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
const isSBEPlanCacheEnabled =
checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
const parentStage = isSBEPlanCacheEnabled ? "PROJECTION_COVERED" : "FETCH";
explain = t.find(query, {_id: 1}).explain();
winningPlan = getWinningPlan(explain.queryPlanner);

View File

@ -14,7 +14,6 @@
"use strict";
load("jstests/libs/analyze_plan.js"); // For getPlanStages.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const collName = "index_filter_catalog_independent";
const coll = db[collName];

View File

@ -19,7 +19,8 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
// For testing convenience this variable is made an integer "1" if featureFlagSbePlanCache is on,
// because the expected number of plan cache entries differs between the two different plan caches.
const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]) ? 1 : 0;
const isSbePlanCacheEnabled =
checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]) ? 1 : 0;
const collName = "index_filter_commands_invalidate_plan_cache_entries";
const coll = db[collName];

View File

@ -14,7 +14,6 @@
(function() {
'use strict';
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const collName = 'introspect_hidden_index_plan_cache_entries';
const collNotAffectedName = 'introspect_hidden_index_plan_cache_entries_unaffected';

View File

@ -23,7 +23,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
let coll = db.jstests_plan_cache_list_plans;
coll.drop();
const isSBEAndPlanCacheOn = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
const isSBEAndPlanCacheOn = checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
function dumpPlanCacheState() {
return coll.aggregate([{$planCacheStats: {}}]).toArray();

View File

@ -16,7 +16,7 @@
'use strict';
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
return;
}

View File

@ -24,7 +24,8 @@ load("jstests/libs/sbe_explain_helpers.js"); // For engineSpecificAssertion.
const coll = db.plan_cache_sbe;
coll.drop();
const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
const isSbePlanCacheEnabled =
checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
assert.commandWorked(coll.insert({a: 1, b: 1}));

View File

@ -16,7 +16,8 @@
load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const isSBEPlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
const isSBEPlanCacheEnabled =
checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
var coll = db.jstests_plan_cache_shell_helpers;
coll.drop();

View File

@ -112,7 +112,7 @@ assertPlanCacheField({
// SBE's planCacheKey encoding encodes "collection version" which will be increased after dropping
// an index.
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
// The 'planCacheKey' should be the same as what it was before we dropped the index.
assertPlanCacheField({
firstExplain: initialExplain,
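
As a concrete illustration of the comment above, a sketch (not part of this test) using the getPlanCacheKeyFromShape helper from jstests/libs/analyze_plan.js; it assumes 'coll' has an index {b: 1} that is unrelated to the query:

const query = {a: 1};
const keyBefore = getPlanCacheKeyFromShape({query: query, collection: coll, db: db});
assert.commandWorked(coll.dropIndex({b: 1}));  // bumps the collection version
const keyAfter = getPlanCacheKeyFromShape({query: query, collection: coll, db: db});
if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
    // The SBE plan cache key encodes the collection version, so the key changes.
    assert.neq(keyBefore, keyAfter);
}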

View File

@ -16,7 +16,7 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
if (!isSBEEnabled) {
jsTest.log("Skip running the test because featureFlagSbePlanCache is not enabled");
return;

View File

@ -7,9 +7,9 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/collection_drop_recreate.js");
load("jstests/libs/sbe_util.js");
load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'.
const isSBEEnabled = checkSBEEnabled(db);
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
if (!isSBEEnabled) {
jsTestLog("Skipping test because the SBE feature flag is disabled");
return;

View File

@ -12,7 +12,7 @@
load('jstests/libs/analyze_plan.js'); // For getPlanStages
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const isSBEEnabled = checkSBEEnabled(db);
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
if (!isSBEEnabled) {
jsTestLog("Skipping test because the SBE feature flag is disabled");
return;

View File

@ -19,7 +19,7 @@ load("jstests/libs/sbe_util.js");
// This test is specifically verifying the behavior of the SBE plan cache. So if either the SBE plan
// cache or SBE itself is disabled, bail out.
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTestLog("Skipping test because either SBE engine or SBE plan cache is disabled");
return;
}

View File

@ -11,7 +11,6 @@
"use strict";
load("jstests/core/timeseries/libs/timeseries.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
TimeseriesTest.run((insert) => {
const testDB = db.getSiblingDB(jsTestName());

View File

@ -15,7 +15,6 @@
"use strict";
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
let viewsDB = db.getSiblingDB("views_collation");
assert.commandWorked(viewsDB.dropDatabase());

View File

@ -7,8 +7,6 @@
(function() {
"use strict";
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
let viewsDb = db.getSiblingDB("views_validation");
const kMaxViewDepth = 20;

View File

@ -29,6 +29,8 @@ coll.drop();
assert.commandWorked(coll.createIndex({"b.$**": 1}));
assert.commandWorked(coll.createIndex({"a": 1}));
const sbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
// In order for the plan cache to be used, there must be more than one plan available. Insert
// data into the collection such that the b.$** index will be far more selective than the index
// on 'a' for the query {a: 1, b: 1}.
@ -70,7 +72,7 @@ for (let i = 0; i < 2; i++) {
let cacheEntry = getCacheEntryForQuery(query);
assert.neq(cacheEntry, null);
assert.eq(cacheEntry.isActive, true);
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (!sbePlanCacheEnabled) {
// Should be at least two plans: one using the {a: 1} index and the other using the b.$** index.
assert.gte(cacheEntry.creationExecStats.length, 2, tojson(cacheEntry.plans));
@ -123,7 +125,7 @@ assert.neq(getPlanCacheKeyFromShape({query: queryWithBNull, collection: coll, db
// There should only have been one solution for the above query, so it would get cached only by the
// SBE plan cache.
cacheEntry = getCacheEntryForQuery({a: 1, b: null});
if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (sbePlanCacheEnabled) {
assert.neq(cacheEntry, null);
assert.eq(cacheEntry.isActive, true, cacheEntry);
assert.eq(cacheEntry.isPinned, true, cacheEntry);

View File

@ -14,7 +14,7 @@ load("jstests/libs/analyze_plan.js");
load("jstests/libs/fixture_helpers.js"); // For 'isMongos()'.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
return;
}

View File

@ -44,12 +44,14 @@ function getSbePlanStages(queryLayerOutput, stage) {
* Helper to make an assertion depending on the engine being used. If we're in a mixed version
* cluster, then we assert that either 'classicAssert' or 'sbeAssert' is true because the outcome
* will depend on which node we're making assertions against. If we're not in a mixed version
* scenario, then we make an assertion depending on the value of 'isSBEEnabled'.
* scenario, then we make an assertion depending on the return value of 'checkSBEEnabled'.
*/
function engineSpecificAssertion(classicAssert, sbeAssert, theDB, msg) {
if (checkBothEnginesAreRunOnCluster(theDB)) {
assert(classicAssert || sbeAssert, msg);
} else if (checkSBEEnabled(theDB)) {
} else if (checkSBEEnabled(theDB, ["featureFlagSbeFull"])) {
// This function assumes that SBE is fully enabled, and will fall back to the classic
// assert if it is not.
assert(sbeAssert, msg);
} else {
assert(classicAssert, msg);
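
A typical call site looks like the covered-query assertion earlier in this diff; here is a hypothetical usage sketch (the stage names are placeholders, not taken from a real test, and the analyze_plan.js helpers are assumed to be loaded):

const explain = coll.find({a: 1}).explain();
const winningPlan = getWinningPlan(explain.queryPlanner);
engineSpecificAssertion(planHasStage(db, winningPlan, "FETCH"),   // expected under classic
                        planHasStage(db, winningPlan, "IXSCAN"),  // expected under SBE
                        db,
                        tojson(winningPlan));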

View File

@ -21,7 +21,7 @@ assert.neq(null, conn, "mongod was unable to start up with options: " + tojson(o
const testDb = conn.getDB("test");
const collection = testDb.external_sort_find;
const isSBEEnabled = checkSBEEnabled(testDb);
const isSBEEnabled = checkSBEEnabled(testDb, ["featureFlagSbeFull"]);
// Construct a document that is just over 1 kB.
const charToRepeat = "-";

View File

@ -22,7 +22,9 @@ const largeStr = "A".repeat(1024 * 1024); // 1MB string
for (let i = 0; i < memoryLimitMb + 1; ++i)
assert.commandWorked(testDb.largeColl.insert({x: i, largeStr: largeStr + i}));
const pipeline = [{$group: {_id: '$largeStr', minId: {$min: '$_id'}}}];
// Inhibit optimization so that $group runs in the classic engine.
const pipeline =
[{$_internalInhibitOptimization: {}}, {$group: {_id: '$largeStr', minId: {$min: '$_id'}}}];
// Make sure that the pipeline needs to spill to disk.
assert.throwsWithCode(() => testDb.largeColl.aggregate(pipeline, {allowDiskUse: false}),
ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
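
For contrast, a sketch of the complementary check (not part of this test file): the same memory-constrained pipeline should succeed when spilling is allowed, since the classic $group can spill to disk.

const spilled = testDb.largeColl.aggregate(pipeline, {allowDiskUse: true}).toArray();
// One group per distinct 'largeStr' value, i.e. one per inserted document.
assert.eq(spilled.length, memoryLimitMb + 1, tojson(spilled));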

View File

@ -86,7 +86,7 @@ function runLoggingTests({db, slowMs, logLevel, sampleRate}) {
assert.commandWorked(db.setLogLevel(logLevel, "command"));
assert.commandWorked(db.setLogLevel(logLevel, "write"));
const isSBEEnabled = checkSBEEnabled(db);
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
// Certain fields in the log lines on mongoD are not applicable in their counterparts on
// mongoS, and vice-versa. Ignore these fields when examining the logs of an instance on

View File

@ -294,6 +294,48 @@ function setLookupPushdownDisabled(value) {
{allowDiskUse: true});
}());
// Verify that SBE is only used when a $lookup or a $group is present.
(function testLookupGroupIsRequiredForPushdown() {
// Don't execute this test case if SBE is fully enabled.
if (checkSBEEnabled(db, ["featureFlagSbeFull"])) {
jsTestLog("Skipping test case because we are supporting SBE beyond $group and $lookup" +
" pushdown");
return;
}
const assertEngineUsed = function(pipeline, isSBE) {
const explain = coll.explain().aggregate(pipeline);
assert(explain.hasOwnProperty("explainVersion"), explain);
if (isSBE) {
assert.eq(explain.explainVersion, 2, explain);
} else {
assert.eq(explain.explainVersion, 1, explain);
}
};
const lookup = {$lookup: {from: "coll", localField: "a", foreignField: "b", as: "out"}};
const group = {
$group: {
_id: "$a",
out: {$min: "$b"},
}
};
const match = {$match: {a: 1}};
// $lookup and $group should each run in SBE.
assertEngineUsed([lookup], true /* isSBE */);
assertEngineUsed([group], true /* isSBE */);
assertEngineUsed([lookup, group], true /* isSBE */);
// $match on its own won't use SBE, nor will an empty pipeline.
assertEngineUsed([match], false /* isSBE */);
assertEngineUsed([], false /* isSBE */);
// $match will use SBE if followed by either a $group or a $lookup.
assertEngineUsed([match, lookup], true /* isSBE */);
assertEngineUsed([match, group], true /* isSBE */);
})();
// Build an index on the foreign collection that matches the foreignField. This should cause us
// to choose an indexed nested loop join.
(function testIndexNestedLoopJoinRegularIndex() {

View File

@ -178,7 +178,7 @@ rst.initiate();
const primaryDB = rst.getPrimary().getDB(dbName);
const secondaryDB = rst.getSecondary().getDB(dbName);
if (checkSBEEnabled(primaryDB, ["featureFlagSbePlanCache"])) {
if (checkSBEEnabled(primaryDB, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
rst.stopSet();
return;

View File

@ -8,9 +8,9 @@ const conn = MongoRunner.runMongod();
assert.neq(null, conn, "mongod was unable to start up");
const testDB = conn.getDB("jstests_plan_cache_list_failed_plans");
const coll = testDB.test;
const isSBEEnabled = checkSBEEnabled(testDB);
const isSBEEnabled = checkSBEEnabled(testDB, ["featureFlagSbeFull"]);
if (checkSBEEnabled(testDB, ["featureFlagSbePlanCache"])) {
if (checkSBEEnabled(testDB, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
MongoRunner.stopMongod(conn);
return;

View File

@ -79,7 +79,7 @@ assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB("test");
const coll = db.plan_cache_memory_debug_info;
if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE and SBE plan cache are both enabled.");
MongoRunner.stopMongod(conn);
return;

View File

@ -44,7 +44,7 @@ assert.eq(1, cachedPlans.length, cachedPlans);
assert.eq(true, cachedPlans[0].isActive, cachedPlans);
const cachedPlan = getCachedPlan(cachedPlans[0].cachedPlan);
const cachedPlanVersion = cachedPlans[0].version;
if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
// If the SBE plan cache is on, then the cached plan has a different format.
assert.eq(cachedPlanVersion, "2", cachedPlans);
assert(cachedPlan.stages.includes("sort"), cachedPlans);

View File

@ -16,7 +16,7 @@ const coll = testDb.plan_cache_stats_agg_source;
// Note that the "getParameter" command is expected to fail in versions of mongod that do not yet
// include the slot-based execution engine. When that happens, however, 'isSBEEnabled' still
// correctly evaluates to false.
const isSBEEnabled = checkSBEEnabled(testDb);
const isSBEEnabled = checkSBEEnabled(testDb, ["featureFlagSbeFull"]);
function makeMatchForFilteringByShape(query) {
const keyHash = getPlanCacheKeyFromShape({query: query, collection: coll, db: testDb});

View File

@ -8,11 +8,20 @@
// For 'getLatestProfilerEntry()'.
load("jstests/libs/profiler.js");
load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
const conn = MongoRunner.runMongod({});
assert.neq(null, conn, "mongod was unable to start up");
const db = conn.getDB(jsTestName());
// This test assumes that SBE is being used for most queries.
if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
jsTestLog("Skipping test because SBE is not fully enabled");
MongoRunner.stopMongod(conn);
return;
}
assert.commandWorked(db.dropDatabase());
const coll = db.collection;

View File

@ -6,7 +6,6 @@
load("jstests/libs/analyze_plan.js");
load("jstests/libs/storage_engine_utils.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
let replSet = new ReplSetTest({nodes: 1});
replSet.startSet();

View File

@ -6,6 +6,8 @@
(function() {
"use strict";
load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
const numDocs = 1000;
const dbName = "sbe_multiplanner_db";
const collName = "sbe_multiplanner_coll";
@ -21,6 +23,13 @@ const trialLengthFromWorksKnob = 0.1 * numDocs;
const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB(dbName);
// This test assumes that SBE is being used for most queries.
if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
jsTestLog("Skipping test because SBE is not fully enabled");
MongoRunner.stopMongod(conn);
return;
}
const coll = db[collName];
// Gets the "allPlansExecution" section from the explain of a query that has zero results, but for

View File

@ -1,144 +0,0 @@
/**
* Confirms that 'planCacheKey' and 'queryHash' are correctly reported when the query has $lookup
* and $query stages with enabled and disabled SBE Plan Cache.
*/
(function() {
"use strict";
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const databaseName = "pipeline_plan_cache_key_reporting";
function isSBEEnabled() {
const conn = MongoRunner.runMongod({});
try {
const db = conn.getDB(databaseName);
return checkSBEEnabled(db);
} finally {
MongoRunner.stopMongod(conn);
}
}
if (!isSBEEnabled()) {
jsTest.log("Skipping test because SBE is not enabled.");
return;
}
/**
* Driver function that creates mongod instances with specified parameters and run the given test
* cases.
* @param {*} params to be passed to mongod in format like { setParameter:
* "featureFlagSbePlanCache=true"}
* @param {*} testCases a list of test cases where each test case is an object with 'setup(db)' and
* 'run(db, assertMessage)' functions.
* @returns results from 'testCase.run(db, assertMessage)'
*/
function runTests(params, testCases) {
let results = [];
const conn = MongoRunner.runMongod(params);
const db = conn.getDB(databaseName);
const assertMessage = `${tojson(params)}`;
try {
for (let testCase of testCases) {
testCase.setup(db);
results.push(testCase.run(db, assertMessage));
}
} finally {
MongoRunner.stopMongod(conn);
}
return results;
}
/**
* This function validates the given explain output and returns an object with the extracted and
* validated planCacheKey and queryHash.
* @returns {planCacheKey, queryHash, explain}
*/
function processAndValidateExplain(explain, assertMessage) {
assert.neq(explain, null);
assert.eq(explain.explainVersion,
"2",
`[${assertMessage}] invalid explain version ${tojson(explain)}`);
const planCacheKey = explain.queryPlanner.planCacheKey;
validateKey(planCacheKey, `[${assertMessage}] Invalid planCacheKey: ${tojson(explain)}`);
const queryHash = explain.queryPlanner.queryHash;
validateKey(queryHash, `[${assertMessage}] Invalid queryHash: ${tojson(explain)}`);
return {planCacheKey, queryHash, explain};
}
/**
* Validates given 'key' (PlanCacheKey or QueryHash).
*/
function validateKey(key, assertMessage) {
assert.eq(typeof key, "string", assertMessage);
assert.gt(key.length, 0, assertMessage);
}
// 1. Create test cases for $lookup and $group stages.
const lookupTestCase = {
setup: db => {
db.coll.drop();
assert.commandWorked(db.coll.createIndexes([{a: 1}, {a: 1, b: 1}]));
db.lookupColl.drop();
assert.commandWorked(db.lookupColl.createIndex({b: 1}));
},
run: (db, assertMessage) => {
const pipeline = [
{$lookup: {from: db.lookupColl.getName(), localField: "a", foreignField: "b", as: "w"}}
];
const explain = db.coll.explain().aggregate(pipeline);
return processAndValidateExplain(explain, assertMessage);
},
};
const groupTestCase = {
setup: db => {
db.coll.drop();
assert.commandWorked(db.coll.insertOne({a: 1}));
},
run: (db, assertMessage) => {
const pipeline = [{
$group: {
_id: "$b",
}
}];
const explain = db.coll.explain().aggregate(pipeline);
return processAndValidateExplain(explain, assertMessage);
},
};
const testCases = [lookupTestCase, groupTestCase];
// 2. Run the test cases with SBE Plan Cache Enabled.
const sbeParams = {
setParameter: "featureFlagSbePlanCache=true"
};
const sbeKeys = runTests(sbeParams, testCases);
assert.eq(testCases.length, sbeKeys.length);
// 3. Run the test cases with SBE Plan Cache disabled.
const classicParams = {
setParameter: "featureFlagSbePlanCache=false"
};
const classicKeys = runTests(classicParams, testCases);
assert.eq(testCases.length, classicKeys.length);
// 4. Validate that PlanCacheKeys and QueryHash are equal. They should be different once
// SERVER-61507 is completed.
for (let i = 0; i < sbeKeys.length; ++i) {
const sbe = sbeKeys[i];
const classic = classicKeys[i];
const message = `sbe=${tojson(sbe.explain)}, classic=${tojson(classic.explain)}`;
assert.eq(sbe.planCacheKey, classic.planCacheKey, message);
assert.eq(sbe.queryHash, classic.queryHash, message);
}
})();

View File

@ -50,7 +50,7 @@ const db = conn.getDB(dbName);
// This test is specifically verifying the behavior of the SBE plan cache. So if either the SBE plan
// cache or SBE itself is disabled, bail out.
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTestLog("Skipping test because either SBE engine or SBE plan cache are disabled");
MongoRunner.stopMongod(conn);
return;

View File

@ -16,7 +16,7 @@ const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB("sbe_plan_cache_memory_debug_info");
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (!checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
jsTest.log("Skipping test because SBE plan cache is not enabled.");
MongoRunner.stopMongod(conn);
return;

View File

@ -44,7 +44,8 @@ function assertQueryInPlanCache(coll, query) {
assert.eq(1, planCacheEntries.length, planCacheEntries);
}
const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
const isSbePlanCacheEnabled =
checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
if (isSbePlanCacheEnabled) {
const collectionName = "plan_cache_sbe";
const coll = db[collectionName];

View File

@ -13,6 +13,7 @@ function sumHistogramBucketCounts(histogram) {
}
load("jstests/libs/ftdc.js");
load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'.
const collName = jsTestName();
const dbName = jsTestName();
@ -22,6 +23,13 @@ const conn = MongoRunner.runMongod({});
assert.neq(conn, null, "mongod failed to start");
const db = conn.getDB(dbName);
// This test assumes that SBE is being used for most queries.
if (!checkSBEEnabled(db, ["featureFlagSbeFull"])) {
jsTestLog("Skipping test because SBE is not fully enabled");
MongoRunner.stopMongod(conn);
return;
}
let coll = db.getCollection(collName);
coll.drop();

View File

@ -9,19 +9,11 @@ load("jstests/libs/fail_point_util.js");
load("jstests/libs/analyze_plan.js");
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const isSBEEnabled = checkSBEEnabled(db);
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes", "featureFlagSbeFull"]);
if (!isSBEEnabled) {
// This test is only relevant when SBE is enabled.
return;
}
const getParamResponse =
assert.commandWorked(db.adminCommand({getParameter: 1, featureFlagColumnstoreIndexes: 1}));
const columnstoreEnabled = getParamResponse.hasOwnProperty("featureFlagColumnstoreIndexes") &&
getParamResponse.featureFlagColumnstoreIndexes.value;
if (!columnstoreEnabled) {
jsTestLog("Skipping columnstore test since the feature flag is not enabled.");
jsTestLog("Skipping columnstore test since either SBE or columnstore are disabled.");
return;
}

View File

@ -46,25 +46,6 @@ let assertGroupPushdown = function(
assert.sameMembers(results, expectedResults);
};
let assertProjectPushdown = function(
{coll, pipeline, expectProjectToBePushedDown, options = {}} = {}) {
const explain = coll.explain().aggregate(pipeline, options);
let result;
if (expectProjectToBePushedDown) {
result = getWinningPlan(explain.queryPlanner);
} else {
result = getWinningPlan(explain.stages[0].$cursor.queryPlanner);
}
// Check that $project uses the query system.
assert.eq(expectProjectToBePushedDown,
planHasStage(db, result, "PROJECTION_DEFAULT") ||
planHasStage(db, result, "PROJECTION_COVERED") ||
planHasStage(db, result, "PROJECTION_SIMPLE"),
explain);
};
let assertNoGroupPushdown = function(coll, pipeline, expectedResults, options = {}) {
const explain = coll.explain().aggregate(pipeline, options);
assert.eq(null, getAggPlanStage(explain, "GROUP"), explain);
@ -73,7 +54,7 @@ let assertNoGroupPushdown = function(coll, pipeline, expectedResults, options =
assert.sameMembers(resultNoGroupPushdown, expectedResults);
};
let assertResultsMatchWithAndWithoutGroupPushdown = function(
let assertResultsMatchWithAndWithoutPushdown = function(
coll, pipeline, expectedResults, expectedGroupCountInExplain) {
// Make sure the provided pipeline is eligible for pushdown.
assertGroupPushdown(coll, pipeline, expectedResults, expectedGroupCountInExplain);
@ -93,27 +74,6 @@ let assertResultsMatchWithAndWithoutGroupPushdown = function(
assert.sameMembers(resultNoGroupPushdown, resultWithGroupPushdown);
};
let assertResultsMatchWithAndWithoutProjectPushdown = function(
{coll, pipeline, expectProjectToBePushedDown, expectedResults, options = {}} = {}) {
// Make sure the provided pipeline is eligible for project pushdown.
assertProjectPushdown(
{coll: coll, pipeline: pipeline, expectProjectToBePushedDown: expectProjectToBePushedDown});
// Turn sbe off.
db.adminCommand({setParameter: 1, internalQueryForceClassicEngine: true});
// Sanity check the results when no project pushdown happens.
let resultNoProjectPushdown = coll.aggregate(pipeline).toArray();
assert.sameMembers(resultNoProjectPushdown, expectedResults);
// Turn sbe on which will allow $group stages that contain supported accumulators to be pushed
// down under certain conditions.
db.adminCommand({setParameter: 1, internalQueryForceClassicEngine: false});
let resultWithProjectPushdown = coll.aggregate(pipeline).toArray();
assert.sameMembers(resultNoProjectPushdown, resultWithProjectPushdown);
};
let assertShardedGroupResultsMatch = function(coll, pipeline, expectedGroupCountInExplain = 1) {
const originalSBEEngineStatus =
assert
@ -156,41 +116,38 @@ assert.eq(
[{"_id": 5, "item": "c", "price": 5, "quantity": 10, "date": ISODate("2014-02-15T09:05:00Z")}]);
// Run a simple $group with {$sum: 1} accumulator, and check if it gets pushed down.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", c: {$sum: NumberInt(1)}}}],
[{_id: "a", c: NumberInt(2)}, {_id: "b", c: NumberInt(2)}, {_id: "c", c: NumberInt(1)}],
1);
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", c: {$sum: NumberLong(1)}}}],
[{_id: "a", c: NumberLong(2)}, {_id: "b", c: NumberLong(2)}, {_id: "c", c: NumberLong(1)}],
1);
assertResultsMatchWithAndWithoutGroupPushdown(
coll,
assertResultsMatchWithAndWithoutPushdown(coll,
[{$group: {_id: "$item", c: {$sum: 1}}}],
[{_id: "a", c: 2}, {_id: "b", c: 2}, {_id: "c", c: 1}],
1);
// Run a simple $group with supported $sum accumulator, and check if it gets pushed down.
assertResultsMatchWithAndWithoutGroupPushdown(
coll,
assertResultsMatchWithAndWithoutPushdown(coll,
[{$group: {_id: "$item", s: {$sum: "$price"}}}],
[{_id: "a", s: 15}, {_id: "b", s: 30}, {_id: "c", s: 5}],
1);
// The subexpression '$not' is not translated to $coerceToBool and thus is SBE compatible.
assertResultsMatchWithAndWithoutGroupPushdown(
coll,
assertResultsMatchWithAndWithoutPushdown(coll,
[{$group: {_id: "$item", c: {$sum: {$not: "$price"}}}}],
[{_id: "a", c: 0}, {_id: "b", c: 0}, {_id: "c", c: 0}],
1);
// Two group stages both get pushed down and the second $group stage refer to only a top-level field
// which does not exist.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", s: {$sum: "$price"}}}, {$group: {_id: "$quantity", c: {$count: {}}}}],
[{_id: null, c: 3}],
@ -198,7 +155,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
// Two group stages both get pushed down and the second $group stage refers to only existing
// top-level fields of the first $group.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[
{$group: {_id: "$item", qsum: {$sum: "$quantity"}, msum: {$sum: "$price"}}},
@ -208,14 +165,14 @@ assertResultsMatchWithAndWithoutGroupPushdown(
2);
// The $group stage refers to the same top-level field twice.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", ps1: {$sum: "$price"}, ps2: {$sum: "$price"}}}],
[{_id: "a", ps1: 15, ps2: 15}, {_id: "b", ps1: 30, ps2: 30}, {_id: "c", ps1: 5, ps2: 5}],
1);
// The $group stage refers to the same top-level field twice and another top-level field.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{
$group:
@ -229,7 +186,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
1);
// The $group stage refers to two existing sub-fields.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[
{$project: {item: 1, price: 1, quantity: 1, dateParts: {$dateToParts: {date: "$date"}}}},
@ -244,7 +201,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
1);
// The $group stage refers to a non-existing sub-field twice.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{$group: {_id: "$item", hs: {$sum: {$add: ["$date.hour", "$date.hour"]}}}}],
[{"_id": "a", "hs": 0}, {"_id": "b", "hs": 0}, {"_id": "c", "hs": 0}],
@ -287,12 +244,12 @@ assertResultsMatchWithAndWithoutGroupPushdown(
{$group: {_id: "$_id", ss: {$sum: {$add: ["$indexKeyPattern", "$indexKeyPattern"]}}}}
],
].forEach(pipeline =>
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll, pipeline, [{_id: "a", ss: 30}, {_id: "b", ss: 60}, {_id: "c", ss: 10}], 2));
// The second $group stage refers to both a top-level field and a sub-field twice which does not
// exist.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[
{$group: {_id: "$item", ps: {$sum: "$price"}}},
@ -306,7 +263,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
2);
// The second $group stage refers to a sub-field which does exist.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[
{$group: {_id: {i: "$item", p: {$divide: ["$price", 5]}}}},
@ -316,7 +273,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
2);
// Verifies that an optimized expression can be pushed down.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
// {"$ifNull": [1, 2]} will be optimized into just the constant 1.
[{$group: {_id: {"$ifNull": [1, 2]}, o: {$min: "$quantity"}}}],
@ -338,75 +295,6 @@ assertGroupPushdown(coll,
[{$group: {_id: {"i": "$item"}, s: {$sum: "$price"}}}],
[{_id: {i: "a"}, s: 15}, {_id: {i: "b"}, s: 30}, {_id: {i: "c"}, s: 5}]);
assertResultsMatchWithAndWithoutProjectPushdown({
coll: coll,
pipeline: [{$project: {x: "$item"}}],
expectProjectToBePushedDown: true,
expectedResults: [
{"_id": 5, "x": "c"},
{"_id": 4, "x": "b"},
{"_id": 3, "x": "a"},
{"_id": 2, "x": "b"},
{"_id": 1, "x": "a"}
]
});
assertResultsMatchWithAndWithoutProjectPushdown({
coll: coll,
pipeline: [{$group: {_id: {"i": "$item"}, s: {$sum: "$price"}}}, {$project: {x: "$s"}}],
expectProjectToBePushedDown: true,
expectedResults:
[{"_id": {"i": "b"}, "x": 30}, {"_id": {"i": "a"}, "x": 15}, {"_id": {"i": "c"}, "x": 5}]
});
assertResultsMatchWithAndWithoutProjectPushdown({
coll: coll,
pipeline: [
{$group: {_id: "$item", s: {$sum: "$price"}}},
{$project: {_id: 1, x: "$s"}},
{$group: {_id: "$_id", total: {$sum: "$x"}}}
],
expectProjectToBePushedDown: true,
expectedResults:
[{"_id": "a", "total": 15}, {"_id": "c", "total": 5}, {"_id": "b", "total": 30}]
});
assertResultsMatchWithAndWithoutProjectPushdown({
coll: coll,
pipeline: [
{$group: {_id: {"i": "$item"}, s: {$sum: "$price"}}},
{$addFields: {x: 1}},
{$project: {s: 0}}
],
expectProjectToBePushedDown: false,
expectedResults:
[{"_id": {"i": "c"}, "x": 1}, {"_id": {"i": "b"}, "x": 1}, {"_id": {"i": "a"}, "x": 1}]
});
assertResultsMatchWithAndWithoutProjectPushdown({
coll: coll,
pipeline: [
{$group: {_id: {"i": "$item"}, s: {$sum: "$price"}}},
{$addFields: {x: 1}},
{$project: {s: 1}}
],
expectProjectToBePushedDown: false,
expectedResults:
[{"_id": {"i": "c"}, "s": 5}, {"_id": {"i": "b"}, "s": 30}, {"_id": {"i": "a"}, "s": 15}]
});
assertResultsMatchWithAndWithoutProjectPushdown({
coll: coll,
pipeline: [
{$match: {item: "a"}},
{$sort: {price: 1}},
{$group: {_id: "$item"}},
{$project: {x: "$item"}}
],
expectProjectToBePushedDown: true,
expectedResults: [{"_id": "a"}]
});
// Run a group with spilling on and check that $group is pushed down.
assertGroupPushdown(coll,
[{$group: {_id: "$item", s: {$sum: "$price"}}}],
@ -507,16 +395,14 @@ assert.commandWorked(coll.insert(docs));
const verifyGroupPushdownWhenSubplanning = () => {
const matchWithOr = {$match: {$or: [{"item": "a"}, {"price": 10}]}};
const groupPushedDown = {$group: {_id: "$item", quantity: {$sum: "$quantity"}}};
assertResultsMatchWithAndWithoutGroupPushdown(
coll,
assertResultsMatchWithAndWithoutPushdown(coll,
[matchWithOr, groupPushedDown],
[{_id: "a", quantity: 7}, {_id: "b", quantity: 10}],
1);
// A trivial $and with only one $or will be optimized away and thus $or will be the top
// expression.
const matchWithTrivialAndOr = {$match: {$and: [{$or: [{"item": "a"}, {"price": 10}]}]}};
assertResultsMatchWithAndWithoutGroupPushdown(
coll,
assertResultsMatchWithAndWithoutPushdown(coll,
[matchWithTrivialAndOr, groupPushedDown],
[{_id: "a", quantity: 7}, {_id: "b", quantity: 10}],
1);
@ -551,7 +437,7 @@ assertNoGroupPushdown(
]);
// Verify that $bucket is pushed down to SBE and returns correct results.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{
$bucket:
@ -559,7 +445,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
}],
[{"_id": 1, "quantity": 15}, {"_id": 10, "quantity": 13}]);
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{
$bucket: {
@ -571,7 +457,7 @@ assertResultsMatchWithAndWithoutGroupPushdown(
[{"_id": 1, "count": 5, "quantity": 28}]);
// Verify that $sortByCount is pushed down to SBE and returns correct results.
assertResultsMatchWithAndWithoutGroupPushdown(
assertResultsMatchWithAndWithoutPushdown(
coll,
[{$sortByCount: "$item"}],
[{"_id": "a", "count": 2}, {"_id": "b", "count": 2}, {"_id": "c", "count": 1}]);

View File

@ -9,7 +9,7 @@
load("jstests/libs/analyze_plan.js"); // For explain helpers.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const isSBEEnabled = checkSBEEnabled(db);
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
if (!isSBEEnabled) {
// This test is only relevant when SBE is enabled.

View File

@ -31,7 +31,7 @@ function runTest(queryToCache, queryToRunAfterCaching) {
// a different planCacheKey. The SBE plan cache, on the other hand, does not auto-parameterize
// $in or $eq involving a constant of type array, and therefore will consider the two queries to
// have different shapes.
if (checkSBEEnabled(db, ["featureFlagSbePlanCache"])) {
if (checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"])) {
assert.neq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);
} else {
assert.eq(explain.queryPlanner.queryHash, cacheEntries[0].queryHash);

View File

@ -10,7 +10,8 @@ load('jstests/libs/analyze_plan.js'); // For getPlanStage().
load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const isSbePlanCacheEnabled = checkSBEEnabled(db, ["featureFlagSbePlanCache"]);
const isSbePlanCacheEnabled =
checkSBEEnabled(db, ["featureFlagSbePlanCache", "featureFlagSbeFull"]);
let coll = assertDropAndRecreateCollection(db, "plan_cache_replanning");

View File

@ -7,7 +7,7 @@
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
const isSBEEnabled = checkSBEEnabled(db);
const isSBEEnabled = checkSBEEnabled(db, ["featureFlagSbeFull"]);
if (!isSBEEnabled) {
jsTestLog("Skipping test because the SBE feature flag is disabled");
return;

View File

@ -11,7 +11,6 @@
load("jstests/aggregation/extras/utils.js"); // for arrayEq
load("jstests/libs/discover_topology.js"); // For findDataBearingNodes.
load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
function runTests(withDefaultCollationColl, withoutDefaultCollationColl, collation) {
// Test that the $lookup stage respects the inherited collation.

View File

@ -66,7 +66,6 @@
#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h"
#include "mongo/db/pipeline/document_source_lookup.h"
#include "mongo/db/pipeline/document_source_match.h"
#include "mongo/db/pipeline/document_source_project.h"
#include "mongo/db/pipeline/document_source_sample.h"
#include "mongo/db/pipeline/document_source_sample_from_random_cursor.h"
#include "mongo/db/pipeline/document_source_single_document_transformation.h"
@ -110,24 +109,23 @@ namespace {
* pipeline to prepare for pushdown of $group and $lookup into the inner query layer so that it
* can be executed using SBE.
* Group stages are extracted from the pipeline when all of the following conditions are met:
* 0. When the 'internalQueryForceClassicEngine' feature flag is 'false'.
* 1. When 'allowDiskUse' is false. We currently don't support spilling in the SBE HashAgg
* stage. This will change once that is supported when SERVER-58436 is complete.
* 2. When the DocumentSourceGroup has 'doingMerge=false', this will change when we implement
* hash table spilling in SERVER-58436.
* - When the 'internalQueryForceClassicEngine' query knob is 'false'.
* - When the 'internalQuerySlotBasedExecutionDisableGroupPushdown' query knob is 'false'.
* - When the 'featureFlagSBEGroupPushdown' feature flag is 'true'.
* - When the DocumentSourceGroup has 'doingMerge=false'.
*
* Lookup stages are extracted from the pipeline when all of the following conditions are met:
* 0. When the 'internalQueryForceClassicEngine' feature flag is 'false'.
* 1. When the 'featureFlagSBELookupPushdown' feature flag is 'true'.
* 2. The $lookup uses only the 'localField'/'foreignField' syntax (no pipelines).
* 3. The foreign collection is neither sharded nor a view.
* - When the 'internalQueryForceClassicEngine' query knob is 'false'.
* - When the 'internalQuerySlotBasedExecutionDisableLookupPushdown' query knob is 'false'.
* - When the 'featureFlagSBELookupPushdown' feature flag is 'true'.
* - The $lookup uses only the 'localField'/'foreignField' syntax (no pipelines).
* - The foreign collection is neither sharded nor a view.
*/
std::vector<std::unique_ptr<InnerPipelineStageInterface>> extractSbeCompatibleStagesForPushdown(
const intrusive_ptr<ExpressionContext>& expCtx,
const MultipleCollectionAccessor& collections,
const CanonicalQuery* cq,
Pipeline* pipeline,
const bool origSbeCompatible) {
Pipeline* pipeline) {
// We will eventually use the extracted group stages to populate 'CanonicalQuery::pipeline'
// which requires stages to be wrapped in an interface.
std::vector<std::unique_ptr<InnerPipelineStageInterface>> stagesForPushdown;
@ -185,23 +183,6 @@ std::vector<std::unique_ptr<InnerPipelineStageInterface>> extractSbeCompatibleSt
break;
}
// $project pushdown logic.
if (auto projectStage =
dynamic_cast<DocumentSourceSingleDocumentTransformation*>(itr->get())) {
bool projectEligibleForPushdown = feature_flags::gFeatureFlagSBEGroupPushdown.isEnabled(
serverGlobalParams.featureCompatibility) &&
origSbeCompatible &&
(projectStage->getType() ==
TransformerInterface::TransformerType::kInclusionProjection);
if (projectEligibleForPushdown) {
stagesForPushdown.push_back(std::make_unique<InnerPipelineStageImpl>(projectStage));
sources.erase(itr++);
continue;
}
break;
}
// $lookup pushdown logic.
if (auto lookupStage = dynamic_cast<DocumentSourceLookUp*>(itr->get())) {
if (disallowLookupPushdown) {
@ -268,7 +249,6 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
// Reset the 'sbeCompatible' flag before canonicalizing the 'findCommand' to potentially allow
// SBE to execute the portion of the query that's pushed down, even if the portion of the query
// that is not pushed down contains expressions not supported by SBE.
bool origSbeCompatible = expCtx->sbeCompatible;
expCtx->sbeCompatible = true;
auto cq = CanonicalQuery::canonicalize(expCtx->opCtx,
@ -321,13 +301,12 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> attemptToGetExe
}
auto permitYield = true;
return getExecutorFind(
expCtx->opCtx,
return getExecutorFind(expCtx->opCtx,
collections,
std::move(cq.getValue()),
[&, origSbeCompatible](auto* canonicalQuery) {
[&](auto* canonicalQuery) {
canonicalQuery->setPipeline(extractSbeCompatibleStagesForPushdown(
expCtx, collections, canonicalQuery, pipeline, origSbeCompatible));
expCtx, collections, canonicalQuery, pipeline));
},
permitYield,
plannerOpts);
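
The pushdown conditions documented in the comment above can be observed from the shell. An illustrative sketch (not part of the commit), assuming featureFlagSbeFull is off, 'orders' and 'inventory' are hypothetical unsharded local collections, and explainVersion is used as in the new testLookupGroupIsRequiredForPushdown test case:

// Equality-match $lookup (localField/foreignField only): eligible for pushdown, runs in SBE.
const eligible = [{$lookup: {from: "inventory", localField: "item", foreignField: "sku", as: "docs"}}];
assert.eq(db.orders.explain().aggregate(eligible).explainVersion, "2");
// Pipeline-syntax $lookup: not eligible, so with no other pushed-down $group/$lookup the
// query falls back to the classic engine.
const ineligible = [{$lookup: {from: "inventory", pipeline: [{$match: {qty: {$gt: 0}}}], as: "docs"}}];
assert.eq(db.orders.explain().aggregate(ineligible).explainVersion, "1");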

View File

@ -1295,13 +1295,8 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getSlotBasedExe
OperationContext* opCtx,
const MultipleCollectionAccessor& collections,
std::unique_ptr<CanonicalQuery> cq,
std::function<void(CanonicalQuery*)> extractAndAttachPipelineStages,
PlanYieldPolicy::YieldPolicy requestedYieldPolicy,
size_t plannerOptions) {
invariant(cq);
if (extractAndAttachPipelineStages) {
extractAndAttachPipelineStages(cq.get());
}
// Mark that this query uses the SBE engine, unless this has already been set.
OpDebug& opDebug = CurOp::get(opCtx)->debug();
if (!opDebug.classicEngineUsed) {
@ -1383,17 +1378,31 @@ StatusWith<std::unique_ptr<PlanExecutor, PlanExecutor::Deleter>> getExecutor(
std::function<void(CanonicalQuery*)> extractAndAttachPipelineStages,
PlanYieldPolicy::YieldPolicy yieldPolicy,
size_t plannerOptions) {
invariant(canonicalQuery);
const auto& mainColl = collections.getMainCollection();
canonicalQuery->setSbeCompatible(
sbe::isQuerySbeCompatible(&mainColl, canonicalQuery.get(), plannerOptions));
return !canonicalQuery->getForceClassicEngine() && canonicalQuery->isSbeCompatible()
? getSlotBasedExecutor(opCtx,
collections,
std::move(canonicalQuery),
extractAndAttachPipelineStages,
yieldPolicy,
plannerOptions)
: getClassicExecutor(
// Use SBE if 'canonicalQuery' is SBE compatible.
if (!canonicalQuery->getForceClassicEngine() && canonicalQuery->isSbeCompatible()) {
if (extractAndAttachPipelineStages) {
extractAndAttachPipelineStages(canonicalQuery.get());
}
// TODO SERVER-65960: Optionally refactor this logic once we have a mechanism to reattach
// pipeline stages.
// Use SBE if we find any $group/$lookup stages eligible for execution in SBE or if SBE
// is fully enabled. Otherwise, fall back to the classic engine.
if (canonicalQuery->pipeline().empty() &&
!feature_flags::gFeatureFlagSbeFull.isEnabledAndIgnoreFCV()) {
canonicalQuery->setSbeCompatible(false);
} else {
return getSlotBasedExecutor(
opCtx, collections, std::move(canonicalQuery), yieldPolicy, plannerOptions);
}
}
return getClassicExecutor(
opCtx, mainColl, std::move(canonicalQuery), yieldPolicy, plannerOptions);
}

View File

@ -81,8 +81,7 @@ feature_flags:
featureFlagSbePlanCache:
description: "Feature flag for enabling use of the SBE plan cache"
cpp_varname: gFeatureFlagSbePlanCache
default: true
version: 6.0
default: false
featureFlagSortArray:
description: "Feature flag for allowing use of the $sortArray aggregation expression"
@ -151,3 +150,8 @@ feature_flags:
description: "Feature flag to enable reading change events from the change collection rather than the oplog"
cpp_varname: gFeatureFlagServerlessChangeStreams
default: false
featureFlagSbeFull:
description: "Feature flag to enable using SBE for a larger number of queries"
cpp_varname: gFeatureFlagSbeFull
default: false
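
The new flag defaults to off, so tests that still want SBE for all eligible queries (not just $group/$lookup pushdown) have to opt in at startup. A hypothetical snippet following the MongoRunner pattern used elsewhere in this commit:

const conn = MongoRunner.runMongod({setParameter: "featureFlagSbeFull=true"});
assert.neq(null, conn, "mongod was unable to start up");
// ... assertions that rely on SBE handling plain find queries go here ...
MongoRunner.stopMongod(conn);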

View File

@ -659,7 +659,7 @@ server_parameters:
set_at: [ startup, runtime ]
cpp_varname: "internalQueryForceClassicEngine"
cpp_vartype: AtomicWord<bool>
default: true
default: false
internalQueryAppendIdToSetWindowFieldsSort:
description: "If true, appends _id to the sort stage generated by desugaring $setWindowFields to

View File

@ -46,7 +46,6 @@
#include "mongo/db/matcher/expression_text.h"
#include "mongo/db/pipeline/document_source_group.h"
#include "mongo/db/pipeline/document_source_lookup.h"
#include "mongo/db/pipeline/document_source_single_document_transformation.h"
#include "mongo/db/query/canonical_query.h"
#include "mongo/db/query/classic_plan_cache.h"
#include "mongo/db/query/collation/collation_index_key.h"
@ -56,8 +55,6 @@
#include "mongo/db/query/planner_access.h"
#include "mongo/db/query/planner_analysis.h"
#include "mongo/db/query/planner_ixselect.h"
#include "mongo/db/query/projection_parser.h"
#include "mongo/db/query/query_knobs_gen.h"
#include "mongo/db/query/query_planner_common.h"
#include "mongo/db/query/query_solution.h"
#include "mongo/logv2/log.h"
@ -1389,26 +1386,6 @@ std::unique_ptr<QuerySolution> QueryPlanner::extendWithAggPipeline(
continue;
}
auto projStage =
dynamic_cast<DocumentSourceSingleDocumentTransformation*>(innerStage->documentSource());
if (projStage) {
auto projObj =
projStage->getTransformer().serializeTransformation(boost::none).toBson();
auto projAst =
projection_ast::parseAndAnalyze(projStage->getContext(),
projObj,
ProjectionPolicies::aggregateProjectionPolicies());
if (projAst.isSimple()) {
solnForAgg = std::make_unique<ProjectionNodeSimple>(
std::move(solnForAgg), *query.root(), projAst);
} else {
solnForAgg = std::make_unique<ProjectionNodeDefault>(
std::move(solnForAgg), *query.root(), projAst);
}
continue;
}
auto lookupStage = dynamic_cast<DocumentSourceLookUp*>(innerStage->documentSource());
if (lookupStage) {
tassert(6369000,
@ -1430,8 +1407,7 @@ std::unique_ptr<QuerySolution> QueryPlanner::extendWithAggPipeline(
}
tasserted(5842400,
"Cannot support pushdown of a stage other than $group $project or $lookup at the "
"moment");
"Cannot support pushdown of a stage other than $group or $lookup at the moment");
}
solution->extendWith(std::move(solnForAgg));