mirror of https://github.com/mongodb/mongo
SERVER-107427 Add originalQueryShapeHash to slow query logs on mongod (#44945)
GitOrigin-RevId: 76a1410ab52e0aed023e695368090661647d80b7
This commit is contained in:
parent
dbfa36321a
commit
785512528b
|
|
@ -0,0 +1,378 @@
|
|||
// Tests that 'originalQueryShapeHash' appears in slow query logs on the shards when the command
// originates from the router. 'originalQueryShapeHash' is not expected to appear for getMore and
// explain.
//
// @tags: [
//   requires_profiling,
//   # Profile command doesn't support stepdowns.
//   does_not_support_stepdowns,
//   # Cowardly refusing to run test that interacts with the system profiler as the 'system.profile'
//   # collection is not replicated.
//   does_not_support_causal_consistency,
//   # Does not support transactions as the test issues getMores which cannot be started in a transaction.
//   does_not_support_transactions,
//   requires_fcv_83,
// ]

import {after, before, describe, it} from "jstests/libs/mochalite.js";
import {ShardingTest} from "jstests/libs/shardingtest.js";
import {getExplainCommand} from "jstests/libs/cmd_object_utils.js";
||||
describe("originalQueryShapeHash appears in slow logs", function () {
    let st;
    let routerDB;
    let shard0DB;

    // Collection configurations to test.
    const collNames = {
        unsharded: "unsharded_coll",
        two_shard: "sharded_two_shards_coll",
        one_shard: "sharded_one_shard_coll",
    };

    before(function () {
        st = new ShardingTest({shards: 2, mongos: 1});
        assert(st.adminCommand({enableSharding: jsTestName(), primaryShard: st.shard0.shardName}));
        routerDB = st.s.getDB(jsTestName());
        shard0DB = st.shard0.getDB(jsTestName());

        // Set up unsharded collection.
        const unshardedColl = routerDB[collNames.unsharded];
        unshardedColl.drop();
        unshardedColl.insertMany([
            {x: 4, y: 1},
            {x: 4, y: 2},
            {x: 5, y: 2},
            {x: 6, y: 3},
        ]);
        assert.commandWorked(routerDB.adminCommand({untrackUnshardedCollection: unshardedColl.getFullName()}));

        // Set up sharded collection on two shards.
        const twoShardColl = routerDB[collNames.two_shard];
        twoShardColl.drop();
        twoShardColl.insertMany([
            {x: 4, y: 1},
            {x: 4, y: 2},
            {x: 5, y: 2},
            {x: 6, y: 3},
            {x: 7, y: 3},
        ]);
        twoShardColl.createIndex({y: 1});
        assert.commandWorked(routerDB.adminCommand({shardCollection: twoShardColl.getFullName(), key: {y: 1}}));
        // Split and move chunks to ensure data is on both shards.
        assert.commandWorked(routerDB.adminCommand({split: twoShardColl.getFullName(), middle: {y: 2}}));
        assert.commandWorked(
            routerDB.adminCommand({
                moveChunk: twoShardColl.getFullName(),
                find: {y: 3},
                to: st.shard1.shardName,
            }),
        );

        // Set up sharded collection on one shard.
        const oneShardColl = routerDB[collNames.one_shard];
        oneShardColl.drop();
        oneShardColl.insertMany([
            {x: 4, y: 1},
            {x: 4, y: 2},
            {x: 5, y: 2},
            {x: 6, y: 3},
        ]);
        oneShardColl.createIndex({y: 1});
        assert.commandWorked(routerDB.adminCommand({shardCollection: oneShardColl.getFullName(), key: {y: 1}}));

        // Set slow query threshold to -1 so every query gets logged on the router and shards.
        routerDB.setProfilingLevel(0, -1);
        shard0DB.setProfilingLevel(0, -1);
        const shard1DB = st.shard1.getDB(jsTestName());
        shard1DB.setProfilingLevel(0, -1);
    });

    after(function () {
        st.stop();
    });

    // Returns every "Slow query" log line on 'testDB' whose comment matches 'queryComment'. When
    // 'commandType' is "getMore", matches getMore entries through their originating command's
    // comment; otherwise matches non-getMore entries through the command's own comment.
    function getSlowQueryLogLines({queryComment, testDB, commandType = null}) {
        const parsedEntries = assert
            .commandWorked(testDB.adminCommand({getLog: "global"}))
            .log.map((line) => JSON.parse(line));
        const slowQueryLogs = parsedEntries.filter((entry) => {
            const isSlowQueryEntry = entry.msg === "Slow query" && entry.attr && entry.attr.command;
            if (!isSlowQueryEntry) {
                return false;
            }
            if (commandType === "getMore") {
                return entry.attr.command.getMore && entry.attr.originatingCommand.comment == queryComment;
            }
            return entry.attr.command.comment == queryComment && !entry.attr.command.getMore;
        });
        jsTest.log.debug("Slow query logs", {queryComment, commandType, slowQueryLogs});
        return slowQueryLogs;
    }

    // Like getSlowQueryLogLines(), but fails the test if no matching line was found.
    function getSlowQueryLogLinesFromComment({queryComment, testDB, commandType = null}) {
        const slowQueryLogs = getSlowQueryLogLines({queryComment, testDB, commandType});
        assert(
            slowQueryLogs.length > 0,
            `No slow query log found for comment: ${queryComment}, commandType: ${commandType}`,
        );
        return slowQueryLogs;
    }

    // Asserts whether a hash field should appear in slow query logs. If it should appear, asserts
    // all slow query logs have the same value. The 'key' parameter specifies which field to check:
    // - "queryShapeHash": checks log.attr.queryShapeHash
    // - "originalQueryShapeHash": checks log.attr.command.originalQueryShapeHash
    // Returns the (last observed) hash value, or undefined when 'shouldAppear' is false.
    function assertHashInSlowLogs({comment, testDB, key, shouldAppear = true}) {
        let observedHash;
        for (const slowQueryLog of getSlowQueryLogLinesFromComment({queryComment: comment, testDB})) {
            const value =
                key === "originalQueryShapeHash"
                    ? slowQueryLog.attr.command.originalQueryShapeHash
                    : slowQueryLog.attr.queryShapeHash;
            assert(
                shouldAppear == Boolean(value),
                `${key} expected to ${shouldAppear ? "appear" : "not appear"} in slow query log. Received: ` +
                    tojson(slowQueryLog),
            );
            // Once a value is seen, every subsequent log must carry the identical hash.
            if (observedHash) {
                assert.eq(
                    observedHash,
                    value,
                    `Inconsistent ${key} in slow logs. Slow Query Log: ` + tojson(slowQueryLog),
                );
            }
            observedHash = value;
        }
        return observedHash;
    }

    // Runs 'query' through the router and verifies 'originalQueryShapeHash' reporting: absent on
    // the router, present (and equal to the router's 'queryShapeHash') on the shard, and absent
    // for getMore and explain variants of the same query.
    function testQueryShapeHash(query) {
        // Run command to create slow query logs.
        const result = assert.commandWorked(routerDB.runCommand(query));

        // Assert 'originalQueryShapeHash' doesn't appear on the router.
        assertHashInSlowLogs({
            comment: query.comment,
            testDB: routerDB,
            key: "originalQueryShapeHash",
            shouldAppear: false,
        });

        // Assert 'originalQueryShapeHash' appears on the shard(s).
        const originalQueryShapeHash = assertHashInSlowLogs({
            comment: query.comment,
            testDB: shard0DB,
            key: "originalQueryShapeHash",
        });

        // 'originalQueryShapeHash' on the shard should be the same as 'queryShapeHash' on the router.
        const routerQueryShapeHash = assertHashInSlowLogs({
            comment: query.comment,
            testDB: routerDB,
            key: "queryShapeHash",
        });
        assert.eq(
            routerQueryShapeHash,
            originalQueryShapeHash,
            "originalQueryShapeHash and routerQueryShapeHash do not match",
        );

        // If cursor is present, issue getMores and verify 'originalQueryShapeHash' doesn't appear.
        // TODO SERVER-115109 update test to verify 'originalQueryShapeHash' does appear.
        if (result.cursor) {
            new DBCommandCursor(routerDB, result).itcount(); // exhaust the cursor
            const getMoreLogs = [
                ...getSlowQueryLogLines({queryComment: query.comment, testDB: shard0DB, commandType: "getMore"}),
                ...getSlowQueryLogLines({queryComment: query.comment, testDB: routerDB, commandType: "getMore"}),
            ];
            const logsWithOriginalHash = getMoreLogs.filter((log) => log.attr.command.originalQueryShapeHash);
            assert.eq(
                logsWithOriginalHash.length,
                0,
                "getMore should not have originalQueryShapeHash: " + tojson(logsWithOriginalHash),
            );
        }

        // Run explain. 'originalQueryShapeHash' should not appear in the slow query logs for explain.
        const explainComment = query.comment + "_explain";
        assert.commandWorked(routerDB.runCommand(getExplainCommand({...query, comment: explainComment})));
        for (const testDB of [routerDB, shard0DB]) {
            assertHashInSlowLogs({
                comment: explainComment,
                testDB,
                key: "originalQueryShapeHash",
                shouldAppear: false,
            });
        }
    }

    // Generate test cases for each collection type.
    for (const collName of Object.values(collNames)) {
        const viewName = collName + "_view";

        describe(`running tests on ${collName}`, function () {
            before(function () {
                // Create a view on top of this collection for view tests.
                routerDB[viewName].drop();
                assert.commandWorked(routerDB.createView(viewName, collName, [{$addFields: {z: 1}}]));
            });

            it("should be reported for find", function () {
                testQueryShapeHash({
                    find: collName,
                    filter: {x: 4},
                    batchSize: 0,
                    comment: `!!find ${collName} test`,
                });
            });

            it("should be reported for find on a view", function () {
                testQueryShapeHash({
                    find: viewName,
                    filter: {x: 4},
                    batchSize: 0,
                    comment: `!!find view ${collName} test`,
                });
            });

            it("should be reported for aggregate", function () {
                testQueryShapeHash({
                    aggregate: collName,
                    pipeline: [{$match: {x: 4}}],
                    cursor: {batchSize: 0},
                    comment: `!!aggregate ${collName} test`,
                });
            });

            it("should be reported for aggregate on a view", function () {
                testQueryShapeHash({
                    aggregate: viewName,
                    pipeline: [{$match: {x: 4}}],
                    cursor: {batchSize: 0},
                    comment: `!!aggregate view ${collName} test`,
                });
            });

            it("should be reported for count", function () {
                testQueryShapeHash({
                    count: collName,
                    query: {x: 4},
                    comment: `!!count ${collName} test`,
                });
            });

            it("should be reported for count on a view", function () {
                testQueryShapeHash({
                    count: viewName,
                    query: {x: 4},
                    comment: `!!count view ${collName} test`,
                });
            });

            it("should be reported for distinct", function () {
                testQueryShapeHash({
                    distinct: collName,
                    key: "x",
                    query: {x: 4},
                    comment: `!!distinct ${collName} test`,
                });
            });

            it("should be reported for distinct on a view", function () {
                testQueryShapeHash({
                    distinct: viewName,
                    key: "x",
                    query: {x: 4},
                    comment: `!!distinct view ${collName} test`,
                });
            });
        });
    }

    // Tests that 'originalQueryShapeHash' cannot be set by users.
    const exampleHash = "90114C24B1F2EB8B3EBCD0F8387199B8C85D3D202DD68126CD9143AFC684E6BF";
    const collName = collNames.unsharded;

    it("should error when find includes originalQueryShapeHash on router", function () {
        assert.commandFailedWithCode(
            routerDB.runCommand({find: collName, filter: {x: 4}, originalQueryShapeHash: exampleHash}),
            10742703,
        );
    });

    it("should error when find includes originalQueryShapeHash on shard", function () {
        assert.commandFailedWithCode(
            shard0DB.runCommand({find: collName, filter: {x: 4}, originalQueryShapeHash: exampleHash}),
            10742702,
        );
    });

    it("should error when aggregate includes originalQueryShapeHash on router", function () {
        assert.commandFailedWithCode(
            routerDB.runCommand({
                aggregate: collName,
                pipeline: [{$match: {x: 4}}],
                cursor: {},
                originalQueryShapeHash: exampleHash,
            }),
            10742706,
        );
    });

    it("should error when aggregate includes originalQueryShapeHash on shard", function () {
        assert.commandFailedWithCode(
            shard0DB.runCommand({
                aggregate: collName,
                pipeline: [{$match: {x: 4}}],
                cursor: {},
                originalQueryShapeHash: exampleHash,
            }),
            10742706,
        );
    });

    it("should error when count includes originalQueryShapeHash on router", function () {
        assert.commandFailedWithCode(
            routerDB.runCommand({count: collName, query: {x: 4}, originalQueryShapeHash: exampleHash}),
            10742704,
        );
    });

    it("should error when count includes originalQueryShapeHash on shard", function () {
        assert.commandFailedWithCode(
            shard0DB.runCommand({count: collName, query: {x: 4}, originalQueryShapeHash: exampleHash}),
            10742702,
        );
    });

    it("should error when distinct includes originalQueryShapeHash on router", function () {
        assert.commandFailedWithCode(
            routerDB.runCommand({distinct: collName, key: "x", query: {x: 4}, originalQueryShapeHash: exampleHash}),
            10742700,
        );
    });

    it("should error when distinct includes originalQueryShapeHash on shard", function () {
        assert.commandFailedWithCode(
            shard0DB.runCommand({distinct: collName, key: "x", query: {x: 4}, originalQueryShapeHash: exampleHash}),
            10742702,
        );
    });
});
|
||||
|
|
@ -469,7 +469,7 @@ void AggExState::performValidationChecks() const {
|
|||
auto& liteParsedPipeline = _aggReqDerivatives->liteParsedPipeline;
|
||||
|
||||
liteParsedPipeline.validate(_opCtx);
|
||||
aggregation_request_helper::validateRequestForAPIVersion(_opCtx, request);
|
||||
aggregation_request_helper::validateRequestWithClient(_opCtx, request);
|
||||
aggregation_request_helper::validateRequestFromClusterQueryWithoutShardKey(request);
|
||||
|
||||
// If we are in a transaction, check whether the parsed pipeline supports being in
|
||||
|
|
|
|||
|
|
@ -65,6 +65,9 @@
|
|||
#include "mongo/db/query/plan_explainer.h"
|
||||
#include "mongo/db/query/plan_summary_stats.h"
|
||||
#include "mongo/db/query/query_settings/query_settings_gen.h"
|
||||
#include "mongo/db/query/query_shape/count_cmd_shape.h"
|
||||
#include "mongo/db/query/query_shape/query_shape_hash.h"
|
||||
#include "mongo/db/query/query_shape/shape_helpers.h"
|
||||
#include "mongo/db/query/query_stats/count_key.h"
|
||||
#include "mongo/db/query/query_stats/query_stats.h"
|
||||
#include "mongo/db/query/shard_key_diagnostic_printer.h"
|
||||
|
|
@ -366,7 +369,7 @@ public:
|
|||
parsed_find_command::parseFromCount(expCtx, request(), *extensionsCallback, ns));
|
||||
|
||||
registerRequestForQueryStats(
|
||||
opCtx, expCtx, curOp, *collOrViewAcquisition, request(), *parsedFind);
|
||||
opCtx, expCtx, curOp, *collOrViewAcquisition, request(), *parsedFind, ns);
|
||||
|
||||
if (collOrViewAcquisition) {
|
||||
if (collOrViewAcquisition->isView() ||
|
||||
|
|
@ -507,19 +510,29 @@ public:
|
|||
CurOp* curOp,
|
||||
const CollectionOrViewAcquisition& collectionOrView,
|
||||
const CountCommandRequest& req,
|
||||
const ParsedFindCommand& parsedFind) {
|
||||
const ParsedFindCommand& parsedFind,
|
||||
const NamespaceString& ns) {
|
||||
// Compute QueryShapeHash and record it in CurOp.
|
||||
query_shape::DeferredQueryShape deferredShape{[&]() {
|
||||
return shape_helpers::tryMakeShape<query_shape::CountCmdShape>(
|
||||
parsedFind, req.getLimit().has_value(), req.getSkip().has_value());
|
||||
}};
|
||||
boost::optional<query_shape::QueryShapeHash> queryShapeHash =
|
||||
CurOp::get(opCtx)->debug().ensureQueryShapeHash(opCtx, [&]() {
|
||||
return shape_helpers::computeQueryShapeHash(expCtx, deferredShape, ns);
|
||||
});
|
||||
|
||||
if (feature_flags::gFeatureFlagQueryStatsCountDistinct
|
||||
.isEnabledUseLastLTSFCVWhenUninitialized(
|
||||
VersionContext::getDecoration(opCtx),
|
||||
serverGlobalParams.featureCompatibility.acquireFCVSnapshot())) {
|
||||
query_stats::registerRequest(opCtx, _ns, [&]() {
|
||||
uassertStatusOKWithContext(deferredShape->getStatus(),
|
||||
"Failed to compute query shape");
|
||||
return std::make_unique<query_stats::CountKey>(
|
||||
expCtx,
|
||||
parsedFind,
|
||||
req.getLimit().has_value(),
|
||||
req.getSkip().has_value(),
|
||||
req.getReadConcern(),
|
||||
req.getMaxTimeMS().has_value(),
|
||||
req,
|
||||
std::move(deferredShape->getValue()),
|
||||
collectionOrView.getCollectionType());
|
||||
});
|
||||
|
||||
|
|
|
|||
|
|
@ -75,6 +75,7 @@
|
|||
#include "mongo/db/query/query_shape/query_shape.h"
|
||||
#include "mongo/db/query/query_stats/distinct_key.h"
|
||||
#include "mongo/db/query/query_stats/query_stats.h"
|
||||
#include "mongo/db/query/query_utils.h"
|
||||
#include "mongo/db/query/shard_key_diagnostic_printer.h"
|
||||
#include "mongo/db/query/timeseries/timeseries_translation.h"
|
||||
#include "mongo/db/query/view_response_formatter.h"
|
||||
|
|
@ -142,12 +143,7 @@ std::unique_ptr<CanonicalQuery> parseDistinctCmd(
|
|||
// Start the query planning timer right after parsing.
|
||||
CurOp::get(opCtx)->beginQueryPlanningTimer();
|
||||
|
||||
// Forbid users from passing 'querySettings' explicitly.
|
||||
uassert(7923000,
|
||||
"BSON field 'querySettings' is an unknown field",
|
||||
query_settings::allowQuerySettingsFromClient(opCtx->getClient()) ||
|
||||
!distinctCommand->getQuerySettings().has_value());
|
||||
|
||||
assertInternalParamsAreSetByInternalClients(opCtx->getClient(), *distinctCommand);
|
||||
auto expCtx = ExpressionContextBuilder{}
|
||||
.fromRequest(opCtx, *distinctCommand, defaultCollator)
|
||||
.ns(nss)
|
||||
|
|
|
|||
|
|
@ -1164,11 +1164,7 @@ private:
|
|||
CommandHelpers::ensureValidCollectionName(nss.nss());
|
||||
}
|
||||
|
||||
// Forbid users from passing 'querySettings' explicitly.
|
||||
uassert(7746901,
|
||||
"BSON field 'querySettings' is an unknown field",
|
||||
query_settings::allowQuerySettingsFromClient(opCtx->getClient()) ||
|
||||
!findCommand->getQuerySettings().has_value());
|
||||
assertInternalParamsAreSetByInternalClients(opCtx->getClient(), *findCommand);
|
||||
|
||||
uassert(ErrorCodes::FailedToParse,
|
||||
"Use of forcedPlanSolutionHash not permitted.",
|
||||
|
|
|
|||
|
|
@ -46,6 +46,7 @@ imports:
|
|||
- "mongo/db/query/hint.idl"
|
||||
- "mongo/db/query/query_settings/query_settings.idl"
|
||||
- "mongo/db/write_concern_options.idl"
|
||||
- "mongo/db/query/query_shape/query_shape_hash.idl"
|
||||
|
||||
types:
|
||||
pipeline:
|
||||
|
|
@ -389,3 +390,11 @@ commands:
|
|||
type: safeInt64
|
||||
optional: true
|
||||
stability: internal
|
||||
originalQueryShapeHash:
|
||||
description:
|
||||
"The query shape hash of the first query to enter the query system. For example
|
||||
if the router received a query and this is the shard's portion of the query,
|
||||
this hash will be the hash of the query that the router saw."
|
||||
type: QueryShapeHash
|
||||
optional: true
|
||||
stability: internal
|
||||
|
|
|
|||
|
|
@ -165,7 +165,7 @@ void validate(const AggregateCommandRequest& aggregate,
|
|||
}
|
||||
}
|
||||
|
||||
void validateRequestForAPIVersion(const OperationContext* opCtx,
|
||||
void validateRequestWithClient(const OperationContext* opCtx,
|
||||
const AggregateCommandRequest& request) {
|
||||
invariant(opCtx);
|
||||
|
||||
|
|
@ -187,6 +187,13 @@ void validateRequestForAPIVersion(const OperationContext* opCtx,
|
|||
<< apiVersion,
|
||||
isInternalThreadOrClient);
|
||||
}
|
||||
|
||||
// Forbid users from passing 'originalQueryShapeHash' explicitly.
|
||||
if (request.getOriginalQueryShapeHash()) {
|
||||
uassert(10742706,
|
||||
"BSON field 'originalQueryShapeHash' is an unknown field",
|
||||
isInternalThreadOrClient || client->isInDirectClient());
|
||||
}
|
||||
}
|
||||
|
||||
void validateRequestFromClusterQueryWithoutShardKey(const AggregateCommandRequest& request) {
|
||||
|
|
|
|||
|
|
@ -105,10 +105,10 @@ void addQuerySettingsToRequest(AggregateCommandRequest& request,
|
|||
const boost::intrusive_ptr<ExpressionContext>& expCtx);
|
||||
|
||||
/**
|
||||
* Validates if 'AggregateCommandRequest' specs complies with API versioning. Throws uassert in case
|
||||
* of any failure.
|
||||
* Validates if 'AggregateCommandRequest' specs complies with the current Client, which is required
|
||||
* for API versioning checks. Throws uassert in case of any failure.
|
||||
*/
|
||||
void validateRequestForAPIVersion(const OperationContext* opCtx,
|
||||
void validateRequestWithClient(const OperationContext* opCtx,
|
||||
const AggregateCommandRequest& request);
|
||||
/**
|
||||
* Validates if 'AggregateCommandRequest' sets the "isClusterQueryWithoutShardKeyCmd" field then the
|
||||
|
|
|
|||
|
|
@ -231,6 +231,10 @@ TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
|
|||
request.setIsClusterQueryWithoutShardKeyCmd(true);
|
||||
|
||||
request.setIncludeQueryStatsMetrics(true);
|
||||
const BSONObj query = BSON("hello" << 1);
|
||||
const HashBlock<SHA256BlockTraits> queryShapeHash =
|
||||
SHA256Block::computeHash((const uint8_t*)query.objdata(), query.objsize());
|
||||
request.setOriginalQueryShapeHash(queryShapeHash);
|
||||
|
||||
auto expectedSerialization = Document{
|
||||
{AggregateCommandRequest::kCommandName, nss.coll()},
|
||||
|
|
@ -248,6 +252,7 @@ TEST(AggregationRequestTest, ShouldSerializeOptionalValuesIfSet) {
|
|||
{AggregateCommandRequest::kCollectionUUIDFieldName, uuid},
|
||||
{AggregateCommandRequest::kIsClusterQueryWithoutShardKeyCmdFieldName, true},
|
||||
{AggregateCommandRequest::kIncludeQueryStatsMetricsFieldName, true},
|
||||
{AggregateCommandRequest::kOriginalQueryShapeHashFieldName, queryShapeHash.toHexString()},
|
||||
{query_request_helper::cmdOptionMaxTimeMS, 10},
|
||||
{repl::ReadConcernArgs::kReadConcernFieldName, readConcernObj},
|
||||
{query_request_helper::kUnwrappedReadPrefField, readPrefObj}};
|
||||
|
|
|
|||
|
|
@ -380,6 +380,7 @@ idl_generator(
|
|||
"//src/mongo/db:basic_types_gen",
|
||||
"//src/mongo/db/auth:access_checks_gen",
|
||||
"//src/mongo/db/auth:action_type_gen",
|
||||
"//src/mongo/db/query/query_shape:query_shape_hash_gen",
|
||||
"//src/mongo/idl:generic_argument_gen",
|
||||
],
|
||||
)
|
||||
|
|
|
|||
|
|
@ -39,6 +39,7 @@ imports:
|
|||
- "mongo/db/auth/action_type.idl"
|
||||
- "mongo/db/auth/access_checks.idl"
|
||||
- "mongo/db/query/hint.idl"
|
||||
- "mongo/db/query/query_shape/query_shape_hash.idl"
|
||||
|
||||
types:
|
||||
countLimit:
|
||||
|
|
@ -142,3 +143,11 @@ commands:
|
|||
description: "Indicates whether or not query stats metrics should be included in the response."
|
||||
type: optionalBool
|
||||
stability: internal
|
||||
originalQueryShapeHash:
|
||||
description:
|
||||
"The query shape hash of the first query to enter the query system. For example
|
||||
if the router received a query and this is the shard's portion of the query,
|
||||
this hash will be the hash of the query that the router saw."
|
||||
type: QueryShapeHash
|
||||
optional: true
|
||||
stability: internal
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ imports:
|
|||
- "mongo/db/basic_types.idl"
|
||||
- "mongo/db/query/hint.idl"
|
||||
- "mongo/db/query/query_settings/query_settings.idl"
|
||||
- "mongo/db/query/query_shape/query_shape_hash.idl"
|
||||
|
||||
commands:
|
||||
distinct:
|
||||
|
|
@ -75,3 +76,10 @@ commands:
|
|||
includeQueryStatsMetrics:
|
||||
description: "Determines whether or not to include query stats metrics in the response."
|
||||
type: optionalBool
|
||||
originalQueryShapeHash:
|
||||
description:
|
||||
"The query shape hash of the first query to enter the query system. For example
|
||||
if the router received a query and this is the shard's portion of the query,
|
||||
this hash will be the hash of the query that the router saw."
|
||||
type: QueryShapeHash
|
||||
optional: true
|
||||
|
|
|
|||
|
|
@ -45,6 +45,7 @@ imports:
|
|||
- "mongo/db/query/client_cursor/cursor_response.idl"
|
||||
- "mongo/db/query/hint.idl"
|
||||
- "mongo/db/query/query_settings/query_settings.idl"
|
||||
- "mongo/db/query/query_shape/query_shape_hash.idl"
|
||||
|
||||
types:
|
||||
boolNoOpSerializer:
|
||||
|
|
@ -264,3 +265,11 @@ commands:
|
|||
type: safeInt64
|
||||
optional: true
|
||||
stability: internal
|
||||
originalQueryShapeHash:
|
||||
description:
|
||||
"The query shape hash of the first query to enter the query system. For example
|
||||
if the router received a query and this is the shard's portion of the query,
|
||||
this hash will be the hash of the query that the router saw."
|
||||
type: QueryShapeHash
|
||||
optional: true
|
||||
stability: internal
|
||||
|
|
|
|||
|
|
@ -39,6 +39,7 @@
|
|||
#include "mongo/db/query/compiler/logical_model/projection/projection_parser.h"
|
||||
#include "mongo/db/query/query_planner_common.h"
|
||||
#include "mongo/db/query/query_request_helper.h"
|
||||
#include "mongo/db/query/query_utils.h"
|
||||
#include "mongo/util/assert_util.h"
|
||||
#include "mongo/util/str.h"
|
||||
|
||||
|
|
@ -356,6 +357,9 @@ StatusWith<std::unique_ptr<ParsedFindCommand>> parseFromCount(
|
|||
CollatorInterface::collatorsMatch(collator.get(), expCtx->getCollator()));
|
||||
}
|
||||
|
||||
assertInternalParamsAreSetByInternalClients(expCtx->getOperationContext()->getClient(),
|
||||
countCommand);
|
||||
|
||||
// Copy necessary count command fields to find command. Notably, the skip and limit fields are
|
||||
// _not_ copied because the count stage in the PlanExecutor already applies the skip and limit,
|
||||
// and thus copying the fields would involve erroneously skipping and limiting twice.
|
||||
|
|
|
|||
|
|
@ -328,3 +328,10 @@ feature_flags:
|
|||
default: false
|
||||
fcv_gated: false
|
||||
incremental_rollout_phase: in_development
|
||||
|
||||
featureFlagOriginalQueryShapeHash:
|
||||
description: "Feature flag to to pass 'originalQueryShapeHash' field in commands from router to shards."
|
||||
cpp_varname: gFeatureFlagOriginalQueryShapeHash
|
||||
default: true
|
||||
version: 8.3
|
||||
fcv_gated: true
|
||||
|
|
|
|||
|
|
@ -297,10 +297,6 @@ RepresentativeQueryInfo createRepresentativeInfoAgg(OperationContext* opCtx,
|
|||
};
|
||||
}
|
||||
|
||||
bool requestComesFromRouterOrSentDirectlyToShard(Client* client) {
|
||||
return client->isInternalClient() || client->isInDirectClient();
|
||||
}
|
||||
|
||||
void validateIndexKeyPatternStructure(const IndexHint& hint) {
|
||||
if (auto&& keyPattern = hint.getIndexKeyPattern()) {
|
||||
uassert(9646000, "key pattern index can't be empty", keyPattern->nFields() > 0);
|
||||
|
|
@ -563,7 +559,7 @@ public:
|
|||
}
|
||||
|
||||
auto* opCtx = expCtx->getOperationContext();
|
||||
if (requestComesFromRouterOrSentDirectlyToShard(opCtx->getClient()) ||
|
||||
if (isInternalOrDirectClient(opCtx->getClient()) ||
|
||||
querySettingsFromOriginalCommand.has_value()) {
|
||||
return querySettingsFromOriginalCommand.get_value_or(QuerySettings());
|
||||
}
|
||||
|
|
@ -849,7 +845,7 @@ bool allowQuerySettingsFromClient(Client* client) {
|
|||
// - comes from router (internal client), which has already performed the query settings lookup
|
||||
// or
|
||||
// - has been created interally and is executed via DBDirectClient.
|
||||
return requestComesFromRouterOrSentDirectlyToShard(client);
|
||||
return isInternalOrDirectClient(client);
|
||||
}
|
||||
|
||||
bool isDefault(const QuerySettings& settings) {
|
||||
|
|
|
|||
|
|
@ -44,17 +44,14 @@ namespace mongo::query_stats {
|
|||
class CountKey final : public Key {
|
||||
public:
|
||||
CountKey(const boost::intrusive_ptr<ExpressionContext>& expCtx,
|
||||
const ParsedFindCommand& findCommand,
|
||||
bool hasLimit,
|
||||
bool hasSkip,
|
||||
const boost::optional<repl::ReadConcernArgs>& readConcern,
|
||||
bool hasMaxTimeMS,
|
||||
const CountCommandRequest& request,
|
||||
std::unique_ptr<query_shape::Shape> countShape,
|
||||
query_shape::CollectionType collectionType = query_shape::CollectionType::kUnknown)
|
||||
: Key(expCtx->getOperationContext(),
|
||||
std::make_unique<query_shape::CountCmdShape>(findCommand, hasLimit, hasSkip),
|
||||
findCommand.findCommandRequest->getHint(),
|
||||
readConcern,
|
||||
hasMaxTimeMS,
|
||||
std::move(countShape),
|
||||
request.getHint(),
|
||||
request.getReadConcern(),
|
||||
request.getMaxTimeMS().has_value(),
|
||||
collectionType) {}
|
||||
|
||||
// The default implementation of hashing for smart pointers is not a good one for our purposes.
|
||||
|
|
|
|||
|
|
@ -47,9 +47,13 @@ public:
|
|||
make_intrusive<ExpressionContextForTest>();
|
||||
const SerializationOptions opts =
|
||||
SerializationOptions(SerializationOptions::kRepresentativeQueryShapeSerializeOptions);
|
||||
const std::unique_ptr<ParsedFindCommand> parsedRequest =
|
||||
uassertStatusOK(parsed_find_command::parseFromCount(
|
||||
expCtx, CountCommandRequest(testNss), ExtensionsCallbackNoop(), testNss));
|
||||
|
||||
std::unique_ptr<query_shape::CountCmdShape> makeCountShapeFromRequest(CountCommandRequest req) {
|
||||
const std::unique_ptr<ParsedFindCommand> parsedRequest = uassertStatusOK(
|
||||
parsed_find_command::parseFromCount(expCtx, req, ExtensionsCallbackNoop(), testNss));
|
||||
return std::make_unique<query_shape::CountCmdShape>(
|
||||
*parsedRequest, req.getLimit().has_value(), req.getSkip().has_value());
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -58,13 +62,9 @@ public:
|
|||
|
||||
// Test that a count command without any fields generates the expected key.
|
||||
TEST_F(CountKeyTest, DefaultCountKey) {
|
||||
const auto key = std::make_unique<CountKey>(expCtx,
|
||||
*parsedRequest,
|
||||
false /* hasLimit */,
|
||||
false /* hasSkip */,
|
||||
boost::none /* readConcern */,
|
||||
false /* maxTimeMS */,
|
||||
collectionType);
|
||||
const CountCommandRequest& countReq = CountCommandRequest(testNss);
|
||||
const auto key = std::make_unique<CountKey>(
|
||||
expCtx, countReq, makeCountShapeFromRequest(countReq), collectionType);
|
||||
|
||||
const auto expectedKey = fromjson(
|
||||
R"({
|
||||
|
|
@ -80,14 +80,10 @@ TEST_F(CountKeyTest, DefaultCountKey) {
|
|||
|
||||
// Test that the hint parameter is included in the key.
|
||||
TEST_F(CountKeyTest, CountHintKey) {
|
||||
parsedRequest->findCommandRequest->setHint(BSON("a" << 1));
|
||||
const auto key = std::make_unique<CountKey>(expCtx,
|
||||
*parsedRequest,
|
||||
false /* hasLimit */,
|
||||
false /* hasSkip */,
|
||||
boost::none /* readConcern */,
|
||||
false /* maxTimeMS */,
|
||||
collectionType);
|
||||
CountCommandRequest request = CountCommandRequest(testNss);
|
||||
request.setHint(BSON("a" << 1));
|
||||
const auto key = std::make_unique<CountKey>(
|
||||
expCtx, request, makeCountShapeFromRequest(request), collectionType);
|
||||
|
||||
const auto expectedKey = fromjson(
|
||||
R"({
|
||||
|
|
@ -104,13 +100,10 @@ TEST_F(CountKeyTest, CountHintKey) {
|
|||
|
||||
// Test that the readConcern parameter is included in the key.
|
||||
TEST_F(CountKeyTest, CountReadConcernKey) {
|
||||
const auto key = std::make_unique<CountKey>(expCtx,
|
||||
*parsedRequest,
|
||||
false /* hasLimit */,
|
||||
false /* hasSkip */,
|
||||
repl::ReadConcernArgs::kLocal,
|
||||
false /* maxTimeMS */,
|
||||
collectionType);
|
||||
CountCommandRequest request = CountCommandRequest(testNss);
|
||||
request.setReadConcern(repl::ReadConcernArgs::kLocal);
|
||||
const auto key = std::make_unique<CountKey>(
|
||||
expCtx, request, makeCountShapeFromRequest(request), collectionType);
|
||||
|
||||
const auto expectedKey = fromjson(
|
||||
R"({
|
||||
|
|
@ -127,13 +120,10 @@ TEST_F(CountKeyTest, CountReadConcernKey) {
|
|||
|
||||
// Test that the maxTimeMS parameter is included in the key.
|
||||
TEST_F(CountKeyTest, CountMaxTimeMSKey) {
|
||||
const auto key = std::make_unique<CountKey>(expCtx,
|
||||
*parsedRequest,
|
||||
false /* hasLimit */,
|
||||
false /* hasSkip */,
|
||||
boost::none /* readConcern */,
|
||||
true /* maxTimeMS */,
|
||||
collectionType);
|
||||
CountCommandRequest request = CountCommandRequest(testNss);
|
||||
request.setMaxTimeMS(1000);
|
||||
const auto key = std::make_unique<CountKey>(
|
||||
expCtx, request, makeCountShapeFromRequest(request), collectionType);
|
||||
|
||||
const auto expectedKey = fromjson(
|
||||
R"({
|
||||
|
|
@ -150,15 +140,11 @@ TEST_F(CountKeyTest, CountMaxTimeMSKey) {
|
|||
|
||||
// Test that the comment parameter is included in the key.
|
||||
TEST_F(CountKeyTest, CountCommentKey) {
|
||||
CountCommandRequest request = CountCommandRequest(testNss);
|
||||
const auto comment = BSON("comment" << "hello");
|
||||
expCtx->getOperationContext()->setComment(comment);
|
||||
const auto key = std::make_unique<CountKey>(expCtx,
|
||||
*parsedRequest,
|
||||
false /* hasLimit */,
|
||||
false /* hasSkip */,
|
||||
boost::none /* readConcern */,
|
||||
false /* maxTimeMS */,
|
||||
collectionType);
|
||||
const auto key = std::make_unique<CountKey>(
|
||||
expCtx, request, makeCountShapeFromRequest(request), collectionType);
|
||||
|
||||
const auto expectedKey = fromjson(
|
||||
R"({
|
||||
|
|
|
|||
|
|
@ -150,5 +150,35 @@ inline ExpressEligibility isExpressEligible(OperationContext* opCtx,
|
|||
return ExpressEligibility::Ineligible;
|
||||
}
|
||||
|
||||
inline bool isInternalOrDirectClient(Client* client) {
|
||||
return client->isInternalClient() || client->isInDirectClient();
|
||||
}
|
||||
|
||||
/**
|
||||
* Verifies that users did not specify the internal fields 'originalQueryShapeHash' and
|
||||
* 'querySettings' directly.
|
||||
*/
|
||||
template <typename T>
|
||||
concept hasOriginalQueryShapeHash = requires(const T& t) { t.getOriginalQueryShapeHash(); };
|
||||
template <typename T>
|
||||
concept hasQuerySettings = requires(const T& t) { t.getQuerySettings(); };
|
||||
|
||||
template <typename T>
|
||||
requires hasOriginalQueryShapeHash<T>
|
||||
void assertInternalParamsAreSetByInternalClients(Client* client, T& req) {
|
||||
const bool isInternalOrDirect = isInternalOrDirectClient(client);
|
||||
|
||||
// Only check 'querySettings' if the command accepts it.
|
||||
if constexpr (hasQuerySettings<T>) {
|
||||
uassert(7923000,
|
||||
"BSON field 'querySettings' is an unknown field",
|
||||
isInternalOrDirect || !req.getQuerySettings().has_value());
|
||||
}
|
||||
|
||||
uassert(10742702,
|
||||
"BSON field 'originalQueryShapeHash' is an unknown field",
|
||||
isInternalOrDirect || !req.getOriginalQueryShapeHash().has_value());
|
||||
}
|
||||
|
||||
bool isSortSbeCompatible(const SortPattern& sortPattern);
|
||||
} // namespace mongo
|
||||
|
|
|
|||
|
|
@ -167,6 +167,7 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesUnsetFields) {
|
|||
ASSERT_FALSE(result.getIsHybridSearch().has_value());
|
||||
ASSERT_FALSE(result.getReadConcern().has_value());
|
||||
ASSERT_FALSE(result.getUnwrappedReadPref().has_value());
|
||||
ASSERT_FALSE(result.getOriginalQueryShapeHash().has_value());
|
||||
}
|
||||
|
||||
TEST(ResolvedViewTest, ExpandingAggRequestPreservesMostFields) {
|
||||
|
|
@ -202,6 +203,10 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesMostFields) {
|
|||
aggRequest.setResumeAfter(BSON("rid" << 12345));
|
||||
aggRequest.setStartAt(BSON("rid" << 67890));
|
||||
aggRequest.setIncludeQueryStatsMetrics(true);
|
||||
const BSONObj query = BSON("hello" << 1);
|
||||
const HashBlock<SHA256BlockTraits> queryShapeHash =
|
||||
SHA256Block::computeHash((const uint8_t*)query.objdata(), query.objsize());
|
||||
aggRequest.setOriginalQueryShapeHash(queryShapeHash);
|
||||
aggRequest.setIsHybridSearch(true);
|
||||
aggRequest.setMaxTimeMS(100u);
|
||||
aggRequest.setReadConcern(repl::ReadConcernArgs::kLinearizable);
|
||||
|
|
@ -242,6 +247,7 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesMostFields) {
|
|||
ASSERT_BSONOBJ_EQ(result.getResumeAfter().value(), BSON("rid" << 12345));
|
||||
ASSERT_BSONOBJ_EQ(result.getStartAt().value(), BSON("rid" << 67890));
|
||||
ASSERT_TRUE(result.getIncludeQueryStatsMetrics());
|
||||
ASSERT_EQ(result.getOriginalQueryShapeHash(), queryShapeHash);
|
||||
ASSERT_TRUE(result.getIsHybridSearch().value_or(false));
|
||||
ASSERT_EQ(result.getMaxTimeMS().value(), 100u);
|
||||
ASSERT_BSONOBJ_EQ(result.getReadConcern()->toBSONInner(), BSON("level" << "linearizable"));
|
||||
|
|
|
|||
|
|
@ -34,10 +34,14 @@
|
|||
#include "mongo/db/auth/validated_tenancy_scope.h"
|
||||
#include "mongo/db/commands.h"
|
||||
#include "mongo/db/fle_crud.h"
|
||||
#include "mongo/db/operation_context.h"
|
||||
#include "mongo/db/pipeline/expression_context_builder.h"
|
||||
#include "mongo/db/pipeline/expression_context_diagnostic_printer.h"
|
||||
#include "mongo/db/pipeline/query_request_conversion.h"
|
||||
#include "mongo/db/query/count_command_gen.h"
|
||||
#include "mongo/db/query/query_shape/count_cmd_shape.h"
|
||||
#include "mongo/db/query/query_shape/query_shape_hash.h"
|
||||
#include "mongo/db/query/query_shape/shape_helpers.h"
|
||||
#include "mongo/db/query/query_stats/count_key.h"
|
||||
#include "mongo/db/query/query_stats/query_stats.h"
|
||||
#include "mongo/db/query/shard_key_diagnostic_printer.h"
|
||||
|
|
@ -66,13 +70,29 @@
|
|||
|
||||
namespace mongo {
|
||||
|
||||
inline BSONObj prepareCountForPassthrough(const BSONObj& cmdObj, bool requestQueryStats) {
|
||||
if (!requestQueryStats) {
|
||||
return CommandHelpers::filterCommandRequestForPassthrough(cmdObj);
|
||||
inline BSONObj prepareCountForPassthrough(const OperationContext* opCtx,
|
||||
const BSONObj& cmdObj,
|
||||
bool requestQueryStats) {
|
||||
BSONObjBuilder bob(cmdObj);
|
||||
|
||||
// Pass the queryShapeHash to the shards. We must validate that all participating shards can
|
||||
// understand 'originalQueryShapeHash' and therefore check the feature flag. We use the last
|
||||
// LTS when the FCV is uninitialized, since count commands can run during initial sync. This is
|
||||
// because the feature is exclusively for observability enhancements and should only be applied
|
||||
// when we are confident that the shard can correctly read this field, ensuring the query will
|
||||
// not error.
|
||||
if (feature_flags::gFeatureFlagOriginalQueryShapeHash.isEnabledUseLastLTSFCVWhenUninitialized(
|
||||
VersionContext::getDecoration(opCtx),
|
||||
serverGlobalParams.featureCompatibility.acquireFCVSnapshot())) {
|
||||
if (auto&& queryShapeHash = CurOp::get(opCtx)->debug().getQueryShapeHash()) {
|
||||
bob.append(CountCommandRequest::kOriginalQueryShapeHashFieldName,
|
||||
queryShapeHash->toHexString());
|
||||
}
|
||||
}
|
||||
if (requestQueryStats) {
|
||||
bob.append(CountCommandRequest::kIncludeQueryStatsMetricsFieldName, true);
|
||||
}
|
||||
|
||||
BSONObjBuilder bob(cmdObj);
|
||||
bob.append("includeQueryStatsMetrics", true);
|
||||
return CommandHelpers::filterCommandRequestForPassthrough(bob.done());
|
||||
}
|
||||
|
||||
|
|
@ -108,6 +128,34 @@ inline bool convertAndRunAggregateIfViewlessTimeseries(
|
|||
}
|
||||
}
|
||||
|
||||
inline void createShapeAndRegisterQueryStats(const boost::intrusive_ptr<ExpressionContext>& expCtx,
|
||||
const CountCommandRequest& countRequest,
|
||||
const NamespaceString& nss) {
|
||||
const std::unique_ptr<ParsedFindCommand> parsedFind = uassertStatusOK(
|
||||
parsed_find_command::parseFromCount(expCtx, countRequest, ExtensionsCallbackNoop(), nss));
|
||||
|
||||
// Compute QueryShapeHash and record it in CurOp.
|
||||
OperationContext* opCtx = expCtx->getOperationContext();
|
||||
const query_shape::DeferredQueryShape deferredShape{[&]() {
|
||||
return shape_helpers::tryMakeShape<query_shape::CountCmdShape>(
|
||||
*parsedFind, countRequest.getLimit().has_value(), countRequest.getSkip().has_value());
|
||||
}};
|
||||
boost::optional<query_shape::QueryShapeHash> queryShapeHash =
|
||||
CurOp::get(opCtx)->debug().ensureQueryShapeHash(opCtx, [&]() {
|
||||
return shape_helpers::computeQueryShapeHash(expCtx, deferredShape, nss);
|
||||
});
|
||||
|
||||
if (feature_flags::gFeatureFlagQueryStatsCountDistinct.isEnabled(
|
||||
VersionContext::getDecoration(opCtx),
|
||||
serverGlobalParams.featureCompatibility.acquireFCVSnapshot())) {
|
||||
query_stats::registerRequest(opCtx, nss, [&]() {
|
||||
uassertStatusOKWithContext(deferredShape->getStatus(), "Failed to compute query shape");
|
||||
return std::make_unique<query_stats::CountKey>(
|
||||
expCtx, countRequest, std::move(deferredShape->getValue()));
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Implements the find command on mongos.
|
||||
*/
|
||||
|
|
@ -206,6 +254,11 @@ public:
|
|||
auto countRequest =
|
||||
CountCommandRequest::parse(cmdObj, IDLParserContext("count"));
|
||||
|
||||
// Forbid users from passing 'originalQueryShapeHash' explicitly.
|
||||
uassert(10742704,
|
||||
"BSON field 'originalQueryShapeHash' is an unknown field",
|
||||
!countRequest.getOriginalQueryShapeHash().has_value());
|
||||
|
||||
// Create an RAII object that prints the collection's shard key in the case of a
|
||||
// tassert or crash.
|
||||
const auto& cri = routingCtx.getCollectionRoutingInfo(nss);
|
||||
|
|
@ -235,22 +288,7 @@ public:
|
|||
ScopedDebugInfo expCtxDiagnostics(
|
||||
"ExpCtxDiagnostics", diagnostic_printers::ExpressionContextPrinter{expCtx});
|
||||
|
||||
const auto parsedFind = uassertStatusOK(parsed_find_command::parseFromCount(
|
||||
expCtx, countRequest, ExtensionsCallbackNoop(), nss));
|
||||
|
||||
if (feature_flags::gFeatureFlagQueryStatsCountDistinct.isEnabled(
|
||||
VersionContext::getDecoration(opCtx),
|
||||
serverGlobalParams.featureCompatibility.acquireFCVSnapshot())) {
|
||||
query_stats::registerRequest(opCtx, nss, [&]() {
|
||||
return std::make_unique<query_stats::CountKey>(
|
||||
expCtx,
|
||||
*parsedFind,
|
||||
countRequest.getLimit().has_value(),
|
||||
countRequest.getSkip().has_value(),
|
||||
countRequest.getReadConcern(),
|
||||
countRequest.getMaxTimeMS().has_value());
|
||||
});
|
||||
}
|
||||
createShapeAndRegisterQueryStats(expCtx, countRequest, nss);
|
||||
|
||||
// Note: This must happen after query stats because query stats retain the
|
||||
// originating command type for timeseries.
|
||||
|
|
@ -300,7 +338,8 @@ public:
|
|||
nss,
|
||||
applyReadWriteConcern(opCtx,
|
||||
this,
|
||||
prepareCountForPassthrough(countRequest.toBSON(),
|
||||
prepareCountForPassthrough(opCtx,
|
||||
countRequest.toBSON(),
|
||||
requestQueryStats)),
|
||||
ReadPreferenceSetting::get(opCtx),
|
||||
Shard::RetryPolicy::kIdempotent,
|
||||
|
|
|
|||
|
|
@ -65,6 +65,7 @@
|
|||
#include "mongo/db/query/query_settings/query_settings_service.h"
|
||||
#include "mongo/db/query/query_shape/distinct_cmd_shape.h"
|
||||
#include "mongo/db/query/query_shape/query_shape.h"
|
||||
#include "mongo/db/query/query_shape/query_shape_hash.h"
|
||||
#include "mongo/db/query/query_stats/distinct_key.h"
|
||||
#include "mongo/db/query/query_stats/query_stats.h"
|
||||
#include "mongo/db/query/shard_key_diagnostic_printer.h"
|
||||
|
|
@ -137,6 +138,11 @@ std::unique_ptr<CanonicalQuery> parseDistinctCmd(
|
|||
"BSON field 'querySettings' is an unknown field",
|
||||
!distinctCommand->getQuerySettings().has_value());
|
||||
|
||||
// Forbid users from passing 'originalQueryShapeHash' explicitly.
|
||||
uassert(10742700,
|
||||
"BSON field 'originalQueryShapeHash' is an unknown field",
|
||||
!distinctCommand->getOriginalQueryShapeHash().has_value());
|
||||
|
||||
auto expCtx = ExpressionContextBuilder{}
|
||||
.fromRequest(opCtx, *distinctCommand, defaultCollator)
|
||||
.ns(nss)
|
||||
|
|
@ -179,18 +185,37 @@ std::unique_ptr<CanonicalQuery> parseDistinctCmd(
|
|||
std::move(parsedDistinct));
|
||||
}
|
||||
|
||||
BSONObj prepareDistinctForPassthrough(const BSONObj& cmd,
|
||||
BSONObj prepareDistinctForPassthrough(
|
||||
const OperationContext* opCtx,
|
||||
const BSONObj& cmd,
|
||||
const query_settings::QuerySettings& qs,
|
||||
const bool requestQueryStats) {
|
||||
const bool requestQueryStats,
|
||||
const boost::optional<query_shape::QueryShapeHash>& queryShapeHash) {
|
||||
const auto qsBson = qs.toBSON();
|
||||
if (requestQueryStats || !qsBson.isEmpty()) {
|
||||
if (requestQueryStats || !qsBson.isEmpty() || queryShapeHash) {
|
||||
BSONObjBuilder bob(cmd);
|
||||
// Append distinct command with the query settings and includeQueryStatsMetrics if needed.
|
||||
if (requestQueryStats) {
|
||||
bob.append("includeQueryStatsMetrics", true);
|
||||
bob.append(DistinctCommandRequest::kIncludeQueryStatsMetricsFieldName, true);
|
||||
}
|
||||
if (!qsBson.isEmpty()) {
|
||||
bob.append("querySettings", qsBson);
|
||||
bob.append(DistinctCommandRequest::kQuerySettingsFieldName, qsBson);
|
||||
}
|
||||
|
||||
// Pass the queryShapeHash to the shards. We must validate that all participating shards can
|
||||
// understand 'originalQueryShapeHash' and therefore check the feature flag. We use the last
|
||||
// LTS when the FCV is uninitialized, even though distinct commands cannot execute during
|
||||
// initial sync. This is because the feature is exclusively for observability enhancements
|
||||
// and should only be applied when we are confident that the shard can correctly read this
|
||||
// field, ensuring the query will not error.
|
||||
if (feature_flags::gFeatureFlagOriginalQueryShapeHash
|
||||
.isEnabledUseLastLTSFCVWhenUninitialized(
|
||||
VersionContext::getDecoration(opCtx),
|
||||
serverGlobalParams.featureCompatibility.acquireFCVSnapshot())) {
|
||||
if (queryShapeHash) {
|
||||
bob.append(DistinctCommandRequest::kOriginalQueryShapeHashFieldName,
|
||||
queryShapeHash->toHexString());
|
||||
}
|
||||
}
|
||||
return CommandHelpers::filterCommandRequestForPassthrough(bob.done());
|
||||
}
|
||||
|
|
@ -427,9 +452,15 @@ public:
|
|||
// We will decide if remote query stats metrics should be collected.
|
||||
bool requestQueryStats =
|
||||
query_stats::shouldRequestRemoteMetrics(CurOp::get(opCtx)->debug());
|
||||
boost::optional<query_shape::QueryShapeHash> queryShapeHash =
|
||||
CurOp::get(opCtx)->debug().getQueryShapeHash();
|
||||
|
||||
BSONObj distinctReadyForPassthrough = prepareDistinctForPassthrough(
|
||||
cmdObj, canonicalQuery->getExpCtx()->getQuerySettings(), requestQueryStats);
|
||||
opCtx,
|
||||
cmdObj,
|
||||
canonicalQuery->getExpCtx()->getQuerySettings(),
|
||||
requestQueryStats,
|
||||
queryShapeHash);
|
||||
|
||||
|
||||
const auto& cri = routingCtx.getCollectionRoutingInfo(nss);
|
||||
|
|
|
|||
|
|
@ -89,6 +89,10 @@ inline std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(
|
|||
"BSON field 'querySettings' is an unknown field",
|
||||
!findCommand->getQuerySettings().has_value());
|
||||
|
||||
uassert(10742703,
|
||||
"BSON field 'originalQueryShapeHash' is an unknown field",
|
||||
!findCommand->getOriginalQueryShapeHash().has_value());
|
||||
|
||||
uassert(ErrorCodes::InvalidNamespace,
|
||||
"Cannot specify UUID to a mongos.",
|
||||
!findCommand->getNamespaceOrUUID().isUUID());
|
||||
|
|
|
|||
|
|
@ -165,6 +165,22 @@ Document serializeForPassthrough(const boost::intrusive_ptr<ExpressionContext>&
|
|||
req.setRawData(rawData);
|
||||
aggregation_request_helper::addQuerySettingsToRequest(req, expCtx);
|
||||
|
||||
// Pass the queryShapeHash to the shards. We must validate that all participating shards can
|
||||
// understand 'originalQueryShapeHash' and therefore check the feature flag. We use the last
|
||||
// LTS when the FCV is uninitialized, even though aggregates cannot execute during initial sync.
|
||||
// This is because the feature is exclusively for observability enhancements and should only be
|
||||
// applied when we are confident that the shard can correctly read this field, ensuring the
|
||||
// query will not error.
|
||||
if (!req.getExplain().has_value() &&
|
||||
feature_flags::gFeatureFlagOriginalQueryShapeHash.isEnabledUseLastLTSFCVWhenUninitialized(
|
||||
VersionContext::getDecoration(expCtx->getOperationContext()),
|
||||
serverGlobalParams.featureCompatibility.acquireFCVSnapshot())) {
|
||||
if (auto&& queryShapeHash =
|
||||
CurOp::get(expCtx->getOperationContext())->debug().getQueryShapeHash()) {
|
||||
req.setOriginalQueryShapeHash(queryShapeHash);
|
||||
}
|
||||
}
|
||||
|
||||
auto cmdObj =
|
||||
isRawDataOperation(expCtx->getOperationContext()) && req.getNamespace() != executionNs
|
||||
? rewriteCommandForRawDataOperation<AggregateCommandRequest>(req.toBSON(),
|
||||
|
|
@ -321,7 +337,7 @@ void performValidationChecks(const OperationContext* opCtx,
|
|||
const AggregateCommandRequest& request,
|
||||
const LiteParsedPipeline& liteParsedPipeline) {
|
||||
liteParsedPipeline.validate(opCtx);
|
||||
aggregation_request_helper::validateRequestForAPIVersion(opCtx, request);
|
||||
aggregation_request_helper::validateRequestWithClient(opCtx, request);
|
||||
aggregation_request_helper::validateRequestFromClusterQueryWithoutShardKey(request);
|
||||
|
||||
uassert(51028, "Cannot specify exchange option to a router", !request.getExchange());
|
||||
|
|
@ -871,6 +887,8 @@ Status runAggregateImpl(OperationContext* opCtx,
|
|||
auto status = [&](auto& expCtx) {
|
||||
bool requestQueryStatsFromRemotes = query_stats::shouldRequestRemoteMetrics(
|
||||
CurOp::get(expCtx->getOperationContext())->debug());
|
||||
boost::optional<query_shape::QueryShapeHash> queryShapeHash =
|
||||
CurOp::get(opCtx)->debug().getQueryShapeHash();
|
||||
try {
|
||||
switch (targeter.policy) {
|
||||
case cluster_aggregation_planner::AggregationTargeter::TargetingPolicy::
|
||||
|
|
|
|||
|
|
@ -75,6 +75,7 @@
|
|||
#include "mongo/db/query/query_settings/query_settings_service.h"
|
||||
#include "mongo/db/query/query_shape/find_cmd_shape.h"
|
||||
#include "mongo/db/query/query_shape/query_shape.h"
|
||||
#include "mongo/db/query/query_shape/query_shape_hash.h"
|
||||
#include "mongo/db/query/query_stats/find_key.h"
|
||||
#include "mongo/db/query/query_stats/query_stats.h"
|
||||
#include "mongo/db/query/shard_key_diagnostic_printer.h"
|
||||
|
|
@ -200,6 +201,20 @@ BSONObj makeFindCommandForShards(OperationContext* opCtx,
|
|||
findCommand.setQuerySettings(query.getExpCtx()->getQuerySettings());
|
||||
}
|
||||
|
||||
// Pass the queryShapeHash to the shards. We must validate that all participating shards can
|
||||
// understand 'originalQueryShapeHash' and therefore check the feature flag. We use the last LTS
|
||||
// when the FCV is uninitialized, even though find commands cannot execute during initial sync.
|
||||
// This is because the feature is exclusively for observability enhancements and should only be
|
||||
// applied when we are confident that the shard can correctly read this field, ensuring the
|
||||
// query will not error.
|
||||
if (feature_flags::gFeatureFlagOriginalQueryShapeHash.isEnabledUseLastLTSFCVWhenUninitialized(
|
||||
VersionContext::getDecoration(opCtx),
|
||||
serverGlobalParams.featureCompatibility.acquireFCVSnapshot())) {
|
||||
if (auto&& queryShapeHash = CurOp::get(opCtx)->debug().getQueryShapeHash()) {
|
||||
findCommand.setOriginalQueryShapeHash(queryShapeHash);
|
||||
}
|
||||
}
|
||||
|
||||
// Request metrics if necessary.
|
||||
{
|
||||
// We'll set includeQueryStatsMetrics if our configuration (e.g., feature flag, sample
|
||||
|
|
|
|||
Loading…
Reference in New Issue