mirror of https://github.com/mongodb/mongo
SERVER-115161 Move OpDebug fields behind accessors (#45035)
GitOrigin-RevId: 7e19c3aa63433eebd1fa1153ae64f713947867db
This commit is contained in: parent 3fd38608b0 · commit 452eada852
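The change is mechanical throughout: the public `OpDebug::additiveMetrics` and `OpDebug::queryStatsInfo` fields become private `_additiveMetrics` / `_queryStatsInfo` members exposed through reference-returning `getAdditiveMetrics()` / `getQueryStatsInfo()` accessors, and every call site is rewritten accordingly. A minimal, self-contained sketch of the pattern (simplified, hypothetical types — not the real OpDebug declaration in curop.h):

```cpp
// Sketch of the refactor pattern: a public field becomes a private member
// exposed through a pair of reference-returning accessors, so call sites
// change from `debug.additiveMetrics.x` to `debug.getAdditiveMetrics().x`
// with no behavioral difference.
#include <cstdint>
#include <iostream>
#include <optional>

struct AdditiveMetrics {
    std::optional<int64_t> ninserted;
    void incrementNinserted(int64_t n) {
        ninserted = ninserted.value_or(0) + n;
    }
};

class OpDebug {
public:
    // Non-const accessor: callers may still mutate the metrics in place.
    AdditiveMetrics& getAdditiveMetrics() {
        return _additiveMetrics;
    }
    // Const overload for read-only contexts (reporting, logging).
    const AdditiveMetrics& getAdditiveMetrics() const {
        return _additiveMetrics;
    }

private:
    AdditiveMetrics _additiveMetrics;  // was: public `additiveMetrics`
};

int main() {
    OpDebug debug;
    debug.getAdditiveMetrics().incrementNinserted(3);  // new-style call site
    std::cout << *debug.getAdditiveMetrics().ninserted << '\n';  // prints 3
}
```

Because the accessors return lvalue references, mutation, assignment, and `std::move` through them behave exactly as direct field access did, which is why the diff below is a pure rename at each call site.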
@@ -196,8 +196,8 @@ void cappedDeleteUntilBelowConfiguredMaximum(OperationContext* opCtx,
         opCtx, *shard_role_details::getRecoveryUnit(opCtx), toDelete);

     if (opDebug) {
-        opDebug->additiveMetrics.incrementKeysDeleted(keysDeleted);
-        opDebug->additiveMetrics.incrementNdeleted(1);
+        opDebug->getAdditiveMetrics().incrementKeysDeleted(keysDeleted);
+        opDebug->getAdditiveMetrics().incrementNdeleted(1);
     }
     serviceOpCounters(opCtx).gotDelete();
 }

@@ -367,12 +367,12 @@ Status insertDocumentsImpl(OperationContext* opCtx,
     }

     if (opDebug) {
-        opDebug->additiveMetrics.incrementKeysInserted(keysInserted);
+        opDebug->getAdditiveMetrics().incrementKeysInserted(keysInserted);
         // 'opDebug' may be deleted at rollback time in case of multi-document transaction.
         if (!opCtx->inMultiDocumentTransaction()) {
             shard_role_details::getRecoveryUnit(opCtx)->onRollback(
                 [opDebug, keysInserted](OperationContext*) {
-                    opDebug->additiveMetrics.incrementKeysInserted(-keysInserted);
+                    opDebug->getAdditiveMetrics().incrementKeysInserted(-keysInserted);
                 });
         }
     }

@@ -730,14 +730,14 @@ void updateDocument(OperationContext* opCtx,
     }

     if (opDebug) {
-        opDebug->additiveMetrics.incrementKeysInserted(keysInserted);
-        opDebug->additiveMetrics.incrementKeysDeleted(keysDeleted);
+        opDebug->getAdditiveMetrics().incrementKeysInserted(keysInserted);
+        opDebug->getAdditiveMetrics().incrementKeysDeleted(keysDeleted);
         // 'opDebug' may be deleted at rollback time in case of multi-document transaction.
         if (!opCtx->inMultiDocumentTransaction()) {
             shard_role_details::getRecoveryUnit(opCtx)->onRollback(
                 [opDebug, keysInserted, keysDeleted](OperationContext*) {
-                    opDebug->additiveMetrics.incrementKeysInserted(-keysInserted);
-                    opDebug->additiveMetrics.incrementKeysDeleted(-keysDeleted);
+                    opDebug->getAdditiveMetrics().incrementKeysInserted(-keysInserted);
+                    opDebug->getAdditiveMetrics().incrementKeysDeleted(-keysDeleted);
                 });
         }
     }

@@ -825,14 +825,14 @@ StatusWith<BSONObj> updateDocumentWithDamages(OperationContext* opCtx,
     }

     if (opDebug) {
-        opDebug->additiveMetrics.incrementKeysInserted(keysInserted);
-        opDebug->additiveMetrics.incrementKeysDeleted(keysDeleted);
+        opDebug->getAdditiveMetrics().incrementKeysInserted(keysInserted);
+        opDebug->getAdditiveMetrics().incrementKeysDeleted(keysDeleted);
         // 'opDebug' may be deleted at rollback time in case of multi-document transaction.
         if (!opCtx->inMultiDocumentTransaction()) {
             shard_role_details::getRecoveryUnit(opCtx)->onRollback(
                 [opDebug, keysInserted, keysDeleted](OperationContext*) {
-                    opDebug->additiveMetrics.incrementKeysInserted(-keysInserted);
-                    opDebug->additiveMetrics.incrementKeysDeleted(-keysDeleted);
+                    opDebug->getAdditiveMetrics().incrementKeysInserted(-keysInserted);
+                    opDebug->getAdditiveMetrics().incrementKeysDeleted(-keysDeleted);
                 });
         }
     }

@@ -928,12 +928,12 @@ void deleteDocument(OperationContext* opCtx,
         opCtx, collection, stmtId, doc.value(), documentKey, deleteArgs);

     if (opDebug) {
-        opDebug->additiveMetrics.incrementKeysDeleted(keysDeleted);
+        opDebug->getAdditiveMetrics().incrementKeysDeleted(keysDeleted);
         // 'opDebug' may be deleted at rollback time in case of multi-document transaction.
         if (!opCtx->inMultiDocumentTransaction()) {
             shard_role_details::getRecoveryUnit(opCtx)->onRollback(
                 [opDebug, keysDeleted](OperationContext*) {
-                    opDebug->additiveMetrics.incrementKeysDeleted(-keysDeleted);
+                    opDebug->getAdditiveMetrics().incrementKeysDeleted(-keysDeleted);
                 });
         }
     }
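The write-path hunks above all share one idea worth calling out: key counters are incremented optimistically at write time, and an `onRollback` hook subtracts the same amount so an aborted storage transaction leaves the counters net-zero. A hedged sketch of that compensation pattern (simplified stand-in types, not MongoDB's actual RecoveryUnit API):

```cpp
// Sketch of the compensating-rollback pattern from the hunks above:
// increment now, register a matching decrement that fires only if the
// storage transaction rolls back.
#include <cstdint>
#include <functional>
#include <iostream>
#include <vector>

struct Metrics {
    int64_t keysInserted = 0;
};

class FakeRecoveryUnit {  // hypothetical stand-in for the real RecoveryUnit
public:
    void onRollback(std::function<void()> fn) {
        _rollbackHandlers.push_back(std::move(fn));
    }
    void abort() {  // simulate a rollback: run every registered handler
        for (auto& fn : _rollbackHandlers) fn();
        _rollbackHandlers.clear();
    }

private:
    std::vector<std::function<void()>> _rollbackHandlers;
};

int main() {
    Metrics metrics;
    FakeRecoveryUnit ru;

    int64_t keysInserted = 4;
    metrics.keysInserted += keysInserted;  // optimistic increment
    // Compensating decrement, mirroring the -keysInserted lambdas in
    // insertDocumentsImpl / updateDocument / deleteDocument.
    ru.onRollback([&metrics, keysInserted] { metrics.keysInserted -= keysInserted; });

    ru.abort();  // rollback fires; net effect on the counter is zero
    std::cout << metrics.keysInserted << '\n';  // prints 0
}
```

Note the guard in the real code: inside a multi-document transaction the handler is not registered at all, because `opDebug` may already be destroyed by the time the transaction rolls back.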
@@ -448,7 +448,7 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp, LogicalOp logicalOp) {
     try {
         curOp->done();
         auto executionTimeMicros = curOp->elapsedTimeExcludingPauses();
-        curOp->debug().additiveMetrics.executionTime = executionTimeMicros;
+        curOp->debug().getAdditiveMetrics().executionTime = executionTimeMicros;

         recordCurOpMetrics(opCtx);
         Top::getDecoration(opCtx).record(opCtx,

@@ -505,7 +505,7 @@ void setCurOpInfoAndEnsureStarted(OperationContext* opCtx,
     curOp->ensureStarted();

     if (logicalOp == LogicalOp::opInsert) {
-        curOp->debug().additiveMetrics.ninserted = 0;
+        curOp->debug().getAdditiveMetrics().ninserted = 0;
     }
 }

@@ -436,10 +436,11 @@ public:
         // Store profiling data if profiling is enabled.
         collectProfilingDataIfNeeded(curOp, *exec);

-        collectQueryStatsMongod(opCtx, expCtx, std::move(curOp->debug().queryStatsInfo.key));
+        collectQueryStatsMongod(
+            opCtx, expCtx, std::move(curOp->debug().getQueryStatsInfo().key));

         CountCommandReply reply = buildCountReply(countResult);
-        if (curOp->debug().queryStatsInfo.metricsRequested) {
+        if (curOp->debug().getQueryStatsInfo().metricsRequested) {
             reply.setMetrics(curOp->debug().getCursorMetrics().toBSON());
         }
         return reply;

@@ -523,7 +524,7 @@ public:
         });

         if (req.getIncludeQueryStatsMetrics()) {
-            curOp->debug().queryStatsInfo.metricsRequested = true;
+            curOp->debug().getQueryStatsInfo().metricsRequested = true;
         }
     }
 }

@@ -591,7 +592,7 @@ public:
                    ExplainOptions::Verbosity verbosity,
                    rpc::ReplyBuilderInterface* replyBuilder) {
         auto curOp = CurOp::get(opCtx);
-        curOp->debug().queryStatsInfo.disableForSubqueryExecution = true;
+        curOp->debug().getQueryStatsInfo().disableForSubqueryExecution = true;
         const auto vts = auth::ValidatedTenancyScope::get(opCtx);
         auto viewAggRequest =
             query_request_conversion::asAggregateCommandRequest(req, true /* hasExplain */);

@@ -187,7 +187,7 @@ std::unique_ptr<CanonicalQuery> parseDistinctCmd(
     });

     if (parsedDistinct->distinctCommandRequest->getIncludeQueryStatsMetrics()) {
-        CurOp::get(opCtx)->debug().queryStatsInfo.metricsRequested = true;
+        CurOp::get(opCtx)->debug().getQueryStatsInfo().metricsRequested = true;
     }
 }

@@ -722,10 +722,10 @@ public:

         auto* cq = executor->getCanonicalQuery();
         collectQueryStatsMongod(
-            opCtx, cq->getExpCtx(), std::move(curOp->debug().queryStatsInfo.key));
+            opCtx, cq->getExpCtx(), std::move(curOp->debug().getQueryStatsInfo().key));

         // Include queryStats metrics in the result to be sent to mongos.
-        const bool includeMetrics = CurOp::get(opCtx)->debug().queryStatsInfo.metricsRequested;
+        const bool includeMetrics = CurOp::get(opCtx)->debug().getQueryStatsInfo().metricsRequested;

         if (includeMetrics) {
             // It is safe to unconditionally add the metrics because we are assured that the user

@@ -772,8 +772,8 @@ public:

         // We must store the key in distinct to prevent collecting query stats when the aggregation
         // runs.
-        auto ownedQueryStatsKey = std::move(curOp->debug().queryStatsInfo.key);
-        curOp->debug().queryStatsInfo.disableForSubqueryExecution = true;
+        auto ownedQueryStatsKey = std::move(curOp->debug().getQueryStatsInfo().key);
+        curOp->debug().getQueryStatsInfo().disableForSubqueryExecution = true;

         // If running explain distinct as agg, then aggregate is executed without privilege checks
         // and without response formatting.

@@ -815,7 +815,7 @@ public:
         // that can be read completely locally, such as non-existent database collections or
         // unsplittable collections, will run through this distinct path on mongod and return
         // metrics back to mongos.
-        const bool includeMetrics = curOp->debug().queryStatsInfo.metricsRequested;
+        const bool includeMetrics = curOp->debug().getQueryStatsInfo().metricsRequested;
         boost::optional<BSONObj> metrics = includeMetrics
             ? boost::make_optional(curOp->debug().getCursorMetrics().toBSON())
             : boost::none;

@@ -236,7 +236,7 @@ std::unique_ptr<CanonicalQuery> parseQueryAndBeginOperation(
     });

     if (parsedRequest->findCommandRequest->getIncludeQueryStatsMetrics()) {
-        CurOp::get(opCtx)->debug().queryStatsInfo.metricsRequested = true;
+        CurOp::get(opCtx)->debug().getQueryStatsInfo().metricsRequested = true;
     }
 }

@@ -577,7 +577,7 @@ public:
             timeseries::requiresViewlessTimeseriesTranslation(opCtx, *collectionOrView)) {
             // Relinquish locks. The aggregation command will re-acquire them.
             collectionOrView.reset();
-            CurOp::get(opCtx)->debug().queryStatsInfo.disableForSubqueryExecution = true;
+            CurOp::get(opCtx)->debug().getQueryStatsInfo().disableForSubqueryExecution = true;
             return runFindAsAgg(opCtx, *cq, verbosity, replyBuilder);
         }

@@ -687,7 +687,7 @@ public:

         // Collect and increment metrics now that we have enough information. It's important
         // we do so before generating the response so that the response can include metrics.
-        curOp->debug().additiveMetrics.nBatches = 1;
+        curOp->debug().getAdditiveMetrics().nBatches = 1;
         curOp->setEndOfOpMetrics(numResults);
         collectQueryStatsMongod(opCtx, cursorPin);

@@ -802,7 +802,7 @@ public:
         }

         if (_cmd.getIncludeQueryStatsMetrics()) {
-            curOp->debug().queryStatsInfo.metricsRequested = true;
+            curOp->debug().getQueryStatsInfo().metricsRequested = true;
         }

         ClientCursorPin cursorPin = pinCursorWithRetry(opCtx, cursorId, nss);

@@ -279,7 +279,7 @@ void collectQueryStats(const AggExState& aggExState,
     if (maybePinnedCursor) {
         collectQueryStatsMongod(opCtx, *maybePinnedCursor);
     } else {
-        collectQueryStatsMongod(opCtx, expCtx, std::move(curOp->debug().queryStatsInfo.key));
+        collectQueryStatsMongod(opCtx, expCtx, std::move(curOp->debug().getQueryStatsInfo().key));
     }
 }

@@ -813,7 +813,7 @@ void executeExplain(const AggExState& aggExState,
         collectQueryStatsMongod(
             aggExState.getOpCtx(),
             expCtx,
-            std::move(CurOp::get(aggExState.getOpCtx())->debug().queryStatsInfo.key));
+            std::move(CurOp::get(aggExState.getOpCtx())->debug().getQueryStatsInfo().key));
     }

 /**

@@ -935,7 +935,7 @@ std::unique_ptr<Pipeline> parsePipelineAndRegisterQueryStats(
         aggExState.hasChangeStream());

     if (aggExState.getRequest().getIncludeQueryStatsMetrics()) {
-        CurOp::get(aggExState.getOpCtx())->debug().queryStatsInfo.metricsRequested = true;
+        CurOp::get(aggExState.getOpCtx())->debug().getQueryStatsInfo().metricsRequested = true;
     }
 }

@@ -462,7 +462,7 @@ void CurOp::_fetchStorageStatsIfNecessary(Date_t deadline, bool isFinal) {
 }

 void CurOp::setEndOfOpMetrics(long long nreturned) {
-    _debug.additiveMetrics.nreturned = nreturned;
+    _debug.getAdditiveMetrics().nreturned = nreturned;
     // A non-none queryStatsInfo.keyHash indicates the current query is being tracked locally for
     // queryStats, and a metricsRequested being true indicates the query is being tracked remotely
     // via the metrics included in cursor responses. In either case, we need to track the current

@@ -472,9 +472,9 @@ void CurOp::setEndOfOpMetrics(long long nreturned) {
    // for query stats collection we want it set before incrementing cursor metrics using OpDebug's
    // AdditiveMetrics. The value of executionTime set here will be overwritten later in
    // completeAndLogOperation.
-    const auto& info = _debug.queryStatsInfo;
+    const auto& info = _debug.getQueryStatsInfo();
     if (info.keyHash || info.metricsRequested) {
-        auto& metrics = _debug.additiveMetrics;
+        auto& metrics = _debug.getAdditiveMetrics();
         auto elapsed = elapsedTimeExcludingPauses();
         // We don't strictly need to record executionTime unless keyHash is non-none, but there's
         // no harm in recording it since we've already computed the value.

@@ -719,9 +719,9 @@ bool CurOp::shouldCurOpStackOmitDiagnosticInformation(CurOp* curop) {
 }

 void CurOp::_updateExecutionTimers() {
-    _debug.additiveMetrics.executionTime = elapsedTimeExcludingPauses();
+    _debug.getAdditiveMetrics().executionTime = elapsedTimeExcludingPauses();

-    auto workingMillis = duration_cast<Milliseconds>(*_debug.additiveMetrics.executionTime) -
+    auto workingMillis = duration_cast<Milliseconds>(*_debug.getAdditiveMetrics().executionTime) -
         (_sumBlockedTimeTotal() - _blockedTimeAtStart);
     // Round up to zero if necessary to allow precision errors from FastClockSource used by flow
     // control ticketholder.

@@ -834,7 +834,7 @@ bool CurOp::completeAndLogOperation(const logv2::LogOptions& logOptions,

     if (_debug.isReplOplogGetMore) {
         oplogGetMoreStats.recordMillis(
-            durationCount<Milliseconds>(*_debug.additiveMetrics.executionTime));
+            durationCount<Milliseconds>(*_debug.getAdditiveMetrics().executionTime));
     }
     const auto [shouldProfileAtLevel1, shouldLogSlowOp] = _shouldProfileAtLevel1AndLogSlowQuery(
         logOptions,

@@ -112,7 +112,7 @@ struct InShard : InBoth {
         InBoth::record(opCtx);
         auto* curOp = CurOp::get(opCtx);
         auto& debug = curOp->debug();
-        auto& am = debug.additiveMetrics;
+        auto& am = debug.getAdditiveMetrics();
         incrCounter(deleted, am.ndeleted);
         incrCounter(inserted, am.ninserted);
         incrCounter(returned, am.nreturned);

@@ -322,8 +322,8 @@ public:

         auto&& opDebug = CurOp::get(opCtx)->debug();
         opDebug.nShards = ccc->getNumRemotes();
-        opDebug.additiveMetrics.nBatches = 1;
-        opDebug.additiveMetrics.nreturned = firstBatch.size();
+        opDebug.getAdditiveMetrics().nBatches = 1;
+        opDebug.getAdditiveMetrics().nreturned = firstBatch.size();

         if (cursorState == ClusterCursorManager::CursorState::Exhausted) {
             opDebug.cursorExhausted = true;

@@ -852,8 +852,8 @@ CursorInitialReply createInitialCursorReplyMongod(OperationContext* opCtx,
     }

     auto&& opDebug = CurOp::get(opCtx)->debug();
-    opDebug.additiveMetrics.nBatches = 1;
-    opDebug.additiveMetrics.nreturned = firstBatch.size();
+    opDebug.getAdditiveMetrics().nBatches = 1;
+    opDebug.getAdditiveMetrics().nreturned = firstBatch.size();

     if (exec->isEOF()) {
         opDebug.cursorExhausted = true;

@@ -307,6 +307,7 @@ void OpDebug::report(OperationContext* opCtx,

     OPDEBUG_TOATTR_HELP_BOOL(exhaust);

+    const AdditiveMetrics& additiveMetrics = getAdditiveMetrics();
     OPDEBUG_TOATTR_HELP_OPTIONAL("keysExamined", additiveMetrics.keysExamined);
     OPDEBUG_TOATTR_HELP_OPTIONAL("docsExamined", additiveMetrics.docsExamined);

@@ -590,6 +591,7 @@ void OpDebug::append(OperationContext* opCtx,

     OPDEBUG_APPEND_BOOL(b, exhaust);

+    const AdditiveMetrics& additiveMetrics = getAdditiveMetrics();
     OPDEBUG_APPEND_OPTIONAL(b, "keysExamined", additiveMetrics.keysExamined);
     OPDEBUG_APPEND_OPTIONAL(b, "docsExamined", additiveMetrics.docsExamined);

@@ -919,22 +921,22 @@ std::function<BSONObj(ProfileFilter::Args)> OpDebug::appendStaged(OperationConte
     });

     addIfNeeded("keysExamined", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.keysExamined);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().keysExamined);
     });
     addIfNeeded("docsExamined", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.docsExamined);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().docsExamined);
     });
     addIfNeeded("hasSortStage", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_BOOL2(b, field, args.op.additiveMetrics.hasSortStage);
+        OPDEBUG_APPEND_BOOL2(b, field, args.op.getAdditiveMetrics().hasSortStage);
     });
     addIfNeeded("usedDisk", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_BOOL2(b, field, args.op.additiveMetrics.usedDisk);
+        OPDEBUG_APPEND_BOOL2(b, field, args.op.getAdditiveMetrics().usedDisk);
     });
     addIfNeeded("fromMultiPlanner", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_BOOL2(b, field, args.op.additiveMetrics.fromMultiPlanner);
+        OPDEBUG_APPEND_BOOL2(b, field, args.op.getAdditiveMetrics().fromMultiPlanner);
     });
     addIfNeeded("fromPlanCache", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_BOOL2(b, field, args.op.additiveMetrics.fromPlanCache.value_or(false));
+        OPDEBUG_APPEND_BOOL2(b, field, args.op.getAdditiveMetrics().fromPlanCache.value_or(false));
     });
     addIfNeeded("replanned", [](auto field, auto args, auto& b) {
         if (args.op.replanReason) {

@@ -947,32 +949,32 @@ std::function<BSONObj(ProfileFilter::Args)> OpDebug::appendStaged(OperationConte
         }
     });
     addIfNeeded("nMatched", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.nMatched);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().nMatched);
     });
     addIfNeeded("nBatches", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.nBatches);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().nBatches);
     });
     addIfNeeded("nModified", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.nModified);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().nModified);
     });
     addIfNeeded("ninserted", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.ninserted);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().ninserted);
     });
     addIfNeeded("ndeleted", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.ndeleted);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().ndeleted);
     });
     addIfNeeded("nUpserted", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.nUpserted);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().nUpserted);
     });
     addIfNeeded("cursorExhausted", [](auto field, auto args, auto& b) {
         OPDEBUG_APPEND_BOOL2(b, field, args.op.cursorExhausted);
     });

     addIfNeeded("keysInserted", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.keysInserted);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().keysInserted);
     });
     addIfNeeded("keysDeleted", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.keysDeleted);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().keysDeleted);
     });

     addIfNeeded("prepareReadConflicts", [](auto field, auto args, auto& b) {

@@ -1008,7 +1010,7 @@ std::function<BSONObj(ProfileFilter::Args)> OpDebug::appendStaged(OperationConte
         b.appendNumber(field, args.curop.numYields());
     });
     addIfNeeded("nreturned", [](auto field, auto args, auto& b) {
-        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.additiveMetrics.nreturned);
+        OPDEBUG_APPEND_OPTIONAL(b, field, args.op.getAdditiveMetrics().nreturned);
     });

     addIfNeeded("planCacheShapeHash", [](auto field, auto args, auto& b) {

@@ -1150,9 +1152,10 @@ std::function<BSONObj(ProfileFilter::Args)> OpDebug::appendStaged(OperationConte
     });

     addIfNeeded("workingMillis", [](auto field, auto args, auto& b) {
-        b.appendNumber(field,
+        b.appendNumber(
+            field,
             durationCount<Milliseconds>(
-                args.op.additiveMetrics.clusterWorkingTime.value_or(Milliseconds{0})));
+                args.op.getAdditiveMetrics().clusterWorkingTime.value_or(Milliseconds{0})));
     });

     addIfNeeded("planSummary", [](auto field, auto args, auto& b) {

@@ -1211,6 +1214,7 @@ void OpDebug::setPlanSummaryMetrics(PlanSummaryStats&& planSummaryStats) {
     // Data-bearing node metrics need to be aggregated here rather than just assigned.
     // Certain operations like $mergeCursors may have already accumulated metrics from remote
     // data-bearing nodes, and we need to add in the work done locally.
+    AdditiveMetrics& additiveMetrics = getAdditiveMetrics();
     additiveMetrics.keysExamined =
         additiveMetrics.keysExamined.value_or(0) + planSummaryStats.totalKeysExamined;
     additiveMetrics.docsExamined =

@@ -1315,6 +1319,7 @@ static void appendResolvedViewsInfoImpl(
 CursorMetrics OpDebug::getCursorMetrics() const {
     CursorMetrics metrics;

+    const AdditiveMetrics& additiveMetrics = getAdditiveMetrics();
     metrics.setKeysExamined(additiveMetrics.keysExamined.value_or(0));
     metrics.setDocsExamined(additiveMetrics.docsExamined.value_or(0));
     metrics.setBytesRead(additiveMetrics.bytesRead.value_or(0));
@@ -460,7 +460,15 @@ public:
         bool metricsRequested = false;
     };

-    MONGO_MOD_PRIVATE QueryStatsInfo queryStatsInfo;
+    // Return the QueryStatsInfo for the current operation.
+    MONGO_MOD_PRIVATE QueryStatsInfo& getQueryStatsInfo() {
+        return _queryStatsInfo;
+    }
+
+    // Return the QueryStatsInfo for the current operation (const friendly).
+    MONGO_MOD_PRIVATE const QueryStatsInfo& getQueryStatsInfo() const {
+        return _queryStatsInfo;
+    }

     // The query framework that this operation used. Will be unknown for non query operations.
     PlanExecutor::QueryFramework queryFramework{PlanExecutor::QueryFramework::kUnknown};

@@ -541,10 +549,17 @@ public:
     // Used to track the amount of time spent waiting for a response from remote operations.
     boost::optional<Microseconds> remoteOpWaitTime;

-    // Stores the current operation's count of these metrics. If they are needed to be accumulated
+    // Returns the current operation's count of these metrics. If they are needed to be accumulated
     // elsewhere, they should be extracted by another aggregator (like the ClientCursor) to ensure
     // these only ever reflect just this CurOp's consumption.
-    AdditiveMetrics additiveMetrics;
+    AdditiveMetrics& getAdditiveMetrics() {
+        return _additiveMetrics;
+    }
+
+    // Const version of the above method.
+    const AdditiveMetrics& getAdditiveMetrics() const {
+        return _additiveMetrics;
+    }

     // Stores storage statistics.
     std::unique_ptr<StorageStats> storageStats;

@@ -579,6 +594,14 @@ public:
     extension::host::OperationMetricsRegistry extensionMetrics;

 private:
+    // QueryStatsInfo for the current operation, accessible via accessor methods defined above.
+    QueryStatsInfo _queryStatsInfo;
+
+    // Stores the current operation's count of these metrics. If they are needed to be accumulated
+    // elsewhere, they should be extracted by another aggregator (like the ClientCursor) to ensure
+    // these only ever reflect just this CurOp's consumption.
+    AdditiveMetrics _additiveMetrics;
+
     // The hash of query_shape::QueryShapeHash.
     boost::optional<query_shape::QueryShapeHash> _queryShapeHash;
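One subtlety the header change relies on: because `getQueryStatsInfo()` returns a non-const lvalue reference, call sites such as `std::move(curOp->debug().getQueryStatsInfo().key)` keep working exactly as direct field access did — callers can still assign to, and move from, the underlying member. A hedged sketch with simplified types (the real key is not an `int`):

```cpp
// Demonstrates that moving through a reference-returning accessor empties
// the member, matching the old `std::move(debug.queryStatsInfo.key)`
// behavior seen throughout this diff.
#include <iostream>
#include <memory>
#include <utility>

struct QueryStatsInfo {
    std::unique_ptr<int> key;  // stand-in for the real query stats key type
};

class OpDebug {
public:
    QueryStatsInfo& getQueryStatsInfo() { return _queryStatsInfo; }

private:
    QueryStatsInfo _queryStatsInfo;
};

int main() {
    OpDebug debug;
    debug.getQueryStatsInfo().key = std::make_unique<int>(42);

    // Moving out through the accessor leaves the member null, just as
    // moving out of the former public field did.
    auto owned = std::move(debug.getQueryStatsInfo().key);
    std::cout << (debug.getQueryStatsInfo().key == nullptr) << '\n';  // 1
    std::cout << *owned << '\n';                                      // 42
}
```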
@@ -179,12 +179,12 @@ TEST_F(ProfileFilterTest, FilterDependsOnEnabledFeatureFlag) {
     ASSERT_TRUE(profileFilter.dependsOn("nreturned"));

     // '$_testFeatureFlagLatest' will always return 1. If 'nreturned' is 2, the filter should match.
-    opDebug->additiveMetrics.nreturned = 2;
+    opDebug->getAdditiveMetrics().nreturned = 2;
     ASSERT_TRUE(profileFilter.matches(opCtx, *opDebug, *curop));

     // '$_testFeatureFlagLatest' will always return 1. If 'nreturned' is 0.1, the filter should not
     // match.
-    opDebug->additiveMetrics.nreturned = 0.1;
+    opDebug->getAdditiveMetrics().nreturned = 0.1;
     ASSERT_FALSE(profileFilter.matches(opCtx, *opDebug, *curop));
 }

@@ -127,10 +127,10 @@ ClientCursor::ClientCursor(ClientCursorParams params,
       _planCacheKey(CurOp::get(operationUsingCursor)->debug().planCacheKey),
       _planCacheShapeHash(CurOp::get(operationUsingCursor)->debug().planCacheShapeHash),
       _queryShapeHash(CurOp::get(operationUsingCursor)->debug().getQueryShapeHash()),
-      _queryStatsKeyHash(CurOp::get(operationUsingCursor)->debug().queryStatsInfo.keyHash),
-      _queryStatsKey(std::move(CurOp::get(operationUsingCursor)->debug().queryStatsInfo.key)),
+      _queryStatsKeyHash(CurOp::get(operationUsingCursor)->debug().getQueryStatsInfo().keyHash),
+      _queryStatsKey(std::move(CurOp::get(operationUsingCursor)->debug().getQueryStatsInfo().key)),
       _queryStatsWillNeverExhaust(
-          CurOp::get(operationUsingCursor)->debug().queryStatsInfo.willNeverExhaust),
+          CurOp::get(operationUsingCursor)->debug().getQueryStatsInfo().willNeverExhaust),
       _isChangeStreamQuery(CurOp::get(operationUsingCursor)->debug().isChangeStreamQuery),
       _shouldOmitDiagnosticInformation(
           CurOp::get(operationUsingCursor)->getShouldOmitDiagnosticInformation()),

@@ -35,7 +35,7 @@
 namespace mongo {

 void collectQueryStatsMongod(OperationContext* opCtx, ClientCursorPin& pinnedCursor) {
-    pinnedCursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
+    pinnedCursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().getAdditiveMetrics());

    // For a change stream query, we want to collect and update query stats on the initial query and
    // for every getMore.

@@ -44,11 +44,11 @@ void collectQueryStatsMongod(OperationContext* opCtx, ClientCursorPin& pinnedCur

     auto snapshot = query_stats::captureMetrics(
         opCtx,
-        query_stats::microsecondsToUint64(opDebug.additiveMetrics.executionTime),
-        opDebug.additiveMetrics);
+        query_stats::microsecondsToUint64(opDebug.getAdditiveMetrics().executionTime),
+        opDebug.getAdditiveMetrics());

     query_stats::writeQueryStats(opCtx,
-                                 opDebug.queryStatsInfo.keyHash,
+                                 opDebug.getQueryStatsInfo().keyHash,
                                  pinnedCursor->takeKey(),
                                  snapshot,
                                  {} /* supplementalMetrics */,

@@ -65,11 +65,11 @@ void collectQueryStatsMongod(OperationContext* opCtx,

     auto snapshot = query_stats::captureMetrics(
         opCtx,
-        query_stats::microsecondsToUint64(opDebug.additiveMetrics.executionTime),
-        opDebug.additiveMetrics);
+        query_stats::microsecondsToUint64(opDebug.getAdditiveMetrics().executionTime),
+        opDebug.getAdditiveMetrics());

     query_stats::writeQueryStats(opCtx,
-                                 opDebug.queryStatsInfo.keyHash,
+                                 opDebug.getQueryStatsInfo().keyHash,
                                  std::move(key),
                                  snapshot,
                                  query_stats::computeSupplementalQueryStatsMetrics(opDebug));

@@ -238,8 +238,8 @@ StatusWith<ClientCursorPin> CursorManager::pinCursor(
     CurOp::get(opCtx)->debug().setQueryShapeHash(opCtx, cursor->_queryShapeHash);

    // Pass along queryStats context so it is retrievable after query execution for storing metrics.
-    CurOp::get(opCtx)->debug().queryStatsInfo.keyHash = cursor->_queryStatsKeyHash;
-    CurOp::get(opCtx)->debug().queryStatsInfo.willNeverExhaust =
+    CurOp::get(opCtx)->debug().getQueryStatsInfo().keyHash = cursor->_queryStatsKeyHash;
+    CurOp::get(opCtx)->debug().getQueryStatsInfo().willNeverExhaust =
         cursor->_queryStatsWillNeverExhaust;
     // Pass along 'isChangeStreamQuery' for serverStatus metrics.
     CurOp::get(opCtx)->debug().isChangeStreamQuery = cursor->_isChangeStreamQuery;

@@ -93,7 +93,7 @@ void endQueryOp(OperationContext* opCtx,
     // are collected within collectQueryStatsMongod.
     curOp->debug().cursorid = (cursor.has_value() ? cursor->getCursor()->cursorid() : -1);
     curOp->debug().cursorExhausted = !cursor.has_value();
-    curOp->debug().additiveMetrics.nBatches = 1;
+    curOp->debug().getAdditiveMetrics().nBatches = 1;

     // Fill out CurOp based on explain summary statistics.
     PlanSummaryStats summaryStats;

@@ -116,7 +116,7 @@ void endQueryOp(OperationContext* opCtx,
     } else {
         auto* cq = exec.getCanonicalQuery();
         const auto& expCtx = cq ? cq->getExpCtx() : makeBlankExpressionContext(opCtx, exec.nss());
-        collectQueryStatsMongod(opCtx, expCtx, std::move(curOp->debug().queryStatsInfo.key));
+        collectQueryStatsMongod(opCtx, expCtx, std::move(curOp->debug().getQueryStatsInfo().key));
     }

     if (curOp->shouldDBProfile()) {

@@ -374,7 +374,7 @@ void registerRequest(OperationContext* opCtx,

     auto& opDebug = CurOp::get(opCtx)->debug();

-    if (opDebug.queryStatsInfo.disableForSubqueryExecution) {
+    if (opDebug.getQueryStatsInfo().disableForSubqueryExecution) {
         LOGV2_DEBUG(
             9219800,
             4,

@@ -383,11 +383,11 @@ void registerRequest(OperationContext* opCtx,
     }

     if (!shouldCollect(opCtx->getServiceContext())) {
-        opDebug.queryStatsInfo.disableForSubqueryExecution = true;
+        opDebug.getQueryStatsInfo().disableForSubqueryExecution = true;
         return;
     }

-    if (opDebug.queryStatsInfo.key) {
+    if (opDebug.getQueryStatsInfo().key) {
         // A find() or distinct() request may have already registered the shapifier. Ie, it's
         // a find or distinct command over a non-physical collection, eg view, which is
         // implemented by generating an agg pipeline.

@@ -398,15 +398,15 @@ void registerRequest(OperationContext* opCtx,
         return;
     }

-    opDebug.queryStatsInfo.willNeverExhaust = willNeverExhaust;
+    opDebug.getQueryStatsInfo().willNeverExhaust = willNeverExhaust;
    // There are a few cases where a query shape can be larger than the original query. For example,
    // {$exists: false} in the input query serializes to {$not: {$exists: true}. In rare cases where
    // an input query has thousands of clauses, the cumulative bloat that shapification adds results
    // in a BSON object that exceeds the 16 MB memory limit. In these cases, we want to exclude the
    // original query from queryStats metrics collection and let it execute normally.
     try {
-        opDebug.queryStatsInfo.key = makeKey();
-        opDebug.queryStatsInfo.keyHash = absl::HashOf(*opDebug.queryStatsInfo.key);
+        opDebug.getQueryStatsInfo().key = makeKey();
+        opDebug.getQueryStatsInfo().keyHash = absl::HashOf(*opDebug.getQueryStatsInfo().key);
         if (MONGO_unlikely(queryStatsFailToSerializeKey.shouldFail())) {
             uasserted(ErrorCodes::FailPointEnabled,
                       "queryStatsFailToSerializeKey fail point is enabled");

@@ -463,8 +463,9 @@ bool shouldRequestRemoteMetrics(const OpDebug& opDebug) {
     // execution. If the keyHash is non-null, then we expect we should forward remote query stats
     // metrics to a higher level of execution, such as running an aggregation for a view, or there
     // are multiple cursors open in a single operation context, such as in $search.
-    return opDebug.queryStatsInfo.metricsRequested || opDebug.queryStatsInfo.key != nullptr ||
-        opDebug.queryStatsInfo.keyHash != boost::none;
+    return opDebug.getQueryStatsInfo().metricsRequested ||
+        opDebug.getQueryStatsInfo().key != nullptr ||
+        opDebug.getQueryStatsInfo().keyHash != boost::none;
 }

 QueryStatsStore& getQueryStatsStore(OperationContext* opCtx) {
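The `registerRequest` hunk above shows the key/keyHash relationship that several later call sites depend on: the hash is computed once from the freshly built key (via `absl::HashOf` in the real code) and cached alongside it, so the hash remains available even after the key itself has been moved into the query stats store. A hedged sketch with simplified stand-in types:

```cpp
// Sketch of the key/keyHash lifecycle: hash the key once at registration,
// then hand the key off later while keeping the hash around for lookups
// and logging. std::hash stands in for absl::HashOf here.
#include <cstddef>
#include <functional>
#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <utility>

struct QueryStatsInfo {
    std::unique_ptr<std::string> key;   // stand-in for the real Key object
    std::optional<std::size_t> keyHash;
};

int main() {
    QueryStatsInfo info;
    info.key = std::make_unique<std::string>("find{shape}");
    info.keyHash = std::hash<std::string>{}(*info.key);  // absl::HashOf in the real code

    auto stored = std::move(info.key);  // key handed off; the hash survives
    std::cout << (info.key == nullptr) << ' ' << info.keyHash.has_value() << '\n';  // 1 1
}
```

This is also why the tests below can assert `key == nullptr` while `keyHash.has_value()` still holds after a write.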
@@ -296,7 +296,7 @@ TEST_F(QueryStatsStoreTest, GenerateMaxBsonSizeQueryShape) {
         }
     })());
     auto& opDebug = CurOp::get(*opCtx)->debug();
-    ASSERT_EQ(opDebug.queryStatsInfo.keyHash, boost::none);
+    ASSERT_EQ(opDebug.getQueryStatsInfo().keyHash, boost::none);
 }

 TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) {

@@ -66,7 +66,7 @@ TEST_F(QueryStatsTest, TwoRegisterRequestsWithSameOpCtxRateLimitedFirstCall) {

     RAIIServerParameterControllerForTest controller("featureFlagQueryStats", true);
     auto& opDebug = CurOp::get(*opCtx)->debug();
-    ASSERT_EQ(opDebug.queryStatsInfo.disableForSubqueryExecution, false);
+    ASSERT_EQ(opDebug.getQueryStatsInfo().disableForSubqueryExecution, false);

     // First call to registerRequest() should be rate limited.
     auto& limiter = QueryStatsStoreManager::getRateLimiter(opCtx->getServiceContext());

@@ -80,8 +80,8 @@ TEST_F(QueryStatsTest, TwoRegisterRequestsWithSameOpCtxRateLimitedFirstCall) {
     }));

     // Since the query was rate limited, no key should have been created.
-    ASSERT(opDebug.queryStatsInfo.key == nullptr);
-    ASSERT_EQ(opDebug.queryStatsInfo.disableForSubqueryExecution, true);
+    ASSERT(opDebug.getQueryStatsInfo().key == nullptr);
+    ASSERT_EQ(opDebug.getQueryStatsInfo().disableForSubqueryExecution, true);

     // Second call should not be rate limited.
     QueryStatsStoreManager::getRateLimiter(opCtx->getServiceContext())

@@ -96,9 +96,9 @@ TEST_F(QueryStatsTest, TwoRegisterRequestsWithSameOpCtxRateLimitedFirstCall) {
     }));

     // queryStatsKey should not be created for previously rate limited query.
-    ASSERT(opDebug.queryStatsInfo.key == nullptr);
-    ASSERT_EQ(opDebug.queryStatsInfo.disableForSubqueryExecution, true);
-    ASSERT_FALSE(opDebug.queryStatsInfo.keyHash.has_value());
+    ASSERT(opDebug.getQueryStatsInfo().key == nullptr);
+    ASSERT_EQ(opDebug.getQueryStatsInfo().disableForSubqueryExecution, true);
+    ASSERT_FALSE(opDebug.getQueryStatsInfo().keyHash.has_value());
 }

 TEST_F(QueryStatsTest, TwoRegisterRequestsWithSameOpCtxDisabledBetween) {

@@ -114,8 +114,8 @@ TEST_F(QueryStatsTest, TwoRegisterRequestsWithSameOpCtxDisabledBetween) {
     auto opCtx = makeOperationContext();

     auto& opDebug = CurOp::get(*opCtx)->debug();
-    ASSERT(opDebug.queryStatsInfo.key == nullptr);
-    ASSERT_FALSE(opDebug.queryStatsInfo.keyHash.has_value());
+    ASSERT(opDebug.getQueryStatsInfo().key == nullptr);
+    ASSERT_FALSE(opDebug.getQueryStatsInfo().keyHash.has_value());
     QueryStatsStoreManager::get(serviceCtx) =
         std::make_unique<QueryStatsStoreManager>(16 * 1024 * 1024, 1);

@@ -134,12 +134,13 @@ TEST_F(QueryStatsTest, TwoRegisterRequestsWithSameOpCtxDisabledBetween) {
             query_shape::CollectionType::kCollection);
     }));

-    ASSERT(opDebug.queryStatsInfo.key != nullptr);
-    ASSERT(opDebug.queryStatsInfo.keyHash.has_value());
+    ASSERT(opDebug.getQueryStatsInfo().key != nullptr);
+    ASSERT(opDebug.getQueryStatsInfo().keyHash.has_value());

-    ASSERT_DOES_NOT_THROW(query_stats::writeQueryStats(opCtx.get(),
-                                                       opDebug.queryStatsInfo.keyHash,
-                                                       std::move(opDebug.queryStatsInfo.key),
+    ASSERT_DOES_NOT_THROW(
+        query_stats::writeQueryStats(opCtx.get(),
+                                     opDebug.getQueryStatsInfo().keyHash,
+                                     std::move(opDebug.getQueryStatsInfo().key),
                                      QueryStatsSnapshot{}));
 }

@@ -164,22 +165,23 @@ TEST_F(QueryStatsTest, TwoRegisterRequestsWithSameOpCtxDisabledBetween) {
     }));

         // queryStatsKey should not be created since we have a size budget of 0.
-        ASSERT(opDebug.queryStatsInfo.key == nullptr);
+        ASSERT(opDebug.getQueryStatsInfo().key == nullptr);
         // Query stats are disabled by a lack of space, not by being a on a subquery path.
-        ASSERT_EQ(opDebug.queryStatsInfo.disableForSubqueryExecution, false);
+        ASSERT_EQ(opDebug.getQueryStatsInfo().disableForSubqueryExecution, false);

         // Interestingly, we purposefully leave the hash value around on the OperationContext after
         // the previous operation finishes. This is because we think it may have value in being
         // logged in the future, even after query stats have been written. Excepting obscure
         // internal use-cases, most OperationContexts will die shortly after the query stats are
        // written, so this isn't expected to be a large issue.
-        ASSERT(opDebug.queryStatsInfo.keyHash.has_value());
+        ASSERT(opDebug.getQueryStatsInfo().keyHash.has_value());

         QueryStatsStoreManager::get(serviceCtx)->resetSize(16 * 1024 * 1024);
         // SERVER-84730 this assertion used to throw since there is no key, but there is a hash.
-        ASSERT_DOES_NOT_THROW(query_stats::writeQueryStats(opCtx.get(),
-                                                           opDebug.queryStatsInfo.keyHash,
-                                                           std::move(opDebug.queryStatsInfo.key),
+        ASSERT_DOES_NOT_THROW(
+            query_stats::writeQueryStats(opCtx.get(),
+                                         opDebug.getQueryStatsInfo().keyHash,
+                                         std::move(opDebug.getQueryStatsInfo().key),
                                          QueryStatsSnapshot{}));
     }
 }

@@ -203,7 +205,7 @@ TEST_F(QueryStatsTest, RegisterRequestAbsorbsErrors) {

     // Skip this check for debug builds because errors are always fatal in that environment.
     if (!kDebugBuild) {
-        opDebug.queryStatsInfo = OpDebug::QueryStatsInfo{};
+        opDebug.getQueryStatsInfo() = OpDebug::QueryStatsInfo{};
         ASSERT_DOES_NOT_THROW(query_stats::registerRequest(opCtx.get(), nss, [&]() {
             uasserted(ErrorCodes::BadValue, "fake error");
             return nullptr;

@@ -214,14 +216,14 @@ TEST_F(QueryStatsTest, RegisterRequestAbsorbsErrors) {
     internalQueryStatsErrorsAreCommandFatal.store(true);

     // We shouldn't propagate 'BSONObjectTooLarge' errors under any circumstances.
-    opDebug.queryStatsInfo = OpDebug::QueryStatsInfo{};
+    opDebug.getQueryStatsInfo() = OpDebug::QueryStatsInfo{};
     ASSERT_DOES_NOT_THROW(query_stats::registerRequest(opCtx.get(), nss, [&]() {
         uasserted(ErrorCodes::BSONObjectTooLarge, "size error");
         return nullptr;
     }));

     // This should hit our tripwire assertion.
-    opDebug.queryStatsInfo = OpDebug::QueryStatsInfo{};
+    opDebug.getQueryStatsInfo() = OpDebug::QueryStatsInfo{};
     ASSERT_THROWS_CODE(query_stats::registerRequest(opCtx.get(),
                                                     nss,
                                                     [&]() {

@@ -233,7 +233,7 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) {
     try {
         curOp->done();
         auto executionTimeMicros = curOp->elapsedTimeExcludingPauses();
-        curOp->debug().additiveMetrics.executionTime = executionTimeMicros;
+        curOp->debug().getAdditiveMetrics().executionTime = executionTimeMicros;

         recordCurOpMetrics(opCtx);
         Top::getDecoration(opCtx).record(opCtx,

@@ -689,7 +689,7 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
         if (source != OperationSource::kTimeseriesInsert) {
             ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInserts(
                 opCtx->getWriteConcern(), batch.size());
-            curOp.debug().additiveMetrics.incrementNinserted(batch.size());
+            curOp.debug().getAdditiveMetrics().incrementNinserted(batch.size());
         }
         return true;
     }

@@ -724,7 +724,7 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx,
             result.setN(1);
             out->results.emplace_back(std::move(result));
             if (source != OperationSource::kTimeseriesInsert) {
-                curOp.debug().additiveMetrics.incrementNinserted(1);
+                curOp.debug().getAdditiveMetrics().incrementNinserted(1);
             }
         } catch (...) {
             // Release the lock following any error if we are not in multi-statement

@@ -902,7 +902,7 @@ UpdateResult performUpdate(OperationContext* opCtx,

     if (updateRequest->shouldReturnAnyDocs()) {
         docFound = exec->executeFindAndModify();
-        curOp->debug().additiveMetrics.nreturned = docFound ? 1 : 0;
+        curOp->debug().getAdditiveMetrics().nreturned = docFound ? 1 : 0;
     } else {
         // The 'UpdateResult' object will be obtained later, so discard the return value.
         (void)exec->executeUpdate();

@@ -1036,7 +1036,7 @@ long long performDelete(OperationContext* opCtx,

     if (deleteRequest->getReturnDeleted()) {
         docFound = exec->executeFindAndModify();
-        curOp->debug().additiveMetrics.nreturned = docFound ? 1 : 0;
+        curOp->debug().getAdditiveMetrics().nreturned = docFound ? 1 : 0;
     } else {
        // The number of deleted documents will be obtained from the plan executor later, so discard
        // the return value.

@@ -1059,7 +1059,7 @@ long long performDelete(OperationContext* opCtx,

     // Fill out OpDebug with the number of deleted docs.
     auto nDeleted = exec->getDeleteResult();
-    curOp->debug().additiveMetrics.ndeleted = nDeleted;
+    curOp->debug().getAdditiveMetrics().ndeleted = nDeleted;

     if (curOp->shouldDBProfile()) {
         auto&& explainer = exec->getPlanExplainer();

@@ -1233,7 +1233,7 @@ WriteResult performInserts(
         curOp.setLogicalOp(lk, LogicalOp::opInsert);
         curOp.ensureStarted();
         // Initialize 'ninserted' for the operation if is not yet.
-        curOp.debug().additiveMetrics.incrementNinserted(0);
+        curOp.debug().getAdditiveMetrics().incrementNinserted(0);
     }

     uassertStatusOK(userAllowedWriteNS(opCtx, actualNs));

@@ -1417,7 +1417,7 @@ static SingleWriteResult performSingleUpdateOpNoRetry(OperationContext* opCtx,
     // Collect query stats for the update operation if a QueryStats key was generated during
     // registration. This ensures that we minimize the overhead of query stats collection for
     // updates even if it does not have query stats enabled.
-    auto key = std::move(curOp.debug().queryStatsInfo.key);
+    auto key = std::move(curOp.debug().getQueryStatsInfo().key);
     if (key) {
         curOp.setEndOfOpMetrics(0 /* no documents returned */);
         collectQueryStatsMongod(opCtx, canonicalUpdate.expCtx(), std::move(key));

@@ -1945,7 +1945,7 @@ WriteResult performUpdates(
         curOp.emplace(cmd);
         curOp->push(opCtx);
         if (singleOp.getIncludeQueryStatsMetrics()) {
-            curOp->debug().queryStatsInfo.metricsRequested = true;
+            curOp->debug().getQueryStatsInfo().metricsRequested = true;
         }
     }
     ON_BLOCK_EXIT([&] {

@@ -1953,7 +1953,8 @@ WriteResult performUpdates(
         finishCurOp(opCtx, &*curOp);
         // The last SingleWriteResult will be for the operation we just executed. If it
         // succeeded, and metrics were requested, set them now.
-        if (curOp->debug().queryStatsInfo.metricsRequested && out.results.back().isOK()) {
+        if (curOp->debug().getQueryStatsInfo().metricsRequested &&
+            out.results.back().isOK()) {
             out.results.back().getValue().setQueryStatsMetrics(
                 curOp->debug().getCursorMetrics());
         }

@@ -2162,7 +2163,7 @@ static SingleWriteResult performSingleDeleteOp(
     }

     auto nDeleted = exec->executeDelete();
-    curOp.debug().additiveMetrics.ndeleted = nDeleted;
+    curOp.debug().getAdditiveMetrics().ndeleted = nDeleted;

     PlanSummaryStats summary;
     auto&& explainer = exec->getPlanExplainer();

@@ -2321,9 +2322,10 @@ WriteResult performDeletes(

 void recordUpdateResultInOpDebug(const UpdateResult& updateResult, OpDebug* opDebug) {
     tassert(11052015, "Expected non-null OpDebug pointer", opDebug);
-    opDebug->additiveMetrics.nMatched = updateResult.numMatched;
-    opDebug->additiveMetrics.nModified = updateResult.numDocsModified;
-    opDebug->additiveMetrics.nUpserted = static_cast<long long>(!updateResult.upsertedId.isEmpty());
+    opDebug->getAdditiveMetrics().nMatched = updateResult.numMatched;
+    opDebug->getAdditiveMetrics().nModified = updateResult.numDocsModified;
+    opDebug->getAdditiveMetrics().nUpserted =
+        static_cast<long long>(!updateResult.upsertedId.isEmpty());
 }

 namespace {

@@ -923,7 +923,7 @@ bool appendEmptyResultSet(OperationContext* opCtx,
                           const NamespaceString& nss) {
     invariant(!status.isOK());

-    CurOp::get(opCtx)->debug().additiveMetrics.nreturned = 0;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().nreturned = 0;
     CurOp::get(opCtx)->debug().nShards = 0;

     if (status == ErrorCodes::NamespaceNotFound) {

@@ -823,8 +823,8 @@ TEST_F(CatalogTestFixture, CappedDeleteRecord) {
     auto globalDeletesAfterInsert = serviceOpCounters(ClusterRole::ShardServer).getDelete()->load();
     ASSERT_EQUALS(globalDeletesAfterInsert, globalDeletesInitial + 1);

-    ASSERT_EQUALS(1, opDebug.additiveMetrics.keysDeleted.get_value_or(-1));
-    ASSERT_EQUALS(1, opDebug.additiveMetrics.ndeleted.get_value_or(-1));
+    ASSERT_EQUALS(1, opDebug.getAdditiveMetrics().keysDeleted.get_value_or(-1));
+    ASSERT_EQUALS(1, opDebug.getAdditiveMetrics().ndeleted.get_value_or(-1));

     ASSERT_EQUALS(1, coll->numRecords(operationContext()));

@@ -70,7 +70,7 @@ mongo::write_ops::InsertCommandReply performTimeseriesWrites(
         curOp.setLogicalOp(lk, LogicalOp::opInsert);
         curOp.ensureStarted();
         // Initialize 'ninserted' for the operation if is not yet.
-        curOp.debug().additiveMetrics.incrementNinserted(0);
+        curOp.debug().getAdditiveMetrics().incrementNinserted(0);
     }

     return performTimeseriesWrites(opCtx, request, preConditions, &curOp);

@@ -128,7 +128,7 @@ mongo::write_ops::InsertCommandReply performTimeseriesWrites(
         RetryableWritesStats::get(opCtx)->incrementRetriedCommandsCount();
     }

-    curOp->debug().additiveMetrics.ninserted = baseReply.getN();
+    curOp->debug().getAdditiveMetrics().ninserted = baseReply.getN();
     serviceOpCounters(opCtx).gotInserts(baseReply.getN());
     ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInserts(opCtx->getWriteConcern(),
                                                                         baseReply.getN());
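The transaction hunks that follow all route through `AdditiveMetrics::add()`, which folds each operation's counters from CurOp into the transaction-level OpDebug so multi-statement transactions report totals. A hedged sketch of that aggregation semantics (simplified optional-based fields, assuming unset-plus-unset stays unset, which is what the tests below rely on):

```cpp
// Sketch of optional-aware metric aggregation in the spirit of
// AdditiveMetrics::add(): a field only becomes set once either side has
// observed a value, and set values accumulate.
#include <cstdint>
#include <iostream>
#include <optional>

struct AdditiveMetrics {
    std::optional<int64_t> keysExamined;
    std::optional<int64_t> docsExamined;

    // Treat "unset" as zero only when the other side has a value, so a
    // field stays unset if neither side ever touched it.
    static void addOne(std::optional<int64_t>& lhs, const std::optional<int64_t>& rhs) {
        if (rhs) lhs = lhs.value_or(0) + *rhs;
    }
    void add(const AdditiveMetrics& other) {
        addOne(keysExamined, other.keysExamined);
        addOne(docsExamined, other.docsExamined);
    }
};

int main() {
    AdditiveMetrics txnTotals, opMetrics;
    txnTotals.keysExamined = 1;   // accumulated so far in the transaction
    opMetrics.keysExamined = 5;   // this operation's contribution
    opMetrics.docsExamined = 2;

    txnTotals.add(opMetrics);  // as in onTransactionOperation below
    std::cout << *txnTotals.keysExamined << ' ' << *txnTotals.docsExamined << '\n';  // 6 2
}
```

The AdditiveMetricsObjectsShouldBeAddedTogether tests at the end of this diff exercise exactly this: seed both objects, stash/commit/abort the transaction, and assert the stored totals equal the sum.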
@ -192,7 +192,7 @@ void TransactionMetricsObserver::onTransactionOperation(
|
|||
bool isPrepared) {
|
||||
// Add the latest operation stats to the aggregate OpDebug::AdditiveMetrics and StorageMetrics
|
||||
// objects stored in the SingleTransactionStats instance on the TransactionMetricsObserver.
|
||||
_singleTransactionStats.getOpDebug()->additiveMetrics.add(additiveMetrics);
|
||||
_singleTransactionStats.getOpDebug()->getAdditiveMetrics().add(additiveMetrics);
|
||||
_singleTransactionStats.incrementPrepareReadConflicts(prepareReadConflicts);
|
||||
_singleTransactionStats.getTransactionStorageMetrics() += storageMetrics;
|
||||
|
||||
|
|
|
|||
|
|
@ -1634,7 +1634,7 @@ void TransactionParticipant::Participant::_stashActiveTransaction(OperationConte
|
|||
|
||||
auto curop = CurOp::get(opCtx);
|
||||
o(lk).transactionMetricsObserver.onTransactionOperation(opCtx,
|
||||
curop->debug().additiveMetrics,
|
||||
curop->debug().getAdditiveMetrics(),
|
||||
curop->getPrepareReadConflicts(),
|
||||
curop->getOperationStorageMetrics(),
|
||||
o().txnState.isPrepared());
|
||||
|
|
@ -2532,7 +2532,7 @@ void TransactionParticipant::Participant::_finishCommitTransaction(
|
|||
|
||||
auto curop = CurOp::get(opCtx);
|
||||
o(lk).transactionMetricsObserver.onTransactionOperation(opCtx,
|
||||
curop->debug().additiveMetrics,
|
||||
curop->debug().getAdditiveMetrics(),
|
||||
curop->getPrepareReadConflicts(),
|
||||
curop->getOperationStorageMetrics(),
|
||||
o().txnState.isPrepared());
|
||||
|
|
@ -2651,7 +2651,7 @@ void TransactionParticipant::Participant::_abortActiveTransaction(
|
|||
stdx::lock_guard<Client> lk(*opCtx->getClient());
|
||||
auto curop = CurOp::get(opCtx);
|
||||
o(lk).transactionMetricsObserver.onTransactionOperation(opCtx,
|
||||
curop->debug().additiveMetrics,
|
||||
curop->debug().getAdditiveMetrics(),
|
||||
curop->getPrepareReadConflicts(),
|
||||
curop->getOperationStorageMetrics(),
|
||||
o().txnState.isPrepared());
|
||||
|
|
@ -3141,7 +3141,7 @@ void TransactionParticipant::Participant::_transactionInfoForLog(
|
|||
|
||||
attrs.addDeepCopy("readTimestamp", singleTransactionStats.getReadTimestamp().toString());
|
||||
|
||||
singleTransactionStats.getOpDebug()->additiveMetrics.report(&attrs);
|
||||
singleTransactionStats.getOpDebug()->getAdditiveMetrics().report(&attrs);
|
||||
|
||||
const auto& storageMetrics = singleTransactionStats.getTransactionStorageMetrics();
|
||||
attrs.add("writeConflicts", storageMetrics.writeConflicts.loadRelaxed());
|
||||
|
|
|
|||
|
|
@ -3581,25 +3581,36 @@ TEST_F(TransactionsMetricsTest, AdditiveMetricsObjectsShouldBeAddedTogetherUponS
|
|||
auto txnParticipant = TransactionParticipant::get(opCtx());
|
||||
|
||||
// Initialize field values for both AdditiveMetrics objects.
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.keysExamined =
|
||||
txnParticipant.getSingleTransactionStatsForTest()
|
||||
.getOpDebug()
|
||||
->getAdditiveMetrics()
|
||||
.keysExamined = 1;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().keysExamined = 5;
|
||||
txnParticipant.getSingleTransactionStatsForTest()
|
||||
.getOpDebug()
|
||||
->getAdditiveMetrics()
|
||||
.docsExamined = 2;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().docsExamined = 0;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().nMatched =
|
||||
3;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().nModified =
|
||||
1;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.keysExamined = 5;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.docsExamined =
|
||||
2;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.docsExamined = 0;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.nMatched = 3;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.nModified = 1;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.nModified = 1;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.ninserted = 4;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.keysInserted =
|
||||
1;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.keysInserted = 1;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.keysDeleted = 0;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.keysDeleted = 0;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().nModified = 1;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().ninserted = 4;
|
||||
txnParticipant.getSingleTransactionStatsForTest()
|
||||
.getOpDebug()
|
||||
->getAdditiveMetrics()
|
||||
.keysInserted = 1;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().keysInserted = 1;
|
||||
txnParticipant.getSingleTransactionStatsForTest()
|
||||
.getOpDebug()
|
||||
->getAdditiveMetrics()
|
||||
.keysDeleted = 0;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().keysDeleted = 0;
|
||||
|
||||
auto additiveMetricsToCompare =
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics;
|
||||
additiveMetricsToCompare.add(CurOp::get(opCtx())->debug().additiveMetrics);
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics();
|
||||
additiveMetricsToCompare.add(CurOp::get(opCtx())->debug().getAdditiveMetrics());
|
||||
|
||||
txnParticipant.unstashTransactionResources(opCtx(), "insert");
|
||||
// The transaction machinery cannot store an empty locker.
|
||||
|
|
@ -3608,7 +3619,8 @@ TEST_F(TransactionsMetricsTest, AdditiveMetricsObjectsShouldBeAddedTogetherUponS
|
|||
}
|
||||
txnParticipant.stashTransactionResources(opCtx());
|
||||
|
||||
ASSERT(txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.equals(
|
||||
ASSERT(
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().equals(
|
||||
additiveMetricsToCompare));
|
||||
}
|
||||
|
||||
|
|
@ -3617,25 +3629,34 @@ TEST_F(TransactionsMetricsTest, AdditiveMetricsObjectsShouldBeAddedTogetherUponC
|
|||
auto txnParticipant = TransactionParticipant::get(opCtx());
|
||||
|
||||
// Initialize field values for both AdditiveMetrics objects.
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.keysExamined =
|
||||
3;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.keysExamined = 2;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.docsExamined =
|
||||
0;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.docsExamined = 2;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.nMatched = 4;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.nModified = 5;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.nModified = 1;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.ninserted = 1;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.ndeleted = 4;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.ndeleted = 0;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.keysInserted =
|
||||
1;
|
||||
CurOp::get(opCtx())->debug().additiveMetrics.keysInserted = 1;
|
||||
txnParticipant.getSingleTransactionStatsForTest()
|
||||
.getOpDebug()
|
||||
->getAdditiveMetrics()
|
||||
.keysExamined = 3;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().keysExamined = 2;
|
||||
txnParticipant.getSingleTransactionStatsForTest()
|
||||
.getOpDebug()
|
||||
->getAdditiveMetrics()
|
||||
.docsExamined = 0;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().docsExamined = 2;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().nMatched =
|
||||
4;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().nModified =
|
||||
5;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().nModified = 1;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().ninserted = 1;
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().ndeleted =
|
||||
4;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().ndeleted = 0;
|
||||
txnParticipant.getSingleTransactionStatsForTest()
|
||||
.getOpDebug()
|
||||
->getAdditiveMetrics()
|
||||
.keysInserted = 1;
|
||||
CurOp::get(opCtx())->debug().getAdditiveMetrics().keysInserted = 1;
|
||||
|
||||
auto additiveMetricsToCompare =
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics;
|
||||
additiveMetricsToCompare.add(CurOp::get(opCtx())->debug().additiveMetrics);
|
||||
txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics();
|
||||
additiveMetricsToCompare.add(CurOp::get(opCtx())->debug().getAdditiveMetrics());
|
||||
|
||||
txnParticipant.unstashTransactionResources(opCtx(), "insert");
|
||||
// The transaction machinery cannot store an empty locker.
|
||||
|
|
@@ -3644,7 +3665,8 @@ TEST_F(TransactionsMetricsTest, AdditiveMetricsObjectsShouldBeAddedTogetherUponCommit
     }
     txnParticipant.commitUnpreparedTransaction(opCtx());

-    ASSERT(txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.equals(
+    ASSERT(
+        txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().equals(
         additiveMetricsToCompare));
 }

@@ -3653,25 +3675,36 @@ TEST_F(TransactionsMetricsTest, AdditiveMetricsObjectsShouldBeAddedTogetherUponAbort
     auto txnParticipant = TransactionParticipant::get(opCtx());

     // Initialize field values for both AdditiveMetrics objects.
-    txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.keysExamined =
-        2;
-    CurOp::get(opCtx())->debug().additiveMetrics.keysExamined = 4;
-    txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.docsExamined =
-        1;
-    CurOp::get(opCtx())->debug().additiveMetrics.docsExamined = 3;
-    txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.nMatched = 2;
-    txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.nModified = 0;
-    CurOp::get(opCtx())->debug().additiveMetrics.nModified = 3;
-    CurOp::get(opCtx())->debug().additiveMetrics.ndeleted = 5;
-    txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.keysInserted =
-        1;
-    CurOp::get(opCtx())->debug().additiveMetrics.keysInserted = 1;
-    txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.keysDeleted = 6;
-    CurOp::get(opCtx())->debug().additiveMetrics.keysDeleted = 0;
+    txnParticipant.getSingleTransactionStatsForTest()
+        .getOpDebug()
+        ->getAdditiveMetrics()
+        .keysExamined = 2;
+    CurOp::get(opCtx())->debug().getAdditiveMetrics().keysExamined = 4;
+    txnParticipant.getSingleTransactionStatsForTest()
+        .getOpDebug()
+        ->getAdditiveMetrics()
+        .docsExamined = 1;
+    CurOp::get(opCtx())->debug().getAdditiveMetrics().docsExamined = 3;
+    txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().nMatched =
+        2;
+    txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().nModified =
+        0;
+    CurOp::get(opCtx())->debug().getAdditiveMetrics().nModified = 3;
+    CurOp::get(opCtx())->debug().getAdditiveMetrics().ndeleted = 5;
+    txnParticipant.getSingleTransactionStatsForTest()
+        .getOpDebug()
+        ->getAdditiveMetrics()
+        .keysInserted = 1;
+    CurOp::get(opCtx())->debug().getAdditiveMetrics().keysInserted = 1;
+    txnParticipant.getSingleTransactionStatsForTest()
+        .getOpDebug()
+        ->getAdditiveMetrics()
+        .keysDeleted = 6;
+    CurOp::get(opCtx())->debug().getAdditiveMetrics().keysDeleted = 0;

     auto additiveMetricsToCompare =
-        txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics;
-    additiveMetricsToCompare.add(CurOp::get(opCtx())->debug().additiveMetrics);
+        txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics();
+    additiveMetricsToCompare.add(CurOp::get(opCtx())->debug().getAdditiveMetrics());

     txnParticipant.unstashTransactionResources(opCtx(), "insert");
     // The transaction machinery cannot store an empty locker.
@@ -3680,7 +3713,8 @@ TEST_F(TransactionsMetricsTest, AdditiveMetricsObjectsShouldBeAddedTogetherUponAbort
     }
     txnParticipant.abortTransaction(opCtx());

-    ASSERT(txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->additiveMetrics.equals(
+    ASSERT(
+        txnParticipant.getSingleTransactionStatsForTest().getOpDebug()->getAdditiveMetrics().equals(
         additiveMetricsToCompare));
 }

@@ -4220,14 +4254,14 @@ TEST_F(TransactionsMetricsTest, LastClientInfoShouldUpdateUponAbort) {
  * Sets up the additive metrics for Transactions Metrics test.
  */
 void setupAdditiveMetrics(const int metricValue, OperationContext* opCtx) {
-    CurOp::get(opCtx)->debug().additiveMetrics.keysExamined = metricValue;
-    CurOp::get(opCtx)->debug().additiveMetrics.docsExamined = metricValue;
-    CurOp::get(opCtx)->debug().additiveMetrics.nMatched = metricValue;
-    CurOp::get(opCtx)->debug().additiveMetrics.nModified = metricValue;
-    CurOp::get(opCtx)->debug().additiveMetrics.ninserted = metricValue;
-    CurOp::get(opCtx)->debug().additiveMetrics.ndeleted = metricValue;
-    CurOp::get(opCtx)->debug().additiveMetrics.keysInserted = metricValue;
-    CurOp::get(opCtx)->debug().additiveMetrics.keysDeleted = metricValue;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().keysExamined = metricValue;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().docsExamined = metricValue;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().nMatched = metricValue;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().nModified = metricValue;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().ninserted = metricValue;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().ndeleted = metricValue;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().keysInserted = metricValue;
+    CurOp::get(opCtx)->debug().getAdditiveMetrics().keysDeleted = metricValue;
 }

 void setupPrepareConflictMetrics(const int metricValue, OperationContext* opCtx) {
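
Reviewer note: the accessor itself is outside these hunks. A minimal sketch of the shape of change the call sites imply, assuming the commit simply makes the member private and exposes reference accessors (the field list and types here are illustrative assumptions, not the real curop.h):

    // Illustrative sketch only -- the real OpDebug lives in mongo/db/curop.h
    // and has many more members; names and types here are assumptions.
    #include <cstdint>
    #include <optional>

    struct AdditiveMetrics {
        std::optional<int64_t> keysExamined;
        std::optional<int64_t> keysInserted;
        std::optional<int64_t> keysDeleted;
        // ... the remaining counters are elided ...
    };

    class OpDebug {
    public:
        // A mutable reference keeps every existing call site working
        // unchanged: reads, writes ("...keysInserted = 1"), and
        // pass-by-reference all go through the same accessor.
        AdditiveMetrics& getAdditiveMetrics() {
            return _additiveMetrics;
        }
        const AdditiveMetrics& getAdditiveMetrics() const {
            return _additiveMetrics;
        }

    private:
        AdditiveMetrics _additiveMetrics;  // previously the public 'additiveMetrics'
    };

Returning a reference rather than a copy is what lets every hunk in this commit remain a mechanical additiveMetrics -> getAdditiveMetrics() rewrite, including the sites that assign or move through the accessor.
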
@@ -619,7 +619,7 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx,

     try {
         const auto numDeletedDocs = exec->executeDelete();
-        const auto numDeletedKeys = opDebug.additiveMetrics.keysDeleted.value_or(0ll);
+        const auto numDeletedKeys = opDebug.getAdditiveMetrics().keysDeleted.value_or(0ll);
         ttlDeletedDocuments.increment(numDeletedDocs);
         ttlDeletedKeys.increment(numDeletedKeys);

@@ -772,7 +772,7 @@ bool TTLMonitor::_performDeleteExpiredWithCollscan(OperationContext* opCtx,

     try {
         const auto numDeletedDocs = exec->executeDelete();
-        const auto numDeletedKeys = opDebug.additiveMetrics.keysDeleted.value_or(0ll);
+        const auto numDeletedKeys = opDebug.getAdditiveMetrics().keysDeleted.value_or(0ll);
         ttlDeletedDocuments.increment(numDeletedDocs);
         ttlDeletedKeys.increment(numDeletedKeys);

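
A small aside for reviewers: keysDeleted is an optional counter, so both TTL paths read it through value_or(0ll) rather than dereferencing. A standalone sketch of that idiom, with std::optional standing in for whatever optional type the server actually uses here:

    #include <cstdint>
    #include <iostream>
    #include <optional>

    int main() {
        std::optional<int64_t> keysDeleted;  // unset: the executor touched no index keys
        std::cout << keysDeleted.value_or(0) << "\n";  // prints 0, safe to feed a counter
        keysDeleted = 42;
        std::cout << keysDeleted.value_or(0) << "\n";  // prints 42
        return 0;
    }
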
@@ -360,7 +360,8 @@ public:
                     shardMetrics.Obj(), IDLParserContext("CursorMetrics"));
                 CurOp::get(opCtx)
                     ->debug()
-                    .additiveMetrics.aggregateCursorMetrics(metrics);
+                    .getAdditiveMetrics()
+                    .aggregateCursorMetrics(metrics);
             }
             continue;
         }
@@ -385,7 +386,7 @@ public:

         if (allShardMetricsReturned) {
             collectQueryStatsMongos(opCtx,
-                                    std::move(curOp->debug().queryStatsInfo.key));
+                                    std::move(curOp->debug().getQueryStatsInfo().key));
         }

         return true;
@@ -401,7 +402,7 @@ public:
         auto* curOp = CurOp::get(opCtx);
         curOp->setEndOfOpMetrics(1);

-        collectQueryStatsMongos(opCtx, std::move(curOp->debug().queryStatsInfo.key));
+        collectQueryStatsMongos(opCtx, std::move(curOp->debug().getQueryStatsInfo().key));
         return true;
     }
 }
@@ -415,7 +416,7 @@ public:
         const BSONObj& originalCmdObj = request.body;

         auto curOp = CurOp::get(opCtx);
-        curOp->debug().queryStatsInfo.disableForSubqueryExecution = true;
+        curOp->debug().getQueryStatsInfo().disableForSubqueryExecution = true;

         const auto originalNss = parseNs(request.parseDbName(), originalCmdObj);
         uassert(ErrorCodes::InvalidNamespace,
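
The hunks in this file both assign through the accessor (.disableForSubqueryExecution = true) and, elsewhere in the commit, move out of it (std::move(...key)), which implies getQueryStatsInfo() also returns a mutable reference. A self-contained sketch under that assumption; QueryStatsInfo's real layout is assumed, not quoted from the tree:

    #include <memory>
    #include <string>
    #include <utility>

    struct QueryStatsInfo {
        std::unique_ptr<std::string> key;  // stand-in for the real key type
        bool disableForSubqueryExecution = false;
    };

    class OpDebug {
    public:
        QueryStatsInfo& getQueryStatsInfo() {
            return _queryStatsInfo;
        }

    private:
        QueryStatsInfo _queryStatsInfo;
    };

    int main() {
        OpDebug debug;
        debug.getQueryStatsInfo().key = std::make_unique<std::string>("shape");
        // Assignment through the returned reference mutates the stored struct...
        debug.getQueryStatsInfo().disableForSubqueryExecution = true;
        // ...and moving a member out through it leaves a valid, empty unique_ptr
        // behind, which is what the "collect stats exactly once" call sites rely on.
        auto key = std::move(debug.getQueryStatsInfo().key);
        return (key && !debug.getQueryStatsInfo().key) ? 0 : 1;
    }
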
@@ -525,8 +525,10 @@ public:
                 if (shardMetrics.isABSONObj()) {
                     auto metrics = CursorMetrics::parse(
                         shardMetrics.Obj(), IDLParserContext("CursorMetrics"));
-                    CurOp::get(opCtx)->debug().additiveMetrics.aggregateCursorMetrics(
-                        metrics);
+                    CurOp::get(opCtx)
+                        ->debug()
+                        .getAdditiveMetrics()
+                        .aggregateCursorMetrics(metrics);
                 }
             }
         }
@@ -551,7 +553,7 @@ public:

             CurOp::get(opCtx)->setEndOfOpMetrics(n);
             collectQueryStatsMongos(
-                opCtx, std::move(CurOp::get(opCtx)->debug().queryStatsInfo.key));
+                opCtx, std::move(CurOp::get(opCtx)->debug().getQueryStatsInfo().key));

             return true;
         });
@@ -570,7 +572,7 @@ public:
             result.appendArray("values", BSONObj());
             CurOp::get(opCtx)->setEndOfOpMetrics(0);
             collectQueryStatsMongos(opCtx,
-                                    std::move(CurOp::get(opCtx)->debug().queryStatsInfo.key));
+                                    std::move(CurOp::get(opCtx)->debug().getQueryStatsInfo().key));
             return true;
         }
     }
@@ -598,8 +600,8 @@ public:

         // We must store the key in distinct to prevent collecting query stats when the aggregation
         // runs.
-        auto ownedQueryStatsKey = std::move(curOp->debug().queryStatsInfo.key);
-        curOp->debug().queryStatsInfo.disableForSubqueryExecution = true;
+        auto ownedQueryStatsKey = std::move(curOp->debug().getQueryStatsInfo().key);
+        curOp->debug().getQueryStatsInfo().disableForSubqueryExecution = true;

         // Skip privilege checking if we are in an explain.
         if (verbosity) {
@@ -207,7 +207,7 @@ public:
         Impl::checkCanExplainHere(opCtx);

         auto curOp = CurOp::get(opCtx);
-        curOp->debug().queryStatsInfo.disableForSubqueryExecution = true;
+        curOp->debug().getQueryStatsInfo().disableForSubqueryExecution = true;

         setReadConcern(opCtx);
         doFLERewriteIfNeeded(opCtx);
@@ -720,7 +720,7 @@ bool ClusterWriteCmd::InvocationBase::runImpl(OperationContext* opCtx,
             for (size_t i = 0; i < numAttempts; ++i) {
                 serviceOpCounters(opCtx).gotInsert();
             }
-            debug.additiveMetrics.ninserted = response.getN();
+            debug.getAdditiveMetrics().ninserted = response.getN();
             break;
         case BatchedCommandRequest::BatchType_Update:
             for (size_t i = 0; i < numAttempts; ++i) {
@@ -729,12 +729,13 @@ bool ClusterWriteCmd::InvocationBase::runImpl(OperationContext* opCtx,

             // The response.getN() count is the sum of documents matched and upserted.
             if (response.isUpsertDetailsSet()) {
-                debug.additiveMetrics.nMatched = response.getN() - response.sizeUpsertDetails();
-                debug.additiveMetrics.nUpserted = response.sizeUpsertDetails();
+                debug.getAdditiveMetrics().nMatched =
+                    response.getN() - response.sizeUpsertDetails();
+                debug.getAdditiveMetrics().nUpserted = response.sizeUpsertDetails();
             } else {
-                debug.additiveMetrics.nMatched = response.getN();
+                debug.getAdditiveMetrics().nMatched = response.getN();
             }
-            debug.additiveMetrics.nModified = response.getNModified();
+            debug.getAdditiveMetrics().nModified = response.getNModified();

             for (auto&& update : _batchedRequest.getUpdateRequest().getUpdates()) {
                 incrementUpdateMetrics(update.getU(),
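
The update branch above is easy to misread: per the comment, response.getN() counts matched plus upserted documents, so nMatched is derived by subtraction. A worked example with toy numbers, under that stated assumption:

    #include <cassert>

    int main() {
        // Suppose a batch update reports n = 5 and attaches 2 upsert details.
        const long long n = 5;
        const long long nUpserted = 2;             // response.sizeUpsertDetails()
        const long long nMatched = n - nUpserted;  // documents that already existed
        assert(nMatched == 3);
        return 0;
    }
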
@@ -747,7 +748,7 @@ bool ClusterWriteCmd::InvocationBase::runImpl(OperationContext* opCtx,
             for (size_t i = 0; i < numAttempts; ++i) {
                 serviceOpCounters(opCtx).gotDelete();
             }
-            debug.additiveMetrics.ndeleted = response.getN();
+            debug.getAdditiveMetrics().ndeleted = response.getN();
             break;
     }

@@ -94,9 +94,9 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(OperationContext* opCtx,
       _planCacheShapeHash(CurOp::get(opCtx)->debug().planCacheShapeHash),
       _queryShapeHash(CurOp::get(opCtx)->debug().getQueryShapeHash()),
       _shouldOmitDiagnosticInformation(CurOp::get(opCtx)->getShouldOmitDiagnosticInformation()),
-      _queryStatsKeyHash(CurOp::get(opCtx)->debug().queryStatsInfo.keyHash),
-      _queryStatsKey(std::move(CurOp::get(opCtx)->debug().queryStatsInfo.key)),
-      _queryStatsWillNeverExhaust(CurOp::get(opCtx)->debug().queryStatsInfo.willNeverExhaust),
+      _queryStatsKeyHash(CurOp::get(opCtx)->debug().getQueryStatsInfo().keyHash),
+      _queryStatsKey(std::move(CurOp::get(opCtx)->debug().getQueryStatsInfo().key)),
+      _queryStatsWillNeverExhaust(CurOp::get(opCtx)->debug().getQueryStatsInfo().willNeverExhaust),
       _isChangeStreamQuery(CurOp::get(opCtx)->debug().isChangeStreamQuery) {
     dassert(!_params.compareWholeSortKeyOnRouter ||
             SimpleBSONObjComparator::kInstance.evaluate(
@@ -117,9 +117,9 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(OperationContext* opCtx,
       _planCacheShapeHash(CurOp::get(opCtx)->debug().planCacheShapeHash),
       _queryShapeHash(CurOp::get(opCtx)->debug().getQueryShapeHash()),
       _shouldOmitDiagnosticInformation(CurOp::get(opCtx)->getShouldOmitDiagnosticInformation()),
-      _queryStatsKeyHash(CurOp::get(opCtx)->debug().queryStatsInfo.keyHash),
-      _queryStatsKey(std::move(CurOp::get(opCtx)->debug().queryStatsInfo.key)),
-      _queryStatsWillNeverExhaust(CurOp::get(opCtx)->debug().queryStatsInfo.willNeverExhaust),
+      _queryStatsKeyHash(CurOp::get(opCtx)->debug().getQueryStatsInfo().keyHash),
+      _queryStatsKey(std::move(CurOp::get(opCtx)->debug().getQueryStatsInfo().key)),
+      _queryStatsWillNeverExhaust(CurOp::get(opCtx)->debug().getQueryStatsInfo().willNeverExhaust),
       _isChangeStreamQuery(CurOp::get(opCtx)->debug().isChangeStreamQuery) {
     dassert(!_params.compareWholeSortKeyOnRouter ||
             SimpleBSONObjComparator::kInstance.evaluate(
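
Both constructors above transfer the query-stats key from the operation to the cursor in the member-initializer list, while keyHash and willNeverExhaust are plain copies. A toy sketch of that ownership handoff (hypothetical types, not the server's):

    #include <cstddef>
    #include <memory>
    #include <string>
    #include <utility>

    struct Debug {
        std::unique_ptr<std::string> queryStatsKey;  // stand-in for the real key
        std::size_t queryStatsKeyHash = 0;
    };

    class Cursor {
    public:
        // Moving in the initializer list hands the key to the cursor; the hash
        // is cheap to copy, so the operation keeps its own copy too.
        explicit Cursor(Debug& debug)
            : _keyHash(debug.queryStatsKeyHash), _key(std::move(debug.queryStatsKey)) {}

    private:
        std::size_t _keyHash;
        std::unique_ptr<std::string> _key;
    };

    int main() {
        Debug debug;
        debug.queryStatsKey = std::make_unique<std::string>("shape");
        Cursor cursor(debug);
        return debug.queryStatsKey == nullptr ? 0 : 1;  // ownership moved to the cursor
    }
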
@@ -314,7 +314,7 @@ StatusWith<ClusterCursorManager::PinnedCursor> ClusterCursorManager::checkOutCursor(
     cursorGuard->reattachToOperationContext(opCtx);

     CurOp::get(opCtx)->debug().planCacheShapeHash = cursorGuard->getPlanCacheShapeHash();
-    CurOp::get(opCtx)->debug().queryStatsInfo.keyHash = cursorGuard->getQueryStatsKeyHash();
+    CurOp::get(opCtx)->debug().getQueryStatsInfo().keyHash = cursorGuard->getQueryStatsKeyHash();
     CurOp::get(opCtx)->debug().setQueryShapeHash(opCtx, cursorGuard->getQueryShapeHash());

     OperationMemoryUsageTracker::moveToOpCtxIfAvailable(opCtx,
@@ -45,11 +45,11 @@ void collectQueryStatsMongos(OperationContext* opCtx, std::unique_ptr<query_stat

     auto snapshot = query_stats::captureMetrics(
         opCtx,
-        query_stats::microsecondsToUint64(opDebug.additiveMetrics.executionTime),
-        opDebug.additiveMetrics);
+        query_stats::microsecondsToUint64(opDebug.getAdditiveMetrics().executionTime),
+        opDebug.getAdditiveMetrics());

     query_stats::writeQueryStats(opCtx,
-                                 opDebug.queryStatsInfo.keyHash,
+                                 opDebug.getQueryStatsInfo().keyHash,
                                  std::move(key),
                                  snapshot,
                                  query_stats::computeSupplementalQueryStatsMetrics(opDebug));
@@ -57,8 +57,8 @@ void collectQueryStatsMongos(OperationContext* opCtx, std::unique_ptr<query_stat

 void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor) {
     auto& opDebug = CurOp::get(opCtx)->debug();
-    opDebug.additiveMetrics.aggregateDataBearingNodeMetrics(cursor->takeRemoteMetrics());
-    cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
+    opDebug.getAdditiveMetrics().aggregateDataBearingNodeMetrics(cursor->takeRemoteMetrics());
+    cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().getAdditiveMetrics());

     // For a change stream query that never ends, we want to collect query stats on the initial
     // query and each getMore. Here we record the initial query.
@@ -67,11 +67,11 @@ void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard&

     auto snapshot = query_stats::captureMetrics(
         opCtx,
-        query_stats::microsecondsToUint64(opDebug.additiveMetrics.executionTime),
-        opDebug.additiveMetrics);
+        query_stats::microsecondsToUint64(opDebug.getAdditiveMetrics().executionTime),
+        opDebug.getAdditiveMetrics());

     query_stats::writeQueryStats(opCtx,
-                                 opDebug.queryStatsInfo.keyHash,
+                                 opDebug.getQueryStatsInfo().keyHash,
                                  cursor->takeKey(),
                                  snapshot,
                                  {} /* supplementalMetrics */,
@@ -81,8 +81,8 @@ void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard&

 void collectQueryStatsMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor) {
     auto& opDebug = CurOp::get(opCtx)->debug();
-    opDebug.additiveMetrics.aggregateDataBearingNodeMetrics(cursor->takeRemoteMetrics());
-    cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics);
+    opDebug.getAdditiveMetrics().aggregateDataBearingNodeMetrics(cursor->takeRemoteMetrics());
+    cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().getAdditiveMetrics());

     // For a change stream query that never ends, we want to update query stats for every getMore on
     // the cursor.
@@ -91,11 +91,11 @@ void collectQueryStatsMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor) {

     auto snapshot = query_stats::captureMetrics(
         opCtx,
-        query_stats::microsecondsToUint64(opDebug.additiveMetrics.executionTime),
-        opDebug.additiveMetrics);
+        query_stats::microsecondsToUint64(opDebug.getAdditiveMetrics().executionTime),
+        opDebug.getAdditiveMetrics());

     query_stats::writeQueryStats(opCtx,
-                                 opDebug.queryStatsInfo.keyHash,
+                                 opDebug.getQueryStatsInfo().keyHash,
                                  nullptr,
                                  snapshot,
                                  {} /* supplementalMetrics */,
@@ -287,7 +287,8 @@ void CursorEstablisher::_waitForResponse() {
         hasCursorToClean |= cursorValue.getCursorId() != 0;

         if (const auto& cursorMetrics = cursorValue.getCursorMetrics()) {
-            CurOp::get(_opCtx)->debug().additiveMetrics.aggregateCursorMetrics(*cursorMetrics);
+            CurOp::get(_opCtx)->debug().getAdditiveMetrics().aggregateCursorMetrics(
+                *cursorMetrics);
         }

         // If we have already received an error back and are going to abort the operation
@@ -659,7 +660,7 @@ std::vector<RemoteCursor> establishCursorsOnAllHosts(

         auto& cursorValue = cursor.getValue();
         if (const auto& cursorMetrics = cursorValue.getCursorMetrics()) {
-            CurOp::get(opCtx)->debug().additiveMetrics.aggregateCursorMetrics(
+            CurOp::get(opCtx)->debug().getAdditiveMetrics().aggregateCursorMetrics(
                 *cursorMetrics);
         }

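
aggregateCursorMetrics folds each remote cursor's counters into the current op's metrics. A minimal sketch of what aggregating optional counters involves (illustrative only; the real AdditiveMetrics has dedicated helpers for this):

    #include <cstdint>
    #include <optional>

    // Fold 'delta' into 'total'; an unset total becomes set the first time any
    // remote actually reports the counter.
    static void addOptional(std::optional<int64_t>& total, int64_t delta) {
        total = total.value_or(0) + delta;
    }

    int main() {
        std::optional<int64_t> keysExamined;  // unset until a shard reports
        addOptional(keysExamined, 10);        // first shard's cursor metrics
        addOptional(keysExamined, 7);         // second shard's cursor metrics
        return (keysExamined && *keysExamined == 17) ? 0 : 1;
    }
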
@@ -107,7 +107,7 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,

     auto& response = incomingCursorResponse.getValue();
     if (const auto& cursorMetrics = response.getCursorMetrics()) {
-        CurOp::get(opCtx)->debug().additiveMetrics.aggregateCursorMetrics(*cursorMetrics);
+        CurOp::get(opCtx)->debug().getAdditiveMetrics().aggregateCursorMetrics(*cursorMetrics);
     }

     return storePossibleCursor(opCtx,
@@ -133,7 +133,7 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,
                                         TailableModeEnum tailableMode,
                                         boost::optional<BSONObj> routerSort) {
     auto&& opDebug = CurOp::get(opCtx)->debug();
-    opDebug.additiveMetrics.nBatches = 1;
+    opDebug.getAdditiveMetrics().nBatches = 1;
     // If nShards has already been set, then we are storing the forwarding $mergeCursors cursor from
     // a split aggregation pipeline, and the shards half of that pipeline may have targeted multiple
     // shards. In that case, leave the current value as-is.
@@ -142,7 +142,7 @@ StatusWith<BSONObj> storePossibleCursor(OperationContext* opCtx,

     if (incomingCursorResponse.getCursorId() == CursorId(0)) {
         opDebug.cursorExhausted = true;
-        collectQueryStatsMongos(opCtx, std::move(opDebug.queryStatsInfo.key));
+        collectQueryStatsMongos(opCtx, std::move(opDebug.getQueryStatsInfo().key));
         return incomingCursorResponse.toBSON(CursorResponse::ResponseType::InitialResponse);
     }

@@ -238,7 +238,7 @@ void appendEmptyResultSetWithStatus(OperationContext* opCtx,
     if (status == ErrorCodes::ShardNotFound) {
         status = {ErrorCodes::NamespaceNotFound, status.reason()};
     }
-    collectQueryStatsMongos(opCtx, std::move(CurOp::get(opCtx)->debug().queryStatsInfo.key));
+    collectQueryStatsMongos(opCtx, std::move(CurOp::get(opCtx)->debug().getQueryStatsInfo().key));
     appendEmptyResultSet(opCtx, *result, status, nss);
 }

@@ -984,7 +984,7 @@ Status runAggregateImpl(OperationContext* opCtx,
                 &result);
         }
         collectQueryStatsMongos(opCtx,
-                                std::move(CurOp::get(opCtx)->debug().queryStatsInfo.key));
+                                std::move(CurOp::get(opCtx)->debug().getQueryStatsInfo().key));
     }

     // Populate `result` and `req` once we know this function is not going to be implicitly
@@ -597,11 +597,11 @@ BSONObj establishMergingMongosCursor(OperationContext* opCtx,
     // the cursor from its opCtx.
     opDebug.nShards = std::max(opDebug.nShards, nShards);
     opDebug.cursorExhausted = exhausted;
-    opDebug.additiveMetrics.nBatches = 1;
+    opDebug.getAdditiveMetrics().nBatches = 1;
     CurOp::get(opCtx)->setEndOfOpMetrics(responseBuilder.numDocs());

     if (exhausted) {
-        opDebug.additiveMetrics.aggregateDataBearingNodeMetrics(ccc->takeRemoteMetrics());
+        opDebug.getAdditiveMetrics().aggregateDataBearingNodeMetrics(ccc->takeRemoteMetrics());
         collectQueryStatsMongos(opCtx, ccc->takeKey());
     } else {
         collectQueryStatsMongos(opCtx, ccc);
@@ -1090,7 +1090,8 @@ Status runPipelineOnSpecificShardOnly(const boost::intrusive_ptr<ExpressionConte
     if (explain) {
         // If this was an explain, then we get back an explain result object rather than a cursor.
         result = response.swResponse.getValue().data;
-        collectQueryStatsMongos(opCtx, std::move(CurOp::get(opCtx)->debug().queryStatsInfo.key));
+        collectQueryStatsMongos(opCtx,
+                                std::move(CurOp::get(opCtx)->debug().getQueryStatsInfo().key));
     } else {
         result = uassertStatusOK(storePossibleCursor(
             opCtx,
@@ -521,7 +521,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx,
     auto&& opDebug = CurOp::get(opCtx)->debug();
     // Fill out query exec properties.
     opDebug.nShards = ccc->getNumRemotes();
-    opDebug.additiveMetrics.nBatches = 1;
+    opDebug.getAdditiveMetrics().nBatches = 1;

     // If the caller wants to know whether the cursor returned partial results, set it here.
     if (partialResultsReturned) {
@@ -539,7 +539,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx,
             updateNumHostsTargetedMetrics(opCtx, cri, shardIds.size());
         }
         if (const auto remoteMetrics = ccc->takeRemoteMetrics()) {
-            opDebug.additiveMetrics.aggregateDataBearingNodeMetrics(*remoteMetrics);
+            opDebug.getAdditiveMetrics().aggregateDataBearingNodeMetrics(*remoteMetrics);
         }
         collectQueryStatsMongos(opCtx, ccc->takeKey());
         return CursorId(0);
@@ -644,7 +644,7 @@ void earlyExitWithNoResults(OperationContext* opCtx,
                             boost::none,
                             allowedFeatures,
                             !didDoFLERewrite /* mustRegisterRequestToQueryStats */);
-    collectQueryStatsMongos(opCtx, std::move(CurOp::get(opCtx)->debug().queryStatsInfo.key));
+    collectQueryStatsMongos(opCtx, std::move(CurOp::get(opCtx)->debug().getQueryStatsInfo().key));

     auto cursorId = CursorId(0);

@@ -1247,7 +1247,7 @@ StatusWith<CursorResponse> ClusterFind::runGetMore(OperationContext* opCtx,
     auto&& opDebug = CurOp::get(opCtx)->debug();
     // Set nReturned and whether the cursor has been exhausted.
     opDebug.cursorExhausted = (idToReturn == 0);
-    opDebug.additiveMetrics.nBatches = 1;
+    opDebug.getAdditiveMetrics().nBatches = 1;
     CurOp::get(opCtx)->setEndOfOpMetrics(batch.size());

     const bool partialResultsReturned = pinnedCursor.getValue()->partialResultsReturned();