mirror of https://github.com/mongodb/mongo
SERVER-97062 No anon namespaces or function/variable definitions in headers (#29139)
GitOrigin-RevId: 8c4bfe921c786a6575e1fbca26227097ca4a46ef
This commit is contained in:
parent df41ac71eb
commit 94e9b549a2

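Background for the change, as a minimal sketch (illustrative code, not from this commit): a non-inline function or variable defined in a header that is included by more than one translation unit breaks the one-definition rule and typically fails at link time with a duplicate-symbol error. Marking the definition `inline` (or moving it into a .cpp file) resolves it, which is the pattern applied throughout the diff below.

```cpp
// hypothetical util.h, included from two different .cpp files
#pragma once
#include <string>

// A plain definition here would be emitted once per including TU and the
// linker would report a duplicate symbol:
//   std::string greet() { return "hello"; }

// Marking the definition inline tells the linker that the duplicate
// definitions are intentional and must be merged into one.
inline std::string greet() {
    return "hello";
}

// C++17 inline variables get the same treatment for data definitions.
inline const int kAnswer = 42;
```
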
@@ -57,8 +57,10 @@ Checks: '-*,
 bugprone-unused-return-value,
 bugprone-use-after-move,
 clang-diagnostic-*,
+google-build-namespaces,
 hicpp-static-assert,
 hicpp-undelegated-constructor,
+misc-definitions-in-headers,
 misc-new-delete-overloads,
 misc-static-assert,
 misc-unconventional-assign-operator,

@@ -114,7 +116,6 @@ Checks: '-*,
 -cppcoreguidelines-interfaces-global-init,
 -cppcoreguidelines-narrowing-conversions,
 -google-build-explicit-make-pair,
--google-build-namespaces,
 -google-global-names-in-headers,
 -google-objc-avoid-throwing-exception,
 -google-objc-global-variable-declaration,

@@ -130,7 +131,6 @@ Checks: '-*,
 -hicpp-use-equals-delete,
 -hicpp-use-noexcept,
 -hicpp-vararg,
--misc-definitions-in-headers,
 -misc-misplaced-const,
 -misc-non-copyable-objects,
 -misc-redundant-expression,

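A rough sketch of the kind of header code the two newly enabled checks report (the check names are from clang-tidy; the example file and symbols are made up):

```cpp
// example.h (illustrative only)
#pragma once
#include <string>

namespace example {

// google-build-namespaces: an anonymous namespace in a header gives every
// including TU its own copy of these symbols, so the check flags it.
namespace {
const int kRetries = 3;
}  // namespace

// misc-definitions-in-headers: a non-inline, non-template function defined
// in a header is flagged; adding `inline` or moving the body to a .cpp file
// silences the warning.
std::string name() {
    return "example";
}

}  // namespace example
```
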
@@ -43,7 +43,7 @@ def main():
 if os.path.isfile(arg):
 source_relative_path = os.path.relpath(arg, os.path.dirname(os.path.dirname(__file__)))
 if (
-arg.endswith(".cpp")
+(arg.endswith(".cpp") or arg.endswith(".h"))
 and source_relative_path.startswith("src/mongo")
 # TODO: SERVER-79076 remove this condition when resolved
 and not source_relative_path.startswith(

@@ -42,7 +42,7 @@
 namespace mongo {
 namespace bson_bin_util {

-std::string toHex(const char* data, std::size_t len) {
+inline std::string toHex(const char* data, std::size_t len) {
 auto rawString = mongo::StringData(data, len);
 std::ostringstream hexString;
 hexString << std::hex << std::uppercase;

@@ -53,15 +53,15 @@ std::string toHex(const char* data, std::size_t len) {
 return hexString.str();
 }

-std::string toHex(const BufBuilder& bldr) {
+inline std::string toHex(const BufBuilder& bldr) {
 return toHex(static_cast<const char*>(bldr.buf()), static_cast<std::size_t>(bldr.len()));
 }

-std::string toHex(const BSONBinData& binData) {
+inline std::string toHex(const BSONBinData& binData) {
 return toHex(static_cast<const char*>(binData.data), static_cast<std::size_t>(binData.length));
 }

-std::string toHex(const BSONObj& obj) {
+inline std::string toHex(const BSONObj& obj) {
 return toHex(obj.objdata(), static_cast<std::size_t>(obj.objsize()));
 }

@@ -36,8 +36,6 @@
 #include "mongo/util/assert_util_core.h"

 namespace mongo {
-namespace {
-
 class DBCommandTestFixture : public ServiceContextMongoDTest {
 public:
 void setUp() override {

@@ -65,8 +63,7 @@ public:
 static const DatabaseName kDatabaseName;
 };

-const DatabaseName DBCommandTestFixture::kDatabaseName =
+inline const DatabaseName DBCommandTestFixture::kDatabaseName =
 DatabaseName::createDatabaseName_forTest(boost::none, "unittest_db");

-} // namespace
 } // namespace mongo

@@ -41,7 +41,6 @@
 #include "mongo/platform/compiler.h"

 namespace mongo::sbe::vm {
-namespace {
 class MakeObjImpl : ByteCode::MakeObjImplBase {
 public:
 using BaseT = ByteCode::MakeObjImplBase;

@@ -360,5 +359,4 @@ private:
 traverseAndProduceObj(spec, tag, val, fieldName, bob);
 }
 };
-} // namespace
 } // namespace mongo::sbe::vm

@@ -32,7 +32,6 @@
 namespace mongo {
 namespace sbe {
 namespace vm {
-namespace {
 /**
 * Reads directly from memory for the ByteCode VM.
 */

@@ -56,7 +55,6 @@ size_t writeToMemory(uint8_t* ptr, const T val) noexcept {
 return sizeof(T);
 }

-} // namespace
 } // namespace vm
 } // namespace sbe
 } // namespace mongo

@@ -54,7 +54,6 @@
 #include "mongo/util/time_support.h"

 namespace mongo::timeseries {
-namespace {
 // A table that is useful for interpolations between the number of measurements in a bucket and
 // the byte size of a bucket's data section timestamp column. Each table entry is a pair (b_i,
 // S_i), where b_i is the number of measurements in the bucket and S_i is the byte size of the

@@ -62,7 +61,7 @@ namespace {
 // pairs of b_i and S_i for the lower bounds of the row key digit intervals [0, 9], [10, 99],
 // [100, 999], [1000, 9999] and so on. The last entry in the table, S7, is the first entry to
 // exceed the server BSON object limit of 16 MB.
-static constexpr std::array<std::pair<int32_t, int32_t>, 8> kTimestampObjSizeTable{
+inline constexpr std::array<std::pair<int32_t, int32_t>, 8> kTimestampObjSizeTable{
 {{0, BSONObj::kMinBSONLength},
 {10, 115},
 {100, 1195},

@@ -79,7 +78,7 @@ static constexpr std::array<std::pair<int32_t, int32_t>, 8> kTimestampObjSizeTab
 // pair such that 'targetTimestampObjSize' < S_i. Once the interval is found, the upper bound of the
 // pair for the interval is computed and then linear interpolation is used to compute the
 // measurement count corresponding to the 'targetTimestampObjSize' provided.
-int computeElementCountFromTimestampObjSize(int targetTimestampObjSize) {
+inline int computeElementCountFromTimestampObjSize(int targetTimestampObjSize) {
 auto currentInterval =
 std::find_if(std::begin(kTimestampObjSizeTable),
 std::end(kTimestampObjSizeTable),

@@ -100,7 +99,6 @@ int computeElementCountFromTimestampObjSize(int targetTimestampObjSize) {
 return currentInterval->first +
 ((targetTimestampObjSize - currentInterval->second) / (10 + nDigitsInRowKey));
 }
-} // namespace

 /**
 * BucketUnpacker will unpack bucket fields for metadata and the provided fields.

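A worked example of the interpolation those comments describe, using two adjacent rows from the table above (the target size and the arithmetic are illustrative, not taken from the server code):

```cpp
#include <cstdint>
#include <iostream>

int main() {
    // Adjacent rows from kTimestampObjSizeTable: 10 measurements -> 115 bytes,
    // 100 measurements -> 1195 bytes of timestamp-column data.
    const int32_t b0 = 10, s0 = 115;
    const int32_t b1 = 100, s1 = 1195;
    const int32_t target = 600;  // assumed target byte size

    // Within this interval every additional measurement costs
    // (1195 - 115) / (100 - 10) = 12 bytes (a 2-digit row key plus overhead),
    // so linear interpolation estimates the measurement count as:
    const int32_t perMeasurement = (s1 - s0) / (b1 - b0);
    std::cout << b0 + (target - s0) / perMeasurement << "\n";  // prints 50
}
```
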
@@ -245,9 +245,8 @@ struct DocumentSourceDeleter {
 }
 };

-namespace {
 // A utility function to convert pipeline (a vector of BSONObj) to a string. Helpful for debugging.
-std::string to_string(const std::vector<BSONObj>& objs) {
+inline std::string to_string(const std::vector<BSONObj>& objs) {
 std::stringstream sstrm;
 sstrm << "[" << std::endl;
 for (const auto& obj : objs) {

@@ -256,5 +255,4 @@ std::string to_string(const std::vector<BSONObj>& objs) {
 sstrm << "]" << std::endl;
 return sstrm.str();
 }
-} // namespace
 } // namespace mongo

@@ -36,12 +36,11 @@
 #include "mongo/s/transaction_router.h"

 namespace mongo {
-namespace {
 /**
 * Function which produces an vector of 'ScopedShardRole' objects for the namespaces in 'nssList'
 * using the routing information in 'criMap'.
 */
-std::vector<ScopedSetShardRole> createScopedShardRoles(
+inline std::vector<ScopedSetShardRole> createScopedShardRoles(
 OperationContext* opCtx,
 const stdx::unordered_map<NamespaceString, CollectionRoutingInfo>& criMap,
 const std::vector<NamespaceString>& nssList) {

@@ -72,7 +71,6 @@ std::vector<ScopedSetShardRole> createScopedShardRoles(
 }
 return scopedShardRoles;
 }
-} // namespace

 /**
 * Helper that constructs an 'AutoGetCollectionForReadCommandMaybeLockFree' using 'initAutoGetFn'.

@@ -38,11 +38,10 @@

 namespace mongo::optimizer::opt {

-namespace {
 enum class ContainerImpl { STD, STDX };

 // For debugging, switch between STD and STDX containers.
-static constexpr ContainerImpl kContainerImpl = ContainerImpl::STDX;
+inline constexpr ContainerImpl kContainerImpl = ContainerImpl::STDX;

 template <ContainerImpl>
 struct OptContainers {};

@@ -70,7 +69,6 @@ struct OptContainers<ContainerImpl::STD> {
 };

 using ActiveContainers = OptContainers<kContainerImpl>;
-} // namespace

 template <class T, class H = ActiveContainers::Hasher<T>, typename... Args>
 using unordered_set = ActiveContainers::unordered_set<T, H, Args...>;

@@ -103,7 +103,6 @@ private:
 boost::optional<FieldPath> _basePath;
 };

-namespace {
 /**
 * A path tracking pre-visitor used for maintaining field names while traversing the AST.
 *

@@ -190,7 +189,6 @@ public:
 private:
 PathTrackingVisitorContext<UserData>* _context;
 };
-} // namespace

 /**
 * A general path tracking walker to be used with projection AST visitors which need to track

@@ -51,13 +51,6 @@
 #include "mongo/util/assert_util.h"

 namespace mongo {
-namespace {
-// Should never be called, throw to ensure we catch this in tests.
-std::string defaultHmacStrategy(StringData s) {
-MONGO_UNREACHABLE_TASSERT(7332410);
-}
-} // namespace
-
 /**
 * A policy enum for how to serialize literal values.
 */

@@ -210,6 +203,11 @@ struct SerializationOptions {
 Value serializeLiteral(const ImplicitValue& v,
 const boost::optional<Value>& representativeValue = boost::none) const;

+// Should never be called, throw to ensure we catch this in tests.
+static std::string defaultHmacStrategy(StringData s) {
+MONGO_UNREACHABLE_TASSERT(7332410);
+}
+
 // 'literalPolicy' is an independent option to serialize in a general format with the aim of
 // similar "shaped" queries serializing to the same object. For example, if set to
 // 'kToDebugTypeString', then the serialization of {a: {$gt: 2}} should result in {a: {$gt:

@@ -104,7 +104,7 @@ public:
 SerializationOptions opts = SerializationOptions::kDebugShapeAndMarkIdentifiers_FOR_TEST;
 if (!applyHmac) {
 opts.transformIdentifiers = false;
-opts.transformIdentifiersCallback = defaultHmacStrategy;
+opts.transformIdentifiersCallback = opts.defaultHmacStrategy;
 }
 return findKey.toBson(
 expCtx->getOperationContext(), opts, SerializationContext::stateDefault());

@@ -128,7 +128,7 @@ public:
 opts.literalPolicy = literalPolicy;
 if (!applyHmac) {
 opts.transformIdentifiers = false;
-opts.transformIdentifiersCallback = defaultHmacStrategy;
+opts.transformIdentifiersCallback = opts.defaultHmacStrategy;
 }
 return aggKey->toBson(
 expCtx->getOperationContext(), opts, SerializationContext::stateDefault());

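A side note on why relocating defaultHmacStrategy into the struct is header-safe (general C++ rule, shown with made-up names): a member function defined inside the class body is implicitly inline, so a single shared definition is kept even when the header is included from many translation units.

```cpp
#pragma once
#include <stdexcept>
#include <string>

struct Options {
    // Defined in-class, therefore implicitly inline: safe to keep in a
    // header without an out-of-line definition or an anonymous namespace.
    static std::string defaultStrategy(const std::string&) {
        throw std::logic_error("should never be called");
    }
};
```
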
@@ -34,13 +34,11 @@
 #include "mongo/executor/task_executor_cursor.h"

 namespace mongo::mongot_cursor {
-namespace {
-auto makeRetryOnNetworkErrorPolicy() {
+inline auto makeRetryOnNetworkErrorPolicy() {
 return [retried = false](const Status& st) mutable {
 return std::exchange(retried, true) ? false : ErrorCodes::isNetworkError(st);
 };
 }
-} // namespace

 static constexpr StringData kSearchField = "search"_sd;
 static constexpr StringData kVectorSearchCmd = "vectorSearch"_sd;

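The lambda above is a retry-exactly-once policy; a standalone sketch of the same std::exchange idiom with simplified types (the names here are assumptions, not the real RetryPolicy interface):

```cpp
#include <functional>
#include <iostream>
#include <utility>

// std::exchange returns the previous value of `retried` while setting it to
// true, so the predicate can approve at most one retry for a retryable error.
std::function<bool(bool)> makeRetryOncePolicy() {
    return [retried = false](bool isRetryableError) mutable {
        return std::exchange(retried, true) ? false : isRetryableError;
    };
}

int main() {
    auto shouldRetry = makeRetryOncePolicy();
    std::cout << shouldRetry(true) << "\n";  // 1: first retryable failure is retried
    std::cout << shouldRetry(true) << "\n";  // 0: subsequent failures are not
}
```
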
@@ -38,7 +38,6 @@
 #include "mongo/db/query/stage_builder/sbe/sbexpr.h"

 namespace mongo::stage_builder {
-namespace {
 inline void makeSbExprOptSbSlotVecHelper(SbExprOptSbSlotVector& result) {}

 template <typename... Ts>

@@ -79,7 +78,6 @@ inline void makeSbExprOptSbSlotVecHelper(SbExprOptSbSlotVector& result,
 result.emplace_back(std::move(p.first), p.second);
 makeSbExprOptSbSlotVecHelper(result, std::forward<Ts>(rest)...);
 }
-} // namespace

 template <typename... Ts>
 auto makeSbExprOptSbSlotVec(Ts&&... pack) {

@@ -40,11 +40,6 @@

 namespace mongo {

-namespace {
-const std::vector<BSONObj> emptyArrayFilters{};
-const BSONObj emptyCollation{};
-const BSONObj emptySort{};
-
 template <typename T>
 void appendArrayToString(const T& arr, StringBuilder* builder) {
 bool first = true;

@@ -58,7 +53,6 @@ void appendArrayToString(const T& arr, StringBuilder* builder) {
 }
 *builder << "]";
 }
-} // namespace

 class FieldRef;

@@ -361,6 +355,10 @@ public:
 }

 private:
+inline static const std::vector<BSONObj> emptyArrayFilters{};
+inline static const BSONObj emptyCollation{};
+inline static const BSONObj emptySort{};
+
 NamespaceString _nsString;

 write_ops::UpdateOpEntry _updateOp;

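The empty defaults move from an anonymous namespace into the class as C++17 inline static data members; a minimal standalone sketch of that pattern (illustrative class and member names):

```cpp
#include <string>
#include <vector>

class Request {
public:
    const std::vector<std::string>& arrayFilters() const {
        return kEmptyArrayFilters;
    }

private:
    // An inline static data member (C++17) may be defined in the class body
    // inside a header; every TU that includes it shares the one object.
    inline static const std::vector<std::string> kEmptyArrayFilters{};
};
```
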
@@ -65,10 +65,8 @@
 namespace mongo {
 namespace repl {

-namespace {
-const int kProgressMeterSecondsBetween = 60;
-const int kProgressMeterCheckInterval = 128;
-} // namespace
+inline const int kProgressMeterSecondsBetween = 60;
+inline const int kProgressMeterCheckInterval = 128;

 class CollectionCloner final : public InitialSyncBaseCloner {
 public:

@@ -68,9 +68,7 @@
 namespace mongo {
 namespace analyze_shard_key {

-namespace {
 using QueryTargetingInfo = shard_key_pattern_query_util::QueryTargetingInfo;
-}

 /**
 * The utility class for calculating read or write distribution metrics for sampled queries against

@@ -44,8 +44,6 @@
 #include "mongo/util/processinfo.h"

 namespace mongo {
-namespace {
-
 class ServiceEntryPointBenchmarkFixture : public BenchmarkWithProfiler {
 public:
 void SetUp(benchmark::State& state) override {

@@ -130,10 +128,13 @@ const auto kSEPBMMaxThreads = 2 * ProcessInfo::getNumLogicalCores();

 /**
 * Required initializers, but this is a benchmark so nothing needs to be done.
+ *
+ * These should not be in a header file, but it works because it is only included in 2 files and
+ * they are never included into the same binary. If that changes, these should find a home in a new
+ * cpp file.
 */
-MONGO_INITIALIZER_GENERAL(ForkServer, ("EndStartupOptionHandling"), ("default"))
+MONGO_INITIALIZER_GENERAL(ForkServer, ("EndStartupOptionHandling"), ("default")) // NOLINT
 (InitializerContext* context) {}
-MONGO_INITIALIZER(ServerLogRedirection)(mongo::InitializerContext*) {}
+MONGO_INITIALIZER(ServerLogRedirection)(mongo::InitializerContext*) {} // NOLINT

-} // namespace
 } // namespace mongo

@@ -93,7 +93,7 @@
 namespace mongo {
 namespace service_entry_point_shard_role_helpers {

-BSONObj getRedactedCopyForLogging(const Command* command, const BSONObj& cmdObj) {
+inline BSONObj getRedactedCopyForLogging(const Command* command, const BSONObj& cmdObj) {
 mutablebson::Document cmdToLog(cmdObj, mutablebson::Document::kInPlaceDisabled);
 command->snipForLogging(&cmdToLog);
 BSONObjBuilder bob;

@@ -101,12 +101,12 @@ BSONObj getRedactedCopyForLogging(const Command* command, const BSONObj& cmdObj)
 return bob.obj();
 }

-bool lockedForWriting() {
+inline bool lockedForWriting() {
 return mongo::lockedForWriting();
 }

-void setPrepareConflictBehaviorForReadConcern(OperationContext* opCtx,
-const CommandInvocation* invocation) {
+inline void setPrepareConflictBehaviorForReadConcern(OperationContext* opCtx,
+const CommandInvocation* invocation) {
 // Some read commands can safely ignore prepare conflicts by default because they do not
 // require snapshot isolation and do not conflict with concurrent writes. We also give these
 // operations permission to write, as this may be required for queries that spill using the

@@ -119,9 +119,9 @@ void setPrepareConflictBehaviorForReadConcern(OperationContext* opCtx,
 opCtx, repl::ReadConcernArgs::get(opCtx), prepareConflictBehavior);
 }

-void waitForReadConcern(OperationContext* opCtx,
-const CommandInvocation* invocation,
-const OpMsgRequest& request) {
+inline void waitForReadConcern(OperationContext* opCtx,
+const CommandInvocation* invocation,
+const OpMsgRequest& request) {
 Status rcStatus = mongo::waitForReadConcern(opCtx,
 repl::ReadConcernArgs::get(opCtx),
 invocation->ns().dbName(),

@@ -144,7 +144,7 @@ void waitForReadConcern(OperationContext* opCtx,
 }
 }

-void waitForSpeculativeMajorityReadConcern(OperationContext* opCtx) {
+inline void waitForSpeculativeMajorityReadConcern(OperationContext* opCtx) {
 auto speculativeReadInfo = repl::SpeculativeMajorityReadInfo::get(opCtx);
 if (!speculativeReadInfo.isSpeculativeRead()) {
 return;

@@ -153,10 +153,10 @@ void waitForSpeculativeMajorityReadConcern(OperationContext* opCtx) {
 }


-void waitForWriteConcern(OperationContext* opCtx,
-const CommandInvocation* invocation,
-const repl::OpTime& lastOpBeforeRun,
-BSONObjBuilder& commandResponseBuilder) {
+inline void waitForWriteConcern(OperationContext* opCtx,
+const CommandInvocation* invocation,
+const repl::OpTime& lastOpBeforeRun,
+BSONObjBuilder& commandResponseBuilder) {

 // Prevent waiting for writeConcern if the command is changing only unreplicated namespaces.
 invariant(invocation);

@@ -231,7 +231,7 @@ void waitForWriteConcern(OperationContext* opCtx,
 // operation then we skip waiting for writeConcern.
 }

-void waitForLinearizableReadConcern(OperationContext* opCtx) {
+inline void waitForLinearizableReadConcern(OperationContext* opCtx) {
 // When a linearizable read command is passed in, check to make sure we're reading from the
 // primary.
 if (repl::ReadConcernArgs::get(opCtx).getLevel() ==

@@ -240,19 +240,19 @@ void waitForLinearizableReadConcern(OperationContext* opCtx) {
 }
 }

-void uassertCommandDoesNotSpecifyWriteConcern(const GenericArguments& requestArgs) {
+inline void uassertCommandDoesNotSpecifyWriteConcern(const GenericArguments& requestArgs) {
 uassert(ErrorCodes::InvalidOptions,
 "Command does not support writeConcern",
 !commandSpecifiesWriteConcern(requestArgs));
 }

-void attachCurOpErrInfo(OperationContext* opCtx, const Status status) {
+inline void attachCurOpErrInfo(OperationContext* opCtx, const Status status) {
 CurOp::get(opCtx)->debug().errInfo = std::move(status);
 }

-void appendReplyMetadata(OperationContext* opCtx,
-const GenericArguments& requestArgs,
-BSONObjBuilder* metadataBob) {
+inline void appendReplyMetadata(OperationContext* opCtx,
+const GenericArguments& requestArgs,
+BSONObjBuilder* metadataBob) {
 auto const replCoord = repl::ReplicationCoordinator::get(opCtx);
 const bool isReplSet = replCoord->getSettings().isReplSet();


@@ -289,26 +289,26 @@ void appendReplyMetadata(OperationContext* opCtx,
 }
 }

-Status refreshDatabase(OperationContext* opCtx, const StaleDbRoutingVersion& se) noexcept {
+inline Status refreshDatabase(OperationContext* opCtx, const StaleDbRoutingVersion& se) noexcept {
 return FilteringMetadataCache::get(opCtx)->onDbVersionMismatch(
 opCtx, se.getDb(), se.getVersionReceived());
 }

-Status refreshCollection(OperationContext* opCtx, const StaleConfigInfo& se) noexcept {
+inline Status refreshCollection(OperationContext* opCtx, const StaleConfigInfo& se) noexcept {
 return FilteringMetadataCache::get(opCtx)->onCollectionPlacementVersionMismatch(
 opCtx, se.getNss(), se.getVersionReceived().placementVersion());
 }

-Status refreshCatalogCache(OperationContext* opCtx,
-const ShardCannotRefreshDueToLocksHeldInfo& refreshInfo) noexcept {
+inline Status refreshCatalogCache(
+OperationContext* opCtx, const ShardCannotRefreshDueToLocksHeldInfo& refreshInfo) noexcept {
 return Grid::get(opCtx)
 ->catalogCache()
 ->getCollectionRoutingInfo(opCtx, refreshInfo.getNss())
 .getStatus();
 }

-void handleReshardingCriticalSectionMetrics(OperationContext* opCtx,
-const StaleConfigInfo& se) noexcept {
+inline void handleReshardingCriticalSectionMetrics(OperationContext* opCtx,
+const StaleConfigInfo& se) noexcept {
 resharding_metrics::onCriticalSectionError(opCtx, se);
 }


@@ -317,16 +317,16 @@ void handleReshardingCriticalSectionMetrics(OperationContext* opCtx,
 // lock. This will cause mongod to perhaps erroneously check for write concern when no writes
 // were done, or unnecessarily kill a read operation. If we re-use the opCtx to retry command
 // execution, we must reset the locker state.
-void resetLockerState(OperationContext* opCtx) noexcept {
+inline void resetLockerState(OperationContext* opCtx) noexcept {
 // It is necessary to lock the client to change the Locker on the OperationContext.
 stdx::lock_guard<Client> lk(*opCtx->getClient());
 invariant(!shard_role_details::getLocker(opCtx)->isLocked());
 shard_role_details::swapLocker(opCtx, std::make_unique<Locker>(opCtx->getServiceContext()), lk);
 }

-void createTransactionCoordinator(OperationContext* opCtx,
-TxnNumber clientTxnNumber,
-boost::optional<TxnRetryCounter> clientTxnRetryCounter) {
+inline void createTransactionCoordinator(OperationContext* opCtx,
+TxnNumber clientTxnNumber,
+boost::optional<TxnRetryCounter> clientTxnRetryCounter) {
 auto clientLsid = opCtx->getLogicalSessionId().value();
 auto clockSource = opCtx->getServiceContext()->getFastClockSource();


@@ -43,7 +43,6 @@


 namespace mongo {
-namespace {

 class VectorClockMongoD : public VectorClockMutable,
 public ReplicaSetAwareService<VectorClockMongoD> {

@@ -146,5 +145,4 @@ private:
 WaitableAtomic<bool> _taskIsRunning{false};
 };

-} // namespace
 } // namespace mongo

@@ -298,13 +298,11 @@ ExecutorFuture<AsyncRPCResponse<typename CommandType::Reply>> sendCommandWithRun
 }
 } // namespace detail

-namespace {
-void createOperationKeyIfNeeded(GenericArguments& genericArgs) {
+inline void createOperationKeyIfNeeded(GenericArguments& genericArgs) {
 if (!genericArgs.getClientOperationKey()) {
 genericArgs.setClientOperationKey(UUID::gen());
 }
 }
-} // namespace

 /**
 * Execute the command asynchronously on the given target with the provided executor.

@@ -380,11 +380,11 @@ private:
 stdx::mutex _m;
 };

-std::ostream& operator<<(std::ostream& s, const AsyncMockAsyncRPCRunner::Request& o) {
+inline std::ostream& operator<<(std::ostream& s, const AsyncMockAsyncRPCRunner::Request& o) {
 return s << o.toBSON();
 }

-std::ostream& operator<<(std::ostream& s, const AsyncMockAsyncRPCRunner::Expectation& o) {
+inline std::ostream& operator<<(std::ostream& s, const AsyncMockAsyncRPCRunner::Expectation& o) {
 return s << o.name;
 }

@@ -56,8 +56,10 @@
 */
 namespace mongo {
 namespace executor {
-namespace {
-BSONObj buildCursorResponse(StringData fieldName, size_t start, size_t end, size_t cursorId) {
+inline BSONObj buildCursorResponse(StringData fieldName,
+size_t start,
+size_t end,
+size_t cursorId) {
 BSONObjBuilder bob;
 {
 BSONObjBuilder cursor(bob.subobjStart("cursor"));

@@ -76,10 +78,10 @@ BSONObj buildCursorResponse(StringData fieldName, size_t start, size_t end, size
 return bob.obj();
 }

-BSONObj buildMultiCursorResponse(StringData fieldName,
-size_t start,
-size_t end,
-std::vector<size_t> cursorIds) {
+inline BSONObj buildMultiCursorResponse(StringData fieldName,
+size_t start,
+size_t end,
+std::vector<size_t> cursorIds) {
 BSONObjBuilder bob;
 {
 BSONArrayBuilder cursors;

@@ -103,7 +105,6 @@ BSONObj buildMultiCursorResponse(StringData fieldName,
 bob.append("ok", 1);
 return bob.obj();
 }
-} // namespace

 class NonPinningTaskExecutorCursorTestFixture : public ThreadPoolExecutorTest {
 public:

@@ -37,10 +37,8 @@
 namespace mongo {
 namespace async_rpc {

-namespace {
 // See ARS::kMaxNumFailedHostRetryAttempts and Shard::kOnErrorNumRetries
-const int kOnErrorNumRetries = 3;
-} // namespace
+inline const int kOnErrorNumRetries = 3;

 class ShardRetryPolicy : public RetryPolicy {
 public:

@@ -36,7 +36,6 @@
 #include "mongo/s/transaction_router.h"

 namespace mongo {
-namespace {

 /**
 * Implements the abortTransaction command for a router.

@@ -136,5 +135,4 @@ public:
 }
 };

-} // namespace
 } // namespace mongo

@@ -35,7 +35,6 @@
 #include "mongo/s/transaction_router.h"

 namespace mongo {
-namespace {

 /**
 * Implements the commitTransaction command for a router.

@@ -118,5 +117,4 @@ public:
 }
 };

-} // namespace
 } // namespace mongo

@@ -39,8 +39,6 @@

 namespace mongo {

-namespace {
-
 // TODO SERVER-86458
 // RAII object that switches an OperationContext's service role to the router role if it isn't
 // already and if the OperationContext's ServiceContext has a router role Service, restoring the

@@ -67,8 +65,6 @@ private:
 Service* _original;
 };

-} // namespace
-
 template <typename Impl>
 class InternalTransactionsTestCommandBase : public TypedCommand<Impl> {
 public:

@@ -57,12 +57,7 @@

 namespace mongo {

-namespace {
-
-// The # of documents returned is always 1 for the count command.
-static constexpr long long kNReturned = 1;
-
-BSONObj prepareCountForPassthrough(const BSONObj& cmdObj, bool requestQueryStats) {
+inline BSONObj prepareCountForPassthrough(const BSONObj& cmdObj, bool requestQueryStats) {
 if (!requestQueryStats) {
 return CommandHelpers::filterCommandRequestForPassthrough(cmdObj);
 }

@@ -72,8 +67,6 @@ BSONObj prepareCountForPassthrough(const BSONObj& cmdObj, bool requestQueryStats
 return CommandHelpers::filterCommandRequestForPassthrough(bob.done());
 }

-} // namespace
-
 /**
 * Implements the find command on mongos.
 */

@@ -283,6 +276,9 @@ public:
 total = applySkipLimit(total, cmdObj);
 result.appendNumber("n", total);

+// The # of documents returned is always 1 for the count command.
+static constexpr long long kNReturned = 1;
+
 auto* curOp = CurOp::get(opCtx);
 curOp->setEndOfOpMetrics(kNReturned);

@@ -56,12 +56,11 @@
 #include "mongo/s/query/planner/cluster_find.h"

 namespace mongo {
-namespace {
 /**
 * Parses the command object to a FindCommandRequest and validates that no runtime
 * constants were supplied and that querySettings was not passed into the command.
 */
-std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(
+inline std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(
 OperationContext* opCtx, const OpMsgRequest& request) {
 const auto& vts = auth::ValidatedTenancyScope::get(opCtx);
 auto findCommand = query_request_helper::makeFromFindCommand(

@@ -85,7 +84,6 @@ std::unique_ptr<FindCommandRequest> parseCmdObjectToFindCommandRequest(

 return findCommand;
 }
-} // namespace

 /**
 * Implements the find command for a router.

@@ -40,13 +40,12 @@
 #include "mongo/s/query/planner/cluster_find.h"

 namespace mongo {
-namespace {

 // getMore can run with any readConcern, because cursor-creating commands like find can run with any
 // readConcern. However, since getMore automatically uses the readConcern of the command that
 // created the cursor, it is not appropriate to apply the default readConcern (just as
 // client-specified readConcern isn't appropriate).
-static const ReadConcernSupportResult kSupportsReadConcernResult{
+inline const ReadConcernSupportResult kSupportsReadConcernResult{
 Status::OK(),
 {{ErrorCodes::InvalidOptions,
 "default read concern not permitted (getMore uses the cursor's read concern)"}}};

@@ -167,5 +166,5 @@ public:
 }
 };

-} // namespace
+
 } // namespace mongo

@@ -39,7 +39,6 @@
 #include "mongo/s/query/planner/cluster_aggregate.h"

 namespace mongo {
-namespace {

 template <typename Impl>
 class ClusterPipelineCommandBase final : public Command {

@@ -223,5 +222,4 @@ public:
 }
 };

-} // namespace
 } // namespace mongo

@@ -61,7 +61,6 @@ namespace mongo {
 namespace dns {
 // The anonymous namespace is safe, in this header, as it is not really a header. It is only used
 // in the `dns_query.cpp` TU.
 namespace {
-
 using std::begin;
 using std::end;

@@ -377,8 +376,7 @@ private:
 static stdx::mutex _staticMutex;
 };

-
-stdx::mutex DNSQueryState::_staticMutex;
+inline stdx::mutex DNSQueryState::_staticMutex;

 } // namespace
 } // namespace dns
 } // namespace mongo