mirror of https://github.com/mongodb/mongo
SERVER-57876 upgrade fmt to v11.1.3 (#32605)
GitOrigin-RevId: 49564e144d9f4420031fafb87ff1168ab37143d9
parent 7d40c39cd2
commit 8bdf5e3d26
@@ -35,7 +35,7 @@ a notice will be included in
 | [dcleblanc/SafeInt] | MIT | 3.0.26 | | ✗ |
 | [derickr/timelib] | MIT | 2022.13 | | ✗ |
 | [discover] | BSD-3-Clause | 0.4.0 | unknown | |
-| [fmtlib/fmt] | MIT | 7.1.3 | | ✗ |
+| [fmtlib/fmt] | MIT | 11.1.3 | | ✗ |
 | [google-re2] | BSD-3-Clause | 2023-11-01 | | ✗ |
 | [google-snappy] | BSD-3-Clause | 1.1.10 | ✗ | ✗ |
 | [google/s2geometry] | Apache-2.0 | Unknown | ✗ | ✗ |
@@ -42,37 +42,43 @@
 ########################################################################################################
 tests:
+  # The current suggested method for raising these values is to run the
+  # benchmark a few times in patches, and comparing to the recent history.
+  # From the benchmark's history page, obtain a "stable region mean"
+  # and "CV of stable region". These are available in the "Trend Charts" tab on
+  # Evergreen for the `benchmarks_sep` task.
+  #
+  # The deviations of these metrics are extremely tight.
+  # We can easily accept 2 standard deviations up from the stable mean.
+  #
+  # threshold = mean * (1 + 2 * cv)
+  #
+  # 2025-02-16:
+  # Router/1: mean=20077, cv=.0228 => threshold=20993
+  # Router/32: mean=20111, cv=.0227 => threshold=21024
+  # Shard/1: mean=22013, cv=.0290 => threshold=23290
+  # Shard/32: mean=22087, cv=.0289 => threshold=23364
   ServiceEntryPointRouterRoleBenchmarkFixture/BM_SEP_PING:
     al2023-arm64-sep-benchmark:
-      # The current suggested method for raising these values is to run the benchmark a few times in patches, get the
-      # means of these metrics in them, and then multiply them by 1.002 (+ .2%). Please make sure the new values
-      # are above the maximum values reported in those patches to prevent any future flakiness.
-      #
-      # The below thresholds are based on the stable region mean as of 2/14/25 * 1.004.
      - thread_level: 1
        metrics:
          - name: instructions_per_iteration_mean
-            value: 20620
+            value: 20993
            bound_direction: upper
      - thread_level: 32
        metrics:
          - name: instructions_per_iteration_mean
-            value: 20652
+            value: 21024
            bound_direction: upper
   ServiceEntryPointShardRoleBenchmarkFixture/BM_SEP_PING:
     al2023-arm64-sep-benchmark:
-      # The current suggested method for raising these values is to run the benchmark a few times in patches, get the
-      # means of these metrics in them, and then multiply them by 1.002 (+ .2%). Please make sure the new values
-      # are above the maximum values reported in those patches to prevent any future flakiness.
-      #
-      # The below thresholds are based on the stable region mean as of 1/30/25 * 1.004.
      - thread_level: 1
        metrics:
          - name: instructions_per_iteration_mean
-            value: 22708
+            value: 23290
            bound_direction: upper
      - thread_level: 32
        metrics:
          - name: instructions_per_iteration_mean
-            value: 22794
+            value: 23364
            bound_direction: upper
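The arithmetic in the new comment block is easy to cross-check. A standalone sketch (not part of the commit) that recomputes the four thresholds from the quoted stable-region statistics, using threshold = mean * (1 + 2 * cv) rounded to the nearest integer:

#include <cmath>
#include <cstdio>

int main() {
    // mean and cv values copied from the comment block above.
    struct Row { const char* name; double mean, cv; } rows[] = {
        {"Router/1", 20077, .0228},   // expect 20993
        {"Router/32", 20111, .0227},  // expect 21024
        {"Shard/1", 22013, .0290},    // expect 23290
        {"Shard/32", 22087, .0289},   // expect 23364
    };
    for (const Row& r : rows)
        std::printf("%s: %ld\n", r.name, std::lround(r.mean * (1 + 2 * r.cv)));
    return 0;
}

All four results match the committed values above.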
@@ -504,7 +504,7 @@
             "name": "Organization: github"
           },
           "name": "fmtlib/fmt",
-          "version": "7.1.3",
+          "version": "11.1.3",
           "licenses": [
             {
               "license": {
@@ -512,11 +512,11 @@
               }
             }
           ],
-          "purl": "pkg:github/fmtlib/fmt@7.1.3",
+          "purl": "pkg:github/fmtlib/fmt@11.1.3",
           "properties": [
             {
               "name": "internal:team_responsible",
-              "value": "Server Security"
+              "value": "Server Programmability"
             },
             {
               "name": "emits_persisted_data",
@@ -38,10 +38,13 @@ namespace mongo {
 
 namespace {
 auto makeOverflowStatus(StringData action, size_t sizeOfT, size_t length, size_t debug_offset) {
-    using namespace fmt::literals;
-    return Status(ErrorCodes::Overflow,
-                  "buffer size too small to {} ({}) bytes out of buffer[{}] at offset: {}"_format(
-                      action, sizeOfT, length, debug_offset));
+    return Status(
+        ErrorCodes::Overflow,
+        fmt::format("buffer size too small to {} ({}) bytes out of buffer[{}] at offset: {}",
+                    action,
+                    sizeOfT,
+                    length,
+                    debug_offset));
 }
 } // namespace
 
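This hunk shows the most common rewrite in the commit: fmt 7 still shipped the "..."_format user-defined literal from fmt::literals, but later fmt releases dropped it, so the 11.1.3 upgrade replaces each use with a plain fmt::format call and deletes the now-unused using-directive. A minimal standalone sketch of the before/after shape (the function and names here are illustrative, not from the tree):

#include <fmt/format.h>
#include <string>

std::string overflowMessage(size_t want, size_t have) {
    // fmt 7 spelling, no longer compiles under fmt 11:
    //   using namespace fmt::literals;
    //   return "wanted {} bytes, buffer holds {}"_format(want, have);
    return fmt::format("wanted {} bytes, buffer holds {}", want, have);
}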
@@ -38,6 +38,7 @@
 #include "mongo/base/status.h"
 #include "mongo/base/status_with.h"
 #include "mongo/config.h" // IWYU pragma: keep
+#include "mongo/platform/compiler.h"
 
 namespace mongo {
 
@@ -89,7 +90,11 @@ struct DataType {
                       "trivially copyable. You may specialize the template to use it "
                       "with other types.");
        if (ptr) {
+            /** Silence spurious GCC stringop-overflow false negatives. */
+            MONGO_COMPILER_DIAGNOSTIC_PUSH
+            MONGO_COMPILER_IF_GCC(MONGO_COMPILER_DIAGNOSTIC_IGNORED("-Wstringop-overflow"))
            std::memcpy(ptr, &t, sizeof(T));
+            MONGO_COMPILER_DIAGNOSTIC_POP
        }
 
        if (advanced) {
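The added macros scope a warning suppression around the memcpy. Roughly — the real definitions live in "mongo/platform/compiler.h", and the following is only an illustrative analogue, not MongoDB's actual macro bodies — they expand to GCC diagnostic pragmas via _Pragma, and to nothing on other compilers:

#include <cstring>

#if defined(__GNUC__) && !defined(__clang__)
#define DIAG_PUSH _Pragma("GCC diagnostic push")
#define DIAG_IGNORE_STRINGOP _Pragma("GCC diagnostic ignored \"-Wstringop-overflow\"")
#define DIAG_POP _Pragma("GCC diagnostic pop")
#else
#define DIAG_PUSH
#define DIAG_IGNORE_STRINGOP
#define DIAG_POP
#endif

void store(void* ptr, const int& t) {
    DIAG_PUSH
    DIAG_IGNORE_STRINGOP
    std::memcpy(ptr, &t, sizeof(int));  // warning suppressed only for this region
    DIAG_POP
}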
@@ -67,7 +67,6 @@ void DependencyGraph::addNode(std::string name,
 
 namespace {
 
-using namespace fmt::literals;
-
 template <typename Seq>
 void strAppendJoin(std::string& out, StringData separator, const Seq& sequence) {
@@ -88,7 +87,7 @@ void throwGraphContainsCycle(Iter first, Iter last, std::vector<std::string>* cy
     *cycle = names;
     names.push_back((*first)->name());
     uasserted(ErrorCodes::GraphContainsCycle,
-              format(FMT_STRING("Cycle in dependency graph: {}"), fmt::join(names, " -> ")));
+              fmt::format("Cycle in dependency graph: {}", fmt::join(names, " -> ")));
 }
 
 } // namespace
@@ -123,7 +122,7 @@ std::vector<std::string> DependencyGraph::topSort(unsigned randomSeed,
     elementsStore.reserve(_nodes.size());
     for (auto iter = _nodes.begin(); iter != _nodes.end(); ++iter) {
         uassert(ErrorCodes::BadValue,
-                "node {} was mentioned but never added"_format(iter->first),
+                fmt::format("node {} was mentioned but never added", iter->first),
                 iter->second.payload);
         elementsStore.push_back(Element{iter});
     }
@@ -141,8 +140,9 @@ std::vector<std::string> DependencyGraph::topSort(unsigned randomSeed,
             [&](StringData childName) {
                 auto iter = byName.find(childName);
                 uassert(ErrorCodes::BadValue,
-                        "node {} depends on missing node {}"_format(
-                            element.nodeIter->first, childName),
+                        fmt::format("node {} depends on missing node {}",
+                                    element.nodeIter->first,
+                                    childName),
                         iter != byName.end());
                 return iter->second;
             });
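The throwGraphContainsCycle hunk above illustrates the second recurring rewrite: under fmt 7, wrapping the format string in FMT_STRING() and calling an unqualified, ADL-found format() was how callers opted into compile-time format-string checking. In newer fmt, literal format strings passed to fmt::format are checked at compile time by default when building in C++20 mode, so the qualified call with a bare literal is equivalent. A hedged sketch:

#include <fmt/format.h>
#include <fmt/ranges.h>  // fmt::join
#include <string>
#include <vector>

std::string cycleMessage(const std::vector<std::string>& names) {
    // fmt 7: return format(FMT_STRING("Cycle in dependency graph: {}"),
    //                      fmt::join(names, " -> "));
    return fmt::format("Cycle in dependency graph: {}", fmt::join(names, " -> "));
}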
@@ -128,12 +128,11 @@ void Initializer::_transition(State expected, State next) {
     if (_lifecycleState != expected)
         uasserted(
             ErrorCodes::IllegalOperation,
-            format(
-                FMT_STRING(
-                    "Invalid initializer state transition. Expected {} -> {}, but currently at {}"),
-                expected,
-                next,
-                _lifecycleState));
+            fmt::format(
+                "Invalid initializer state transition. Expected {} -> {}, but currently at {}",
+                fmt::underlying(expected),
+                fmt::underlying(next),
+                fmt::underlying(_lifecycleState)));
     _lifecycleState = next;
 }
 
@@ -175,7 +174,7 @@ void Initializer::executeInitializers(const std::vector<std::string>& args) {
             continue; // Legacy initializer without re-initialization support.
 
         uassert(ErrorCodes::InternalError,
-                format(FMT_STRING("node has no init function: \"{}\""), nodeName),
+                fmt::format("node has no init function: \"{}\"", nodeName),
                 node->initFn);
         node->initFn(&context);
 
@@ -216,9 +215,8 @@ InitializerFunction Initializer::getInitializerFunctionForTesting(const std::str
 }
 
 unsigned extractRandomSeedFromOptions(const std::vector<std::string>& args) {
-    using namespace fmt::literals;
     const std::string targetArg{"--initializerShuffleSeed"};
-    const auto errMsg = "Value must be specified for {}"_format(targetArg);
+    const auto errMsg = fmt::format("Value must be specified for {}", targetArg);
 
     for (size_t i = 0; i < args.size(); i++) {
         StringData arg = args[i];
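The _transition hunk above introduces fmt::underlying() because newer fmt releases no longer format arbitrary enums implicitly: an enum argument must either be converted at the call site or be given a format_as() hook (as this commit does for BSONType and BinDataType further down). A small sketch of the call-site form, with an illustrative enum:

#include <fmt/format.h>
#include <string>

enum class State { kUninitialized, kInitializing, kInitialized };

std::string describe(State s) {
    // fmt::underlying(s) yields the enum's underlying integer value.
    return fmt::format("state {}", fmt::underlying(s));  // e.g. "state 2"
}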
@@ -47,7 +47,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
-
 class InitializerTest : public unittest::Test {
 public:
@@ -128,18 +127,19 @@ public:
         for (auto req : reqs)
             if (states[req] != kInitialized)
                 uasserted(ErrorCodes::UnknownError,
-                          "(init{0}) {1} not already initialized"_format(idx, req));
+                          fmt::format("(init{0}) {1} not already initialized", idx, req));
         states[idx] = kInitialized;
     }
 
     void deinitImpl(size_t idx) {
         if (states[idx] != kInitialized)
-            uasserted(ErrorCodes::UnknownError, "(deinit{0}) {0} not initialized"_format(idx));
+            uasserted(ErrorCodes::UnknownError,
+                      fmt::format("(deinit{0}) {0} not initialized", idx));
         auto deps = graph.dependents()[idx];
         for (auto dep : deps)
             if (states[dep] != kDeinitialized)
                 uasserted(ErrorCodes::UnknownError,
-                          "(deinit{0}) {1} not already deinitialized"_format(idx, dep));
+                          fmt::format("(deinit{0}) {1} not already deinitialized", idx, dep));
         states[idx] = kDeinitialized;
     }
 
@@ -327,11 +327,11 @@ TEST(RandomSeedTest, RandomSeedParsing) {
     };
     const Spec specs[] = {
         {{"input1", "input2", opt, std::to_string(seed)}, {}, seed},
-        {{"input1", "input2", "{}={}"_format(opt, seed)}, {}, seed},
+        {{"input1", "input2", fmt::format("{}={}", opt, seed)}, {}, seed},
         {{"input1", "input2", opt}, ErrorCodes::InvalidOptions},
-        {{"input1", "input2", "{}="_format(opt)}, ErrorCodes::FailedToParse},
+        {{"input1", "input2", fmt::format("{}=", opt)}, ErrorCodes::FailedToParse},
         {{"input1", "input2", opt, "abcd"}, ErrorCodes::FailedToParse},
-        {{"input1", "input2", "{}={}"_format(opt, "abcd")}, ErrorCodes::FailedToParse},
+        {{"input1", "input2", fmt::format("{}={}", opt, "abcd")}, ErrorCodes::FailedToParse},
     };
 
     for (auto&& spec : specs) {
@@ -64,7 +64,7 @@ namespace {
 
 std::string fmtError(StringData prefix) {
     auto ec = lastSystemError();
-    return format(FMT_STRING("{}: {}"), prefix, errorMessage(ec));
+    return fmt::format("{}: {}", prefix, errorMessage(ec));
 }
 
 /**
@@ -47,7 +47,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
-
 static constexpr const char* kReason = "reason";
 static const std::string& kReasonString = *new std::string{kReason};
@@ -56,7 +55,7 @@ static const std::string& kReasonString = *new std::string{kReason};
 template <typename R>
 void checkReason(R&& r, std::string expected = kReasonString) {
     ASSERT_EQUALS(Status(ErrorCodes::MaxError, std::forward<R>(r)).reason(), expected)
-        << "type {}"_format(demangleName(typeid(decltype(r))));
+        << fmt::format("type {}", demangleName(typeid(decltype(r))));
 };
 
 struct CanString {
@@ -418,13 +418,13 @@ constexpr StringData operator"" _sd(const char* c, std::size_t len) {
 
 namespace fmt {
 template <>
-class formatter<mongo::StringData> : formatter<std::string_view> {
+class formatter<mongo::StringData> : private formatter<std::string_view> {
     using Base = formatter<std::string_view>;
 
 public:
     using Base::parse;
-    template <typename FormatContext>
-    auto format(mongo::StringData s, FormatContext& fc) {
+    auto format(mongo::StringData s, auto& fc) const {
         return Base::format(std::string_view{s}, fc);
     }
 };
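The reworked formatter keeps parse() from formatter<std::string_view>, so every standard string format spec (width, alignment, precision) continues to apply to StringData, while the added const qualifier matches newer fmt, which invokes format() on a const formatter object. A standalone analogue for a hypothetical wrapper type, assuming C++20 and fmt 11:

#include <fmt/format.h>
#include <string>
#include <string_view>

struct Name {
    std::string value;
};

template <>
class fmt::formatter<Name> : private fmt::formatter<std::string_view> {
    using Base = fmt::formatter<std::string_view>;

public:
    using Base::parse;  // reuse all string format-spec parsing
    auto format(const Name& n, auto& fc) const {
        return Base::format(std::string_view{n.value}, fc);
    }
};

// Usage: fmt::format("{:>8}", Name{"abc"}) aligns and pads exactly like a string.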
@@ -47,7 +47,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
-
 TEST(Construction, Empty) {
     StringData strData;
@@ -192,7 +191,7 @@ TEST(Find, Char1) {
             auto withStdString = s.find(ch, pos);
             auto withStringData = StringData{s}.find(ch, pos);
             ASSERT_EQUALS(withStdString, withStringData)
-                << format(FMT_STRING(R"(s:'{}', ch:'{}', pos:{})"), s, StringData{&ch, 1}, pos);
+                << fmt::format(R"(s:'{}', ch:'{}', pos:{})", s, StringData{&ch, 1}, pos);
         }
     }
 }
@@ -224,7 +223,7 @@ TEST(Find, Str1) {
             auto withStdString = s.find(sub, pos);
             auto withStringData = StringData{s}.find(StringData{sub}, pos);
             ASSERT_EQUALS(withStdString, withStringData)
-                << format(FMT_STRING(R"(s:'{}', sub:'{}', pos:{})"), s, sub, pos);
+                << fmt::format(R"(s:'{}', sub:'{}', pos:{})", s, sub, pos);
         }
     }
 }
@@ -251,12 +250,12 @@ TEST(Hasher, Str1) {
     };
     if constexpr (sizeofSizeT == 4) {
         for (auto&& s : specs)
-            ASSERT_EQUALS(tryHash(s.str), s.h4) << "str={}"_format(s.str);
+            ASSERT_EQUALS(tryHash(s.str), s.h4) << fmt::format("str={}", s.str);
     } else if constexpr (sizeofSizeT == 8) {
         for (auto&& s : specs)
-            ASSERT_EQUALS(tryHash(s.str), s.h8) << "str={}"_format(s.str);
+            ASSERT_EQUALS(tryHash(s.str), s.h8) << fmt::format("str={}", s.str);
     } else {
-        FAIL("sizeT weird size") << " sizeof(size_t) == {}"_format(sizeofSizeT);
+        FAIL("sizeT weird size") << fmt::format("sizeof(size_t) == {}", sizeofSizeT);
     }
 }
 
@@ -285,7 +284,7 @@ TEST(Rfind, Char1) {
         auto withStdString = s.rfind(ch, pos);
         auto withStringData = StringData{s}.rfind(ch, pos);
         ASSERT_EQUALS(withStdString, withStringData)
-            << format(FMT_STRING(R"(s:'{}', ch:'{}', pos:{})"), s, StringData{&ch, 1}, pos);
+            << fmt::format(R"(s:'{}', ch:'{}', pos:{})", s, StringData{&ch, 1}, pos);
     };
     // Try all possibly-relevent `pos` arguments.
     for (size_t pos = 0; pos < s.size() + 2; ++pos)
@@ -421,9 +420,7 @@ TEST(ConstIterator, StdReplaceCopy) {
 }
 
 TEST(StringDataFmt, Fmt) {
-    using namespace fmt::literals;
     ASSERT_EQUALS(fmt::format("-{}-", "abc"_sd), "-abc-");
-    ASSERT_EQUALS("-{}-"_format("abc"_sd), "-abc-");
 }
 
 TEST(Ostream, StringDataMatchesStdString) {
@@ -299,14 +299,14 @@ private:
                 fmt::format(
                     "BSON type '{}' is not supported for Encrypted BSON Value subtype {}",
                     typeName(originalBsonType),
-                    encryptedBinDataType),
+                    fmt::underlying(encryptedBinDataType)),
                 isFLE2SupportedType(encryptedBinDataType, originalBsonType));
             break;
         }
         default: {
             uasserted(ErrorCodes::NonConformantBSON,
                       fmt::format("Unsupported Encrypted BSON Value type {} in the collection",
-                                  encryptedBinDataType));
+                                  fmt::underlying(encryptedBinDataType)));
         }
     }
 }
@@ -113,7 +113,7 @@ BSONObj BSONElement::_jsonStringGenerator(const Generator& g,
     if (includeSeparator)
         buffer.push_back(',');
     if (pretty)
-        fmt::format_to(buffer, "\n{:<{}}", "", (pretty - 1) * 4);
+        fmt::format_to(std::back_inserter(buffer), "\n{:<{}}", "", (pretty - 1) * 4);
 
     if (includeFieldNames) {
         g.writePadding(buffer);
@@ -766,14 +766,14 @@ void BSONElement::toString(
             const char* data = binDataClean(len);
             // If the BinData is a correctly sized newUUID, display it as such.
             if (binDataType() == newUUID && len == 16) {
-                using namespace fmt::literals;
                 StringData sd(data, len);
                 // 4 Octets - 2 Octets - 2 Octets - 2 Octets - 6 Octets
-                s << "UUID(\"{}-{}-{}-{}-{}\")"_format(hexblob::encodeLower(sd.substr(0, 4)),
-                                                       hexblob::encodeLower(sd.substr(4, 2)),
-                                                       hexblob::encodeLower(sd.substr(6, 2)),
-                                                       hexblob::encodeLower(sd.substr(8, 2)),
-                                                       hexblob::encodeLower(sd.substr(10, 6)));
+                s << fmt::format("UUID(\"{}-{}-{}-{}-{}\")",
+                                 hexblob::encodeLower(sd.substr(0, 4)),
+                                 hexblob::encodeLower(sd.substr(4, 2)),
+                                 hexblob::encodeLower(sd.substr(6, 2)),
+                                 hexblob::encodeLower(sd.substr(8, 2)),
+                                 hexblob::encodeLower(sd.substr(10, 6)));
                 break;
             }
             s << "BinData(" << binDataType() << ", ";
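This is the first of several hunks that wrap a fmt::memory_buffer in std::back_inserter. fmt 7's format_to accepted the buffer directly; current fmt writes through an output iterator, so each call site now passes std::back_inserter(buffer) (fmt::appender(buffer) would be equivalent). A minimal sketch:

#include <fmt/format.h>
#include <iterator>

void indent(fmt::memory_buffer& buffer, int pretty) {
    // Left-aligns an empty string in a computed width, i.e. emits a newline
    // followed by (pretty - 1) * 4 spaces.
    fmt::format_to(std::back_inserter(buffer), "\n{:<{}}", "", (pretty - 1) * 4);
}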
@@ -46,7 +46,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
-
 TEST(BSONElement, BinDataToString) {
     BSONObjBuilder builder;
@@ -99,7 +98,7 @@ std::string vecStr(std::vector<uint8_t> v) {
     std::string r = "[";
     StringData sep;
     for (const uint8_t& b : v) {
-        r += "{}{:02x}"_format(sep, (unsigned)b);
+        r += fmt::format("{}{:02x}", sep, (unsigned)b);
         sep = ","_sd;
     }
     r += "]";
@@ -324,7 +324,7 @@ BSONObj BSONObj::_jsonStringGenerator(const Generator& g,
     }
 
     if (pretty)
-        fmt::format_to(buffer, "\n{:<{}}", "", (pretty - 1) * 4);
+        fmt::format_to(std::back_inserter(buffer), "\n{:<{}}", "", (pretty - 1) * 4);
     buffer.push_back(isArray ? ']' : '}');
     return truncation;
 }
@@ -44,7 +44,6 @@ namespace mongo {
 namespace {
 bool localTimeZoneForDate = false;
 }
-using namespace fmt::literals;
 
 const char kMaxKeyData[] = {7, 0, 0, 0, static_cast<char>(MaxKey), 0, 0};
 const BSONObj kMaxBSONKey(kMaxKeyData);
@@ -138,7 +137,7 @@ boost::optional<BSONType> findBSONTypeAlias(StringData key) {
 
 BSONType typeFromName(StringData name) {
     auto typeAlias = findBSONTypeAlias(name);
-    uassert(ErrorCodes::BadValue, "Unknown type name: {}"_format(name), typeAlias);
+    uassert(ErrorCodes::BadValue, fmt::format("Unknown type name: {}", name), typeAlias);
     return *typeAlias;
 }
 
@@ -113,6 +113,10 @@ enum BSONType {
     MaxKey = 127
 };
 
+inline auto format_as(BSONType t) {
+    return fmt::underlying(t);
+}
+
 /**
  * Maps from the set of type aliases accepted by the $type query operator to the corresponding BSON
  * types. Excludes "number", since this alias maps to a set of BSON types.
@@ -210,6 +214,10 @@ enum BinDataType {
     bdtCustom = 128
 };
 
+inline auto format_as(BinDataType type) {
+    return fmt::underlying(type);
+}
+
 /**
  * Return the name of the BinData Type.
 */
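These two additions are ADL hooks: when fmt finds a free format_as() declared next to an enum's type, it converts values through it before formatting. That is why later hunks in this commit can pass elem.type() or a BinDataType straight to fmt::format (including with specs such as "{:02x}") instead of static_casting at every call site. A self-contained sketch of the mechanism, with an illustrative enum:

#include <fmt/format.h>
#include <string>

namespace demo {
enum Color { kRed = 1, kGreen = 2 };

// Found via ADL; fmt formats the value returned here (the underlying int).
inline auto format_as(Color c) {
    return fmt::underlying(c);
}
}  // namespace demo

std::string show(demo::Color c) {
    return fmt::format("color {:02x}", c);  // e.g. "color 02"
}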
@@ -61,17 +61,18 @@ public:
     }
 
     void writeInt32(fmt::memory_buffer& buffer, int32_t val) const {
-        format_to(std::back_inserter(buffer), FMT_COMPILE(R"({{"$numberInt":"{}"}})"), val);
+        fmt::format_to(std::back_inserter(buffer), FMT_COMPILE(R"({{"$numberInt":"{}"}})"), val);
     }
 
     void writeInt64(fmt::memory_buffer& buffer, int64_t val) const {
-        format_to(std::back_inserter(buffer), FMT_COMPILE(R"({{"$numberLong":"{}"}})"), val);
+        fmt::format_to(std::back_inserter(buffer), FMT_COMPILE(R"({{"$numberLong":"{}"}})"), val);
     }
 
     void writeDouble(fmt::memory_buffer& buffer, double val) const {
         if (val >= std::numeric_limits<double>::lowest() &&
             val <= std::numeric_limits<double>::max())
-            format_to(std::back_inserter(buffer), FMT_COMPILE(R"({{"$numberDouble":"{}"}})"), val);
+            fmt::format_to(
+                std::back_inserter(buffer), FMT_COMPILE(R"({{"$numberDouble":"{}"}})"), val);
         else if (std::isnan(val))
             appendTo(buffer, R"({"$numberDouble":"NaN"})"_sd);
         else if (std::isinf(val)) {
@@ -90,20 +91,20 @@ public:
         if (val.isNaN())
             appendTo(buffer, R"({"$numberDecimal":"NaN"})"_sd);
         else if (val.isInfinite())
-            format_to(std::back_inserter(buffer),
-                      FMT_COMPILE(R"({{"$numberDecimal":"{}"}})"),
-                      val.isNegative() ? "-Infinity" : "Infinity");
+            fmt::format_to(std::back_inserter(buffer),
+                           FMT_COMPILE(R"({{"$numberDecimal":"{}"}})"),
+                           val.isNegative() ? "-Infinity" : "Infinity");
         else {
-            format_to(std::back_inserter(buffer),
-                      FMT_COMPILE(R"({{"$numberDecimal":"{}"}})"),
-                      val.toString());
+            fmt::format_to(std::back_inserter(buffer),
+                           FMT_COMPILE(R"({{"$numberDecimal":"{}"}})"),
+                           val.toString());
         }
     }
 
     void writeDate(fmt::memory_buffer& buffer, Date_t val) const {
-        format_to(std::back_inserter(buffer),
-                  FMT_COMPILE(R"({{"$date":{{"$numberLong":"{}"}}}})"),
-                  val.toMillisSinceEpoch());
+        fmt::format_to(std::back_inserter(buffer),
+                       FMT_COMPILE(R"({{"$date":{{"$numberLong":"{}"}}}})"),
+                       val.toMillisSinceEpoch());
     }
 
     void writeDBRef(fmt::memory_buffer& buffer, StringData ref, OID id) const {
@@ -112,14 +113,14 @@ public:
         str::escapeForJSON(buffer, ref);
 
         // OID is a hex string and does not need to be escaped
-        format_to(std::back_inserter(buffer), FMT_COMPILE(R"(","$id":"{}"}})"), id.toString());
+        fmt::format_to(std::back_inserter(buffer), FMT_COMPILE(R"(","$id":"{}"}})"), id.toString());
     }
 
     void writeOID(fmt::memory_buffer& buffer, OID val) const {
         // OID is a hex string and does not need to be escaped
         static_assert(OID::kOIDSize == 12);
         const uint8_t* data = reinterpret_cast<const uint8_t*>(val.view().view());
-        format_to(
+        fmt::format_to(
             std::back_inserter(buffer),
             FMT_COMPILE(
                 R"({{"$oid":"{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}"}})"),
@@ -138,15 +139,15 @@ public:
     }
 
     void writeTimestamp(fmt::memory_buffer& buffer, Timestamp val) const {
-        format_to(std::back_inserter(buffer),
-                  FMT_COMPILE(R"({{"$timestamp":{{"t":{},"i":{}}}}})"),
-                  val.getSecs(),
-                  val.getInc());
+        fmt::format_to(std::back_inserter(buffer),
+                       FMT_COMPILE(R"({{"$timestamp":{{"t":{},"i":{}}}}})"),
+                       val.getSecs(),
+                       val.getInc());
     }
 
     void writeBinData(fmt::memory_buffer& buffer, StringData data, BinDataType type) const {
         if (type == newUUID && data.size() == 16) {
-            format_to(
+            fmt::format_to(
                 std::back_inserter(buffer),
                 FMT_COMPILE(
                     R"({{"$uuid":"{:02x}{:02x}{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}-{:02x}{:02x}{:02x}{:02x}{:02x}{:02x}"}})"),
@@ -169,9 +170,8 @@ public:
         } else {
             appendTo(buffer, R"({"$binary":{"base64":")"_sd);
             base64::encode(buffer, data);
-            format_to(std::back_inserter(buffer),
-                      FMT_COMPILE(R"(","subType":"{:x}"}}}})"),
-                      static_cast<std::underlying_type_t<BSONType>>(type));
+            fmt::format_to(
+                std::back_inserter(buffer), FMT_COMPILE(R"(","subType":"{:x}"}}}})"), type);
         }
     }
 
@@ -64,7 +64,7 @@ public:
     void writeDouble(fmt::memory_buffer& buffer, double val) const {
         if (val >= std::numeric_limits<double>::lowest() &&
             val <= std::numeric_limits<double>::max())
-            format_to(std::back_inserter(buffer), FMT_COMPILE(R"({})"), val);
+            fmt::format_to(std::back_inserter(buffer), FMT_COMPILE(R"({})"), val);
         else {
             ExtendedCanonicalV200Generator::writeDouble(buffer, val);
         }
@@ -78,9 +78,9 @@ public:
         // handles both the case where Date_t::millis is too large, and the case where
         // Date_t::millis is negative (before the epoch).
         if (val.isFormattable()) {
-            format_to(std::back_inserter(buffer),
-                      FMT_COMPILE(R"({{"$date":"{}"}})"),
-                      StringData{DateStringBuffer{}.iso8601(val, _localDate)});
+            fmt::format_to(std::back_inserter(buffer),
+                           FMT_COMPILE(R"({{"$date":"{}"}})"),
+                           StringData{DateStringBuffer{}.iso8601(val, _localDate)});
         } else {
             ExtendedCanonicalV200Generator::writeDate(buffer, val);
         }
@@ -46,7 +46,7 @@ public:
     }
 
     void writeString(fmt::memory_buffer& buffer, StringData str) const {
-        fmt::format_to(buffer, R"("{}")", str::escape(str));
+        fmt::format_to(std::back_inserter(buffer), R"("{}")", str::escape(str));
     }
 
     void writeSymbol(fmt::memory_buffer& buffer, StringData symbol) const {
@@ -58,13 +58,13 @@ public:
     }
 
     void writeInt64(fmt::memory_buffer& buffer, int64_t val) const {
-        fmt::format_to(buffer, R"({{ "$numberLong" : "{}" }})", val);
+        fmt::format_to(std::back_inserter(buffer), R"({{ "$numberLong" : "{}" }})", val);
     }
 
     void writeDouble(fmt::memory_buffer& buffer, double val) const {
         if (val >= std::numeric_limits<double>::lowest() &&
             val <= std::numeric_limits<double>::max())
-            fmt::format_to(buffer, R"({:.16g})", val);
+            fmt::format_to(std::back_inserter(buffer), R"({:.16g})", val);
         else if (std::isnan(val))
             appendTo(buffer, "NaN"_sd);
         else if (std::isinf(val)) {
@@ -83,11 +83,12 @@ public:
         if (val.isNaN())
             appendTo(buffer, R"({ "$numberDecimal" : "NaN" })"_sd);
         else if (val.isInfinite())
-            fmt::format_to(buffer,
+            fmt::format_to(std::back_inserter(buffer),
                            R"({{ "$numberDecimal" : "{}" }})",
                            val.isNegative() ? "-Infinity"_sd : "Infinity"_sd);
         else {
-            fmt::format_to(buffer, R"({{ "$numberDecimal" : "{}" }})", val.toString());
+            fmt::format_to(
+                std::back_inserter(buffer), R"({{ "$numberDecimal" : "{}" }})", val.toString());
         }
     }
 
@@ -99,46 +100,52 @@ public:
         // handles both the case where Date_t::millis is too large, and the case where
         // Date_t::millis is negative (before the epoch).
         if (val.isFormattable()) {
-            fmt::format_to(buffer, R"({{ "$date" : "{}" }})", dateToISOStringLocal(val));
+            fmt::format_to(
+                std::back_inserter(buffer), R"({{ "$date" : "{}" }})", dateToISOStringLocal(val));
         } else {
-            fmt::format_to(
-                buffer, R"({{ "$date" : {{ "$numberLong" : "{}" }} }})", val.toMillisSinceEpoch());
+            fmt::format_to(std::back_inserter(buffer),
+                           R"({{ "$date" : {{ "$numberLong" : "{}" }} }})",
+                           val.toMillisSinceEpoch());
         }
     }
 
     void writeDBRef(fmt::memory_buffer& buffer, StringData ref, OID id) const {
-        fmt::format_to(buffer, R"({{ "$ref" : "{}", "$id" : "{}" }})", ref, id.toString());
+        fmt::format_to(
+            std::back_inserter(buffer), R"({{ "$ref" : "{}", "$id" : "{}" }})", ref, id.toString());
     }
 
     void writeOID(fmt::memory_buffer& buffer, OID val) const {
-        fmt::format_to(buffer, R"({{ "$oid" : "{}" }})", val.toString());
+        fmt::format_to(std::back_inserter(buffer), R"({{ "$oid" : "{}" }})", val.toString());
     }
 
     void writeBinData(fmt::memory_buffer& buffer, StringData data, BinDataType type) const {
         appendTo(buffer, R"({ "$binary" : ")");
         base64::encode(buffer, data);
-        fmt::format_to(buffer, R"(", "$type" : "{:02x}" }})", type);
+        fmt::format_to(std::back_inserter(buffer), R"(", "$type" : "{:02x}" }})", type);
     }
 
     void writeRegex(fmt::memory_buffer& buffer, StringData pattern, StringData options) const {
-        fmt::format_to(
-            buffer, R"({{ "$regex" : "{}", "$options" : "{}" }})", str::escape(pattern), options);
+        fmt::format_to(std::back_inserter(buffer),
+                       R"({{ "$regex" : "{}", "$options" : "{}" }})",
+                       str::escape(pattern),
+                       options);
    }
 
     void writeCode(fmt::memory_buffer& buffer, StringData code) const {
-        fmt::format_to(buffer, R"({{ "$code" : "{}" }})", str::escape(code));
+        fmt::format_to(std::back_inserter(buffer), R"({{ "$code" : "{}" }})", str::escape(code));
     }
 
     void writeCodeWithScope(fmt::memory_buffer& buffer,
                             StringData code,
                             BSONObj const& scope) const {
-        fmt::format_to(buffer, R"({{ "$code" : "{}", "$scope" : )", str::escape(code));
+        fmt::format_to(
+            std::back_inserter(buffer), R"({{ "$code" : "{}", "$scope" : )", str::escape(code));
         scope.jsonStringGenerator(*this, 0, false, buffer);
         appendTo(buffer, R"( })");
     }
 
     void writeTimestamp(fmt::memory_buffer& buffer, Timestamp val) const {
-        fmt::format_to(buffer,
+        fmt::format_to(std::back_inserter(buffer),
                        R"({{ "$timestamp" : {{ "t" : {}, "i" : {} }} }})",
                        val.getSecs(),
                        val.getInc());
@@ -65,7 +65,6 @@ namespace mongo {
 using std::ostringstream;
 using std::string;
 using std::unique_ptr;
-using namespace fmt::literals;
 
 #if 0
 #define MONGO_JSON_DEBUG(message) \
@@ -1588,7 +1587,10 @@ BSONObj fromJsonImpl(const char* jsonString, size_t len) {
     }
 
     if (ret != Status::OK()) {
-        uasserted(16619, "code {}: {}: {}"_format(ret.code(), ret.codeString(), ret.reason()));
+        uasserted(
+            16619,
+            fmt::format(
+                "code {}: {}: {}", fmt::underlying(ret.code()), ret.codeString(), ret.reason()));
     }
     uassert(ErrorCodes::FailedToParse,
             "Garbage at end of json string",
@@ -251,9 +251,9 @@ public:
     }
 
     std::string toString() const override {
-        using namespace fmt::literals;
-        return R"!({{type: "RetryPolicyForCategory",categoryIndex: {}, maxAttempts: {}, maxTimeMS: {}}})!"_format(
-            static_cast<std::underlying_type_t<ErrorCategory>>(kCategory),
+        return fmt::format(
+            R"!({{type: "RetryPolicyForCategory",categoryIndex: {}, maxAttempts: {}, maxTimeMS: {}}})!",
+            fmt::underlying(kCategory),
             _maximumAttempts,
             _maximumResponseElapsedTotal.count());
     }
@@ -62,7 +62,6 @@
 
 namespace mongo {
 
-using namespace fmt::literals;
-
 Value coerceValueToRangeIndexTypes(Value val, BSONType fieldType) {
     BSONType valType = val.getType();
@@ -324,7 +323,7 @@ uint32_t getNumberOfBitsInDomain(BSONType fieldType,
 
 void validateRangeIndex(BSONType fieldType, StringData fieldPath, QueryTypeConfig& query) {
     uassert(6775201,
-            "Type '{}' is not a supported range indexed type"_format(typeName(fieldType)),
+            fmt::format("Type '{}' is not a supported range indexed type", typeName(fieldType)),
            isFLE2RangeIndexedSupportedType(fieldType));
 
     auto& indexMin = query.getMin();
@@ -338,14 +337,16 @@ void validateRangeIndex(BSONType fieldType, StringData fieldPath, QueryTypeConfi
 
     if (indexMin) {
         uassert(7018200,
-                "Range field type '{}' does not match the min value type '{}'"_format(
-                    typeName(fieldType), typeName(indexMin->getType())),
+                fmt::format("Range field type '{}' does not match the min value type '{}'",
+                            typeName(fieldType),
+                            typeName(indexMin->getType())),
                 fieldType == indexMin->getType());
     }
     if (indexMax) {
         uassert(7018201,
-                "Range field type '{}' does not match the max value type '{}'"_format(
-                    typeName(fieldType), typeName(indexMax->getType())),
+                fmt::format("Range field type '{}' does not match the max value type '{}'",
+                            typeName(fieldType),
+                            typeName(indexMax->getType())),
                 fieldType == indexMax->getType());
     }
     if (indexMin && indexMax) {
@@ -366,37 +367,43 @@ void validateRangeIndex(BSONType fieldType, StringData fieldPath, QueryTypeConfi
         if (fieldType == NumberDouble) {
             auto min = query.getMin()->coerceToDouble();
             uassert(6966805,
-                    "The number of decimal digits for minimum value of field '{}' "
-                    "must be less than or equal to precision"_format(fieldPath),
+                    fmt::format("The number of decimal digits for minimum value of field '{}' "
+                                "must be less than or equal to precision",
+                                fieldPath),
                    validateDoublePrecisionRange(min, precision));
             auto max = query.getMax()->coerceToDouble();
             uassert(6966806,
-                    "The number of decimal digits for maximum value of field '{}' "
-                    "must be less than or equal to precision"_format(fieldPath),
+                    fmt::format("The number of decimal digits for maximum value of field '{}' "
+                                "must be less than or equal to precision",
+                                fieldPath),
                    validateDoublePrecisionRange(max, precision));
-            uassert(
-                9157100,
-                "The domain of double values specified by the min, max, and precision "
-                "for field '{}' cannot be represented in fewer than 64 bits"_format(fieldPath),
-                query.getQueryType() == QueryTypeEnum::RangePreviewDeprecated ||
-                    canUsePrecisionMode(min, max, precision));
+            uassert(9157100,
+                    fmt::format(
+                        "The domain of double values specified by the min, max, and precision "
+                        "for field '{}' cannot be represented in fewer than 64 bits",
+                        fieldPath),
+                    query.getQueryType() == QueryTypeEnum::RangePreviewDeprecated ||
+                        canUsePrecisionMode(min, max, precision));
        } else {
             auto minDecimal = query.getMin()->coerceToDecimal();
             uassert(6966807,
-                    "The number of decimal digits for minimum value of field '{}' "
-                    "must be less than or equal to precision"_format(fieldPath),
+                    fmt::format("The number of decimal digits for minimum value of field '{}' "
+                                "must be less than or equal to precision",
+                                fieldPath),
                    validateDecimal128PrecisionRange(minDecimal, precision));
            auto maxDecimal = query.getMax()->coerceToDecimal();
            uassert(6966808,
-                    "The number of decimal digits for maximum value of field '{}' "
-                    "must be less than or equal to precision"_format(fieldPath),
+                    fmt::format("The number of decimal digits for maximum value of field '{}' "
+                                "must be less than or equal to precision",
+                                fieldPath),
                    validateDecimal128PrecisionRange(maxDecimal, precision));
-            uassert(
-                9157101,
-                "The domain of decimal values specified by the min, max, and precision "
-                "for field '{}' cannot be represented in fewer than 128 bits"_format(fieldPath),
-                query.getQueryType() == QueryTypeEnum::RangePreviewDeprecated ||
-                    canUsePrecisionMode(minDecimal, maxDecimal, precision));
+            uassert(9157101,
+                    fmt::format(
+                        "The domain of decimal values specified by the min, max, and precision "
+                        "for field '{}' cannot be represented in fewer than 128 bits",
+                        fieldPath),
+                    query.getQueryType() == QueryTypeEnum::RangePreviewDeprecated ||
+                        canUsePrecisionMode(minDecimal, maxDecimal, precision));
        }
    }
 }
@@ -431,10 +438,12 @@ void validateTextSearchIndex(BSONType fieldType,
                              boost::optional<bool> previousCaseSensitivity,
                              boost::optional<bool> previousDiacriticSensitivity,
                              boost::optional<std::int64_t> previousContention) {
-    uassert(9783400,
-            "Type '{}' is not a supported type for text search indexed encrypted field {}"_format(
-                typeName(fieldType), fieldPath),
-            fieldType == BSONType::String);
+    uassert(
+        9783400,
+        fmt::format("Type '{}' is not a supported type for text search indexed encrypted field {}",
+                    typeName(fieldType),
+                    fieldPath),
+        fieldType == BSONType::String);
     auto qTypeStr = QueryType_serializer(query.getQueryType());
 
     uassert(9783401,
@@ -442,20 +451,24 @@ void validateTextSearchIndex(BSONType fieldType,
             isTextSearchQueryType(query.getQueryType()));
 
     uassert(9783402,
-            "strMinQueryLength parameter is required for {} query type of field {}"_format(
-                qTypeStr, fieldPath),
+            fmt::format("strMinQueryLength parameter is required for {} query type of field {}",
+                        qTypeStr,
+                        fieldPath),
             query.getStrMinQueryLength().has_value());
     uassert(9783403,
-            "strMaxQueryLength parameter is required for {} query type of field {}"_format(
-                qTypeStr, fieldPath),
+            fmt::format("strMaxQueryLength parameter is required for {} query type of field {}",
+                        qTypeStr,
+                        fieldPath),
             query.getStrMaxQueryLength().has_value());
     uassert(9783404,
-            "caseSensitive parameter is required for {} query type of field {}"_format(qTypeStr,
-                                                                                       fieldPath),
+            fmt::format("caseSensitive parameter is required for {} query type of field {}",
+                        qTypeStr,
+                        fieldPath),
             query.getCaseSensitive().has_value());
     uassert(9783405,
-            "diacriticSensitive parameter is required for {} query type of field {}"_format(
-                qTypeStr, fieldPath),
+            fmt::format("diacriticSensitive parameter is required for {} query type of field {}",
+                        qTypeStr,
+                        fieldPath),
             query.getDiacriticSensitive().has_value());
     uassert(9783406,
             "strMinQueryLength cannot be greater than strMaxQueryLength",
@@ -463,8 +476,9 @@ void validateTextSearchIndex(BSONType fieldType,
 
     if (query.getQueryType() == QueryTypeEnum::SubstringPreview) {
         uassert(9783407,
-                "strMaxLength parameter is required for {} query type of field {}"_format(
-                    qTypeStr, fieldPath),
+                fmt::format("strMaxLength parameter is required for {} query type of field {}",
+                            qTypeStr,
+                            fieldPath),
                query.getStrMaxLength().has_value());
        uassert(9783408,
                "strMaxQueryLength cannot be greater than strMaxLength",
@@ -473,21 +487,24 @@ void validateTextSearchIndex(BSONType fieldType,
 
     if (previousCaseSensitivity.has_value() &&
         query.getCaseSensitive().value() != *previousCaseSensitivity) {
-        uasserted(9783409,
-                  "caseSensitive parameter must be the same for all query types of field {}"_format(
-                      fieldPath));
+        uasserted(
+            9783409,
+            fmt::format("caseSensitive parameter must be the same for all query types of field {}",
+                        fieldPath));
     }
     if (previousDiacriticSensitivity.has_value() &&
         query.getDiacriticSensitive().value() != *previousDiacriticSensitivity) {
         uasserted(
             9783410,
-            "diacriticSensitive parameter must be the same for all query types of field {}"_format(
+            fmt::format(
+                "diacriticSensitive parameter must be the same for all query types of field {}",
                 fieldPath));
     }
     if (previousContention.has_value() && query.getContention() != *previousContention) {
-        uasserted(9783411,
-                  "contention parameter must be the same for all query types of field {}"_format(
-                      fieldPath));
+        uasserted(
+            9783411,
+            fmt::format("contention parameter must be the same for all query types of field {}",
+                        fieldPath));
     };
 }
 
@@ -519,9 +536,9 @@ void validateEncryptedField(const EncryptedField* field) {
         auto qtype1 = queryTypeConfigs.front().getQueryType();
         auto qtype2 = queryTypeConfigs.back().getQueryType();
         uassert(9783414,
-                "Multiple query types may only include the {} and {} query types"_format(
-                    QueryType_serializer(QueryTypeEnum::SuffixPreview),
-                    QueryType_serializer(QueryTypeEnum::PrefixPreview)),
+                fmt::format("Multiple query types may only include the {} and {} query types",
+                            QueryType_serializer(QueryTypeEnum::SuffixPreview),
+                            QueryType_serializer(QueryTypeEnum::PrefixPreview)),
                 (qtype1 == QueryTypeEnum::SuffixPreview &&
                  qtype2 == QueryTypeEnum::PrefixPreview) ||
                     (qtype2 == QueryTypeEnum::SuffixPreview &&
@@ -133,7 +133,6 @@ extern "C" {
 static_assert(kDebugBuild == 1, "Only use in debug builds");
 #endif
 
-using namespace fmt::literals;
-
 namespace mongo {
 
@@ -362,10 +361,10 @@ void toEncryptedBinDataPretyped(StringData field,
     uassert(9784114, "Input buffer of encrypted data cannot be empty", cdr.length() > 0);
     auto dtAsNum = static_cast<uint8_t>(dt);
     auto firstByte = static_cast<uint8_t>(cdr.data()[0]);
-    uassert(
-        9588900,
-        "Expected buffer to begin with type tag {}, but began with {}"_format(dtAsNum, firstByte),
-        firstByte == dtAsNum);
+    uassert(9588900,
+            fmt::format(
+                "Expected buffer to begin with type tag {}, but began with {}", dtAsNum, firstByte),
+            firstByte == dtAsNum);
 
     builder->appendBinData(field, cdr.length(), BinDataType::Encrypt, cdr.data());
 }
@@ -2045,7 +2044,7 @@ StateCollectionTokensV2 StateCollectionTokensV2::Encrypted::decrypt(const ECOCTo
     if (expectLeaf) {
         auto leaf = cdrc.readAndAdvance<uint8_t>();
         uassert(ErrorCodes::BadValue,
-                "Invalid value for ESCTokensV2 leaf tag {}"_format(leaf),
+                fmt::format("Invalid value for ESCTokensV2 leaf tag {}", leaf),
                 (leaf == 0) || (leaf == 1));
 
         isLeaf = !!leaf;
@@ -2360,7 +2359,7 @@ BSONObj ESCCollectionAnchorPadding::generatePaddingDocument(
     toBinData(kId, block, &builder);
     toBinData(kValue, cipherText, &builder);
 #ifdef FLE2_DEBUG_STATE_COLLECTIONS
-    builder.append(kDebugId, "NULL DOC({})"_format(id));
+    builder.append(kDebugId, fmt::format("NULL DOC({})", id));
     builder.append(kDebugValuePosition, 0);
     builder.append(kDebugValueCount, 0);
 #endif
@@ -3417,8 +3416,9 @@ FLE2IndexedTextEncryptedValue::FLE2IndexedTextEncryptedValue(ConstDataRange toPa
     mc_FLE2IndexedEncryptedValueV2_parse(_value.get(), buf.get(), status);
     uassertStatusOK(status.toStatus());
     uassert(9784115,
-            "Expected buffer to begin with type tag {}, but began with {}"_format(kFLE2IEVTypeText,
-                                                                                  _value->type),
+            fmt::format("Expected buffer to begin with type tag {}, but began with {}",
+                        fmt::underlying(kFLE2IEVTypeText),
+                        fmt::underlying(_value->type)),
             _value->type == kFLE2IEVTypeText);
 }
 
@@ -4134,10 +4134,11 @@ std::vector<CompactionToken> CompactionHelpers::parseCompactionTokens(BSONObj co
                 std::move(fieldName), doc.getECOCToken(), doc.getAnchorPaddingToken()};
         }
 
-        uasserted(
-            6346801,
-            "Field '{}' of compaction tokens must be a BinData(General) or Object, got '{}'"_format(
-                fieldName, typeName(token.type())));
+        uasserted(6346801,
+                  fmt::format("Field '{}' of compaction tokens must be a BinData(General) or "
+                              "Object, got '{}'",
+                              fieldName,
+                              typeName(token.type())));
     });
     return parsed;
 }
@@ -4208,16 +4209,16 @@ bool hasQueryType(const EncryptedFieldConfig& config, QueryTypeEnum queryType) {
 
 QueryTypeConfig getQueryType(const EncryptedField& field, QueryTypeEnum queryType) {
     uassert(8574703,
-            "Field '{}' is missing a QueryTypeConfig"_format(field.getPath()),
+            fmt::format("Field '{}' is missing a QueryTypeConfig", field.getPath()),
            field.getQueries());
 
     return visit(OverloadedVisitor{
                     [&](QueryTypeConfig query) {
                         uassert(8574704,
-                                 "Field '{}' should be of type '{}', got '{}'"_format(
-                                     field.getPath(),
-                                     QueryType_serializer(queryType),
-                                     QueryType_serializer(query.getQueryType())),
+                                 fmt::format("Field '{}' should be of type '{}', got '{}'",
+                                             field.getPath(),
+                                             QueryType_serializer(queryType),
+                                             QueryType_serializer(query.getQueryType())),
                                 query.getQueryType() == queryType);
                         return query;
                     },
@@ -4227,9 +4228,11 @@ QueryTypeConfig getQueryType(const EncryptedField& field, QueryTypeEnum queryTyp
                             return query;
                         }
                     }
-                     uasserted(8674705,
-                               "Field '{}' should be of type '{}', but no configs match"_format(
-                                   field.getPath(), QueryType_serializer(queryType)));
+                     uasserted(
+                         8674705,
+                         fmt::format("Field '{}' should be of type '{}', but no configs match",
+                                     field.getPath(),
+                                     QueryType_serializer(queryType)));
                 }},
                 field.getQueries().get());
 }
@@ -4424,7 +4427,8 @@ std::uint64_t getEdgesLength(BSONType fieldType, StringData fieldPath, QueryType
                 ->size();
         }
         default:
-            uasserted(8674710, "Invalid queryTypeConfig.type '{}'"_format(typeName(fieldType)));
+            uasserted(8674710,
+                      fmt::format("Invalid queryTypeConfig.type '{}'", typeName(fieldType)));
     }
 
     MONGO_UNREACHABLE;
@@ -66,12 +66,12 @@ struct FLEKey {
     FLEKey() = default;
 
     FLEKey(KeyMaterial dataIn) : data(std::move(dataIn)) {
-        using namespace fmt::literals;
-
         // This is not a mistake; same keys will be used in FLE2 as in FLE1
         uassert(6364500,
-                "Length of KeyMaterial is expected to be {} bytes, found {}"_format(
-                    kFieldLevelEncryptionKeySize, data->size()),
+                fmt::format("Length of KeyMaterial is expected to be {} bytes, found {}",
+                            kFieldLevelEncryptionKeySize,
+                            data->size()),
                data->size() == kFieldLevelEncryptionKeySize);
     }
 
@@ -404,12 +404,13 @@ public:
     static constexpr std::size_t kCipherLengthESCAndLeafFlag = kCipherLengthESCOnly + 1;
 
     static void assertLength(std::size_t sz) {
-        using namespace fmt::literals;
-        uassert(
-            ErrorCodes::BadValue,
-            "Invalid length for EncryptedStateCollectionTokensV2, expected {} or {}, got {}"_format(
-                kCipherLengthESCOnly, kCipherLengthESCAndLeafFlag, sz),
-            (sz == kCipherLengthESCOnly) || (sz == kCipherLengthESCAndLeafFlag));
+        uassert(ErrorCodes::BadValue,
+                fmt::format("Invalid length for EncryptedStateCollectionTokensV2, expected {} "
+                            "or {}, got {}",
+                            kCipherLengthESCOnly,
+                            kCipherLengthESCAndLeafFlag,
+                            sz),
+                (sz == kCipherLengthESCOnly) || (sz == kCipherLengthESCAndLeafFlag));
     }
 
     std::vector<std::uint8_t> _encryptedTokens;
@@ -47,7 +47,6 @@
 
 #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl
 
-using namespace fmt::literals;
-
 namespace mongo::crypto {
 namespace {
@@ -39,7 +39,6 @@ extern "C" {
 #include <mongocrypt-buffer-private.h>
 }
 
-using namespace fmt::literals;
-
 namespace mongo {
 
@@ -138,8 +137,7 @@ LibBSONValue::LibBSONValue(const BSONElement& elem) : LibBSONValue() {
             break;
         }
         default:
-            uasserted(ErrorCodes::BadValue,
-                      "Unknown BSON value type {}"_format(static_cast<int>(elem.type())));
+            uasserted(ErrorCodes::BadValue, fmt::format("Unknown BSON value type {}", elem.type()));
     }
 }
 
@@ -246,7 +244,7 @@ void doSerialize(const bson_value_t& value,
             break;
         default:
             uasserted(ErrorCodes::BadValue,
-                      "Unknown BSON value type {}"_format(static_cast<int>(value.value_type)));
+                      fmt::format("Unknown BSON value type {}", fmt::underlying(value.value_type)));
     }
 }
 } // namespace
@@ -63,8 +63,6 @@ std::string MongoCryptStatus::reason() const {
 }
 
 Status MongoCryptStatus::toStatus() const {
-    using namespace fmt::literals;
-
     StringData errorPrefix;
     switch (mongocrypt_status_type(_status)) {
         case MONGOCRYPT_STATUS_OK:
@@ -83,7 +81,7 @@ Status MongoCryptStatus::toStatus() const {
             break;
     }
 
-    return Status(ErrorCodes::LibmongocryptError, "{}: {}"_format(errorPrefix, reason()));
+    return Status(ErrorCodes::LibmongocryptError, fmt::format("{}: {}", errorPrefix, reason()));
 }
 
 } // namespace mongo
@@ -99,13 +99,12 @@ public:
      * satisfies this restriction set.
      */
     Status validate(const RestrictionEnvironment& environment) const override {
-        using namespace fmt::literals;
-
         auto const addr = T::addr(environment);
         if (addr.getType() == AF_UNSPEC) {
             // GRPCTransportLayer doesn't know server local address.
             return {ErrorCodes::AuthenticationRestrictionUnmet,
-                    "{} restriction can not be verified when address is unknown"_format(T::label)};
+                    fmt::format("{} restriction can not be verified when address is unknown",
+                                T::label)};
         }
 
         if (!addr.isIP()) {
@@ -81,7 +81,6 @@
 
 
 namespace mongo::auth {
-using namespace fmt::literals;
 
 using ResolvedRoleData = AuthorizationBackendInterface::ResolvedRoleData;
 using ResolveRoleOption = AuthorizationBackendInterface::ResolveRoleOption;
@@ -386,8 +385,8 @@ StatusWith<ResolvedRoleData> AuthorizationBackendLocal::resolveRoles(
             for (const auto& privElem : elem.Obj()) {
                 if (privElem.type() != Object) {
                     return {ErrorCodes::UnsupportedFormat,
-                            "Expected privilege document as object, got {}"_format(
-                                typeName(privElem.type()))};
+                            fmt::format("Expected privilege document as object, got {}",
+                                        typeName(privElem.type()))};
                }
                auto pp = auth::ParsedPrivilege::parse(idlctx, privElem.Obj());
                Privilege::addPrivilegeToPrivilegeVector(
@@ -74,7 +74,6 @@
 
 
 namespace mongo {
-using namespace fmt::literals;
 
 using std::vector;
 using ResolveRoleOption = AuthzManagerExternalStateLocal::ResolveRoleOption;
@@ -42,10 +42,6 @@
 
 namespace mongo {
 
-namespace {
-using namespace fmt::literals;
-} // namespace
-
 OAuthAuthorizationServerMetadata OAuthDiscoveryFactory::acquire(StringData issuer) {
     // RFC8414 declares that the well-known addresses defined by OpenID Connect are valid for
     // compliant clients for legacy purposes. Newer clients should use
@@ -59,7 +55,7 @@ OAuthAuthorizationServerMetadata OAuthDiscoveryFactory::acquire(StringData issue
         issuer.remove_suffix(1);
     }
 
-    auto openIDConfiguationEndpoint = "{}/.well-known/openid-configuration"_format(issuer);
+    auto openIDConfiguationEndpoint = fmt::format("{}/.well-known/openid-configuration", issuer);
 
     DataBuilder results = _client->get(openIDConfiguationEndpoint);
     StringData textResult =
@@ -50,11 +50,11 @@
 #include "mongo/util/assert_util.h"
 
 namespace mongo {
-using namespace fmt::literals;
 namespace {
 void uassertNoConflict(StringData resource, StringData found, bool cond) {
-    uassert(
-        ErrorCodes::BadValue, "{} conflicts with resource type '{}'"_format(resource, found), cond);
+    uassert(ErrorCodes::BadValue,
+            fmt::format("{} conflicts with resource type '{}'", resource, found),
+            cond);
 }
 
 bool isValidCollectionName(StringData db, StringData coll) {
@@ -139,7 +139,7 @@ Privilege Privilege::resolvePrivilegeWithTenant(const boost::optional<TenantId>&
     auto db = rsrc.getDb().get();
     auto coll = rsrc.getCollection().get();
     uassert(ErrorCodes::BadValue,
-            "'{}' is not a valid collection name"_format(coll),
+            fmt::format("'{}' is not a valid collection name", coll),
            coll.empty() || isValidCollectionName(db, coll));
 
     if (db.empty() && coll.empty()) {
@@ -291,9 +291,9 @@ auth::ParsedPrivilege Privilege::toParsedPrivilege() const {
             break;
 
         default:
-            uasserted(
-                ErrorCodes::InvalidOptions,
-                "{} is not a valid user-grantable resource pattern"_format(_resource.toString()));
+            uasserted(ErrorCodes::InvalidOptions,
+                      fmt::format("{} is not a valid user-grantable resource pattern",
+                                  _resource.toString()));
     }
 
     pp.setResource(rsrc);
@@ -143,10 +143,10 @@ public:
      * "<dbName>.system.buckets.<collectionName>"
     */
     static ResourcePattern forExactSystemBucketsCollection(const NamespaceString& nss) {
-        using namespace fmt::literals;
        uassert(ErrorCodes::InvalidNamespace,
-                "Invalid namespace '{}.system.buckets.{}'"_format(
-                    nss.dbName().toStringForErrorMsg(), nss.coll()),
+                fmt::format("Invalid namespace '{}.system.buckets.{}'",
+                            nss.dbName().toStringForErrorMsg(),
+                            nss.coll()),
                !nss.coll().startsWith("system.buckets."));
        return ResourcePattern(MatchTypeEnum::kMatchExactSystemBucketResource, nss);
    }
@@ -65,7 +65,6 @@
 namespace mongo::auth {
 
 namespace {
-using namespace fmt::literals;
 
 constexpr auto kX509Str = "x509"_sd;
 constexpr auto kX509Subject = "C=US,ST=New York,L=New York City,O=MongoDB,OU=Kernel,CN=client"_sd;
@@ -74,11 +73,11 @@ constexpr auto kX509UTF8String = 12;
 BSONObj generateX509UserDocument(const StringData username) {
     const auto database = "$external"_sd;
 
-    return BSON("_id"
-                << "{}.{}"_format(database, username) << AuthorizationManager::USER_NAME_FIELD_NAME
-                << username << AuthorizationManager::USER_DB_FIELD_NAME << database << "roles"
-                << BSONArray() << "privileges" << BSONArray() << "credentials"
-                << BSON("external" << true));
+    return BSON("_id" << fmt::format("{}.{}", database, username)
+                      << AuthorizationManager::USER_NAME_FIELD_NAME << username
+                      << AuthorizationManager::USER_DB_FIELD_NAME << database << "roles"
+                      << BSONArray() << "privileges" << BSONArray() << "credentials"
+                      << BSON("external" << true));
 }
 
 // Construct a simple, structured X509 name equivalent to "CN=mongodb.com"
@@ -55,7 +55,6 @@
 namespace mongo::auth {
 
 namespace {
-using namespace fmt::literals;
 
 // Signed auth tokens are for internal testing only, and require the use of a preshared key.
 // These tokens will have fixed values for kid/iss/aud fields.
@@ -147,7 +146,7 @@ ParsedTokenView parseSignedToken(StringData token) {
 
 BSONObj decodeJSON(StringData b64) try { return fromjson(base64url::decode(b64)); } catch (...) {
     auto status = exceptionToStatus();
-    uasserted(status.code(), "Unable to parse security token: {}"_format(status.reason()));
+    uasserted(status.code(), fmt::format("Unable to parse security token: {}", status.reason()));
 }
 
 } // namespace
@@ -158,9 +157,10 @@ ValidatedTenancyScope ValidatedTenancyScopeFactory::parseUnsignedToken(Client* c
     const auto parsed = parseSignedToken(securityToken);
 
     auto header = crypto::JWSHeader::parse(ctxt, decodeJSON(parsed.header));
-    uassert(ErrorCodes::InvalidJWT,
-            "Unexpected algorithm '{}' for unsigned security token"_format(header.getAlgorithm()),
-            header.getAlgorithm() == "none");
+    uassert(
+        ErrorCodes::InvalidJWT,
+        fmt::format("Unexpected algorithm '{}' for unsigned security token", header.getAlgorithm()),
+        header.getAlgorithm() == "none");
 
     uassert(ErrorCodes::InvalidJWT,
             "Unexpected signature on unsigned security token",
@@ -225,16 +225,16 @@ ValidatedTenancyScope ValidatedTenancyScopeFactory::parseToken(Client* client,
     // These signed tokens are used exclusively by internal testing,
     // and should not ever have different values than what we create.
     uassert(ErrorCodes::BadValue,
-            "Security token must use kid == '{}'"_format(kTestOnlyKeyId),
+            fmt::format("Security token must use kid == '{}'", kTestOnlyKeyId),
            header.getKeyId() == kTestOnlyKeyId);
     uassert(ErrorCodes::BadValue,
-            "Security token must use iss == '{}'"_format(kTestOnlyIssuer),
+            fmt::format("Security token must use iss == '{}'", kTestOnlyIssuer),
           jwt.getIssuer() == kTestOnlyIssuer);
     uassert(ErrorCodes::BadValue,
-            "Security token must use aud == '{}'"_format(kTestOnlyAudience),
+            fmt::format("Security token must use aud == '{}'", kTestOnlyAudience),
           holds_alternative<std::string>(jwt.getAudience()));
     uassert(ErrorCodes::BadValue,
-            "Security token must use aud == '{}'"_format(kTestOnlyAudience),
+            fmt::format("Security token must use aud == '{}'", kTestOnlyAudience),
          std::get<std::string>(jwt.getAudience()) == kTestOnlyAudience);
 
     auto swUserName = UserName::parse(jwt.getSubject(), jwt.getTenantId());
@@ -294,8 +294,9 @@ ValidatedTenancyScope ValidatedTenancyScopeFactory::create(
     body.setExpiration(std::move(expiration));
     body.setExpectPrefix(protocol == ValidatedTenancyScope::TenantProtocol::kAtlasProxy);
 
-    std::string payload = "{}.{}"_format(base64url::encode(tojson(header.toBSON())),
-                                         base64url::encode(tojson(body.toBSON())));
+    std::string payload = fmt::format("{}.{}",
+                                      base64url::encode(tojson(header.toBSON())),
+                                      base64url::encode(tojson(body.toBSON())));
 
     auto computed =
         SHA256Block::computeHmac(reinterpret_cast<const std::uint8_t*>(secret.rawData()),
@@ -304,9 +305,10 @@ ValidatedTenancyScope ValidatedTenancyScopeFactory::create(
                                  payload.size());
 
     const std::string originalToken =
-        "{}.{}"_format(payload,
-                       base64url::encode(StringData(reinterpret_cast<const char*>(computed.data()),
-                                                    computed.size())));
+        fmt::format("{}.{}",
+                    payload,
+                    base64url::encode(StringData(reinterpret_cast<const char*>(computed.data()),
+                                                 computed.size())));
 
     if (gTestOnlyValidatedTenancyScopeKey == secret) {
         return ValidatedTenancyScope(userName, originalToken, body.getExpiration(), protocol);
@@ -330,8 +332,9 @@ ValidatedTenancyScope ValidatedTenancyScopeFactory::create(
     body.setExpiration(Date_t::max());
     body.setExpectPrefix(protocol == ValidatedTenancyScope::TenantProtocol::kAtlasProxy);
 
-    const std::string originalToken = "{}.{}."_format(base64url::encode(tojson(header.toBSON())),
-                                                      base64url::encode(tojson(body.toBSON())));
+    const std::string originalToken = fmt::format("{}.{}.",
+                                                  base64url::encode(tojson(header.toBSON())),
+                                                  base64url::encode(tojson(body.toBSON())));
     return ValidatedTenancyScope(originalToken, std::move(tenant), protocol);
 }
 
@@ -347,15 +350,16 @@ ValidatedTenancyScope ValidatedTenancyScopeFactory::create(TenantId tenant,
     header.setKeyId("none"_sd);
 
     crypto::JWT body;
-    body.setIssuer("mongodb://{}"_format(prettyHostNameAndPort(serverGlobalParams.port)));
+    body.setIssuer(fmt::format("mongodb://{}", prettyHostNameAndPort(serverGlobalParams.port)));
     body.setSubject(".");
     body.setAudience(std::string{"interal-request"});
     body.setTenantId(tenant);
     body.setExpiration(Date_t::max());
     body.setExpectPrefix(false); // Always use default protocol, not expect prefix.
 
-    const std::string originalToken = "{}.{}."_format(base64url::encode(tojson(header.toBSON())),
-                                                      base64url::encode(tojson(body.toBSON())));
+    const std::string originalToken = fmt::format("{}.{}.",
+                                                  base64url::encode(tojson(header.toBSON())),
+                                                  base64url::encode(tojson(body.toBSON())));
     return ValidatedTenancyScope(
         originalToken, std::move(tenant), ValidatedTenancyScope::TenantProtocol::kDefault);
 }
@@ -49,7 +49,6 @@
 #include "mongo/util/version/releases.h"
 
 namespace mongo {
-using namespace fmt::literals;
 using unittest::assertGet;
 
 void checkRoundTrip(const CollectionOptions& options) {
@@ -521,7 +521,6 @@ TEST_F(CreateVirtualCollectionTest, VirtualCollectionOptionsWithMultiSource) {
 }
 
 TEST_F(CreateVirtualCollectionTest, InvalidVirtualCollectionOptions) {
-    using namespace fmt::literals;
 
     NamespaceString vcollNss = NamespaceString::createNamespaceString_forTest("myDb", "vcoll.name");
     auto opCtx = makeOpCtx();
@@ -540,7 +539,7 @@ TEST_F(CreateVirtualCollectionTest, InvalidVirtualCollectionOptions) {
         }
 
         ASSERT_TRUE(exceptionOccurred)
-            << "Invalid 'url': {} must fail but succeeded"_format(kInvalidUrl);
+            << fmt::format("Invalid 'url': {} must fail but succeeded", kInvalidUrl);
     }
 
     {
@@ -555,8 +554,8 @@ TEST_F(CreateVirtualCollectionTest, InvalidVirtualCollectionOptions) {
         }
 
         ASSERT_TRUE(exceptionOccurred)
-            << "Unknown 'storageType': {} must fail but succeeded"_format(
-                   stdx::to_underlying(kInvalidStorageTypeEnum));
+            << fmt::format("Unknown 'storageType': {} must fail but succeeded",
+                           stdx::to_underlying(kInvalidStorageTypeEnum));
     }
 
     {
@@ -570,8 +569,9 @@ TEST_F(CreateVirtualCollectionTest, InvalidVirtualCollectionOptions) {
             exceptionOccurred = true;
         }
 
-        ASSERT_TRUE(exceptionOccurred) << "Unknown 'fileType': {} must fail but succeeded"_format(
-            stdx::to_underlying(kInvalidFileTypeEnum));
+        ASSERT_TRUE(exceptionOccurred)
+            << fmt::format("Unknown 'fileType': {} must fail but succeeded",
+                           stdx::to_underlying(kInvalidFileTypeEnum));
     }
 }
 }  // namespace
@@ -729,7 +729,6 @@ Status isDroppableCollection(OperationContext* opCtx, const NamespaceString& nss
             nss.isSystemDotJavascript() || nss.isSystemStatsCollection();
     };
 
-    using namespace fmt::literals;
     if (nss.isSystemDotProfile()) {
         if (DatabaseProfileSettings::get(opCtx->getServiceContext())
                 .getDatabaseProfileLevel(nss.dbName()) != 0)
@@ -742,13 +741,14 @@ Status isDroppableCollection(OperationContext* opCtx, const NamespaceString& nss
             if (!viewStats || viewStats->userTimeseries != 0) {
                 return Status(
                     ErrorCodes::CommandFailed,
-                    "cannot drop collection {} when time-series collections are present"_format(
+                    fmt::format(
+                        "cannot drop collection {} when time-series collections are present",
                         nss.toStringForErrorMsg()));
             }
         }
     } else if (!isDroppableSystemCollection(nss)) {
         return Status(ErrorCodes::IllegalOperation,
-                      "cannot drop system collection {}"_format(nss.toStringForErrorMsg()));
+                      fmt::format("cannot drop system collection {}", nss.toStringForErrorMsg()));
     }
 
     return Status::OK();
@@ -47,9 +47,8 @@ struct ExternalDataSourceMetadata {
                                StorageTypeEnum storageTypeEnum,
                                FileTypeEnum fileTypeEnum)
         : url(urlStr), storageType(storageTypeEnum), fileType(fileTypeEnum) {
-        using namespace fmt::literals;
         uassert(6968500,
-                "File url must start with {}"_format(kUrlProtocolFile),
+                fmt::format("File url must start with {}", kUrlProtocolFile),
                 urlStr.startsWith(kUrlProtocolFile));
         uassert(6968501, "Storage type must be 'pipe'", storageType == StorageTypeEnum::pipe);
         uassert(6968502, "File type must be 'bson'", fileType == FileTypeEnum::bson);
@@ -49,11 +49,10 @@ inline StringMap<std::string> clusterCommandTranslations = {
     {"update", "clusterUpdate"}};
 
 inline BSONObj replaceCommandNameWithClusterCommandName(BSONObj cmdObj) {
-    using namespace fmt::literals;
     auto cmdName = cmdObj.firstElement().fieldNameStringData();
     auto newNameIt = clusterCommandTranslations.find(cmdName);
     uassert(6349501,
-            "Cannot use unsupported command {} with cluster transaction API"_format(cmdName),
+            fmt::format("Cannot use unsupported command {} with cluster transaction API", cmdName),
             newNameIt != clusterCommandTranslations.end());
 
     return cmdObj.replaceFieldNames(BSON(newNameIt->second << 1));
@@ -46,7 +46,6 @@
 namespace mongo::repl {
 namespace {
 
-using namespace fmt::literals;
 
 MONGO_INITIALIZER_GENERAL(DisableLogging, (), ())
 (InitializerContext*) {
@@ -72,13 +71,15 @@ class CollectionAcquisitionBenchmark {
 public:
     explicit CollectionAcquisitionBenchmark(benchmark::State& state) : _state{state} {
         if constexpr (superVerbose)
-            std::cout << "CollectionAcquisitionBenchmark ctor: thread=[{}/{}]\n"_format(
-                _state.thread_index, _state.threads);
+            std::cout << fmt::format("CollectionAcquisitionBenchmark ctor: thread=[{}/{}]\n",
+                                     _state.thread_index,
+                                     _state.threads);
     }
     ~CollectionAcquisitionBenchmark() {
         if constexpr (superVerbose)
-            std::cout << "CollectionAcquisitionBenchmark dtor: thread=[{}/{}]\n"_format(
-                _state.thread_index, _state.threads);
+            std::cout << fmt::format("CollectionAcquisitionBenchmark dtor: thread=[{}/{}]\n",
+                                     _state.thread_index,
+                                     _state.threads);
     }
 
     template <typename F>
@@ -89,8 +90,8 @@ public:
 
     OperationContext* getOperationContext() {
         if constexpr (superVerbose)
-            std::cout << "getOperationContext (thread=[{}/{}])\n"_format(_state.thread_index,
-                                                                         _state.threads);
+            std::cout << fmt::format(
+                "getOperationContext (thread=[{}/{}])\n", _state.thread_index, _state.threads);
         return _uniqueOpCtx.get();
     }
 
@@ -105,7 +106,7 @@ private:
    public:
        TestEnv() {
            if constexpr (superVerbose)
-                std::cout << "Creating TestEnv @{}\n"_format((void*)this);
+                std::cout << fmt::format("Creating TestEnv @{}\n", (void*)this);
            _thread = unittest::JoinThread([this] {
                auto uniqueTest = std::make_unique<Test>();
                _test = uniqueTest.get();
@@ -167,7 +168,7 @@ private:
             std::cout << "Sharing fixture\n";
         }
         if constexpr (superVerbose)
-            std::cout << "Got fixture @{}\n"_format((void*)sp.get());
+            std::cout << fmt::format("Got fixture @{}\n", (void*)sp.get());
 
         _threadClient.emplace(sp->getService());
         _uniqueOpCtx = sp->makeOperationContext();
@@ -86,7 +86,6 @@ const std::set<std::string> kNoApiVersions = {};
 const std::set<std::string> kApiVersions1 = {"1"};
 
 namespace {
-using namespace fmt::literals;
 
 const int kFailedFindCommandDebugLevel = 3;
 
@@ -589,8 +588,9 @@ bool CommandHelpers::uassertShouldAttemptParse(OperationContext* opCtx,
 void CommandHelpers::uassertCommandRunWithMajority(StringData commandName,
                                                    const WriteConcernOptions& writeConcern) {
     uassert(ErrorCodes::InvalidOptions,
-            "\"{}\" must be called with majority writeConcern, got: {} "_format(
-                commandName, writeConcern.toBSON().toString()),
+            fmt::format("\"{}\" must be called with majority writeConcern, got: {} ",
+                        commandName,
+                        writeConcern.toBSON().toString()),
             writeConcern.isMajority());
 }
 
@@ -1086,7 +1086,7 @@ void Command::initializeClusterRole(ClusterRole role) {
              std::pair{&_commandsFailed, "failed"},
              std::pair{&_commandsRejected, "rejected"},
          })
-        *ptr = &*MetricBuilder<Counter64>{"commands.{}.{}"_format(_name, stat)}.setRole(role);
+        *ptr = &*MetricBuilder<Counter64>{fmt::format("commands.{}.{}", _name, stat)}.setRole(role);
     doInitializeClusterRole(role);
 }
 
@@ -1162,7 +1162,7 @@ void CommandRegistry::registerCommand(Command* command) {
     auto ep = std::make_unique<Entry>();
     ep->command = command;
     auto [cIt, cOk] = _commands.emplace(command, std::move(ep));
-    invariant(cOk, "Command identity collision: {}"_format(name));
+    invariant(cOk, fmt::format("Command identity collision: {}", name));
 
     // When a `Command*` is introduced to `_commands`, its names are introduced
     // to `_commandNames`.
@@ -1171,7 +1171,7 @@ void CommandRegistry::registerCommand(Command* command) {
         if (key.empty())
             continue;
         auto [nIt, nOk] = _commandNames.try_emplace(key, command);
-        invariant(nOk, "Command name collision: {}"_format(key));
+        invariant(nOk, fmt::format("Command name collision: {}", key));
     }
 }
 
@@ -1207,7 +1207,7 @@ BSONObj toBSON(const CommandConstructionPlan::Entry& e) {
     bob.append("expr", e.expr);
     bob.append("roles", toString(e.roles.value_or(ClusterRole::None)));
     if (e.location)
-        bob.append("loc", "{}:{}"_format(e.location->file_name(), e.location->line()));
+        bob.append("loc", fmt::format("{}:{}", e.location->file_name(), e.location->line()));
     return bob.obj();
 }
@@ -65,8 +65,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
-
 class AsyncCommandExecutionTest : public unittest::Test, public ScopedGlobalServiceContextForTest {
 public:
     struct TestState;
@@ -93,8 +93,6 @@ using repl::UnreplicatedWritesBlock;
 using GenericFCV = multiversion::GenericFCV;
 using FCV = multiversion::FeatureCompatibilityVersion;
 
-using namespace fmt::literals;
-
 namespace {
 
 /**
@@ -305,11 +303,12 @@ void FeatureCompatibilityVersion::validateSetFeatureCompatibilityVersionRequest(
     auto newVersion = setFCVRequest.getCommandParameter();
     auto isFromConfigServer = setFCVRequest.getFromConfigServer().value_or(false);
 
-    uassert(
-        5147403,
-        "cannot set featureCompatibilityVersion to '{}' while featureCompatibilityVersion is '{}'"_format(
-            multiversion::toString(newVersion), multiversion::toString(fromVersion)),
-        fcvTransitions.permitsTransition(fromVersion, newVersion, isFromConfigServer));
+    uassert(5147403,
+            fmt::format("cannot set featureCompatibilityVersion to '{}' while "
+                        "featureCompatibilityVersion is '{}'",
+                        multiversion::toString(newVersion),
+                        multiversion::toString(fromVersion)),
+            fcvTransitions.permitsTransition(fromVersion, newVersion, isFromConfigServer));
 
     auto fcvObj = findFeatureCompatibilityVersionDocument(opCtx);
     if (!fcvObj.isOK()) {
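One mechanical detail of rewrites like the one above: when a message no longer fits on one line, the new code splits the format string across adjacent string literals, which the compiler concatenates into a single string at translation time, so the wrapped form passes exactly the same format string. A sketch (hypothetical helper, not the server's code):

#include <fmt/format.h>

#include <string>

std::string fcvError(const std::string& to, const std::string& from) {
    // Adjacent literals concatenate before fmt ever sees them, so this is
    // identical to passing the whole message as one long literal.
    return fmt::format(
        "cannot set featureCompatibilityVersion to '{}' while "
        "featureCompatibilityVersion is '{}'",
        to,
        from);
}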
@@ -81,8 +81,6 @@
 
 #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite
 
-using namespace fmt::literals;
-
 MONGO_FAIL_POINT_DEFINE(fleCompactOrCleanupFailBeforeECOCRead);
 MONGO_FAIL_POINT_DEFINE(fleCompactHangBeforeESCAnchorInsert);
 MONGO_FAIL_POINT_DEFINE(fleCleanupHangBeforeNullAnchorUpdate);
@@ -350,13 +348,13 @@ stdx::unordered_set<ECOCCompactionDocumentV2> getUniqueCompactionDocuments(
 
     for (auto& doc : docs) {
         auto ecocDoc = ECOCCompactionDocumentV2::parseAndDecrypt(doc, compactionToken.token);
-        uassert(
-            8574701,
-            "Compaction token for field '{}' is of type '{}', but ECOCDocument is of type '{}'"_format(
-                compactionToken.fieldPathName,
-                compactionToken.isRange() ? "range"_sd : "equality"_sd,
-                ecocDoc.isRange() ? "range"_sd : "equality"_sd),
-            ecocDoc.isRange() == compactionToken.isRange());
+        uassert(8574701,
+                fmt::format("Compaction token for field '{}' is of type '{}', but ECOCDocument "
+                            "is of type '{}'",
+                            compactionToken.fieldPathName,
+                            compactionToken.isRange() ? "range"_sd : "equality"_sd,
+                            ecocDoc.isRange() ? "range"_sd : "equality"_sd),
+                ecocDoc.isRange() == compactionToken.isRange());
         if (compactionToken.isRange()) {
             ecocDoc.anchorPaddingRootToken = compactionToken.anchorPaddingToken;
         }
@@ -709,9 +707,9 @@ void processFLECompactV2(OperationContext* opCtx,
     // Validate that we have an EncryptedFieldConfig for each range field.
     if (!rangeFields.empty()) {
         uassert(8574702,
-                "Command '{}' requires field '{}' when range fields are present"_format(
-                    CompactStructuredEncryptionData::kCommandName,
-                    CompactStructuredEncryptionData::kEncryptionInformationFieldName),
+                fmt::format("Command '{}' requires field '{}' when range fields are present",
+                            CompactStructuredEncryptionData::kCommandName,
+                            CompactStructuredEncryptionData::kEncryptionInformationFieldName),
                 request.getEncryptionInformation());
         auto efc = EncryptionInformationHelpers::getAndValidateSchema(
             request.getNamespace(), request.getEncryptionInformation().get());
@@ -720,18 +718,18 @@ void processFLECompactV2(OperationContext* opCtx,
             auto fieldConfig = std::find_if(efcFields.begin(), efcFields.end(), [&](const auto& f) {
                 return rfIt.first == f.getPath();
             });
-            uassert(
-                8574705,
-                "Missing range field '{}' in '{}'"_format(
-                    rfIt.first, CompactStructuredEncryptionData::kEncryptionInformationFieldName),
-                fieldConfig != efcFields.end());
+            uassert(8574705,
+                    fmt::format("Missing range field '{}' in '{}'",
+                                rfIt.first,
+                                CompactStructuredEncryptionData::kEncryptionInformationFieldName),
+                    fieldConfig != efcFields.end());
             rfIt.second.queryTypeConfig = getQueryType(*fieldConfig, QueryTypeEnum::Range);
 
-            uassert(
-                9107500,
-                "Missing bsonType for range field '{}' in '{}'"_format(
-                    rfIt.first, CompactStructuredEncryptionData::kEncryptionInformationFieldName),
-                fieldConfig->getBsonType().has_value());
+            uassert(9107500,
+                    fmt::format("Missing bsonType for range field '{}' in '{}'",
+                                rfIt.first,
+                                CompactStructuredEncryptionData::kEncryptionInformationFieldName),
+                    fieldConfig->getBsonType().has_value());
             rfIt.second.fieldType = typeFromName(fieldConfig->getBsonType().value());
         }
     }
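Worth noting why building these messages with `fmt::format` is acceptable on hot paths: `uassert` is a macro that evaluates its message argument only when the condition is false, so the formatting cost is paid on the failure path alone. A simplified sketch of that shape, with a hypothetical `fail()` helper standing in for the real error machinery:

#include <fmt/format.h>

#include <stdexcept>
#include <string>

[[noreturn]] inline void fail(int code, const std::string& msg) {
    throw std::runtime_error(fmt::format("{}: {}", code, msg));
}

// Simplified stand-in for the server's uassert: msg is only evaluated
// when the condition does not hold.
#define UASSERT_SKETCH(code, msg, cond) \
    do {                                \
        if (!(cond)) {                  \
            fail(code, msg);            \
        }                               \
    } while (false)

inline void check(int fieldCount) {
    // The fmt::format call below runs only if fieldCount != 1.
    UASSERT_SKETCH(
        8574701, fmt::format("unexpected field count: {}", fieldCount), fieldCount == 1);
}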
@@ -71,7 +71,6 @@
 
 namespace mongo {
 namespace {
-using namespace fmt::literals;
 
 class PipeWaiter {
 public:
@@ -143,7 +142,7 @@ protected:
         BSONObj res = runCommand(originalAggCommand.addFields(BSON("explain" << true)));
         // Sanity checks of result.
         ASSERT_EQ(res["ok"].Number(), 1.0)
-            << "Expected to succeed but failed. result = {}"_format(res.toString());
+            << fmt::format("Expected to succeed but failed. result = {}", res.toString());
     }
 
     PseudoRandom _random{SecureRandom{}.nextInt64()};
@@ -130,7 +130,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
 
 MONGO_FAIL_POINT_DEFINE(rsStopGetMoreCmd);
 MONGO_FAIL_POINT_DEFINE(getMoreHangAfterPinCursor);
@@ -342,8 +341,10 @@ void setUpOperationContextStateForGetMore(OperationContext* opCtx,
     auto apiParamsFromClient = APIParameters::get(opCtx);
     uassert(
         ErrorCodes::APIMismatchError,
-        "API parameter mismatch: getMore used params {}, the cursor-creating command used {}"_format(
-            apiParamsFromClient.toBSON().toString(), cursor.getAPIParameters().toBSON().toString()),
+        fmt::format(
+            "API parameter mismatch: getMore used params {}, the cursor-creating command used {}",
+            apiParamsFromClient.toBSON().toString(),
+            cursor.getAPIParameters().toBSON().toString()),
         apiParamsFromClient == cursor.getAPIParameters());
 
     setUpOperationDeadline(opCtx, cursor, cmd, disableAwaitDataFailpointActive);
@@ -91,7 +91,6 @@ namespace {
 using namespace std::string_literals;
 
 Status interpretTranslationError(DBException* ex, const MapReduceCommandRequest& parsedMr) {
-    using namespace fmt::literals;
 
     auto status = ex->toStatus();
     auto outOptions = parsedMr.getOutOptions();
@@ -105,8 +104,8 @@ Status interpretTranslationError(DBException* ex, const MapReduceCommandRequest&
     std::string error;
     switch (static_cast<int>(ex->code())) {
         case ErrorCodes::InvalidNamespace:
-            error =
-                "Invalid output namespace {} for MapReduce"_format(outNss.toStringForErrorMsg());
+            error = fmt::format("Invalid output namespace {} for MapReduce",
+                                outNss.toStringForErrorMsg());
             break;
         case 15976:
             error = "The mapReduce sort option must have at least one sort key";
@@ -121,12 +120,13 @@ Status interpretTranslationError(DBException* ex, const MapReduceCommandRequest&
             break;
         case 17385:
         case 31319:
-            error = "Can't output mapReduce results to special collection {}"_format(outNss.coll());
+            error = fmt::format("Can't output mapReduce results to special collection {}",
+                                outNss.coll());
             break;
         case 31320:
         case 31321:
-            error = "Can't output mapReduce results to internal DB {}"_format(
-                outNss.dbName().toStringForErrorMsg());
+            error = fmt::format("Can't output mapReduce results to internal DB {}",
+                                outNss.dbName().toStringForErrorMsg());
             break;
         default:
             // Prepend MapReduce context in the event of an unknown exception.
@@ -41,8 +41,8 @@
 namespace mongo {
 namespace {
 Counter64* getSingletonMetricPtr(StringData commandName, StringData stat, ClusterRole role) {
-    using namespace fmt::literals;
-    return &*MetricBuilder<Counter64>{"commands.{}.{}"_format(commandName, stat)}.setRole(role);
+    return &*MetricBuilder<Counter64>{fmt::format("commands.{}.{}", commandName, stat)}.setRole(
+        role);
 }
 }  // namespace
@@ -56,7 +56,6 @@ ServerStatusSectionRegistry::RoleTag ServerStatusSectionRegistry::getTagForRole(
 }
 
 void ServerStatusSectionRegistry::addSection(std::unique_ptr<ServerStatusSection> section) {
-    using namespace fmt::literals;
     // Disallow adding a section named "timing" as it is reserved for the server status command.
     dassert(section->getSectionName() != kTimingSection);
     MONGO_verify(!_runCalled.load());
@@ -75,13 +74,15 @@ void ServerStatusSectionRegistry::addSection(std::unique_ptr<ServerStatusSection
     for (auto&& it = lower; it != upper; ++it) {
         auto existingSectionRole = it->first.second;
         invariant(!areRolesIncompatible(existingSectionRole, roleTag),
-                  "Duplicate ServerStatusSection Registration with name {} and role {}"_format(
-                      name, role));
+                  fmt::format("Duplicate ServerStatusSection Registration with name {} and role {}",
+                              name,
+                              toString(role)));
     }
     auto [iter, ok] = _sections.try_emplace({name, roleTag}, std::move(section));
-    invariant(
-        ok,
-        "Duplicate ServerStatusSection Registration with name {} and role {}"_format(name, role));
+    invariant(ok,
+              fmt::format("Duplicate ServerStatusSection Registration with name {} and role {}",
+                          name,
+                          toString(role)));
 }
 
 ServerStatusSectionRegistry::SectionMap::const_iterator ServerStatusSectionRegistry::begin() {
@@ -54,8 +54,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
-
 /** Algorithm object implementing the `appendMergedTrees` function. */
 class AppendMergedTreesInvocation {
     /** Represents progress in the iteration through the children of one node. */
@@ -169,9 +167,10 @@ public:
             if (!excluded) {
                 if (!frame.inSubtreePhase) {
                     // This node has no subtrees. It should therefore have one member.
-                    uassert(ErrorCodes::BadValue,
-                            "Collision between trees at node {}.{}"_format(_pathDiagJoin(), key),
-                            relevant.size() == 1);
+                    uassert(
+                        ErrorCodes::BadValue,
+                        fmt::format("Collision between trees at node {}.{}", _pathDiagJoin(), key),
+                        relevant.size() == 1);
                     cursor.getMetric().appendTo(*frame.bob, key);
                 } else {
                     _stack.push_back(_descentFrame(relevant, std::move(excludeSub)));
@@ -299,7 +298,7 @@ void MetricTree::add(StringData path, std::unique_ptr<ServerStatusMetric> metric
         if (!path.empty())
             _add(path, std::move(metric));
     } else {
-        _add("metrics.{}"_format(path), std::move(metric));
+        _add(fmt::format("metrics.{}", path), std::move(metric));
     }
 }
@@ -43,8 +43,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
-
 bool falseNodesPredicate(const BSONElement& el) {
     return el.type() == Bool && !el.boolean();
 }
@@ -106,7 +104,7 @@ protected:
         switch (el.type()) {
             case Object:
                 for (auto&& v : extractTreeNodes(el.Obj(), pred))
-                    nodes.push_back("{}.{}"_format(key, v));
+                    nodes.push_back(fmt::format("{}.{}", key, v));
                 break;
             default:
                 if (!pred || pred(el))
@@ -150,7 +148,7 @@ protected:
 
     /** Adds the implicit "metrics" root to the dotted `path`. */
     static std::string mStr(StringData path) {
-        return "metrics.{}"_format(path);
+        return fmt::format("metrics.{}", path);
     }
 
    /** New vector from adding implicit "metrics" to each element. */
@@ -226,7 +224,8 @@ TEST_F(MetricTreeTest, ValidateCounterMetric) {
     auto& counter = addCounter("tree.counter");
     for (auto&& incr : {1, 2}) {
         counter.increment(incr);
-        ASSERT_BSONOBJ_EQ(serialize(), mJson("{{tree:{{counter:{}}}}}"_format(counter.get())));
+        ASSERT_BSONOBJ_EQ(serialize(),
+                          mJson(fmt::format("{{tree:{{counter:{}}}}}", counter.get())));
     }
 }
 
@@ -234,7 +233,7 @@ TEST_F(MetricTreeTest, ValidateTextMetric) {
     auto& text = *MetricBuilder<std::string>{"tree.text"}.setTreeSet(&trees());
     for (auto&& str : {"hello", "bye"}) {
         text = std::string{str};
-        ASSERT_BSONOBJ_EQ(serialize(), mJson("{{tree:{{text:\"{}\"}}}}"_format(str)));
+        ASSERT_BSONOBJ_EQ(serialize(), mJson(fmt::format("{{tree:{{text:\"{}\"}}}}", str)));
     }
 }
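The doubled braces in the JSON templates above are fmt's escape syntax: `{{` and `}}` emit literal braces, while `{}` stays a replacement field. A standalone sketch:

#include <fmt/format.h>

#include <cassert>

int main() {
    // "{{" and "}}" produce literal '{' and '}'; "{}" consumes the argument.
    auto json = fmt::format("{{tree:{{counter:{}}}}}", 3);
    assert(json == "{tree:{counter:3}}");
    return 0;
}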
@@ -67,7 +67,6 @@
 
 namespace mongo {
 
-using namespace fmt::literals;
 namespace {
 const WriteConcernOptions kMajorityWriteConcern{WriteConcernOptions::kMajority,
                                                 WriteConcernOptions::SyncMode::UNSET,
@@ -107,12 +106,14 @@ public:
                     request().getCommandParameter().nFields() > 0);
 
             uassert(ErrorCodes::InvalidOptions,
-                    "{} only supports setting exactly one parameter"_format(Request::kCommandName),
+                    fmt::format("{} only supports setting exactly one parameter",
+                                Request::kCommandName),
                     request().getCommandParameter().nFields() == 1);
 
             uassert(
                 ErrorCodes::NoSuchKey,
-                "Unknown server parameter: {}"_format(
+                fmt::format(
+                    "Unknown server parameter: {}",
                     query_settings::QuerySettingsManager::kQuerySettingsClusterParameterName),
                 !request().getCommandParameter()
                     [query_settings::QuerySettingsManager::kQuerySettingsClusterParameterName]);
@@ -143,8 +143,6 @@
 #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
 
 
-using namespace fmt::literals;
-
 namespace mongo {
 namespace {
 
@@ -212,9 +210,11 @@ void abortAllReshardCollection(OperationContext* opCtx) {
 
         uasserted(
             ErrorCodes::ManualInterventionRequired,
-            "reshardCollection was not properly cleaned up after attempted abort for these ns: "
-            "[{}]. This is sign that the resharding operation was interrupted but not "
-            "aborted."_format(nsListStr));
+            fmt::format(
+                "reshardCollection was not properly cleaned up after attempted abort for these ns: "
+                "[{}]. This is sign that the resharding operation was interrupted but not "
+                "aborted.",
+                nsListStr));
     }
 }
 
@@ -361,9 +361,11 @@ public:
             serverGlobalParams.featureCompatibility.acquireFCVSnapshot().getVersion();
 
         auto isConfirmed = request.getConfirm().value_or(false);
-        const auto upgradeMsg =
-            "Once you have upgraded to {}, you will not be able to downgrade FCV and binary version without support assistance. Please re-run this command with 'confirm: true' to acknowledge this and continue with the FCV upgrade."_format(
-                multiversion::toString(requestedVersion));
+        const auto upgradeMsg = fmt::format(
+            "Once you have upgraded to {}, you will not be able to downgrade FCV and binary "
+            "version without support assistance. Please re-run this command with 'confirm: true' "
+            "to acknowledge this and continue with the FCV upgrade.",
+            multiversion::toString(requestedVersion));
         const auto downgradeMsg =
             "Once you have downgraded the FCV, if you choose to downgrade the binary version, "
             "it will require support assistance. Please re-run this command with 'confirm: "
@@ -79,7 +79,6 @@
 namespace mongo {
 namespace {
 
-using namespace fmt::literals;
 
 using service_context_test::RoleOverride;
 using service_context_test::ServerRoleIndex;
@@ -524,7 +523,7 @@ class TrivialNopCommand : public BasicCommand {
     }
 
     static std::string makeName() {
-        return "trivialNopCommand_{}_{}"_format(n, nextSerial());
+        return fmt::format("trivialNopCommand_{}_{}", n, nextSerial());
     }
 
 public:
@@ -101,10 +101,8 @@ template <ErrorCodes::Error ec>
  * modify that document, this exception will get thrown by one of them.
  */
 [[noreturn]] inline void throwWriteConflictException(StringData context) {
-    using namespace fmt::literals;
-    error_details::throwExceptionFor<ErrorCodes::WriteConflict>(
-        "Caused by :: {} :: Please retry your operation or multi-document transaction."_format(
-            context));
+    error_details::throwExceptionFor<ErrorCodes::WriteConflict>(fmt::format(
+        "Caused by :: {} :: Please retry your operation or multi-document transaction.", context));
 }
 
 /**
@@ -60,7 +60,7 @@ namespace {
 
 template <typename T>
 std::string formatHex(T&& x) {
-    return format(FMT_STRING("{:#x}"), x);
+    return fmt::format("{:#x}", x);
 }
 
 std::string formatPtr(const void* x) {
@@ -68,7 +68,7 @@ std::string formatPtr(const void* x) {
 }
 
 std::string formatThreadId(stdx::thread::id x) {
-    return format(FMT_STRING("{}"), x);
+    return fmt::format("{}", fmt::streamed(x));
 }
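The `formatThreadId` change is not cosmetic: since fmt 9, types that are only printable via `operator<<`, such as `std::thread::id`, are no longer formatted implicitly and must be wrapped in `fmt::streamed` (or given an `ostream_formatter` specialization). A minimal sketch:

#include <fmt/format.h>
#include <fmt/ostream.h>  // for fmt::streamed

#include <string>
#include <thread>

std::string currentThreadId() {
    // std::thread::id only provides operator<<; fmt::streamed adapts it
    // so fmt::format can print it.
    return fmt::format("{}", fmt::streamed(std::this_thread::get_id()));
}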
@@ -341,7 +341,7 @@ void Locker::reacquireTicket(OperationContext* opCtx) {
         uassert(ErrorCodes::LockTimeout,
                 fmt::format("Unable to acquire ticket with mode '{}' due to detected lock "
                             "conflict for resource {}",
-                            _modeForTicket,
+                            fmt::underlying(_modeForTicket),
                             it.key().toString()),
                 !_lockManager->hasConflictingRequests(it.key(), it.objAddr()));
 }
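Enums get the same treatment: newer fmt releases no longer format a plain enum argument implicitly, which is why `_modeForTicket` is now wrapped in `fmt::underlying` to yield the enum's underlying integer value. A sketch with a hypothetical `LockMode` enum:

#include <fmt/format.h>

#include <string>

enum class LockMode : int { kShared = 1, kExclusive = 2 };

std::string describe(LockMode mode) {
    // fmt::underlying converts the enum to its underlying integer type,
    // which fmt formats directly.
    return fmt::format("mode '{}'", fmt::underlying(mode));
}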
@@ -73,15 +73,8 @@ boost::optional<StringData> isIneligibleForDiagnosticPrinting(OperationContext*
     return boost::none;
 }
 
-OutIt CurOpPrinter::format(fmt::basic_format_context<OutIt, char>& fc) const {
-    auto out = fc.out();
-
-    if (auto msg = isIneligibleForDiagnosticPrinting(opCtx)) {
-        out = format_to(out, FMT_STRING("{}"), msg.get());
-        return out;
-    }
-
-    CurOp* curOp = CurOp::get(opCtx);
+auto CurOpPrinter::_gatherInfo() const -> Info {
+    CurOp* curOp = CurOp::get(_opCtx);
     const Command* curCommand = curOp->getCommand();
 
     // Remove sensitive fields from the command object before logging.
@@ -92,16 +85,10 @@ OutIt CurOpPrinter::format(fmt::basic_format_context<OutIt, char>& fc) const {
     curCommand->snipForLogging(&cmdToLog);
     BSONObj cmd = cmdToLog.getObject();
 
-    auto opDesc = redact(cmd).toString();
-    auto opDebug = redact(serializeOpDebug(opCtx, *curOp)).toString();
-    auto origCommand = redact(curOp->originatingCommand()).toString();
-    out = format_to(
-        out,
-        FMT_STRING("{{'currentOp': {}, 'opDescription': {}{}}}"),
-        opDebug,
-        opDesc,
-        curOp->originatingCommand().isEmpty() ? "" : ", 'originatingCommand': " + origCommand);
-    return out;
+    return {redact(cmd).toString(),
+            redact(serializeOpDebug(_opCtx, *curOp)).toString(),
+            curOp->originatingCommand().isEmpty() ? boost::optional<std::string>{}
+                                                  : redact(curOp->originatingCommand()).toString()};
 }
 
 }  // namespace mongo::diagnostic_printers
@@ -31,6 +31,7 @@
 
 #include <fmt/format.h>
 
 #include "mongo/base/string_data.h"
 #include "mongo/db/operation_context.h"
 
 /*
@@ -42,13 +43,13 @@
 
 namespace mongo::diagnostic_printers {
 
-static constexpr StringData kOmitUnsupportedCurOpMsg =
+constexpr inline auto kOmitUnsupportedCurOpMsg =
     "omitted: this CurOp does not support diagnostic printing"_sd;
-static constexpr StringData kOmitUnrecognizedCommandMsg = "omitted: unrecognized command";
-static constexpr StringData kOmitUnsupportedCommandMsg =
+constexpr inline auto kOmitUnrecognizedCommandMsg = "omitted: unrecognized command"_sd;
+constexpr inline auto kOmitUnsupportedCommandMsg =
     "omitted: command does not support diagnostic printing"_sd;
-static constexpr StringData kOpCtxIsNullMsg = "opCtx is null"_sd;
-static constexpr StringData kCurOpIsNullMsg = "the opCtx's curOp is null"_sd;
+constexpr inline auto kOpCtxIsNullMsg = "opCtx is null"_sd;
+constexpr inline auto kCurOpIsNullMsg = "the opCtx's curOp is null"_sd;
 
 /**
  * Indicates if the operation associated with 'opCtx' is ineligible for diagnostic logging. If the
@@ -57,12 +58,39 @@
  */
 boost::optional<StringData> isIneligibleForDiagnosticPrinting(OperationContext* opCtx);
 
-using OutIt = fmt::detail::buffer_appender<char>;
-struct CurOpPrinter {
-    OutIt format(fmt::basic_format_context<OutIt, char>& fc) const;
+class CurOpPrinter {
+public:
+    explicit CurOpPrinter(OperationContext* opCtx) : _opCtx{opCtx} {}
+
+    auto format(auto& fc) const {
+        auto out = fc.out();
+        if (auto msg = isIneligibleForDiagnosticPrinting(_opCtx))
+            return fmt::format_to(out, "{}", *msg);
+
+        Info info = _gatherInfo();
+        out = fmt::format_to(out, "{{");
+        auto field = [&, sep = ""_sd](StringData name, const auto& value) mutable {
+            out = fmt::format_to(out, "{}'{}': {}", std::exchange(sep, ", "_sd), name, value);
+        };
+        field("currentOp", info.opDebug);
+        field("opDescription", info.opDesc);
+        if (info.origCommand)
+            field("originatingCommand", *info.origCommand);
+        out = fmt::format_to(out, "}}");
+        return out;
+    }
+
+private:
+    struct Info {
+        std::string opDesc;
+        std::string opDebug;
+        boost::optional<std::string> origCommand;
+    };
+
+    Info _gatherInfo() const;
 
     // This pointer must outlive this class.
-    OperationContext* opCtx;
+    OperationContext* _opCtx;
 };
 
 }  // namespace mongo::diagnostic_printers
@@ -75,7 +103,7 @@ struct formatter<mongo::diagnostic_printers::CurOpPrinter> {
         return ctx.begin();
     }
 
-    auto format(const mongo::diagnostic_printers::CurOpPrinter& obj, auto& ctx) {
+    auto format(const mongo::diagnostic_printers::CurOpPrinter& obj, auto& ctx) const {
         return obj.format(ctx);
     }
 };
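The `const` added to `formatter<CurOpPrinter>::format` tracks another requirement of newer fmt releases: formatter specializations are invoked through const references, so a non-const `format` fails to compile. The same shape in a standalone sketch (hypothetical `Point` type, not MongoDB code):

#include <fmt/format.h>

struct Point {
    int x;
    int y;
};

template <>
struct fmt::formatter<Point> {
    constexpr auto parse(fmt::format_parse_context& ctx) {
        return ctx.begin();  // no custom format spec supported
    }

    // Newer fmt requires format() to be const-qualified.
    auto format(const Point& p, fmt::format_context& ctx) const {
        return fmt::format_to(ctx.out(), "({}, {})", p.x, p.y);
    }
};

int main() {
    fmt::print("{}\n", Point{1, 2});  // prints "(1, 2)"
    return 0;
}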
@@ -40,7 +40,6 @@ namespace mongo {
 
 namespace {
 
-using namespace fmt::literals;
 
 static constexpr StringData kCmdName = "mockCmd"_sd;
 static constexpr StringData kCmdValue = "abcdefgh"_sd;
@@ -103,7 +102,7 @@ public:
 
     std::string printCurOpDiagnostics() {
         diagnostic_printers::CurOpPrinter printer{opCtx()};
-        return "{}"_format(printer);
+        return fmt::format("{}", printer);
     }
 
     MockCmd _cmd;
@@ -218,7 +217,7 @@ TEST_F(DiagnosticPrinterTest, OmitsAllFieldsWhenCommandDoesNotEnableDiagnosticPr
 
 TEST_F(DiagnosticPrinterTest, FormattingGracefullyExitsWhenOpCtxIsNull) {
     diagnostic_printers::CurOpPrinter printer{nullptr};
-    ASSERT_EQ(diagnostic_printers::kOpCtxIsNullMsg, "{}"_format(printer));
+    ASSERT_EQ(diagnostic_printers::kOpCtxIsNullMsg, fmt::format("{}", printer));
 }
 
 TEST_F(DiagnosticPrinterTest, CreateIndexCommandIsEligibleForDiagnosticLog) {
@@ -45,7 +45,7 @@ namespace {
 
 template <typename T>
 std::string formatHex(T&& x) {
-    return format(FMT_STRING("{:#x}"), x);
+    return fmt::format("{:#x}", x);
 }
 
 std::string formatPtr(const void* x) {
@@ -39,7 +39,6 @@
 
 
 namespace mongo {
-using namespace fmt::literals;
 
 BatchedDeleteStageBuffer::BatchedDeleteStageBuffer(WorkingSet* ws) : _ws(ws) {}
 
@@ -48,10 +47,12 @@ void BatchedDeleteStageBuffer::append(WorkingSetID id) {
 }
 
 void BatchedDeleteStageBuffer::eraseUpToOffsetInclusive(size_t bufferOffset) {
-    tassert(6515701,
-            "Cannot erase offset '{}' - beyond the size of the BatchedDeleteStageBuffer {}"_format(
-                bufferOffset, _buffer.size()),
-            bufferOffset < _buffer.size());
+    tassert(
+        6515701,
+        fmt::format("Cannot erase offset '{}' - beyond the size of the BatchedDeleteStageBuffer {}",
+                    bufferOffset,
+                    _buffer.size()),
+        bufferOffset < _buffer.size());
     for (unsigned int i = 0; i <= bufferOffset; i++) {
         auto id = _buffer.at(i);
         _ws->free(id);
@@ -62,11 +63,11 @@ void BatchedDeleteStageBuffer::eraseUpToOffsetInclusive(size_t bufferOffset) {
 
 void BatchedDeleteStageBuffer::erase(const std::set<WorkingSetID>& idsToRemove) {
     for (auto& workingSetMemberId : idsToRemove) {
-        tassert(
-            6515702,
-            "Attempted to free member with WorkingSetId '{}', which does not exist in the BatchedDeleteStageBuffer"_format(
-                workingSetMemberId),
-            std::find(_buffer.begin(), _buffer.end(), workingSetMemberId) != _buffer.end());
+        tassert(6515702,
+                fmt::format("Attempted to free member with WorkingSetId '{}', which does not exist "
+                            "in the BatchedDeleteStageBuffer",
+                            workingSetMemberId),
+                std::find(_buffer.begin(), _buffer.end(), workingSetMemberId) != _buffer.end());
 
         _ws->free(workingSetMemberId);
     }
@@ -58,10 +58,9 @@ class Value;
 class RCString final : public RefCountable {
 public:
     static boost::intrusive_ptr<const RCString> create(StringData s) {
-        using namespace fmt::literals;
         static constexpr size_t sizeLimit = BSONObjMaxUserSize;
         uassert(16493,
-                "RCString too large. Requires size={} < limit={}"_format(s.size(), sizeLimit),
+                fmt::format("RCString too large. Requires size={} < limit={}", s.size(), sizeLimit),
                 s.size() < sizeLimit);
         return boost::intrusive_ptr{new (s) RCString{s}};
     }
@@ -37,19 +37,17 @@ namespace exec::expression {
 Value evaluate(const ExpressionInternalFindPositional& expr,
                const Document& root,
                Variables* variables) {
-    using namespace fmt::literals;
-
     auto& children = expr.getChildren();
 
     auto preImage = children[0]->evaluate(root, variables);
     auto postImage = children[1]->evaluate(root, variables);
     uassert(51255,
-            "Positional operator pre-image can only be an object, but got {}"_format(
-                typeName(preImage.getType())),
+            fmt::format("Positional operator pre-image can only be an object, but got {}",
+                        typeName(preImage.getType())),
             preImage.getType() == BSONType::Object);
     uassert(51258,
-            "Positional operator post-image can only be an object, but got {}"_format(
-                typeName(postImage.getType())),
+            fmt::format("Positional operator post-image can only be an object, but got {}",
+                        typeName(postImage.getType())),
             postImage.getType() == BSONType::Object);
     return Value{projection_executor_utils::applyFindPositionalProjection(preImage.getDocument(),
                                                                           postImage.getDocument(),
@@ -60,12 +58,10 @@ Value evaluate(const ExpressionInternalFindPositional& expr,
 Value evaluate(const ExpressionInternalFindSlice& expr,
                const Document& root,
                Variables* variables) {
-    using namespace fmt::literals;
-
     auto postImage = expr.getChildren()[0]->evaluate(root, variables);
     uassert(51256,
-            "$slice operator can only be applied to an object, but got {}"_format(
-                typeName(postImage.getType())),
+            fmt::format("$slice operator can only be applied to an object, but got {}",
+                        typeName(postImage.getType())),
             postImage.getType() == BSONType::Object);
     return Value{projection_executor_utils::applyFindSliceProjection(
         postImage.getDocument(), expr.getFieldPath(), expr.getSkip(), expr.getLimit())};
@@ -74,8 +70,6 @@ Value evaluate(const ExpressionInternalFindSlice& expr,
 Value evaluate(const ExpressionInternalFindElemMatch& expr,
                const Document& root,
                Variables* variables) {
-    using namespace fmt::literals;
-
     auto input = expr.getChildren()[0]->evaluate(root, variables);
     invariant(input.getType() == BSONType::Object);
     return projection_executor_utils::applyFindElemMatchProjection(
@@ -146,13 +146,11 @@ protected:
 
 private:
     Document _applyRootReplacementExpression(const Document& input, const Document& output) const {
-        using namespace fmt::literals;
-
         _expCtx->variables.setValue(_projectionPostImageVarId, Value{output});
         auto val = _rootReplacementExpression->evaluate(input, &_expCtx->variables);
         uassert(51254,
-                "Root-replacement expression must return a document, but got {}"_format(
-                    typeName(val.getType())),
+                fmt::format("Root-replacement expression must return a document, but got {}",
+                            typeName(val.getType())),
                 val.getType() == BSONType::Object);
         return val.getDocument();
     }
@@ -51,7 +51,6 @@
 #include "mongo/unittest/unittest.h"
 
 namespace mongo::sbe {
-using namespace fmt::literals;
 class BlockStagesTest : public PlanStageTestFixture {
 protected:
     BSONObj compressBucket(const BSONObj& bucket) {
@@ -165,7 +164,8 @@ protected:
         if (metaAccessor) {
             auto metaTagVal = metaAccessor->getViewOfValue();
             auto expectedTagVal = bson::convertFrom<true>(expectedData[i]["tag"]);
-            ASSERT_THAT(expectedTagVal, ValueEq(metaTagVal)) << "for {}th 'tag'"_format(i);
+            ASSERT_THAT(expectedTagVal, ValueEq(metaTagVal))
+                << fmt::format("for {}th 'tag'", i);
         }
 
         // Verifies rows.
@@ -174,7 +174,7 @@ protected:
             auto expectedTagVal = bson::convertFrom<true>(expectedData[i][cellPaths[j]]);
 
             ASSERT_THAT(expectedTagVal, ValueEq(actualTagVal))
-                << "for {}th path '{}'"_format(i, cellPaths[j]);
+                << fmt::format("for {}th path '{}'", i, cellPaths[j]);
         }
 
         if (i == yieldAfter) {
@@ -806,7 +806,7 @@ void BlockStagesTest::testBlockToBitmap(
         auto tagVal = accessor->getViewOfValue();
         auto expectedTagVal = expected.getAt(i);
 
-        ASSERT_THAT(tagVal, ValueEq(expectedTagVal)) << "for {}th 'tag'"_format(i);
+        ASSERT_THAT(tagVal, ValueEq(expectedTagVal)) << fmt::format("for {}th 'tag'", i);
 
         // Test out saveState() and restoreState() for 50% of the documents (the first document,
         // the third document, the fifth document, and so on).
@@ -198,7 +198,6 @@ const SpecificStats* UnionStage::getSpecificStats() const {
 }
 
 std::vector<DebugPrinter::Block> UnionStage::debugPrint() const {
-    using namespace fmt::literals;
     auto ret = PlanStage::debugPrint();
 
     ret.emplace_back(DebugPrinter::Block("[`"));
@@ -212,7 +211,7 @@ std::vector<DebugPrinter::Block> UnionStage::debugPrint() const {
 
     ret.emplace_back(DebugPrinter::Block::cmdIncIndent);
     for (size_t childNum = 0; childNum < _children.size(); childNum++) {
-        DebugPrinter::addKeyword(ret, "branch{}"_format(childNum));
+        DebugPrinter::addKeyword(ret, fmt::format("branch{}", childNum));
 
         ret.emplace_back(DebugPrinter::Block("[`"));
         for (size_t idx = 0; idx < _inputVals[childNum].size(); idx++) {
@@ -513,14 +513,14 @@ void ValuePrinter<T>::writeValueToStream(TypeTags tag, Value val, size_t depth)
 
             // If the BinData is a correctly sized newUUID, display it as such.
             if (type == newUUID && len == kNewUUIDLength) {
-                using namespace fmt::literals;
                 StringData sd(data, len);
                 // 4 Octets - 2 Octets - 2 Octets - 2 Octets - 6 Octets
-                stream << "UUID(\"{}-{}-{}-{}-{}\")"_format(hexblob::encodeLower(sd.substr(0, 4)),
-                                                            hexblob::encodeLower(sd.substr(4, 2)),
-                                                            hexblob::encodeLower(sd.substr(6, 2)),
-                                                            hexblob::encodeLower(sd.substr(8, 2)),
-                                                            hexblob::encodeLower(sd.substr(10, 6)));
+                stream << fmt::format("UUID(\"{}-{}-{}-{}-{}\")",
+                                      hexblob::encodeLower(sd.substr(0, 4)),
+                                      hexblob::encodeLower(sd.substr(4, 2)),
+                                      hexblob::encodeLower(sd.substr(6, 2)),
+                                      hexblob::encodeLower(sd.substr(8, 2)),
+                                      hexblob::encodeLower(sd.substr(10, 6)));
                 break;
             }
@ -81,7 +81,6 @@
|
|||
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite
|
||||
|
||||
namespace mongo {
|
||||
using namespace fmt::literals;
|
||||
const char* TimeseriesModifyStage::kStageType = "TS_MODIFY";
|
||||
|
||||
TimeseriesModifyStage::TimeseriesModifyStage(ExpressionContext* expCtx,
|
||||
|
|
@ -201,7 +200,7 @@ const std::vector<std::unique_ptr<FieldRef>>& TimeseriesModifyStage::_getUserLev
|
|||
_immutablePaths.emplace_back(std::make_unique<FieldRef>(timeField));
|
||||
} else {
|
||||
tasserted(7687100,
|
||||
"Unexpected shard key field: {}"_format(shardKeyField->dottedField()));
|
||||
fmt::format("Unexpected shard key field: {}", shardKeyField->dottedField()));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -281,7 +280,6 @@ std::vector<BSONObj> TimeseriesModifyStage::_applyUpdate(
|
|||
|
||||
void TimeseriesModifyStage::_checkRestrictionsOnUpdatingShardKeyAreNotViolated(
|
||||
const ScopedCollectionDescription& collDesc, const FieldRefSet& shardKeyPaths) {
|
||||
using namespace fmt::literals;
|
||||
// We do not allow modifying either the current shard key value or new shard key value (if
|
||||
// resharding) without specifying the full current shard key in the query.
|
||||
// If the query is a simple equality match on _id, then '_params.canonicalQuery' will be null.
|
||||
|
|
@ -324,9 +322,11 @@ void TimeseriesModifyStage::_checkRestrictionsOnUpdatingShardKeyAreNotViolated(
|
|||
} else {
|
||||
FieldRefSet userLevelShardKeyPaths(_getUserLevelShardKeyPaths(collDesc));
|
||||
uassert(7717803,
|
||||
"Shard key update is not allowed without specifying the full shard key in the "
|
||||
"query: pred = {}, shardKeyPaths = {}"_format(
|
||||
_originalPredicate->serialize().toString(), userLevelShardKeyPaths.toString()),
|
||||
fmt::format(
|
||||
"Shard key update is not allowed without specifying the full shard key in the "
|
||||
"query: pred = {}, shardKeyPaths = {}",
|
||||
_originalPredicate->serialize().toString(),
|
||||
userLevelShardKeyPaths.toString()),
|
||||
(_originalPredicate &&
|
||||
pathsupport::extractFullEqualityMatches(
|
||||
*_originalPredicate, userLevelShardKeyPaths, &equalities)
|
||||
|
|
@ -353,7 +353,6 @@ void TimeseriesModifyStage::_checkUpdateChangesExistingShardKey(const BSONObj& n
|
|||
const BSONObj& oldBucket,
|
||||
const BSONObj& newMeasurement,
|
||||
const BSONObj& oldMeasurement) {
|
||||
using namespace fmt::literals;
|
||||
const auto& collDesc = collectionAcquisition().getShardingDescription();
|
||||
const auto& shardKeyPattern = collDesc.getShardKeyPattern();
|
||||
|
||||
|
|
@ -403,7 +402,6 @@ void TimeseriesModifyStage::_checkUpdateChangesReshardingKey(
|
|||
const BSONObj& oldBucket,
|
||||
const BSONObj& newMeasurement,
|
||||
const BSONObj& oldMeasurement) {
|
||||
using namespace fmt::literals;
|
||||
const auto& collDesc = collectionAcquisition().getShardingDescription();
|
||||
|
||||
auto reshardingKeyPattern = collDesc.getReshardingKeyIfShouldForwardOps();
|
||||
|
|
@ -807,7 +805,7 @@ PlanStage::StageState TimeseriesModifyStage::doWork(WorkingSetID* out) {
|
|||
void TimeseriesModifyStage::doRestoreStateRequiresCollection() {
|
||||
const NamespaceString& ns = collectionPtr()->ns();
|
||||
uassert(ErrorCodes::PrimarySteppedDown,
|
||||
"Demoted from primary while removing from {}"_format(ns.toStringForErrorMsg()),
|
||||
fmt::format("Demoted from primary while removing from {}", ns.toStringForErrorMsg()),
|
||||
!opCtx()->writesAreReplicated() ||
|
||||
repl::ReplicationCoordinator::get(opCtx())->canAcceptWritesFor(opCtx(), ns));
|
||||
|
||||
|
|
|
|||
|
|
@@ -211,7 +211,6 @@ BSONObj TimeseriesUpsertStage::_produceNewDocumentForInsert() {
     _getImmutablePaths();
 
     mutablebson::Document doc;
-    using namespace fmt::literals;
 
     if (_request.shouldUpsertSuppliedDocument()) {
         update::generateNewDocumentFromSuppliedDoc(opCtx(), _immutablePaths, &_request, doc);
@@ -233,7 +232,7 @@ BSONObj TimeseriesUpsertStage::_produceNewDocumentForInsert() {
     auto newDocument = doc.getObject();
     if (!DocumentValidationSettings::get(opCtx()).isInternalValidationDisabled()) {
         uassert(7655103,
-                "Document to upsert is larger than {}"_format(BSONObjMaxUserSize),
+                fmt::format("Document to upsert is larger than {}", BSONObjMaxUserSize),
                 newDocument.objsize() <= BSONObjMaxUserSize);
     }
@@ -28,6 +28,7 @@
  */
 
 #include <fmt/format.h>
 #include <fmt/ostream.h>
 
 #include "mongo/base/error_codes.h"
 #include "mongo/base/status.h"
@@ -132,7 +133,7 @@ StringData FeatureCompatibilityVersionParser::serializeVersionForFeatureFlags(FC
         return multiversion::toString(version);
     }
     uasserted(ErrorCodes::BadValue,
-              fmt::format("Invalid FCV version {} for feature flag.", version));
+              fmt::format("Invalid FCV version {} for feature flag.", fmt::underlying(version)));
 }
 
 Status FeatureCompatibilityVersionParser::validatePreviousVersionField(FCV version) {
@@ -58,11 +58,10 @@ private:
                                      const BSONField<T>& field,
                                      StringData expected,
                                      std::string* errMsg) {
-        using namespace fmt::literals;
         if (!errMsg)
             return;
-        *errMsg = "wrong type for '{}' field, expected {}, found {}"_format(
-            field(), expected, elem.toString());
+        *errMsg = fmt::format(
+            "wrong type for '{}' field, expected {}, found {}", field(), expected, elem.toString());
     }
 
 public:
@@ -452,7 +451,6 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem,
                                              const BSONField<std::vector<T>>& field,
                                              std::vector<T>* out,
                                              std::string* errMsg) {
-    using namespace fmt::literals;
     if (elem.eoo()) {
         if (field.hasDefault()) {
             *out = field.getDefault();
@@ -478,8 +476,8 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem,
 
             if (!FieldParser::extract(next, fieldFor, &out->at(initialSize + i), &elErrMsg)) {
                 if (errMsg) {
-                    *errMsg = "error parsing element {} of field {}{}"_format(
-                        i, field(), causedBy(elErrMsg));
+                    *errMsg = fmt::format(
+                        "error parsing element {} of field {}{}", i, field(), causedBy(elErrMsg));
                 }
                 return FIELD_INVALID;
             }
@@ -545,7 +543,6 @@ FieldParser::FieldState FieldParser::extract(BSONObj doc,
                                              const BSONField<std::vector<T*>>& field,
                                              std::vector<T*>** out,
                                              std::string* errMsg) {
-    using namespace fmt::literals;
     dassert(!field.hasDefault());
 
     BSONElement elem = doc[field.name()];
@@ -574,8 +571,10 @@ FieldParser::FieldState FieldParser::extract(BSONObj doc,
 
         if (next.type() != Object) {
             if (errMsg) {
-                *errMsg = "wrong type for '{}' field contents, expected object, found {}"_format(
-                    field(), elem.type());
+                *errMsg =
+                    fmt::format("wrong type for '{}' field contents, expected object, found {}",
+                                field(),
+                                elem.type());
             }
             return FIELD_INVALID;
         }
@@ -605,7 +604,6 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem,
                                              const BSONField<std::map<K, T>>& field,
                                              std::map<K, T>* out,
                                              std::string* errMsg) {
-    using namespace fmt::literals;
     if (elem.eoo()) {
         if (field.hasDefault()) {
             *out = field.getDefault();
@@ -627,8 +625,10 @@ FieldParser::FieldState FieldParser::extract(BSONElement elem,
             BSONField<T> fieldFor(next.fieldName(), value);
             if (!FieldParser::extract(next, fieldFor, &value, &elErrMsg)) {
                 if (errMsg) {
-                    *errMsg = "error parsing map element {} of field {}{}"_format(
-                        next.fieldName(), field(), causedBy(elErrMsg));
+                    *errMsg = fmt::format("error parsing map element {} of field {}{}",
+                                          next.fieldName(),
+                                          field(),
+                                          causedBy(elErrMsg));
                 }
                 return FIELD_INVALID;
             }
@@ -49,8 +49,6 @@ namespace mongo::fts {
 
 namespace {
 
-using namespace fmt::literals;
-
 /**
  * Case-insensitive StringData comparator.
 * Returns true if a < b.
@@ -144,8 +142,9 @@ public:
         } else {
             // v2 and above reject unrecognized language strings.
             uasserted(ErrorCodes::BadValue,
-                      R"(unsupported language: "{}" for text index version {})"_format(langName,
-                                                                                       ver));
+                      fmt::format(R"(unsupported language: "{}" for text index version {})",
+                                  langName,
+                                  fmt::underlying(ver)));
         }
     }
     return *it->second;
@@ -281,14 +281,14 @@ static bool forkServer() {
             croak("closing read side of pipe failed");
         serverGlobalParams.forkReadyFd = readyPipe[1];
 
-        std::cout << format(FMT_STRING("forked process: {}"), getpid()) << std::endl;
+        std::cout << fmt::format("forked process: {}", getpid()) << std::endl;
 
         auto stdioDetach = [](FILE* fp, const char* mode, StringData name) {
             if (!freopen("/dev/null", mode, fp)) {
                 int saved = errno;
-                std::cout << format(FMT_STRING("Cannot reassign {} while forking server process: {}"),
-                                    name,
-                                    strerror(saved))
+                std::cout << fmt::format("Cannot reassign {} while forking server process: {}",
+                                         name,
+                                         strerror(saved))
                           << std::endl;
                 return false;
             }
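The dropped `FMT_STRING` wrappers date from fmt 7, where the macro was the opt-in for compile-time checking of format strings. Since fmt 8, literal format strings passed to `fmt::format` are checked at compile time by default in C++20 builds, so the macro is redundant and the plain call is equally safe. A sketch:

#include <fmt/format.h>

#include <string>

std::string forkedMessage(int pid) {
    // fmt 7 era, opt-in checking:
    //     return fmt::format(FMT_STRING("forked process: {}"), pid);
    // fmt 8+: a literal is checked at compile time anyway; for example,
    // fmt::format("forked process: {:s}", pid) would fail to compile.
    return fmt::format("forked process: {}", pid);
}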
@@ -75,8 +75,7 @@ void logProcessDetails(std::ostream* os) {
                           .obj();
     vii.logBuildInfo(os);
     if (os) {
-        *os << format(FMT_STRING("Operating System: {}"),
-                      tojson(osInfo, ExtendedRelaxedV2_0_0, true))
+        *os << fmt::format("Operating System: {}", tojson(osInfo, ExtendedRelaxedV2_0_0, true))
             << std::endl;
     } else {
         LOGV2(51765, "Operating System", "os"_attr = osInfo);
@@ -75,7 +75,6 @@ protected:
 
 namespace {
 
-using namespace fmt::literals;
 
 TEST_F(NamespaceStringTest, createNamespaceString_forTest) {
     TenantId tenantId(OID::gen());
@@ -377,7 +376,7 @@ TEST_F(NamespaceStringTest, NSSWithTenantId) {
     ASSERT_EQ(nss4.size(), 3);
     ASSERT_EQ(nss4.ns_forTest(), "foo");
     ASSERT_EQ(nss4.toString_forTest(), "foo");
-    ASSERT_EQ(nss4.toStringWithTenantId_forTest(), "{}_foo"_format(tenantId.toString()));
+    ASSERT_EQ(nss4.toStringWithTenantId_forTest(), fmt::format("{}_foo", tenantId.toString()));
     ASSERT_EQ(nss4.db_forTest(), "foo");
     ASSERT_EQ(nss4.coll(), "");
     ASSERT_EQ(nss4.dbName().toString_forTest(), "foo");
@@ -394,7 +393,7 @@ TEST_F(NamespaceStringTest, NSSWithTenantId) {
     ASSERT_EQ(multiNss.ns_forTest(), "config.system.change_collection");
     ASSERT_EQ(multiNss.toString_forTest(), "config.system.change_collection");
     ASSERT_EQ(multiNss.toStringWithTenantId_forTest(),
-              "{}_config.system.change_collection"_format(tenantId.toString()));
+              fmt::format("{}_config.system.change_collection", tenantId.toString()));
     ASSERT_EQ(multiNss.db_forTest(), "config");
     ASSERT_EQ(multiNss.coll(), "system.change_collection");
     ASSERT_EQ(multiNss.dbName().toString_forTest(), "config");
@@ -424,12 +423,12 @@ TEST_F(NamespaceStringTest, NSSWithTenantId) {
         ASSERT_EQ(*emptyWithTenant.tenantId(), tenantId);
         ASSERT_EQ(emptyWithTenant.toString_forTest(), "");
         ASSERT_EQ(emptyWithTenant.toStringWithTenantId_forTest(),
-                  "{}_"_format(tenantId.toString()));
+                  fmt::format("{}_", tenantId.toString()));
         ASSERT(emptyWithTenant.dbName().tenantId());
         ASSERT_EQ(emptyWithTenant.dbName().tenantId(), tenantId);
         ASSERT_EQ(emptyWithTenant.dbName().toString_forTest(), "");
         ASSERT_EQ(emptyWithTenant.dbName().toStringWithTenantId_forTest(),
-                  "{}_"_format(tenantId.toString()));
+                  fmt::format("{}_", tenantId.toString()));
     }
 
     {
@@ -57,7 +57,6 @@

 namespace mongo {

-using namespace fmt::literals;

 #if defined(__linux__)

@@ -60,7 +60,6 @@ OperationKeyManager& OperationKeyManager::get(ServiceContext* serviceContext) {
 }

 void OperationKeyManager::add(const OperationKey& key, OperationId id) {
-    using namespace fmt::literals;

     LOGV2_DEBUG(4615636,
                 2,

@@ -71,8 +70,9 @@ void OperationKeyManager::add(const OperationKey& key, OperationId id) {
     stdx::lock_guard lk(_mutex);
     auto result = _idByOperationKey.emplace(key, id).second;

-    uassert(
-        ErrorCodes::BadValue, "OperationKey currently '{}' in use"_format(key.toString()), result);
+    uassert(ErrorCodes::BadValue,
+            fmt::format("OperationKey currently '{}' in use", key.toString()),
+            result);
 }

 bool OperationKeyManager::remove(const OperationKey& key) {
@@ -204,7 +204,6 @@ private:
                      bool upsert,
                      const WriteConcernOptions& writeConcern =
                          WriteConcerns::kMajorityWriteConcernShardingTimeout) {
-        using namespace fmt::literals;
         DBDirectClient dbClient(opCtx);

         auto commandResponse = write_ops::checkWriteErrors(dbClient.update([&] {

@@ -218,8 +217,9 @@ private:
         }()));

         uassert(ErrorCodes::NoMatchingDocument,
-                "No matching document found for query {} on namespace {}"_format(
-                    filter.toString(), _storageNss.toStringForErrorMsg()),
+                fmt::format("No matching document found for query {} on namespace {}",
+                            filter.toString(),
+                            _storageNss.toStringForErrorMsg()),
                 upsert || commandResponse.getN() > 0);

         WriteConcernResult ignoreResult;
@@ -413,10 +413,9 @@ SortPattern getAccSortPattern(AccumulatorN* accN) {
 }

 inline SortPattern getAccSortPattern(const boost::intrusive_ptr<AccumulatorState>& accState) {
-    using namespace fmt::literals;
     auto accN = dynamic_cast<AccumulatorN*>(accState.get());
     tassert(8434700,
-            "Expected AccumulatorN but the accumulator is {}"_format(accState->getOpName()),
+            fmt::format("Expected AccumulatorN but the accumulator is {}", accState->getOpName()),
             accN);
     switch (accN->getAccumulatorType()) {
         case AccumulatorN::kTop:
@@ -623,10 +623,9 @@ std::unique_ptr<MatchExpression> matchRewriteUpdateDescription(
     // not need to check whether the predicate matches a missing field in this case.
     static const std::vector<std::string> oplogFields = {"o.diff.d", "o.$unset"};
     auto rewrittenEquality = std::make_unique<OrMatchExpression>();
-    using namespace fmt::literals;
     for (auto&& oplogField : oplogFields) {
         rewrittenEquality->add(std::make_unique<ExistsMatchExpression>(
-            StringData("{}.{}"_format(oplogField, fieldName))));
+            StringData(fmt::format("{}.{}", oplogField, fieldName))));
     }
     return rewrittenEquality;
 };
@@ -347,7 +347,6 @@ constexpr StringData getMergeFieldNameForAcc() {
 };

 boost::intrusive_ptr<Expression> getOutputArgExpr(boost::intrusive_ptr<Expression> argExpr) {
-    using namespace fmt::literals;
     auto exprObj = dynamic_cast<ExpressionObject*>(argExpr.get());
     tassert(8808700, "Expected object-type expression", exprObj);
     auto&& exprs = exprObj->getChildExpressions();

@@ -355,7 +354,7 @@ boost::intrusive_ptr<Expression> getOutputArgExpr(boost::intrusive_ptr<Expressio
         return expr.first == AccumulatorN::kFieldNameOutput;
     });
     tassert(8808701,
-            "'{}' field not found"_format(AccumulatorN::kFieldNameOutput),
+            fmt::format("'{}' field not found", AccumulatorN::kFieldNameOutput),
             outputArgExprIt != exprs.end());
     return outputArgExprIt->second;
 };

@@ -416,10 +415,9 @@ AccumulationStatement mergeAccStmtFor(boost::intrusive_ptr<ExpressionContext> pE

             // Recomputes the rewritten nested accumulator fields to the user-requested
             // fields.
-            using namespace fmt::literals;
             prjArgsBuilder.append(
                 accStmts[accIdx].fieldName,
-                "${}.{}"_format(mergeFieldName, accStmts[accIdx].fieldName));
+                fmt::format("${}.{}", mergeFieldName, accStmts[accIdx].fieldName));
         }
         outputBuilder.doneFast();
     }
@@ -31,6 +31,8 @@
 #include <boost/move/utility_core.hpp>
 #include <cstdint>
 #include <deque>
+#include <fmt/format.h>
+#include <fmt/ranges.h>
 #include <functional>
 #include <list>
 #include <map>
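Note: translation units that format containers or tuples now pull in <fmt/ranges.h> explicitly; fmt keeps range formatting out of the core format header. A minimal sketch of what that header enables:

#include <fmt/ranges.h>

#include <vector>

int main() {
    std::vector<int> ids{1, 2, 3};  // illustrative data
    // Without <fmt/ranges.h> this line fails to compile: no formatter
    // for std::vector is visible from <fmt/format.h> alone.
    fmt::print("{}\n", ids);  // prints: [1, 2, 3]
}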
@@ -51,7 +51,6 @@ namespace mongo {

 using boost::intrusive_ptr;
 using std::list;
-using namespace fmt::literals;

 class ListClusterCatalogPipelineBuilder {
 public:
@@ -67,7 +67,6 @@


 namespace mongo {
-using namespace fmt::literals;

 MONGO_FAIL_POINT_DEFINE(hangWhileBuildingDocumentSourceMergeBatch);
 REGISTER_DOCUMENT_SOURCE(merge,

@@ -172,15 +171,17 @@ auto withErrorContext(const auto&& callback, StringData errorMessage) {
 std::unique_ptr<DocumentSourceMerge::LiteParsed> DocumentSourceMerge::LiteParsed::parse(
     const NamespaceString& nss, const BSONElement& spec) {
     uassert(ErrorCodes::TypeMismatch,
-            "{} requires a string or object argument, but found {}"_format(kStageName,
-                                                                           typeName(spec.type())),
+            fmt::format("{} requires a string or object argument, but found {}",
+                        kStageName,
+                        typeName(spec.type())),
             spec.type() == BSONType::String || spec.type() == BSONType::Object);

     auto mergeSpec = parseMergeSpecAndResolveTargetNamespace(spec, nss.dbName());
     auto targetNss = mergeSpec.getTargetNss();

     uassert(ErrorCodes::InvalidNamespace,
-            "Invalid {} target namespace: '{}'"_format(kStageName, targetNss.toStringForErrorMsg()),
+            fmt::format(
+                "Invalid {} target namespace: '{}'", kStageName, targetNss.toStringForErrorMsg()),
             targetNss.isValid());

     auto whenMatched =

@@ -188,10 +189,11 @@ std::unique_ptr<DocumentSourceMerge::LiteParsed> DocumentSourceMerge::LiteParsed
     auto whenNotMatched = mergeSpec.getWhenNotMatched().value_or(kDefaultWhenNotMatched);

     uassert(51181,
-            "Combination of {} modes 'whenMatched: {}' and 'whenNotMatched: {}' "
-            "is not supported"_format(kStageName,
-                                      MergeWhenMatchedMode_serializer(whenMatched),
-                                      MergeWhenNotMatchedMode_serializer(whenNotMatched)),
+            fmt::format("Combination of {} modes 'whenMatched: {}' and 'whenNotMatched: {}' "
+                        "is not supported",
+                        kStageName,
+                        MergeWhenMatchedMode_serializer(whenMatched),
+                        MergeWhenNotMatchedMode_serializer(whenNotMatched)),
             isSupportedMergeMode(whenMatched, whenNotMatched));
     boost::optional<LiteParsedPipeline> liteParsedPipeline;
     if (whenMatched == MergeWhenMatchedModeEnum::kPipeline) {

@@ -250,29 +252,32 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceMerge::create(
     boost::optional<ChunkVersion> collectionPlacementVersion,
     bool allowMergeOnNullishValues) {
     uassert(51189,
-            "Combination of {} modes 'whenMatched: {}' and 'whenNotMatched: {}' "
-            "is not supported"_format(kStageName,
-                                      MergeWhenMatchedMode_serializer(whenMatched),
-                                      MergeWhenNotMatchedMode_serializer(whenNotMatched)),
+            fmt::format("Combination of {} modes 'whenMatched: {}' and 'whenNotMatched: {}' "
+                        "is not supported",
+                        kStageName,
+                        MergeWhenMatchedMode_serializer(whenMatched),
+                        MergeWhenNotMatchedMode_serializer(whenNotMatched)),
             isSupportedMergeMode(whenMatched, whenNotMatched));

     uassert(ErrorCodes::InvalidNamespace,
-            "Invalid {} target namespace: '{}'"_format(kStageName, outputNs.toStringForErrorMsg()),
+            fmt::format(
+                "Invalid {} target namespace: '{}'", kStageName, outputNs.toStringForErrorMsg()),
             outputNs.isValid());

     uassert(ErrorCodes::OperationNotSupportedInTransaction,
-            "{} cannot be used in a transaction"_format(kStageName),
+            fmt::format("{} cannot be used in a transaction", kStageName),
             !expCtx->getOperationContext()->inMultiDocumentTransaction());

     uassert(31319,
-            "Cannot {} to special collection: {}"_format(kStageName, outputNs.coll()),
+            fmt::format("Cannot {} to special collection: {}", kStageName, outputNs.coll()),
             !outputNs.isSystem() ||
                 (outputNs.isSystemStatsCollection() &&
                  isInternalClient(expCtx->getOperationContext()->getClient())));

     uassert(31320,
-            "Cannot {} to internal database: {}"_format(kStageName,
-                                                        outputNs.dbName().toStringForErrorMsg()),
+            fmt::format("Cannot {} to internal database: {}",
+                        kStageName,
+                        outputNs.dbName().toStringForErrorMsg()),
             !outputNs.isOnInternalDb() ||
                 isInternalClient(expCtx->getOperationContext()->getClient()));

@@ -291,8 +296,8 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceMerge::create(
     } else {
         // Ensure the 'let' argument cannot be used with any other merge modes.
         uassert(51199,
-                "Cannot use 'let' variables with 'whenMatched: {}' mode"_format(
-                    MergeWhenMatchedMode_serializer(whenMatched)),
+                fmt::format("Cannot use 'let' variables with 'whenMatched: {}' mode",
+                            MergeWhenMatchedMode_serializer(whenMatched)),
                 !letVariables);
     }

@@ -310,7 +315,8 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceMerge::create(
 boost::intrusive_ptr<DocumentSource> DocumentSourceMerge::createFromBson(
     BSONElement spec, const boost::intrusive_ptr<ExpressionContext>& expCtx) {
     uassert(51182,
-            "{} only supports a string or object argument, not {}"_format(kStageName, spec.type()),
+            fmt::format(
+                "{} only supports a string or object argument, not {}", kStageName, spec.type()),
             spec.type() == BSONType::String || spec.type() == BSONType::Object);

     auto mergeSpec = parseMergeSpecAndResolveTargetNamespace(
@@ -116,11 +116,11 @@ public:

     ReadConcernSupportResult supportsReadConcern(repl::ReadConcernLevel level,
                                                  bool isImplicitDefault) const final {
-        using namespace fmt::literals;
         ReadConcernSupportResult result = {
             {level == repl::ReadConcernLevel::kLinearizableReadConcern,
              {ErrorCodes::InvalidOptions,
-              "{} cannot be used with a 'linearizable' read concern level"_format(kStageName)}},
+              fmt::format("{} cannot be used with a 'linearizable' read concern level",
+                          kStageName)}},
             Status::OK()};
         auto pipelineReadConcern = LiteParsedDocumentSourceNestedPipelines::supportsReadConcern(
             level, isImplicitDefault);
@@ -47,19 +47,21 @@
 #include "mongo/util/namespace_string_util.h"

 namespace mongo {
-using namespace fmt::literals;

 NamespaceString mergeTargetNssParseFromBSON(boost::optional<TenantId> tenantId,
                                             const BSONElement& elem,
                                             const SerializationContext& sc) {
     uassert(51178,
-            "{} 'into' field must be either a string or an object, "
-            "but found {}"_format(DocumentSourceMerge::kStageName, typeName(elem.type())),
+            fmt::format("{} 'into' field must be either a string or an object, "
+                        "but found {}",
+                        DocumentSourceMerge::kStageName,
+                        typeName(elem.type())),
             elem.type() == BSONType::String || elem.type() == BSONType::Object);

     if (elem.type() == BSONType::String) {
         uassert(5786800,
-                "{} 'into' field cannot be an empty string"_format(DocumentSourceMerge::kStageName),
+                fmt::format("{} 'into' field cannot be an empty string",
+                            DocumentSourceMerge::kStageName),
                 !elem.valueStringData().empty());
         return NamespaceStringUtil::deserialize(tenantId, "", elem.valueStringData(), sc);
     }

@@ -70,10 +72,11 @@ NamespaceString mergeTargetNssParseFromBSON(boost::optional<TenantId> tenantId,
     auto spec = NamespaceSpec::parse(
         IDLParserContext(elem.fieldNameStringData(), vts, tenantId, sc), elem.embeddedObject());
     auto coll = spec.getColl();
-    uassert(5786801,
-            "{} 'into' field must specify a 'coll' that is not empty, null or undefined"_format(
-                DocumentSourceMerge::kStageName),
-            coll && !coll->empty());
+    uassert(
+        5786801,
+        fmt::format("{} 'into' field must specify a 'coll' that is not empty, null or undefined",
+                    DocumentSourceMerge::kStageName),
+        coll && !coll->empty());

     return NamespaceStringUtil::deserialize(
         spec.getDb().value_or(DatabaseNameUtil::deserialize(tenantId, "", sc)), *coll);

@@ -94,8 +97,10 @@ std::vector<std::string> mergeOnFieldsParseFromBSON(const BSONElement& elem) {
     std::vector<std::string> fields;

     uassert(51186,
-            "{} 'on' field must be either a string or an array of strings, "
-            "but found {}"_format(DocumentSourceMerge::kStageName, typeName(elem.type())),
+            fmt::format("{} 'on' field must be either a string or an array of strings, "
+                        "but found {}",
+                        DocumentSourceMerge::kStageName,
+                        typeName(elem.type())),
             elem.type() == BSONType::String || elem.type() == BSONType::Array);

     if (elem.type() == BSONType::String) {

@@ -107,16 +112,17 @@ std::vector<std::string> mergeOnFieldsParseFromBSON(const BSONElement& elem) {
         while (iter.more()) {
             const BSONElement matchByElem = iter.next();
             uassert(51134,
-                    "{} 'on' array elements must be strings, but found {}"_format(
-                        DocumentSourceMerge::kStageName, typeName(matchByElem.type())),
+                    fmt::format("{} 'on' array elements must be strings, but found {}",
+                                DocumentSourceMerge::kStageName,
+                                typeName(matchByElem.type())),
                     matchByElem.type() == BSONType::String);
             fields.push_back(matchByElem.str());
         }
     }

     uassert(51187,
-            "If explicitly specifying {} 'on', must include at least one field"_format(
-                DocumentSourceMerge::kStageName),
+            fmt::format("If explicitly specifying {} 'on', must include at least one field",
+                        DocumentSourceMerge::kStageName),
             fields.size() > 0);

     return fields;

@@ -135,8 +141,10 @@ void mergeOnFieldsSerializeToBSON(const std::vector<std::string>& fields,

 MergeWhenMatchedPolicy mergeWhenMatchedParseFromBSON(const BSONElement& elem) {
     uassert(51191,
-            "{} 'whenMatched' field must be either a string or an array, "
-            "but found {}"_format(DocumentSourceMerge::kStageName, typeName(elem.type())),
+            fmt::format("{} 'whenMatched' field must be either a string or an array, "
+                        "but found {}",
+                        DocumentSourceMerge::kStageName,
+                        typeName(elem.type())),
             elem.type() == BSONType::String || elem.type() == BSONType::Array);

     if (elem.type() == BSONType::Array) {
@@ -66,8 +66,6 @@

 namespace mongo {

-using namespace fmt::literals;
-
 MONGO_FAIL_POINT_DEFINE(hangWhileBuildingDocumentSourceOutBatch);
 MONGO_FAIL_POINT_DEFINE(outWaitAfterTempCollectionCreation);
 MONGO_FAIL_POINT_DEFINE(outWaitBeforeTempCollectionRename);

@@ -176,8 +174,9 @@ DocumentSourceOutSpec DocumentSourceOut::parseOutSpecAndResolveTargetNamespace(
             spec.embeddedObject());
     } else {
         uassert(16990,
-                "{} only supports a string or object argument, but found {}"_format(
-                    kStageName, typeName(spec.type())),
+                fmt::format("{} only supports a string or object argument, but found {}",
+                            kStageName,
+                            typeName(spec.type())),
                 spec.type() == BSONType::String);
     }

@@ -196,9 +195,10 @@ std::unique_ptr<DocumentSourceOut::LiteParsed> DocumentSourceOut::LiteParsed::pa
                                                    outSpec.getColl(),
                                                    outSpec.getSerializationContext());

-    uassert(ErrorCodes::InvalidNamespace,
-            "Invalid {} target namespace, {}"_format(kStageName, targetNss.toStringForErrorMsg()),
-            targetNss.isValid());
+    uassert(
+        ErrorCodes::InvalidNamespace,
+        fmt::format("Invalid {} target namespace, {}", kStageName, targetNss.toStringForErrorMsg()),
+        targetNss.isValid());
     return std::make_unique<DocumentSourceOut::LiteParsed>(spec.fieldName(), std::move(targetNss));
 }

@@ -323,8 +323,9 @@ void DocumentSourceOut::initialize() {
         // work. If the collection becomes capped during processing, the collection options will
         // have changed, and the $out will fail.
         uassert(17152,
-                "namespace '{}' is capped so it can't be used for {}"_format(
-                    getOutputNs().toStringForErrorMsg(), kStageName),
+                fmt::format("namespace '{}' is capped so it can't be used for {}",
+                            getOutputNs().toStringForErrorMsg(),
+                            kStageName),
                 _originalOutOptions["capped"].eoo());
     } catch (ExceptionFor<ErrorCodes::NamespaceNotFound>&) {
         LOGV2_DEBUG(7585601,

@@ -505,20 +506,22 @@ boost::intrusive_ptr<DocumentSource> DocumentSourceOut::create(
     const boost::intrusive_ptr<ExpressionContext>& expCtx,
     boost::optional<TimeseriesOptions> timeseries) {
     uassert(ErrorCodes::OperationNotSupportedInTransaction,
-            "{} cannot be used in a transaction"_format(kStageName),
+            fmt::format("{} cannot be used in a transaction", kStageName),
             !expCtx->getOperationContext()->inMultiDocumentTransaction());

-    uassert(ErrorCodes::InvalidNamespace,
-            "Invalid {} target namespace, {}"_format(kStageName, outputNs.toStringForErrorMsg()),
-            outputNs.isValid());
+    uassert(
+        ErrorCodes::InvalidNamespace,
+        fmt::format("Invalid {} target namespace, {}", kStageName, outputNs.toStringForErrorMsg()),
+        outputNs.isValid());

     uassert(17385,
-            "Can't {} to special collection: {}"_format(kStageName, outputNs.coll()),
+            fmt::format("Can't {} to special collection: {}", kStageName, outputNs.coll()),
             !outputNs.isSystem());

     uassert(31321,
-            "Can't {} to internal database: {}"_format(kStageName,
-                                                       outputNs.dbName().toStringForErrorMsg()),
+            fmt::format("Can't {} to internal database: {}",
+                        kStageName,
+                        outputNs.dbName().toStringForErrorMsg()),
             !outputNs.isOnInternalDb());
     return new DocumentSourceOut(std::move(outputNs), std::move(timeseries), expCtx);
 }
@@ -113,12 +113,11 @@ public:

     ReadConcernSupportResult supportsReadConcern(repl::ReadConcernLevel level,
                                                  bool isImplicitDefault) const final {
-        using namespace fmt::literals;
-        return {
-            {level == repl::ReadConcernLevel::kLinearizableReadConcern,
-             {ErrorCodes::InvalidOptions,
-              "{} cannot be used with a 'linearizable' read concern level"_format(kStageName)}},
-            Status::OK()};
+        return {{level == repl::ReadConcernLevel::kLinearizableReadConcern,
+                 {ErrorCodes::InvalidOptions,
+                  fmt::format("{} cannot be used with a 'linearizable' read concern level",
+                              kStageName)}},
+                Status::OK()};
     }

     bool isWriteStage() const override {
|||
|
|
@ -31,6 +31,8 @@
|
|||
|
||||
#include <algorithm>
|
||||
#include <boost/smart_ptr/intrusive_ptr.hpp>
|
||||
#include <fmt/format.h>
|
||||
#include <fmt/ranges.h>
|
||||
|
||||
#include "mongo/base/error_codes.h"
|
||||
#include "mongo/base/string_data.h"
|
||||
|
|
|
|||
|
|
@@ -58,7 +58,6 @@
 namespace mongo {

 using boost::intrusive_ptr;
-using namespace fmt::literals;

 Document ReplaceRootTransformation::applyTransformation(const Document& input) const {
     // Extract subdocument in the form of a Value.

@@ -123,8 +122,8 @@ void ReplaceRootTransformation::reportRenames(const MatchExpression* expr,
     for (const auto& path : deps.fields) {
         // Only record renames for top level paths.
         const auto oldPathTopLevelField = FieldPath::extractFirstFieldFromDottedPath(path);
-        renames.emplace(
-            std::make_pair(oldPathTopLevelField, "{}.{}"_format(prefixPath, oldPathTopLevelField)));
+        renames.emplace(std::make_pair(oldPathTopLevelField,
+                                       fmt::format("{}.{}", prefixPath, oldPathTopLevelField)));
     }
 }

@@ -208,7 +208,6 @@ private:

 template <typename B>
 DocumentSource::GetNextResult DocumentSourceWriter<B>::doGetNext() {
-    using namespace fmt::literals;
     if (_done) {
         return GetNextResult::makeEOF();
     }

@@ -249,8 +248,9 @@ DocumentSource::GetNextResult DocumentSourceWriter<B>::doGetNext() {
             internalQueryDocumentSourceWriterBatchExtraReservedBytes.load();

         uassert(7637800,
-                "Unable to proceed with write while metadata size ({}KB) exceeds {}KB"_format(
-                    initialRequestSize / 1024, BSONObjMaxUserSize / 1024),
+                fmt::format("Unable to proceed with write while metadata size ({}KB) exceeds {}KB",
+                            initialRequestSize / 1024,
+                            BSONObjMaxUserSize / 1024),
                 initialRequestSize <= BSONObjMaxUserSize);

         const auto maxBatchSizeBytes = BSONObjMaxUserSize - initialRequestSize;
@@ -31,6 +31,7 @@

+#include <fmt/format.h>

 #include "mongo/base/string_data.h"
 #include "mongo/db/pipeline/expression_context.h"
 #include "mongo/logv2/redaction.h"

@@ -46,30 +47,28 @@ struct ExpressionContextPrinter {
         auto out = fc.out();

         if (!expCtx) {
-            out = format_to(out, FMT_STRING("expCtx is null"));
+            out = fmt::format_to(out, "expCtx is null");
             return out;
         }

-        out = format_to(
-            out,
-            FMT_STRING("{{collator: {}, uuid: {}, needsMerge: {}, allowDiskUse: {}, "
-                       "isMapReduceCommand: {}, "
-                       "inLookup: {}, inUnionWith: {}, forcePlanCache: {}, sbeCompatibility: {}, "
-                       "sbeGroupCompatibility: {}, sbeWindowCompatibility: {}, "
-                       "sbePipelineCompatibility: {}, subPipelineDepth: {}}}"),
-            expCtx->getCollatorBSON().toString(),
-            expCtx->getUUID() ? redact(expCtx->getUUID()->toString()) : "none",
-            expCtx->getNeedsMerge(),
-            expCtx->getAllowDiskUse(),
-            expCtx->isMapReduceCommand(),
-            expCtx->getInLookup(),
-            expCtx->getInUnionWith(),
-            expCtx->getForcePlanCache(),
-            expCtx->getSbeCompatibility(),
-            expCtx->getSbeGroupCompatibility(),
-            expCtx->getSbeWindowCompatibility(),
-            expCtx->getSbePipelineCompatibility(),
-            expCtx->getSubPipelineDepth());
+        out = fmt::format_to(out, "{{");
+        auto field = [&, sep = ""_sd](StringData name, auto&& value) mutable {
+            out = fmt::format_to(out, "{}{}: {}", std::exchange(sep, ", "_sd), name, value);
+        };
+        field("collator", expCtx->getCollatorBSON().toString());
+        field("uuid", expCtx->getUUID() ? redact(expCtx->getUUID()->toString()) : "none");
+        field("needsMerge", expCtx->getNeedsMerge());
+        field("allowDiskUse", expCtx->getAllowDiskUse());
+        field("isMapReduceCommand", expCtx->isMapReduceCommand());
+        field("inLookup", expCtx->getInLookup());
+        field("inUnionWith", expCtx->getInUnionWith());
+        field("forcePlanCache", expCtx->getForcePlanCache());
+        field("sbeCompatibility", fmt::underlying(expCtx->getSbeCompatibility()));
+        field("sbeGroupCompatibility", fmt::underlying(expCtx->getSbeGroupCompatibility()));
+        field("sbeWindowCompatibility", fmt::underlying(expCtx->getSbeWindowCompatibility()));
+        field("sbePipelineCompatibility", fmt::underlying(expCtx->getSbePipelineCompatibility()));
+        field("subPipelineDepth", expCtx->getSubPipelineDepth());
+        out = fmt::format_to(out, "}}");

         return out;
     }
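Note: the rewritten printer replaces one monolithic format string with a small lambda that emits name: value pairs; std::exchange returns the empty separator on the first call and ", " on every call after that. A standalone sketch of the idiom (plain const char* standing in for MongoDB's StringData):

#include <fmt/format.h>

#include <string>
#include <utility>

int main() {
    std::string out = "{";
    const char* sep = "";
    auto field = [&](const char* name, const auto& value) {
        // std::exchange yields the old separator and installs ", " for
        // every subsequent call, so only the first field omits it.
        out += fmt::format("{}{}: {}", std::exchange(sep, ", "), name, value);
    };
    field("needsMerge", false);
    field("subPipelineDepth", 0);
    out += "}";
    fmt::print("{}\n", out);  // prints: {needsMerge: false, subPipelineDepth: 0}
}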
@@ -88,7 +87,7 @@ struct formatter<mongo::command_diagnostics::ExpressionContextPrinter> {
         return ctx.begin();
     }

-    auto format(const mongo::command_diagnostics::ExpressionContextPrinter& obj, auto& ctx) {
+    auto format(const mongo::command_diagnostics::ExpressionContextPrinter& obj, auto& ctx) const {
         return obj.format(ctx);
     }
 };
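Note: newer fmt invokes user formatters through a const reference, so formatter<T>::format must be const-qualified, which is all this hunk changes. A minimal sketch with a hypothetical Point type:

#include <fmt/format.h>

struct Point {
    int x;
    int y;
};

template <>
struct fmt::formatter<Point> {
    constexpr auto parse(format_parse_context& ctx) {
        return ctx.begin();
    }
    // Must be const in fmt 11: the library calls format() on a const formatter.
    auto format(const Point& p, format_context& ctx) const {
        return fmt::format_to(ctx.out(), "({}, {})", p.x, p.y);
    }
};

int main() {
    fmt::print("{}\n", Point{1, 2});  // prints: (1, 2)
}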
@@ -358,25 +358,25 @@ void MergeProcessor::flush(const NamespaceString& outputNs,

 BSONObj MergeProcessor::_extractMergeOnFieldsFromDoc(
     const Document& doc, const std::set<FieldPath>& mergeOnFields) const {
-    using namespace fmt::literals;
     MutableDocument result;
     for (const auto& field : mergeOnFields) {
         Value value{doc};
         for (size_t i = 0; i < field.getPathLength(); ++i) {
             value = value.getDocument().getField(field.getFieldName(i));
-            uassert(51185,
-                    "$merge write error: 'on' field '{}' is an array"_format(field.fullPath()),
-                    !value.isArray());
+            uassert(
+                51185,
+                fmt::format("$merge write error: 'on' field '{}' is an array", field.fullPath()),
+                !value.isArray());
             if (i + 1 < field.getPathLength() && !value.isObject()) {
                 value = Value();
                 break;
             }
         }
-        uassert(
-            51132,
-            "$merge write error: 'on' field '{}' cannot be missing, null or undefined if supporting index is sparse"_format(
-                field.fullPath()),
-            _allowMergeOnNullishValues || !value.nullish());
+        uassert(51132,
+                fmt::format("$merge write error: 'on' field '{}' cannot be missing, null or "
+                            "undefined if supporting index is sparse",
+                            field.fullPath()),
+                _allowMergeOnNullishValues || !value.nullish());
         if (value.nullish()) {
             result.addField(field.fullPath(), Value(BSONNULL));
         } else {
@@ -54,10 +54,9 @@ std::string NameExpression::evaluate(ExpressionContext* expCtx, const Document&
     }
     invariant(_expr);

-    using namespace fmt::literals;
     auto value = _expr->evaluate(doc, &expCtx->variables);
     uassert(8117101,
-            "Expected string, but got {}"_format(typeName(value.getType())),
+            fmt::format("Expected string, but got {}", typeName(value.getType())),
             value.getType() == BSONType::String);

     return value.getStringData().toString();

@@ -99,8 +99,8 @@ public:
      * Returns the string literal if the name expression is a string literal.
      */
     std::string getLiteral() const {
-        using namespace fmt::literals;
-        tassert(8117103, "Non string literal name expression: {}"_format(toString()), _isLiteral);
+        tassert(
+            8117103, fmt::format("Non string literal name expression: {}", toString()), _isLiteral);
         return _name.getElement().str();
     }
