SERVER-103634 Apply "bazel run format" formatter to idl files (#34832)

GitOrigin-RevId: e23ba69af1ed014bde71eb620d93c5251cfad400
Zack Winter 2025-04-09 14:11:16 -07:00 committed by MongoDB Bot
parent 9681c2cd8e
commit f20d8d6372
435 changed files with 16071 additions and 14721 deletions

View File

@ -11,6 +11,7 @@
!*.md
!*.yml
!*.yaml
!*.idl
# Ignore all golden test output files, which are machine-generated by the "golden"
# jstests. We would have to be able to run prettier (via bazel) during a jstest

View File

@ -6,6 +6,13 @@
"options": {
"tabWidth": 4
}
},
{
"files": "*.idl",
"options": {
"parser": "yaml",
"tabWidth": 4
}
},
]
}
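
With this override in place, prettier treats .idl files as YAML with a 4-space indent, which is what drives the rest of the hunks in this commit. A minimal before/after sketch of the kind of change "bazel run format" then produces, using patterns taken directly from the hunks below (exact output depends on prettier's YAML rules):

    # before
    set_at: [ startup, runtime ]
    validator: { gte: 0.0, lte: 1.0 }
    cpp_vartype: 'AtomicWord<int>'

    # after "bazel run format"
    set_at: [startup, runtime]
    validator: {gte: 0.0, lte: 1.0}
    cpp_vartype: "AtomicWord<int>"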

View File

@ -89,7 +89,6 @@ types:
deserializer: "mongo::BSONElement::uuid"
is_view: false
structs:
default_values:
description: UnitTest for a single safeInt32
@ -121,4 +120,3 @@ structs:
uuidField:
type: bindata_uuid
description: "A binData of uuid subtype"

View File

@ -34,7 +34,8 @@ imports:
commands:
duplicateCommandNew:
description: "duplicate command in new commands fails because a command with the same name
description:
"duplicate command in new commands fails because a command with the same name
is in compatibility_test_fail_new.idl"
command_name: duplicateCommandNew
namespace: ignored

View File

@ -44,7 +44,8 @@ structs:
stability: stable
StructType:
description: "This struct contains a field that is non-optional in the old
description:
"This struct contains a field that is non-optional in the old
command and optional in the new command."
fields:
structField:

View File

@ -31,7 +31,8 @@ global:
structs:
ImportedUnstableNewFieldReply:
description: "This reply contains a field that is stable in the old command but is
description:
"This reply contains a field that is stable in the old command but is
unstable in the new command."
fields:
unstableNewField:

View File

@ -81,7 +81,8 @@ commands:
reply_type: NewStructFieldNoUnstableField
newCommandTypeStructFieldNoUnstableField:
description: "newly added command has a command type struct field without the unstable
description:
"newly added command has a command type struct field without the unstable
field specified"
command_name: newCommandTypeStructFieldNoUnstableField
namespace: type
@ -92,7 +93,8 @@ commands:
reply_type: OkReply
newCommandParameterBsonSerializationTypeAny:
description: "newly added command has a parameter type that has bson_serialization_type
description:
"newly added command has a parameter type that has bson_serialization_type
with 'any'"
command_name: newCommandParameterBsonSerializationTypeAny
namespace: ignored
@ -106,7 +108,8 @@ commands:
reply_type: OkReply
newCommandReplyBsonSerializationTypeAny:
description: "newly added command has a reply field type that has bson_serialization_type
description:
"newly added command has a reply field type that has bson_serialization_type
with 'any'"
command_name: newCommandReplyBsonSerializationTypeAny
namespace: ignored
@ -116,7 +119,8 @@ commands:
reply_type: NewStructFieldTypeContainsAny
newCommandTypeStructFieldBsonSerializationTypeAny:
description: "newly added command has a command struct field type that has
description:
"newly added command has a command struct field type that has
bson_serialization_type with 'any'"
command_name: newCommandTypeStructFieldBsonSerializationTypeAny
namespace: type

View File

@ -34,7 +34,8 @@ imports:
commands:
duplicateCommandOld:
description: "duplicate command in old commands fails because a command with the same name
description:
"duplicate command in old commands fails because a command with the same name
is in compatibility_test_fail_old.idl"
command_name: duplicateCommandOld
namespace: ignored

View File

@ -35,7 +35,6 @@ types:
is_view: false
structs:
ErrorReply:
description: "Error Reply struct should fail"
strict: false
@ -45,7 +44,8 @@ structs:
stability: stable
StructType:
description: "This struct contains a field that is non-optional in the old
description:
"This struct contains a field that is non-optional in the old
command and optional in the new command."
fields:
structField:

View File

@ -31,7 +31,8 @@ global:
structs:
ImportedUnstableNewFieldReply:
description: "This reply contains a field that is stable in the old command but is
description:
"This reply contains a field that is stable in the old command but is
unstable in the new command."
fields:
unstableNewField:

View File

@ -34,7 +34,8 @@ imports:
structs:
ImportedStableNewFieldReply:
description: "This reply contains a field that is unstable in the old command but is
description:
"This reply contains a field that is unstable in the old command but is
stable in the new command."
fields:
stableNewField:

View File

@ -34,7 +34,8 @@ imports:
structs:
ImportedStableNewFieldReply:
description: "This reply contains a field that is unstable in the old command but is
description:
"This reply contains a field that is unstable in the old command but is
stable in the new command."
fields:
stableNewField:

View File

@ -35,9 +35,9 @@ server_parameters:
The rate at which egress connection metrics below a certain time threshold will be logged at
info level. This only applies for the 'network.totalConnectionAcquiredToWireMillis'
server status metric.
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<double>
cpp_varname: gConnectionAcquisitionToWireLoggingRate
default: 0.05
validator: { gte: 0.0, lte: 1.0 }
validator: {gte: 0.0, lte: 1.0}
redact: false

View File

@ -32,7 +32,6 @@ imports:
- "mongo/db/basic_types.idl"
structs:
# Follow the MongoDB Drivers API for passing API Version parameters in clients. The drivers API
# is like MongoClient(uri, api={version: "1", strict: true, deprecationErrors: true}).
@ -50,7 +49,8 @@ structs:
type: bool
optional: true
deprecationErrors:
description: "Whether to restrict the connection to non-deprecated behaviors in the
description:
"Whether to restrict the connection to non-deprecated behaviors in the
requested API version"
type: bool
optional: true

View File

@ -69,4 +69,3 @@ structs:
description: "Comma separated list of TLS protocols to disable [TLS1_0,TLS1_1,TLS1_2,TLS1_3]"
type: string
optional: true

View File

@ -58,11 +58,10 @@ enums:
# Read from any member.
Nearest: "nearest"
structs:
ReadPreferenceIdl:
description: "An object representing a read preference document, used only for mapping to
description:
"An object representing a read preference document, used only for mapping to
and from BSON."
strict: false
fields:
@ -84,7 +83,8 @@ structs:
validator:
callback: validateMaxStalenessSecondsExternal
$_isPretargeted:
description: "Used by the replica set endpoint in sharding to mark commands that it
description:
"Used by the replica set endpoint in sharding to mark commands that it
forces to go through the router as needing to target the local mongod."
cpp_name: isPretargeted
type: bool

View File

@ -39,13 +39,12 @@ server_parameters:
name: RSMProtocolServerParameter
redact: false
defaultFindReplicaSetHostTimeoutMS:
description: >-
The timeout used for finding a replica set host.
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<int>'
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<int>"
cpp_varname: gDefaultFindReplicaSetHostTimeoutMS
default: 15000
test_only: true

View File

@ -3,8 +3,8 @@
global:
cpp_namespace: "mongo::awsIam"
configs:
section: 'AWS IAM Options'
source: [ cli ]
section: "AWS IAM Options"
source: [cli]
cpp_includes:
- mongo/client/sasl_aws_client_options.h

View File

@ -34,7 +34,7 @@ imports:
structs:
Ec2SecurityCredentials:
description : "Security Credentials from EC2 Instance Metadata"
description: "Security Credentials from EC2 Instance Metadata"
# See - https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials
# Don't fail if AWS expands this to include more fields in the future
strict: false
@ -48,7 +48,7 @@ structs:
Expiration: string
EcsTaskSecurityCredentials:
description : "Security Credentials from ECS Task"
description: "Security Credentials from ECS Task"
# See -https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html
# Don't fail if AWS expands this to include more fields in the future
strict: false

View File

@ -26,17 +26,15 @@
# it in the license file.
#
global:
cpp_namespace: "mongo"
cpp_includes:
- "mongo/client/sasl_sspi_options.h"
configs:
section: "Kerberos Options"
source: [ yaml, cli, ini ]
source: [yaml, cli, ini]
configs:
"security.sspiHostnamecanonicalization":
short_name: "sspiHostnamecanonicalization"
description: "DNS resolution strategy to use for hostname canonicalization. May be one of: {none, forward, forwardAndReverse}"
@ -53,4 +51,3 @@ configs:
hidden: true
condition:
preprocessor: "defined(_WIN32)"

View File

@ -32,30 +32,30 @@ global:
- "mongo/db/server_options.h"
- "mongo/db/server_options_base.h"
configs:
section: 'General options'
source: [ cli ]
section: "General options"
source: [cli]
initializer:
register: addCliOptions
configs:
help:
description: 'Show this usage information'
description: "Show this usage information"
single_name: h
arg_vartype: Switch
source: [ cli ]
source: [cli]
filter:
description: 'Filter by test file name'
description: "Filter by test file name"
single_name: f
arg_vartype: StringVector
source: [ cli ]
source: [cli]
source-dir:
description: 'The directory containing the json test files'
description: "The directory containing the json test files"
single_name: s
arg_vartype: String
source: [ cli ]
source: [cli]
verbose:
description: 'The verbosity level'
description: "The verbosity level"
single_name: v
arg_vartype: String
source: [ cli ]
source: [cli]
implicit: v

View File

@ -32,9 +32,10 @@ global:
server_parameters:
heartBeatFrequencyMs:
description: "For the 'sdam' replicaSetMonitorProtocol, determines how long to wait between
description: >-
For the 'sdam' replicaSetMonitorProtocol, determines how long to wait between
'hello' requests. For the 'streamable' replicaSetMonitorProtocol, duration between rtt
measurements."
measurements.
set_at: startup
cpp_vartype: int
cpp_varname: sdamHeartBeatFrequencyMs
@ -59,5 +60,3 @@ server_parameters:
gte: 500
default: 10000
redact: false

View File

@ -62,7 +62,6 @@ enums:
PrefixPreview: "prefixPreview"
structs:
QueryTypeConfig:
description: "Information about query support for a field"
strict: true
@ -76,7 +75,7 @@ structs:
type: exactInt64
default: 8
stability: unstable
validator: { gte: 0 }
validator: {gte: 0}
min:
description: "Defines the lower bound for field values in this encrypted index. Only applicable for queryType range"
type: encrypted_numeric
@ -92,7 +91,7 @@ structs:
type: exactInt64
optional: true
stability: unstable
validator: { gte: 1, lte: 8 }
validator: {gte: 1, lte: 8}
precision:
description: >-
Defines the number of digits after the decimal point for floating point numbers.
@ -100,11 +99,11 @@ structs:
type: safeInt
optional: true
stability: unstable
validator: { gte: 0 }
validator: {gte: 0}
trimFactor:
description: >-
Number of root tree levels to cut. Increasing this value will decrease write conflicts and speed up inserts,
but large range queries will slow down."
but large range queries will slow down.
type: safeInt
optional: true
stability: unstable
@ -116,7 +115,7 @@ structs:
type: safeInt
optional: true
stability: unstable
validator: { gt: 0 }
validator: {gt: 0}
strMinQueryLength:
description: >-
The minimum allowed length for inputs to prefix/suffix/substring lookups.
@ -124,7 +123,7 @@ structs:
type: safeInt
optional: true
stability: unstable
validator: { gt: 0 }
validator: {gt: 0}
strMaxQueryLength:
description: >-
The maximum allowed length for inputs to prefix/suffix/substring lookups.
@ -132,7 +131,7 @@ structs:
type: safeInt
optional: true
stability: unstable
validator: { gt: 0 }
validator: {gt: 0}
caseSensitive:
description: "Enables case folding in the StrEncode algorithm if set to false."
type: bool
@ -160,7 +159,7 @@ structs:
bsonType:
description: "BSON type of field to encrypt"
type: string
validator: { callback: "isValidBSONTypeName" }
validator: {callback: "isValidBSONTypeName"}
optional: true
stability: unstable
queries:
@ -199,4 +198,4 @@ structs:
type: int
optional: true
stability: unstable
validator: { gt: 0 }
validator: {gt: 0}

View File

@ -41,25 +41,25 @@ enums:
type: int
values:
# FLE 1 Payloads
kPlaceholder : 0 # see FLEEncryptionPlaceholder
kDeterministic : 1
kRandom : 2
kPlaceholder: 0 # see FLEEncryptionPlaceholder
kDeterministic: 1
kRandom: 2
# FLE 2
# FLE 2 Client-side
kFLE2Placeholder : 3 # see FLE2EncryptionPlaceholder
kFLE2InsertUpdatePayload : 4 # see FLE2InsertUpdatePayload
kFLE2FindEqualityPayload : 5 # see FLE2FindEqualityPayload
kFLE2Placeholder: 3 # see FLE2EncryptionPlaceholder
kFLE2InsertUpdatePayload: 4 # see FLE2InsertUpdatePayload
kFLE2FindEqualityPayload: 5 # see FLE2FindEqualityPayload
# FLE 2 Server-side
kFLE2UnindexedEncryptedValue : 6 # see FLE2IndexedEqualityEncryptedValue
kFLE2EqualityIndexedValue : 7
kFLE2UnindexedEncryptedValue: 6 # see FLE2IndexedEqualityEncryptedValue
kFLE2EqualityIndexedValue: 7
# Transient encrypted data in query rewrites, not persisted
# same as BinDataGeneral but redacted
kFLE2TransientRaw : 8
kFLE2TransientRaw: 8
kFLE2RangeIndexedValue : 9 # see FLE2IndexedRangeEncryptedValue
kFLE2FindRangePayload : 10 # see FLE2FindRangePayload
kFLE2RangeIndexedValue: 9 # see FLE2IndexedRangeEncryptedValue
kFLE2FindRangePayload: 10 # see FLE2FindRangePayload
# QE protocol version 2 - client-side payloads
kFLE2InsertUpdatePayloadV2: 11
@ -81,7 +81,8 @@ enums:
kFle2: 2
FleAlgorithmInt:
description: "The algorithm used to encrypt fields for field level encryption represented
description:
"The algorithm used to encrypt fields for field level encryption represented
as an unsigned integer."
type: int
values:
@ -89,7 +90,8 @@ enums:
kRandom: 2
Fle2AlgorithmInt:
description: "The algorithm used to encrypt fields for field level encryption represented
description:
"The algorithm used to encrypt fields for field level encryption represented
as an unsigned integer."
type: int
values:
@ -130,7 +132,8 @@ types:
structs:
EncryptionPlaceholder:
description: "Implements Encryption BinData (subtype 6) sub-subtype 0, the intent-to-encrypt
description:
"Implements Encryption BinData (subtype 6) sub-subtype 0, the intent-to-encrypt
mapping. Contains a value to encrypt and a description of how it should be encrypted."
strict: true
fields:
@ -144,7 +147,8 @@ structs:
cpp_name: keyId
optional: true
ka:
description: "Used to query the key vault by keyAltName. If omitted,
description:
"Used to query the key vault by keyAltName. If omitted,
ki must be specified."
type: string
cpp_name: keyAltName
@ -155,7 +159,8 @@ structs:
cpp_name: value
FLE2EncryptionPlaceholder:
description: "Implements Encryption BinData (subtype 6) sub-subtype 3, the intent-to-encrypt
description:
"Implements Encryption BinData (subtype 6) sub-subtype 3, the intent-to-encrypt
mapping. Contains a value to encrypt and a description of how it should be encrypted."
strict: true
cpp_validator_func: "validateIDLFLE2EncryptionPlaceholder"
@ -190,7 +195,7 @@ structs:
type: long
cpp_name: sparsity
optional: true
validator: { gte: 1, lte: 8 }
validator: {gte: 1, lte: 8}
EdgeTokenSetV2:
description: "Payload of an indexed field to insert or update. Version 2"
@ -358,7 +363,7 @@ structs:
description: "Randomly sampled contention factor value"
type: long
cpp_name: contentionFactor
validator: { gte: 0 }
validator: {gte: 0}
g:
description: "Array of Edges"
type: array<EdgeTokenSetV2>
@ -374,19 +379,19 @@ structs:
type: long
cpp_name: sparsity
optional: true
validator: { gte: 1, lte: 8 }
validator: {gte: 1, lte: 8}
pn:
cpp_name: precision
description: "Determines the number of digits after the decimal point for floating point values"
type: safeInt
optional: true
validator: { gte: 0 }
validator: {gte: 0}
tf:
cpp_name: trimFactor
description: "Determines how many levels of the hypergraph to trim"
type: safeInt
optional: true
validator: { gte: 0 }
validator: {gte: 0}
mn:
cpp_name: indexMin
description: "Minimum value for the encrypted index that this query is using."
@ -475,19 +480,19 @@ structs:
type: long
cpp_name: sparsity
optional: true
validator: { gte: 1, lte: 8 }
validator: {gte: 1, lte: 8}
pn:
cpp_name: precision
description: "Determines the number of digits after the decimal point for floating point values"
type: safeInt
optional: true
validator: { gte: 0 }
validator: {gte: 0}
tf:
cpp_name: trimFactor
description: "Determines how many levels of the hypergraph to trim"
type: safeInt
optional: true
validator: { gte: 0 }
validator: {gte: 0}
mn:
cpp_name: indexMin
description: "Minimum value for the encrypted index that this query is using."
@ -566,12 +571,12 @@ structs:
description: "Determines the number of digits after the decimal point for floating point values"
type: safeInt
optional: true
validator: { gte: 0 }
validator: {gte: 0}
trimFactor:
description: "Determines how many levels of the hypergraph to trim"
type: safeInt
optional: true
validator: { gte: 0 }
validator: {gte: 0}
indexMin:
description: "Minimum value for the encrypted index that this query is using."
type: encrypted_numeric_element
@ -624,12 +629,12 @@ structs:
description: "Determines the number of digits after the decimal point for floating point values"
type: safeInt
optional: true
validator: { gte: 0 }
validator: {gte: 0}
trimFactor:
description: "Determines how many levels of the hypergraph to trim"
type: safeInt
optional: true
validator: { gte: 0 }
validator: {gte: 0}
FLE2SubstringInsertSpec:
description: "Holds parameters applicable to substring indexed fields."
@ -638,17 +643,17 @@ structs:
mlen:
description: "max substring code point length to be indexed"
type: safeInt
validator: { gt: 0 }
validator: {gt: 0}
cpp_name: maxLength
ub:
description: "upper bound code point length of valid substring queries"
type: safeInt
validator: { gt: 0 }
validator: {gt: 0}
cpp_name: maxQueryLength
lb:
description: "lower bound code point length of valid substring queries"
type: safeInt
validator: { gt: 0 }
validator: {gt: 0}
cpp_name: minQueryLength
FLE2SuffixInsertSpec:
@ -658,12 +663,12 @@ structs:
ub:
description: "upper bound code point length of valid suffix queries"
type: safeInt
validator: { gt: 0 }
validator: {gt: 0}
cpp_name: maxQueryLength
lb:
description: "lower bound length of valid suffix queries"
type: safeInt
validator: { gt: 0 }
validator: {gt: 0}
cpp_name: minQueryLength
FLE2PrefixInsertSpec:
@ -673,12 +678,12 @@ structs:
ub:
description: "upper bound code point length of valid prefix queries"
type: safeInt
validator: { gt: 0 }
validator: {gt: 0}
cpp_name: maxQueryLength
lb:
description: "lower bound code point length of valid prefix queries"
type: safeInt
validator: { gt: 0 }
validator: {gt: 0}
cpp_name: minQueryLength
FLE2TextSearchInsertSpec:

View File

@ -34,7 +34,8 @@ imports:
structs:
FLECompactionOptions:
description: "Options related to compaction or cleanup operations in Queryable Encryption
description:
"Options related to compaction or cleanup operations in Queryable Encryption
protocol version 2"
strict: false
inline_chained_structs: true
@ -42,21 +43,24 @@ structs:
ClusterServerParameter: clusterServerParameter
fields:
maxCompactionSize:
description: "Limits the total size of the set of ESC documents that will be removed by
description:
"Limits the total size of the set of ESC documents that will be removed by
each compactStructuredEncryptionData operation"
type: int
default: 268435456 # 256 MB
validator:
gt: 0
maxAnchorCompactionSize:
description: "Limits the total size of the set of ESC anchors that will be removed by
description:
"Limits the total size of the set of ESC anchors that will be removed by
each cleanupStructuredEncryptionData operation"
type: int
default: 268435456 # 256 MB
validator:
gt: 0
maxESCEntriesPerCompactionDelete:
description: "Limits the number of ESC non-anchor documents that will be removed by each
description:
"Limits the number of ESC non-anchor documents that will be removed by each
delete operation performed by the compactStructuredEncryptionData operation"
type: int
default: 350000
@ -73,7 +77,7 @@ structs:
server_parameters:
unsupportedDangerousTestingFLEDiagnosticsEnabled:
description: 'Start with test-only FLE statistics behavior enabled'
description: "Start with test-only FLE statistics behavior enabled"
set_at: startup
cpp_vartype: bool
cpp_varname: gUnsupportedDangerousTestingFLEDiagnosticsEnabledAtStartup
@ -81,7 +85,8 @@ server_parameters:
redact: false
fleCompactionOptions:
description: "Options related to compaction or cleanup operations in Queryable Encryption
description:
"Options related to compaction or cleanup operations in Queryable Encryption
protocol version 2"
set_at: cluster
cpp_varname: "fleCompactionOptions"

View File

@ -37,361 +37,361 @@ types:
CollectionsLevel1Token:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'CollectionsLevel1Token'
description: "CollectionsLevel1Token"
cpp_type: "CollectionsLevel1Token"
serializer: 'toCDR'
deserializer: '::mongo::CollectionsLevel1Token::parse'
serializer: "toCDR"
deserializer: "::mongo::CollectionsLevel1Token::parse"
is_view: false
ServerDataEncryptionLevel1Token:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerDataEncryptionLevel1Token'
description: "ServerDataEncryptionLevel1Token"
cpp_type: "ServerDataEncryptionLevel1Token"
serializer: 'toCDR'
deserializer: '::mongo::ServerDataEncryptionLevel1Token::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerDataEncryptionLevel1Token::parse"
is_view: false
EDCToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCToken'
description: "EDCToken"
cpp_type: "EDCToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCToken::parse"
is_view: false
ESCToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCToken'
description: "ESCToken"
cpp_type: "ESCToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCToken::parse"
is_view: false
ECOCToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ECOCToken'
description: "ECOCToken"
cpp_type: "ECOCToken"
serializer: 'toCDR'
deserializer: '::mongo::ECOCToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ECOCToken::parse"
is_view: false
EDCDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCDerivedFromDataToken'
description: "EDCDerivedFromDataToken"
cpp_type: "EDCDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCDerivedFromDataToken::parse"
is_view: false
ESCDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCDerivedFromDataToken'
description: "ESCDerivedFromDataToken"
cpp_type: "ESCDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCDerivedFromDataToken::parse"
is_view: false
EDCDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCDerivedFromDataTokenAndContentionFactorToken'
description: "EDCDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "EDCDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
ESCDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCDerivedFromDataTokenAndContentionFactorToken'
description: "ESCDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "ESCDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
EDCTwiceDerivedToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTwiceDerivedToken'
description: "EDCTwiceDerivedToken"
cpp_type: "EDCTwiceDerivedToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTwiceDerivedToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTwiceDerivedToken::parse"
is_view: false
ESCTwiceDerivedTagToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTwiceDerivedTagToken'
description: "ESCTwiceDerivedTagToken"
cpp_type: "ESCTwiceDerivedTagToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTwiceDerivedTagToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTwiceDerivedTagToken::parse"
is_view: false
ESCTwiceDerivedValueToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTwiceDerivedValueToken'
description: "ESCTwiceDerivedValueToken"
cpp_type: "ESCTwiceDerivedValueToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTwiceDerivedValueToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTwiceDerivedValueToken::parse"
is_view: false
ServerTokenDerivationLevel1Token:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerTokenDerivationLevel1Token'
description: "ServerTokenDerivationLevel1Token"
cpp_type: "ServerTokenDerivationLevel1Token"
serializer: 'toCDR'
deserializer: '::mongo::ServerTokenDerivationLevel1Token::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerTokenDerivationLevel1Token::parse"
is_view: false
ServerDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerDerivedFromDataToken'
description: "ServerDerivedFromDataToken"
cpp_type: "ServerDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ServerDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerDerivedFromDataToken::parse"
is_view: false
ServerCountAndContentionFactorEncryptionToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerCountAndContentionFactorEncryptionToken'
description: "ServerCountAndContentionFactorEncryptionToken"
cpp_type: "ServerCountAndContentionFactorEncryptionToken"
serializer: 'toCDR'
deserializer: '::mongo::ServerCountAndContentionFactorEncryptionToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerCountAndContentionFactorEncryptionToken::parse"
is_view: false
ServerZerosEncryptionToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerZerosEncryptionToken'
description: "ServerZerosEncryptionToken"
cpp_type: "ServerZerosEncryptionToken"
serializer: 'toCDR'
deserializer: '::mongo::ServerZerosEncryptionToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerZerosEncryptionToken::parse"
is_view: false
AnchorPaddingRootToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'AnchorPaddingRootToken'
description: "AnchorPaddingRootToken"
cpp_type: "AnchorPaddingRootToken"
serializer: 'toCDR'
deserializer: '::mongo::AnchorPaddingRootToken::parse'
serializer: "toCDR"
deserializer: "::mongo::AnchorPaddingRootToken::parse"
is_view: false
AnchorPaddingKeyToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'AnchorPaddingKeyToken'
description: "AnchorPaddingKeyToken"
cpp_type: "AnchorPaddingKeyToken"
serializer: 'toCDR'
deserializer: '::mongo::AnchorPaddingKeyToken::parse'
serializer: "toCDR"
deserializer: "::mongo::AnchorPaddingKeyToken::parse"
is_view: false
AnchorPaddingValueToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'AnchorPaddingValueToken'
description: "AnchorPaddingValueToken"
cpp_type: "AnchorPaddingValueToken"
serializer: 'toCDR'
deserializer: '::mongo::AnchorPaddingValueToken::parse'
serializer: "toCDR"
deserializer: "::mongo::AnchorPaddingValueToken::parse"
is_view: false
EDCTextExactDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTextExactDerivedFromDataToken'
description: "EDCTextExactDerivedFromDataToken"
cpp_type: "EDCTextExactDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTextExactDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTextExactDerivedFromDataToken::parse"
is_view: false
EDCTextSubstringDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTextSubstringDerivedFromDataToken'
description: "EDCTextSubstringDerivedFromDataToken"
cpp_type: "EDCTextSubstringDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTextSubstringDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTextSubstringDerivedFromDataToken::parse"
is_view: false
EDCTextSuffixDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTextSuffixDerivedFromDataToken'
description: "EDCTextSuffixDerivedFromDataToken"
cpp_type: "EDCTextSuffixDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTextSuffixDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTextSuffixDerivedFromDataToken::parse"
is_view: false
EDCTextPrefixDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTextPrefixDerivedFromDataToken'
description: "EDCTextPrefixDerivedFromDataToken"
cpp_type: "EDCTextPrefixDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTextPrefixDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTextPrefixDerivedFromDataToken::parse"
is_view: false
EDCTextExactDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTextExactDerivedFromDataTokenAndContentionFactorToken'
description: "EDCTextExactDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "EDCTextExactDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTextExactDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTextExactDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
EDCTextSubstringDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTextSubstringDerivedFromDataTokenAndContentionFactorToken'
description: "EDCTextSubstringDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "EDCTextSubstringDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTextSubstringDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTextSubstringDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
EDCTextSuffixDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTextSuffixDerivedFromDataTokenAndContentionFactorToken'
description: "EDCTextSuffixDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "EDCTextSuffixDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTextSuffixDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTextSuffixDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
EDCTextPrefixDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EDCTextPrefixDerivedFromDataTokenAndContentionFactorToken'
description: "EDCTextPrefixDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "EDCTextPrefixDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::EDCTextPrefixDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::EDCTextPrefixDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
ESCTextExactDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTextExactDerivedFromDataToken'
description: "ESCTextExactDerivedFromDataToken"
cpp_type: "ESCTextExactDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTextExactDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTextExactDerivedFromDataToken::parse"
is_view: false
ESCTextSubstringDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTextSubstringDerivedFromDataToken'
description: "ESCTextSubstringDerivedFromDataToken"
cpp_type: "ESCTextSubstringDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTextSubstringDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTextSubstringDerivedFromDataToken::parse"
is_view: false
ESCTextSuffixDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTextSuffixDerivedFromDataToken'
description: "ESCTextSuffixDerivedFromDataToken"
cpp_type: "ESCTextSuffixDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTextSuffixDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTextSuffixDerivedFromDataToken::parse"
is_view: false
ESCTextPrefixDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTextPrefixDerivedFromDataToken'
description: "ESCTextPrefixDerivedFromDataToken"
cpp_type: "ESCTextPrefixDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTextPrefixDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTextPrefixDerivedFromDataToken::parse"
is_view: false
ESCTextExactDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTextExactDerivedFromDataTokenAndContentionFactorToken'
description: "ESCTextExactDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "ESCTextExactDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTextExactDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTextExactDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
ESCTextSubstringDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTextSubstringDerivedFromDataTokenAndContentionFactorToken'
description: "ESCTextSubstringDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "ESCTextSubstringDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTextSubstringDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTextSubstringDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
ESCTextSuffixDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTextSuffixDerivedFromDataTokenAndContentionFactorToken'
description: "ESCTextSuffixDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "ESCTextSuffixDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTextSuffixDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTextSuffixDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
ESCTextPrefixDerivedFromDataTokenAndContentionFactorToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ESCTextPrefixDerivedFromDataTokenAndContentionFactorToken'
description: "ESCTextPrefixDerivedFromDataTokenAndContentionFactorToken"
cpp_type: "ESCTextPrefixDerivedFromDataTokenAndContentionFactorToken"
serializer: 'toCDR'
deserializer: '::mongo::ESCTextPrefixDerivedFromDataTokenAndContentionFactorToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ESCTextPrefixDerivedFromDataTokenAndContentionFactorToken::parse"
is_view: false
ServerTextExactDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerTextExactDerivedFromDataToken'
description: "ServerTextExactDerivedFromDataToken"
cpp_type: "ServerTextExactDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ServerTextExactDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerTextExactDerivedFromDataToken::parse"
is_view: false
ServerTextSubstringDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerTextSubstringDerivedFromDataToken'
description: "ServerTextSubstringDerivedFromDataToken"
cpp_type: "ServerTextSubstringDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ServerTextSubstringDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerTextSubstringDerivedFromDataToken::parse"
is_view: false
ServerTextSuffixDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerTextSuffixDerivedFromDataToken'
description: "ServerTextSuffixDerivedFromDataToken"
cpp_type: "ServerTextSuffixDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ServerTextSuffixDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerTextSuffixDerivedFromDataToken::parse"
is_view: false
ServerTextPrefixDerivedFromDataToken:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'ServerTextPrefixDerivedFromDataToken'
description: "ServerTextPrefixDerivedFromDataToken"
cpp_type: "ServerTextPrefixDerivedFromDataToken"
serializer: 'toCDR'
deserializer: '::mongo::ServerTextPrefixDerivedFromDataToken::parse'
serializer: "toCDR"
deserializer: "::mongo::ServerTextPrefixDerivedFromDataToken::parse"
is_view: false
EncryptedStateCollectionTokensV2:
bson_serialization_type: bindata
bindata_subtype: generic
description: 'EncryptedStateCollectionTokensV2'
description: "EncryptedStateCollectionTokensV2"
cpp_type: "StateCollectionTokensV2::Encrypted"
serializer: 'toCDR'
deserializer: '::mongo::StateCollectionTokensV2::Encrypted::parse'
serializer: "toCDR"
deserializer: "::mongo::StateCollectionTokensV2::Encrypted::parse"
is_view: false
structs:

View File

@ -35,5 +35,5 @@ server_parameters:
cpp_vartype: AtomicWord<int>
cpp_varname: gJWKSMinimumQuiescePeriodSecs
default: 60 # One minute
validator: { gte: 0 }
validator: {gte: 0}
redact: false

View File

@ -117,7 +117,8 @@ structs:
cpp_name: tenantId
optional: true
"mongodb/expectPrefix":
description: If TRUE, mongod will expect to receive tenant-prefixed database names,
description:
If TRUE, mongod will expect to receive tenant-prefixed database names,
and should respond the same way.
type: safeBool
cpp_name: expectPrefix

View File

@ -49,7 +49,7 @@ enums:
server_parameters:
storageEngineConcurrentWriteTransactions:
description: "Storage Engine Concurrent Write Transactions"
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<int32_t>
cpp_varname: gConcurrentWriteTransactions
on_update: "admission::TicketHolderManager::updateConcurrentWriteTransactions"
@ -66,7 +66,7 @@ server_parameters:
storageEngineConcurrentReadTransactions:
description: "Storage Engine Concurrent Read Transactions"
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<int32_t>
cpp_varname: gConcurrentReadTransactions
on_update: "admission::TicketHolderManager::updateConcurrentReadTransactions"
@ -86,12 +86,12 @@ server_parameters:
Controls the maximum number of read transactions waiting in queue for a thread to be free.
Setting this number to 0 will not allow any transaction above the max conccurency amount.
Reducing this value will only affect new enqueued read transactions
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: Atomic<std::int32_t>
cpp_varname: gReadMaxQueueDepth
on_update: "admission::TicketHolderManager::updateReadMaxQueueDepth"
default: { expr: 'TicketHolder::kDefaultMaxQueueDepth' }
validator: { gte: 0 }
default: {expr: "TicketHolder::kDefaultMaxQueueDepth"}
validator: {gte: 0}
redact: false
storageEngineWriteMaxQueueDepth:
@ -99,12 +99,12 @@ server_parameters:
Controls the maximum number of write transactions waiting in queue for a thread to be free.
Setting this number to 0 will not allow any transaction above the max conccurency amount.
Reducing this value will only affect new enqueued write transactions
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: Atomic<std::int32_t>
cpp_varname: gWriteMaxQueueDepth
on_update: "admission::TicketHolderManager::updateWriteMaxQueueDepth"
default: { expr: 'TicketHolder::kDefaultMaxQueueDepth' }
validator: { gte: 0 }
default: {expr: "TicketHolder::kDefaultMaxQueueDepth"}
validator: {gte: 0}
redact: false
storageEngineConcurrencyAdjustmentAlgorithm:

View File

@ -30,99 +30,99 @@ global:
server_parameters:
enableFlowControl:
description: 'Enable flow control'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<bool>'
cpp_varname: 'gFlowControlEnabled'
description: "Enable flow control"
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<bool>"
cpp_varname: "gFlowControlEnabled"
default: true
redact: false
flowControlTargetLagSeconds:
description: 'Target maximum majority committed lag with flow control enabled'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<std::int32_t>'
cpp_varname: 'gFlowControlTargetLagSeconds'
description: "Target maximum majority committed lag with flow control enabled"
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<std::int32_t>"
cpp_varname: "gFlowControlTargetLagSeconds"
default: 10
validator: { gt: 0 }
validator: {gt: 0}
redact: false
flowControlThresholdLagPercentage:
description: 'Threshold percentage of target lag where flow control will become engaged'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<double>'
cpp_varname: 'gFlowControlThresholdLagPercentage'
description: "Threshold percentage of target lag where flow control will become engaged"
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<double>"
cpp_varname: "gFlowControlThresholdLagPercentage"
default: 0.5
validator: { gte: 0.0, lte: 1.0 }
validator: {gte: 0.0, lte: 1.0}
redact: false
flowControlMaxSamples:
description: 'The maximum number of samples the flow control algorithm will hold onto.'
set_at: [ startup ]
cpp_vartype: 'int'
cpp_varname: 'gFlowControlMaxSamples'
description: "The maximum number of samples the flow control algorithm will hold onto."
set_at: [startup]
cpp_vartype: "int"
cpp_varname: "gFlowControlMaxSamples"
default: 1000000
validator: { gt: 0 }
validator: {gt: 0}
redact: false
flowControlSamplePeriod:
description: 'How often flow control samples optimes. Smaller periods have higher resolution, but will suffer degredation sooner in the event of commit point lag.'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<int>'
cpp_varname: 'gFlowControlSamplePeriod'
description: "How often flow control samples optimes. Smaller periods have higher resolution, but will suffer degredation sooner in the event of commit point lag."
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<int>"
cpp_varname: "gFlowControlSamplePeriod"
default: 1000
validator: { gt: 0 }
validator: {gt: 0}
redact: false
flowControlMinTicketsPerSecond:
description: 'The minimum number of tickets flow control will hand out per second. A smaller value will have better resolution on batch-write workloads, but may unintentionally delay system operations.'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<int>'
cpp_varname: 'gFlowControlMinTicketsPerSecond'
description: "The minimum number of tickets flow control will hand out per second. A smaller value will have better resolution on batch-write workloads, but may unintentionally delay system operations."
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<int>"
cpp_varname: "gFlowControlMinTicketsPerSecond"
default: 100
validator: { gte: 0 }
validator: {gte: 0}
redact: false
flowControlDecayConstant:
description: 'The rate at which flow control will throttle based on how lagged the commit point is in time relative to the flow control threshold.'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<double>'
cpp_varname: 'gFlowControlDecayConstant'
description: "The rate at which flow control will throttle based on how lagged the commit point is in time relative to the flow control threshold."
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<double>"
cpp_varname: "gFlowControlDecayConstant"
default: 0.5
validator: { gt: 0.0, lt: 1.0 }
validator: {gt: 0.0, lt: 1.0}
redact: false
flowControlFudgeFactor:
description: 'When commit point lag is close to the threshold lag, the primary should track the sustainer rate, with some small penalty. This value represents that penalty. A value of 1.0 represents no penalty, a value close to 0.0 represents a large penalty. Reducing oscillations should keep this value close to 1.0.'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<double>'
cpp_varname: 'gFlowControlFudgeFactor'
description: "When commit point lag is close to the threshold lag, the primary should track the sustainer rate, with some small penalty. This value represents that penalty. A value of 1.0 represents no penalty, a value close to 0.0 represents a large penalty. Reducing oscillations should keep this value close to 1.0."
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<double>"
cpp_varname: "gFlowControlFudgeFactor"
default: 0.95
validator: { gt: 0.0, lte: 1.0 }
validator: {gt: 0.0, lte: 1.0}
redact: false
flowControlTicketAdderConstant:
description: 'When the commit point lag is small, flow control will add at least this many tickets per second. This provides meaningful ticket increase when immediately recovering from stalling events.'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<int>'
cpp_varname: 'gFlowControlTicketAdderConstant'
description: "When the commit point lag is small, flow control will add at least this many tickets per second. This provides meaningful ticket increase when immediately recovering from stalling events."
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<int>"
cpp_varname: "gFlowControlTicketAdderConstant"
default: 1000
validator: { gt: 0 }
validator: {gt: 0}
redact: false
flowControlTicketMultiplierConstant:
description: 'When the commit point lag is small, flow control will increase its ticket allocation by this factor. This provides meaningful ticket increase in more general recovery scenarios.'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<double>'
cpp_varname: 'gFlowControlTicketMultiplierConstant'
description: "When the commit point lag is small, flow control will increase its ticket allocation by this factor. This provides meaningful ticket increase in more general recovery scenarios."
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<double>"
cpp_varname: "gFlowControlTicketMultiplierConstant"
default: 1.05
validator: { gt: 1.0 }
validator: {gt: 1.0}
redact: false
flowControlWarnThresholdSeconds:
description: 'If flow control detects the replica set is lagged and the sustainer point is not moving, it will eventually log a warning. This value controls how much time the flow control is in this state before it logs. A value of zero will disable the warnings.'
set_at: [ startup, runtime ]
cpp_vartype: 'AtomicWord<int>'
cpp_varname: 'gFlowControlWarnThresholdSeconds'
description: "If flow control detects the replica set is lagged and the sustainer point is not moving, it will eventually log a warning. This value controls how much time the flow control is in this state before it logs. A value of zero will disable the warnings."
set_at: [startup, runtime]
cpp_vartype: "AtomicWord<int>"
cpp_varname: "gFlowControlWarnThresholdSeconds"
default: 10
validator: { gte: 0 }
validator: {gte: 0}
redact: false

View File

@ -35,7 +35,7 @@ global:
server_parameters:
ingressAdmissionControlEnabled:
description: Controls whether ingress admission control mechanism is enabled.
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_varname: gIngressAdmissionControlEnabled
cpp_vartype: AtomicWord<bool>
default: true
@ -45,25 +45,25 @@ server_parameters:
Controls the maximum number of tickets in the ingress admission control ticket pool. Note
that setting this parameter to 0 will block all incoming operations that are subject to
ingress admission control.
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_varname: gIngressAdmissionControllerTicketPoolSize
cpp_vartype: AtomicWord<int32_t>
on_update: IngressAdmissionController::onUpdateTicketPoolSize
# Setting the pool size equal to the max connections default means it is
# effectively infinite. See maxIncomingConnections in server_options_general.idl
default: { expr: 'static_cast<int>(DEFAULT_MAX_CONN)' }
validator: { gte: 0 }
default: {expr: "static_cast<int>(DEFAULT_MAX_CONN)"}
validator: {gte: 0}
redact: false
ingressAdmissionControllerMaxQueueDepth:
description: >-
Controls the maximum number of operations waiting for admission in the ingress admission control.
Setting this parameter will not apply to operations that are already waiting for a ticket.
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_varname: gIngressAdmissionControllerMaxQueueDepth
cpp_vartype: Atomic<std::int32_t>
on_update: IngressAdmissionController::onUpdateMaxQueueDepth
# Setting the max queue depth equal to the max connections default means it is
# effectively infinite. See maxIncomingConnections in server_options_general.idl
default: { expr: 'TicketHolder::kDefaultMaxQueueDepth' }
validator: { gte: 0 }
default: {expr: "TicketHolder::kDefaultMaxQueueDepth"}
validator: {gte: 0}
redact: false

View File

@ -33,7 +33,7 @@ global:
server_parameters:
throughputProbingStepMultiple:
description: "The step size for throughput probing is this multiple of the current concurrency"
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<double>
cpp_varname: gStepMultiple
default: 0.1
@ -70,7 +70,7 @@ server_parameters:
description: >-
The maximum number of concurrent read/write transactions for throughput probing. This maximum
is applied to reads and writes separately.
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<int32_t>
cpp_varname: gMaxConcurrency
default: 128
@ -82,7 +82,7 @@ server_parameters:
description: >-
The ratio of reads/writes when using throughput probing. A value of 0.5 indicates a 1:1 ratio,
while a value greater than 0.5 favors reads and a value less than 0.5 favors writes.
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<double>
cpp_varname: gReadWriteRatio
default: 0.5
@ -96,7 +96,7 @@ server_parameters:
How much to weigh newer concurrency measurements into the exponentially-decaying moving
average. Higher values respond faster to changes, but with more variability. Lower values
respond slower, but with less variability.
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<double>
cpp_varname: gConcurrencyMovingAverageWeight
default: 0.2
@ -108,7 +108,7 @@ server_parameters:
throughputProbingStallDetectionTimeoutMs:
description: >-
How long we wait until we report a stall
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<int32_t>
cpp_varname: gStallDetectionTimeoutMs
default: 60000

View File

@ -36,7 +36,6 @@ imports:
- "mongo/db/basic_types.idl"
structs:
APIParametersFromClient:
description: "Parser for pulling out VersionedAPI parameters from commands"
strict: false
@ -47,13 +46,15 @@ structs:
type: string
optional: true
apiStrict:
description: "With apiVersion: 'V' and apiStrict: true, the server rejects requests to
use behaviors not included in V"
description: >-
With apiVersion: 'V' and apiStrict: true, the server rejects requests to
use behaviors not included in V
type: bool
optional: true
apiDeprecationErrors:
description: "With apiVersion: 'V' and apiDeprecationErrors: true, the server rejects
requests to use behaviors deprecated in V in the current MongoDB release"
description: >-
With apiVersion: 'V' and apiDeprecationErrors: true, the server rejects
requests to use behaviors deprecated in V in the current MongoDB release
type: bool
optional: true
@ -68,7 +69,7 @@ server_parameters:
redact: false
acceptApiVersion2:
description: "Allow users to issue commands that include apiVersion: \"2\". Test only."
description: 'Allow users to issue commands that include apiVersion: "2". Test only.'
test_only: true
set_at: startup
cpp_vartype: bool

View File

@ -33,22 +33,22 @@ enums:
description: "List of supported access checks in AuthorizationSession"
type: string
values:
kCheckAuthorizedToListCollections : "check_authorized_to_list_collections"
kCheckCursorSessionPrivilege : "check_cursor_session_privilege"
kGetAuthenticatedRoleNames : "get_authenticated_role_names"
kGetAuthenticatedUserName : "get_authenticated_user_name"
kGetAuthenticatedUser : "get_authenticated_user"
kIsAuthenticated : "is_authenticated"
kIsAuthenticatedAsUserWithRole : "is_authenticated_as_user_with_role"
kIsAuthorizedForAnyActionOnAnyResourceInDB : "is_authorized_for_any_action_on_any_resource_in_db"
kIsAuthorizedForAnyActionOnResource : "is_authorized_for_any_action_on_resource"
kIsAuthorizedToChangeAsUser : "is_authorized_to_change_as_user"
kIsAuthorizedToCreateRole : "is_authorized_to_create_role"
kIsAuthorizedToParseNamespaceElement : "is_authorized_to_parse_namespace_element"
kIsCoAuthorized : "is_coauthorized"
kIsCoauthorizedWith : "is_coauthorized_with"
kIsCoauthorizedWithClient : "is_coauthorized_with_client"
kIsImpersonating : "is_impersonating"
kIsUsingLocalhostBypass : "is_using_localhost_bypass" # Called in common code in commands.cpp dispatch
kLookupUser : "lookup_user"
kShouldIgnoreAuthChecks : "should_ignore_auth_checks"
kCheckAuthorizedToListCollections: "check_authorized_to_list_collections"
kCheckCursorSessionPrivilege: "check_cursor_session_privilege"
kGetAuthenticatedRoleNames: "get_authenticated_role_names"
kGetAuthenticatedUserName: "get_authenticated_user_name"
kGetAuthenticatedUser: "get_authenticated_user"
kIsAuthenticated: "is_authenticated"
kIsAuthenticatedAsUserWithRole: "is_authenticated_as_user_with_role"
kIsAuthorizedForAnyActionOnAnyResourceInDB: "is_authorized_for_any_action_on_any_resource_in_db"
kIsAuthorizedForAnyActionOnResource: "is_authorized_for_any_action_on_resource"
kIsAuthorizedToChangeAsUser: "is_authorized_to_change_as_user"
kIsAuthorizedToCreateRole: "is_authorized_to_create_role"
kIsAuthorizedToParseNamespaceElement: "is_authorized_to_parse_namespace_element"
kIsCoAuthorized: "is_coauthorized"
kIsCoauthorizedWith: "is_coauthorized_with"
kIsCoauthorizedWithClient: "is_coauthorized_with_client"
kIsImpersonating: "is_impersonating"
kIsUsingLocalhostBypass: "is_using_localhost_bypass" # Called in common code in commands.cpp dispatch
kLookupUser: "lookup_user"
kShouldIgnoreAuthChecks: "should_ignore_auth_checks"

View File

@ -43,174 +43,174 @@ enums:
description: "test"
type: string
values:
addShard : "addShard"
advanceClusterTime : "advanceClusterTime"
listClusterCatalog : "listClusterCatalog"
addShard: "addShard"
advanceClusterTime: "advanceClusterTime"
listClusterCatalog: "listClusterCatalog"
allCollectionStats: "allCollectionStats"
analyze : "analyze"
analyzeShardKey : "analyzeShardKey"
anyAction : "anyAction" # Special ActionType that represents *all* actions
appendOplogNote : "appendOplogNote"
applicationMessage : "applicationMessage"
auditConfigure : "auditConfigure"
auditLogRotate : "auditLogRotate" # ID only
authCheck : "authCheck" # ID only
authenticate : "authenticate" # ID only
authSchemaUpgrade : "authSchemaUpgrade"
analyze: "analyze"
analyzeShardKey: "analyzeShardKey"
anyAction: "anyAction" # Special ActionType that represents *all* actions
appendOplogNote: "appendOplogNote"
applicationMessage: "applicationMessage"
auditConfigure: "auditConfigure"
auditLogRotate: "auditLogRotate" # ID only
authCheck: "authCheck" # ID only
authenticate: "authenticate" # ID only
authSchemaUpgrade: "authSchemaUpgrade"
bypassDefaultMaxTimeMS: "bypassDefaultMaxTimeMS"
bypassDocumentValidation : "bypassDocumentValidation"
bypassWriteBlockingMode : "bypassWriteBlockingMode"
changeCustomData : "changeCustomData"
changePassword : "changePassword"
changeOwnPassword : "changeOwnPassword"
changeOwnCustomData : "changeOwnCustomData"
changeStream : "changeStream"
checkFreeMonitoringStatus : "checkFreeMonitoringStatus" # Removed (backwards compatibility)
checkMetadataConsistency : "checkMetadataConsistency"
cleanupOrphaned : "cleanupOrphaned"
clearJumboFlag : "clearJumboFlag"
bypassDocumentValidation: "bypassDocumentValidation"
bypassWriteBlockingMode: "bypassWriteBlockingMode"
changeCustomData: "changeCustomData"
changePassword: "changePassword"
changeOwnPassword: "changeOwnPassword"
changeOwnCustomData: "changeOwnCustomData"
changeStream: "changeStream"
checkFreeMonitoringStatus: "checkFreeMonitoringStatus" # Removed (backwards compatibility)
checkMetadataConsistency: "checkMetadataConsistency"
cleanupOrphaned: "cleanupOrphaned"
clearJumboFlag: "clearJumboFlag"
cleanupStructuredEncryptionData: "cleanupStructuredEncryptionData"
closeAllDatabases : "closeAllDatabases" # Deprecated (backwards compatibility)
collMod : "collMod"
collStats : "collStats"
compact : "compact"
closeAllDatabases: "closeAllDatabases" # Deprecated (backwards compatibility)
collMod: "collMod"
collStats: "collStats"
compact: "compact"
compactStructuredEncryptionData: "compactStructuredEncryptionData"
configureQueryAnalyzer : "configureQueryAnalyzer"
connPoolStats : "connPoolStats"
connPoolSync : "connPoolSync"
convertToCapped : "convertToCapped"
cpuProfiler : "cpuProfiler"
createCollection : "createCollection"
createDatabase : "createDatabase" # ID only
createIndex : "createIndex" # ID only
createRole : "createRole"
createSearchIndexes : "createSearchIndexes"
createUser : "createUser"
dbCheck : "dbCheck"
dbHash : "dbHash"
dbStats : "dbStats"
dropAllRolesFromDatabase : "dropAllRolesFromDatabase" # ID only
dropAllUsersFromDatabase : "dropAllUsersFromDatabase" # ID only
dropCollection : "dropCollection"
dropConnections : "dropConnections"
dropDatabase : "dropDatabase"
dropIndex : "dropIndex"
dropRole : "dropRole"
dropSearchIndex : "dropSearchIndex"
dropUser : "dropUser"
emptycapped : "emptycapped" # Deprecated (backwards compatibility)
enableProfiler : "enableProfiler"
enableSharding : "enableSharding"
exportCollection : "exportCollection"
find : "find"
flushRouterConfig : "flushRouterConfig"
forceUUID : "forceUUID"
fsync : "fsync"
configureQueryAnalyzer: "configureQueryAnalyzer"
connPoolStats: "connPoolStats"
connPoolSync: "connPoolSync"
convertToCapped: "convertToCapped"
cpuProfiler: "cpuProfiler"
createCollection: "createCollection"
createDatabase: "createDatabase" # ID only
createIndex: "createIndex" # ID only
createRole: "createRole"
createSearchIndexes: "createSearchIndexes"
createUser: "createUser"
dbCheck: "dbCheck"
dbHash: "dbHash"
dbStats: "dbStats"
dropAllRolesFromDatabase: "dropAllRolesFromDatabase" # ID only
dropAllUsersFromDatabase: "dropAllUsersFromDatabase" # ID only
dropCollection: "dropCollection"
dropConnections: "dropConnections"
dropDatabase: "dropDatabase"
dropIndex: "dropIndex"
dropRole: "dropRole"
dropSearchIndex: "dropSearchIndex"
dropUser: "dropUser"
emptycapped: "emptycapped" # Deprecated (backwards compatibility)
enableProfiler: "enableProfiler"
enableSharding: "enableSharding"
exportCollection: "exportCollection"
find: "find"
flushRouterConfig: "flushRouterConfig"
forceUUID: "forceUUID"
fsync: "fsync"
fsyncUnlock: "fsyncUnlock" # Deprecated in favor of unlock (refer to:SERVER-89642)
getClusterParameter: "getClusterParameter"
getDatabaseVersion : "getDatabaseVersion"
getDefaultRWConcern : "getDefaultRWConcern"
getCmdLineOpts : "getCmdLineOpts"
getLog : "getLog"
getParameter : "getParameter"
getShardMap : "getShardMap"
getShardVersion : "getShardVersion"
grantRole : "grantRole"
grantPrivilegesToRole : "grantPrivilegesToRole" # ID only
grantRolesToRole : "grantRolesToRole" # ID only
grantRolesToUser : "grantRolesToUser" # ID only
hostInfo : "hostInfo"
impersonate : "impersonate"
importCollection : "importCollection"
indexStats : "indexStats"
inprog : "inprog"
insert : "insert"
internal : "internal" # Special action type that represents internal actions
invalidateUserCache : "invalidateUserCache"
getDatabaseVersion: "getDatabaseVersion"
getDefaultRWConcern: "getDefaultRWConcern"
getCmdLineOpts: "getCmdLineOpts"
getLog: "getLog"
getParameter: "getParameter"
getShardMap: "getShardMap"
getShardVersion: "getShardVersion"
grantRole: "grantRole"
grantPrivilegesToRole: "grantPrivilegesToRole" # ID only
grantRolesToRole: "grantRolesToRole" # ID only
grantRolesToUser: "grantRolesToUser" # ID only
hostInfo: "hostInfo"
impersonate: "impersonate"
importCollection: "importCollection"
indexStats: "indexStats"
inprog: "inprog"
insert: "insert"
internal: "internal" # Special action type that represents internal actions
invalidateUserCache: "invalidateUserCache"
issueDirectShardOperations: "issueDirectShardOperations"
killAnyCursor : "killAnyCursor"
killAnySession : "killAnySession"
killCursors : "killCursors" # Deprecated in favor of killAnyCursor
killop : "killop"
listCachedAndActiveUsers : "listCachedAndActiveUsers"
listCollections : "listCollections"
listCursors : "listCursors"
listDatabases : "listDatabases"
listIndexes : "listIndexes"
listSampledQueries : "listSampledQueries"
listSearchIndexes : "listSearchIndexes"
listSessions : "listSessions"
listShards : "listShards"
logRotate : "logRotate"
updateSearchIndex : "updateSearchIndex"
moveChunk : "moveChunk"
moveCollection : "moveCollection"
netstat : "netstat"
oidcListKeys : "oidcListKeys"
oidcRefreshKeys : "oidcRefreshKeys"
oidReset : "oidReset" # machine ID reset via the features command
operationMetrics : "operationMetrics"
planCacheIndexFilter : "planCacheIndexFilter" # view/update index filters
planCacheRead : "planCacheRead" # view contents of plan cache
planCacheWrite : "planCacheWrite" # clear cache, drop cache entry, pin/unpin/shun plans
killAnyCursor: "killAnyCursor"
killAnySession: "killAnySession"
killCursors: "killCursors" # Deprecated in favor of killAnyCursor
killop: "killop"
listCachedAndActiveUsers: "listCachedAndActiveUsers"
listCollections: "listCollections"
listCursors: "listCursors"
listDatabases: "listDatabases"
listIndexes: "listIndexes"
listSampledQueries: "listSampledQueries"
listSearchIndexes: "listSearchIndexes"
listSessions: "listSessions"
listShards: "listShards"
logRotate: "logRotate"
updateSearchIndex: "updateSearchIndex"
moveChunk: "moveChunk"
moveCollection: "moveCollection"
netstat: "netstat"
oidcListKeys: "oidcListKeys"
oidcRefreshKeys: "oidcRefreshKeys"
oidReset: "oidReset" # machine ID reset via the features command
operationMetrics: "operationMetrics"
planCacheIndexFilter: "planCacheIndexFilter" # view/update index filters
planCacheRead: "planCacheRead" # view contents of plan cache
planCacheWrite: "planCacheWrite" # clear cache, drop cache entry, pin/unpin/shun plans
querySettings: "querySettings" # allows to set and retrieve querySettings
queryStatsRead: "queryStatsRead" # view untransformed contents of queryStats store
queryStatsReadTransformed: "queryStatsReadTransformed" # view transformed contents of queryStats store
performRawDataOperations: "performRawDataOperations" # allow operations that use the rawData field
refineCollectionShardKey : "refineCollectionShardKey"
reIndex : "reIndex"
refineCollectionShardKey: "refineCollectionShardKey"
reIndex: "reIndex"
releaseMemoryAnyCursor: "releaseMemoryAnyCursor"
remove : "remove"
removeShard : "removeShard"
renameCollection : "renameCollection" # ID only
renameCollectionSameDB : "renameCollectionSameDB"
repairDatabase : "repairDatabase" # Deprecated (backwards compatibility)
replSetConfigure : "replSetConfigure"
replSetGetConfig : "replSetGetConfig"
replSetGetStatus : "replSetGetStatus"
replSetHeartbeat : "replSetHeartbeat"
replSetReconfig : "replSetReconfig" # ID only
replSetResizeOplog : "replSetResizeOplog"
replSetStateChange : "replSetStateChange"
reshardCollection : "reshardCollection"
resync : "resync"
revokeRole : "revokeRole"
revokePrivilegesFromRole : "revokePrivilegesFromRole" # ID only
revokeRolesFromRole : "revokeRolesFromRole" # ID only
revokeRolesFromUser : "revokeRolesFromUser" # ID only
rotateCertificates : "rotateCertificates"
runAsLessPrivilegedUser : "runAsLessPrivilegedUser"
serverStatus : "serverStatus"
setAuthenticationRestriction : "setAuthenticationRestriction"
remove: "remove"
removeShard: "removeShard"
renameCollection: "renameCollection" # ID only
renameCollectionSameDB: "renameCollectionSameDB"
repairDatabase: "repairDatabase" # Deprecated (backwards compatibility)
replSetConfigure: "replSetConfigure"
replSetGetConfig: "replSetGetConfig"
replSetGetStatus: "replSetGetStatus"
replSetHeartbeat: "replSetHeartbeat"
replSetReconfig: "replSetReconfig" # ID only
replSetResizeOplog: "replSetResizeOplog"
replSetStateChange: "replSetStateChange"
reshardCollection: "reshardCollection"
resync: "resync"
revokeRole: "revokeRole"
revokePrivilegesFromRole: "revokePrivilegesFromRole" # ID only
revokeRolesFromRole: "revokeRolesFromRole" # ID only
revokeRolesFromUser: "revokeRolesFromUser" # ID only
rotateCertificates: "rotateCertificates"
runAsLessPrivilegedUser: "runAsLessPrivilegedUser"
serverStatus: "serverStatus"
setAuthenticationRestriction: "setAuthenticationRestriction"
setClusterParameter: "setClusterParameter"
setDefaultRWConcern : "setDefaultRWConcern"
setFeatureCompatibilityVersion : "setFeatureCompatibilityVersion"
setFreeMonitoring : "setFreeMonitoring" # Removed (backwards compatibility)
setParameter : "setParameter"
setDefaultRWConcern: "setDefaultRWConcern"
setFeatureCompatibilityVersion: "setFeatureCompatibilityVersion"
setFreeMonitoring: "setFreeMonitoring" # Removed (backwards compatibility)
setParameter: "setParameter"
setUserWriteBlockMode: "setUserWriteBlockMode"
shardCollection : "shardCollection" # ID only
shardedDataDistribution : "shardedDataDistribution"
shardingState : "shardingState"
shutdown : "shutdown"
splitChunk : "splitChunk"
splitVector : "splitVector"
storageDetails : "storageDetails"
top : "top"
touch : "touch"
trafficRecord : "trafficRecord"
transitionFromDedicatedConfigServer : "transitionFromDedicatedConfigServer"
transitionToDedicatedConfigServer : "transitionToDedicatedConfigServer"
unlock : "unlock"
unshardCollection : "unshardCollection"
useTenant : "useTenant"
useUUID : "useUUID"
update : "update"
updateRole : "updateRole" # ID only
updateUser : "updateUser" # ID only
validate : "validate"
viewRole : "viewRole"
viewUser : "viewUser"
applyOps : "applyOps"
shardCollection: "shardCollection" # ID only
shardedDataDistribution: "shardedDataDistribution"
shardingState: "shardingState"
shutdown: "shutdown"
splitChunk: "splitChunk"
splitVector: "splitVector"
storageDetails: "storageDetails"
top: "top"
touch: "touch"
trafficRecord: "trafficRecord"
transitionFromDedicatedConfigServer: "transitionFromDedicatedConfigServer"
transitionToDedicatedConfigServer: "transitionToDedicatedConfigServer"
unlock: "unlock"
unshardCollection: "unshardCollection"
useTenant: "useTenant"
useUUID: "useUUID"
update: "update"
updateRole: "updateRole" # ID only
updateUser: "updateUser" # ID only
validate: "validate"
viewRole: "viewRole"
viewUser: "viewUser"
applyOps: "applyOps"
setChangeStreamState: "setChangeStreamState"
getChangeStreamState: "getChangeStreamState"


@ -39,7 +39,6 @@ imports:
- "mongo/db/basic_types.idl"
structs:
address_restriction:
description: "clientSource/serverAddress restriction pair"
strict: true


@ -70,7 +70,7 @@ structs:
fields:
resolveRolesDelayMS:
type: int
validator: { gte: 0 }
validator: {gte: 0}
waitForUserCacheInvalidationFailPoint:
description: Data for waitForUserCacheInvalidation fail point
fields:


@ -32,14 +32,14 @@ global:
server_parameters:
authSchemaVersion:
description: 'Read-only value describing the current auth schema version'
set_at: 'readonly'
description: "Read-only value describing the current auth schema version"
set_at: "readonly"
cpp_class:
name: AuthzVersionParameter
redact: false
startupAuthSchemaValidation:
description: 'Validate auth schema on startup'
description: "Validate auth schema on startup"
set_at: startup
cpp_vartype: bool
cpp_varname: gStartupAuthSchemaValidation
@ -47,8 +47,7 @@ server_parameters:
redact: false
allowRolesFromX509Certificates:
description:
Whether to allow roles contained in X509 certificates if X509 authentication is enabled
description: Whether to allow roles contained in X509 certificates if X509 authentication is enabled
set_at: startup
cpp_vartype: bool
cpp_varname: allowRolesFromX509Certificates


@ -44,7 +44,7 @@ structs:
description: "The authorization server's issuer identifier"
cpp_name: issuer
type: string
validator: { callback: 'HttpClient::endpointIsSecure' }
validator: {callback: "HttpClient::endpointIsSecure"}
authorization_endpoint:
description: "URL of the authorization server's authorization endpoint"
cpp_name: authorizationEndpoint
@ -64,4 +64,4 @@ structs:
description: "URL of the authorization server's jwks endpoint"
cpp_name: jwksUri
type: string
validator: { callback: 'HttpClient::endpointIsSecure' }
validator: {callback: "HttpClient::endpointIsSecure"}


@ -71,4 +71,3 @@ structs:
description: "Compact serialized JWT with signature"
cpp_name: JWT
type: string


@ -40,22 +40,22 @@ structs:
fields:
cluster:
# Conflicts with all other resource subfields
description: 'ResourcePattern::forClusterResource'
description: "ResourcePattern::forClusterResource"
type: safeBool
optional: true
anyResource:
# Conflicts with all other resource subfields
description: 'ResourcePattern::forAnyResource'
description: "ResourcePattern::forAnyResource"
type: safeBool
optional: true
db:
# Conflicts with all but 'collection' and 'system_buckets'
description: 'Used with anyNormalResource or partial/exact namespace match'
description: "Used with anyNormalResource or partial/exact namespace match"
type: string
optional: true
collection:
# Conflicts with all but 'db', which is required
description: 'Used with anyNormalResource or partial/exact namespace match'
description: "Used with anyNormalResource or partial/exact namespace match"
type: string
optional: true
system_buckets:


@ -32,7 +32,7 @@ global:
- "mongo/db/auth/sasl_options.h"
configs:
section: "SASL Options"
source: [ yaml ]
source: [yaml]
server_parameters:
authenticationMechanisms:
@ -41,7 +41,7 @@ server_parameters:
description: "The set of accepted authentication mechanisms"
set_at: startup
default:
expr: 'SASLGlobalParams::kDefaultAuthenticationMechanisms'
expr: "SASLGlobalParams::kDefaultAuthenticationMechanisms"
is_constexpr: false
on_update: "SASLGlobalParams::onSetAuthenticationMechanism"
cpp_varname: "saslGlobalParams.authenticationMechanisms"


@ -50,4 +50,3 @@ server_parameters:
gte: 1
lte: 86400
redact: false


@ -129,7 +129,8 @@ types:
- int
- decimal
- double
description: "Accepts any numerical type (including bool) which can be interpreted as a
description:
"Accepts any numerical type (including bool) which can be interpreted as a
true/false value"
cpp_type: bool
deserializer: "mongo::BSONElement::trueValue"
@ -137,7 +138,8 @@ types:
optionalBool:
bson_serialization_type: any
description: "An optional bool type that does not serialize unless explicitly set. Can be
description:
"An optional bool type that does not serialize unless explicitly set. Can be
used in place of boost::optional<bool> to provide more intuitive semantics,
since the standard optional will coerce to true if populated regardless of
its internal value."
@ -352,7 +354,8 @@ types:
IDLAnyTypeOwned:
bson_serialization_type: any
description: "Holds a BSONElement of any type. Does not require the backing BSON to stay
description:
"Holds a BSONElement of any type. Does not require the backing BSON to stay
alive."
cpp_type: "mongo::IDLAnyTypeOwned"
serializer: mongo::IDLAnyTypeOwned::serializeToBSON
@ -440,7 +443,7 @@ structs:
fields:
ok:
type: safeDouble
validator: { gte: 0.0, lte: 0.0 }
validator: {gte: 0.0, lte: 0.0}
stability: stable
code:
type: int
@ -463,7 +466,7 @@ structs:
batchSize:
type: safeInt64
optional: true
validator: { gte: 0 }
validator: {gte: 0}
stability: stable
Collation:
@ -488,7 +491,7 @@ structs:
strength:
type: safeInt
default: static_cast<int>(CollationStrength::kTertiary)
validator: { gte: 0, lte: 5 }
validator: {gte: 0, lte: 5}
stability: stable
# Order numbers based on numerical order and not lexicographic order.
numericOrdering:


@ -50,16 +50,16 @@ structs:
strict: true
fields:
v:
description: 'Index spec version'
description: "Index spec version"
type: safeInt
default: 2
stability: stable
key:
description: 'Key to index on'
description: "Key to index on"
type: object_owned
stability: stable
name:
description: 'Descriptive name for the index'
description: "Descriptive name for the index"
type: string
optional: true
stability: stable
@ -74,9 +74,9 @@ structs:
indexSpec:
type: ClusteredIndexSpec
legacyFormat:
description: 'Legacy format means the clustered information was specified as
description:
"Legacy format means the clustered information was specified as
{clusteredIndex: true} and the cluster key defaults to _id. This should
only be supported for certain internal collections (e.g: time-series
buckets collections)'
buckets collections)"
type: safeBool


@ -40,12 +40,13 @@ structs:
description: "Change streams pre- and post-images options."
fields:
expireAfterSeconds:
description: "The number of seconds after which a pre-image is eligible for
description:
"The number of seconds after which a pre-image is eligible for
deletion. A string value 'off' enables the default expiration policy."
stability: stable
type:
variant: [string, safeInt64]
default: "\"off\""
default: '"off"'
ChangeStreamOptions:
description: "A specification for the change streams options."
inline_chained_structs: true


@ -37,14 +37,16 @@ structs:
description: "The parameters associated with 'setChangeStreamState' and 'getChangeStreamState' commands."
fields:
enabled:
description: "Represents the state of the change stream of a tenant. If true, then
description:
"Represents the state of the change stream of a tenant. If true, then
the change stream should be enabled, false otherwise. The corresponding
value is also returned by the 'getChangeStreamState' command"
type: bool
commands:
setChangeStreamState:
description: "The command to set the state of the change stream in the serverless for a
description:
"The command to set the state of the change stream in the serverless for a
particular tenant."
command_name: setChangeStreamState
cpp_name: SetChangeStreamStateCommandRequest
@ -53,7 +55,8 @@ commands:
chained_structs:
ChangeStreamStateParameters: ChangeStreamStateParameters
getChangeStreamState:
description: "The command to get the state of the change stream in the serverless for a
description:
"The command to get the state of the change stream in the serverless for a
particular tenant."
command_name: getChangeStreamState
cpp_name: GetChangeStreamStateCommandRequest


@ -37,16 +37,18 @@ imports:
structs:
ChangeStreamsClusterParameterStorage:
description: "A specification for the 'changeStreams' cluster-wide configuration parameter
type."
description: >-
A specification for the 'changeStreams' cluster-wide configuration parameter
type.
inline_chained_structs: true
chained_structs:
ClusterServerParameter: clusterServerParameter
fields:
expireAfterSeconds:
description: "The number of seconds to retain the change events. This value will be a
description: >-
The number of seconds to retain the change events. This value will be a
non-zero positive value if the change stream is enabled and a zero value if the change
stream is disabled."
stream is disabled.
type: safeInt64
default: 60 * 60
@ -65,7 +67,7 @@ server_parameters:
changeCollectionExpiredDocumentsRemoverJobSleepSeconds:
description: "Specifies the number of seconds for which the periodic change collection remover job will sleep between each cycle."
set_at: [ startup ]
set_at: [startup]
cpp_vartype: AtomicWord<int>
cpp_varname: "gChangeCollectionExpiredDocumentsRemoverJobSleepSeconds"
validator:
@ -75,7 +77,7 @@ server_parameters:
disableExpiredChangeCollectionRemover:
description: Disables the expired change collection remover.
set_at: [ startup ]
set_at: [startup]
cpp_vartype: bool
cpp_varname: gChangeCollectionRemoverDisabled
default: false


@ -33,5 +33,5 @@ server_parameters:
description: "Skip corrupt documents when cloning"
cpp_vartype: AtomicWord<bool>
cpp_varname: gSkipCorruptDocumentsWhenCloning
set_at: [ startup, runtime ]
set_at: [startup, runtime]
redact: false


@ -32,13 +32,13 @@ global:
- "mongo/db/server_options.h"
- "mongo/db/server_options_base.h"
configs:
section: 'General options'
source: [ cli, ini, yaml ]
section: "General options"
source: [cli, ini, yaml]
initializer:
register: addClusterAuthModeServerOption
configs:
'security.clusterAuthMode':
"security.clusterAuthMode":
description: >-
Authentication mode used for cluster authentication. Alternatives are
(keyFile|sendKeyFile|sendX509|x509)


@ -125,14 +125,16 @@ structs:
optional: true
stability: stable
validationLevel:
description: "Determines how strictly to apply the validation rules to existing
description:
"Determines how strictly to apply the validation rules to existing
documents during an update.
Can be one of following values: 'off', 'strict' or 'moderate'."
type: ValidationLevel
optional: true
stability: stable
validationAction:
description: "Determines whether to error on invalid documents or just warn about
description:
"Determines whether to error on invalid documents or just warn about
the violations but allow invalid documents to be inserted.
Can be either 'warn' or 'error'."
type: ValidationAction
@ -147,7 +149,8 @@ structs:
validator:
callback: create_command_validation::validateViewOnNotEmpty
pipeline:
description: "An array that consists of the aggregation pipeline. Defines the view
description:
"An array that consists of the aggregation pipeline. Defines the view
by applying the specified pipeline to the 'viewOn' collection or
view."
type: array<object>
@ -159,7 +162,8 @@ structs:
optional: true
stability: unstable
expireAfterSeconds:
description: "The number of seconds after which old data should be deleted. This can
description:
"The number of seconds after which old data should be deleted. This can
be disabled by passing in 'off' as a value"
optional: true
stability: stable
@ -186,7 +190,8 @@ structs:
type: safeBool
stability: unstable
dryRun:
description: "Runs the requested modification without modifying any database state.
description:
"Runs the requested modification without modifying any database state.
This can be used to determine in advance if a particular collMod
request can be completed without errors."
optional: true
@ -244,4 +249,3 @@ commands:
chained_structs:
CollModRequest: CollModRequest
reply_type: CollModReply


@ -37,14 +37,14 @@ enums:
description: Acceptable values for the buildInfoAuthMode server parameter.
type: string
values:
kRequiresAuth : 'requiresAuth'
kVersionOnlyIfPreAuth : 'versionOnlyIfPreAuth'
kAllowedPreAuth : 'allowedPreAuth'
kRequiresAuth: "requiresAuth"
kVersionOnlyIfPreAuth: "versionOnlyIfPreAuth"
kAllowedPreAuth: "allowedPreAuth"
server_parameters:
buildInfoAuthMode:
description: "Control the level of authorization required to run the buildInfo command"
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_class: BuildInfoAuthModeServerParameter
redact: false
@ -52,7 +52,7 @@ commands:
buildInfo:
description: "Return information about the current build environment"
namespace: ignored
api_version: ''
api_version: ""
command_name: buildInfo
command_alias: buildinfo
cpp_name: BuildInfoCommand


@ -52,7 +52,7 @@ commands:
description: "Minimum amount of space recoverable for compaction to proceed."
optional: true
type: safeInt64
validator: { gte: 1 }
validator: {gte: 1}
namespace: concatenate_with_db
strict: true
@ -68,7 +68,7 @@ commands:
description: "Minimum amount of space recoverable for compaction to proceed."
optional: true
type: safeInt64
validator: { gte: 1 }
validator: {gte: 1}
runOnce:
description: "Run compact once on every file on the node."
type: bool


@ -35,28 +35,28 @@ imports:
structs:
ConnectionStatusReplyAuthInfo:
description: 'Currently authenticated users and roles, and optional privileges'
description: "Currently authenticated users and roles, and optional privileges"
fields:
authenticatedUsers:
description: 'Currently authenticated users'
description: "Currently authenticated users"
type: array<UserName>
authenticatedUserRoles:
description: 'Currently held roles for authenticated users'
description: "Currently held roles for authenticated users"
type: array<RoleName>
authenticatedUserPrivileges:
description: 'Currently authorized privileges across granted roles'
description: "Currently authorized privileges across granted roles"
type: array<ParsedPrivilege>
optional: true
ConnectionStatusReply:
description: 'Reply to connectionStatus command'
description: "Reply to connectionStatus command"
is_command_reply: true
fields:
authInfo:
description: 'Authenticated users and roles'
description: "Authenticated users and roles"
type: ConnectionStatusReplyAuthInfo
uuid:
description: 'UUID of the current client'
description: "UUID of the current client"
type: uuid
commands:
@ -69,7 +69,6 @@ commands:
reply_type: ConnectionStatusReply
fields:
showPrivileges:
description: 'Return detailed privileges'
description: "Return detailed privileges"
type: safeBool
default: false


@ -42,12 +42,12 @@ imports:
structs:
CreateCommandReply:
description: 'Reply from the {create: ...} command'
description: "Reply from the {create: ...} command"
strict: true
is_command_reply: true
fields:
note:
description: 'Warnings or other additional information'
description: "Warnings or other additional information"
type: string
optional: true
stability: stable
@ -57,7 +57,8 @@ structs:
strict: true
fields:
capped:
description: "Specify true to create a capped collection. If you specify true, you
description:
"Specify true to create a capped collection. If you specify true, you
must also set a maximum size in the 'size' field."
type: safeBool
default: false
@ -78,7 +79,8 @@ structs:
lte: 1125899906842624 # 1 PB
stability: unstable
max:
description: "The maximum number of documents allowed in the capped collection. The
description:
"The maximum number of documents allowed in the capped collection. The
'size' limit takes precedence over this limit."
type: safeInt64
optional: true
@ -86,7 +88,8 @@ structs:
lt: 2147483648 # 2^31
stability: unstable
storageEngine:
description: "Specify a configuration to the storage engine on a per-collection
description:
"Specify a configuration to the storage engine on a per-collection
basis when creating a collection."
type: object
optional: true
@ -97,27 +100,31 @@ structs:
optional: true
stability: stable
validationLevel:
description: "Determines how strictly to apply the validation rules to existing
description:
"Determines how strictly to apply the validation rules to existing
documents during an update.
Can be one of following values: 'off', 'strict' or 'moderate'."
type: ValidationLevel
optional: true
stability: stable
validationAction:
description: "Determines whether to error on invalid documents or just warn about
description:
"Determines whether to error on invalid documents or just warn about
the violations but allow invalid documents to be inserted.
Can be either 'warn' or 'error'."
type: ValidationAction
optional: true
stability: stable
indexOptionDefaults:
description: "Allows users to specify a default configuration for indexes when
description:
"Allows users to specify a default configuration for indexes when
creating a collection."
type: IndexOptionDefaults
optional: true
stability: unstable
viewOn:
description: "The name of the source collection or view from which to create the
description:
"The name of the source collection or view from which to create the
view."
type: string
optional: true
@ -125,7 +132,8 @@ structs:
validator:
callback: create_command_validation::validateViewOnNotEmpty
pipeline:
description: "An array that consists of the aggregation pipeline. Creates the view
description:
"An array that consists of the aggregation pipeline. Creates the view
by applying the specified pipeline to the 'viewOn' collection or
view."
type: array<object>
@ -147,7 +155,8 @@ structs:
optional: true
stability: stable
clusteredIndex:
description: "Specifies whether this collection should have a clusteredIndex.
description:
"Specifies whether this collection should have a clusteredIndex.
Boolean is accepted as the legacy clustered index format for specific internal
collections - and implies clustering by _id. Otherwise, clusters according to
the ClusteredIndexSpec."
@ -156,7 +165,8 @@ structs:
optional: true
stability: stable
recordIdsReplicated:
description: "Specifies whether this collection should utilize the same recordId for a given document
description:
"Specifies whether this collection should utilize the same recordId for a given document
across all nodes in a replica set."
type: safeBool
optional: true
@ -211,10 +221,10 @@ commands:
action_type: find
- privilege: # Can be triggered by aggregation
resource_pattern: exact_namespace
action_type: [remove, update, collStats, indexStats, planCacheRead]
action_type:
[remove, update, collStats, indexStats, planCacheRead]
reply_type: CreateCommandReply
strict: true
inline_chained_structs: true
chained_structs:
CreateCollectionRequest: CreateCollectionRequest


@ -31,8 +31,8 @@ global:
server_parameters:
featureCompatibilityVersion:
description: 'Read-only view of current Feature Compatability Version'
set_at: 'readonly'
description: "Read-only view of current Feature Compatability Version"
set_at: "readonly"
cpp_class:
name: FeatureCompatibilityVersionParameter
redact: false
@ -49,7 +49,7 @@ server_parameters:
redact: false
defaultStartupFCV:
description: 'Startup parameter to set a default FCV at startup'
description: "Startup parameter to set a default FCV at startup"
set_at: startup
cpp_vartype: std::string
cpp_varname: gDefaultStartupFCV


@ -43,7 +43,7 @@ structs:
stats: CompactStats
commands:
compactStructuredEncryptionData :
compactStructuredEncryptionData:
description: "Parser for the 'compactStructuredEncryptionData' command"
command_name: compactStructuredEncryptionData
api_version: ""


@ -96,7 +96,6 @@ structs:
fields:
tokens: array<QECountInfoReplyTokens>
QECountInfoRequestTokens:
description: "A ESC token to lookup in ESC"
strict: true
@ -117,7 +116,6 @@ structs:
fields:
tokens: array<QECountInfoRequestTokens>
QECountInfosReply:
description: "Reply from the {getQueryableEncryptionCountInfo: ...} command"
# MongoS/Txn add fields to the reply we want to ignore
@ -152,4 +150,3 @@ commands:
queryType:
description: "Purpose of command, either for insert, query, compact, or cleanup"
type: QECountInfoQueryType


@ -28,7 +28,6 @@
global:
cpp_namespace: "mongo"
imports:
- "mongo/db/basic_types.idl"


@ -33,7 +33,8 @@ imports:
commands:
internalRenameIfOptionsAndIndexesMatch:
description: "An internal command that does a rename, but first checks to make sure the
description:
"An internal command that does a rename, but first checks to make sure the
indexes and collection options on the destination match those given in the
command."
command_name: internalRenameIfOptionsAndIndexesMatch
@ -45,10 +46,10 @@ commands:
to:
type: namespacestring
collectionOptions:
description: "An object representing the options on the from collection with the
description:
"An object representing the options on the from collection with the
same format as the options from the listCollections command."
type: object
indexes:
description: "An object with form {indexName: {spec}, indexName: {spec}, ...}"
type: array<object>


@ -62,7 +62,8 @@ commands:
api_version: ""
fields:
useClusterClient:
description: "Whether the transaction API client used should opt into running the
description:
"Whether the transaction API client used should opt into running the
'cluster' versions of commands that enables a non-router node to run
the router versions of commands. Only meaningful on mongod because a
mongos will always run 'cluster' commands."


@ -28,7 +28,6 @@
global:
cpp_namespace: "mongo"
imports:
- "mongo/db/auth/access_checks.idl"
- "mongo/db/auth/action_type.idl"


@ -28,7 +28,6 @@
global:
cpp_namespace: "mongo"
imports:
- "mongo/db/auth/access_checks.idl"
- "mongo/db/auth/action_type.idl"
@ -75,7 +74,8 @@ structs:
commands:
listDatabasesForAllTenants:
description: "listDatabasesForAllTenants Command: lists all databases for all tenants and
description:
"listDatabasesForAllTenants Command: lists all databases for all tenants and
can only be run if authenticated with internal __system role"
command_name: "listDatabasesForAllTenants"
cpp_name: ListDatabasesForAllTenantsCommand


@ -38,13 +38,13 @@ structs:
is_command_reply: true
fields:
resourceId:
description: 'The resource on which the locks are being held or pending.'
description: "The resource on which the locks are being held or pending."
type: string
granted:
description: 'An array of documents. Each document provides information on locks that are currently granted on the resource.'
description: "An array of documents. Each document provides information on locks that are currently granted on the resource."
type: array<object>
pending:
description: 'An array of documents. Each document provides information on locks that are currently pending on the resource.'
description: "An array of documents. Each document provides information on locks that are currently pending on the resource."
type: array<object>
LockInfoReply:
@ -52,13 +52,13 @@ structs:
is_command_reply: true
fields:
lockInfo:
description: 'The lock information for each resource.'
description: "The lock information for each resource."
type: array<lockInfoElement>
optional: true
ok:
description: 'Result of the command execution.'
description: "Result of the command execution."
type: safeDouble
validator: { gte: 0.0, lte: 1.0 }
validator: {gte: 0.0, lte: 1.0}
commands:
lockInfoCommand:
@ -72,4 +72,3 @@ commands:
description: "Dump storage engine debug information to the logs"
type: bool
reply_type: LockInfoReply


@ -1,5 +1,4 @@
# Copyright (C) 2022-present MongoDB, Inc.
# Copyright (C) 2022-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the Server Side Public License, version 1,
@ -27,7 +26,6 @@
# it in the license file.
#
global:
cpp_namespace: "mongo"
cpp_includes:
@ -39,7 +37,6 @@ imports:
- "mongo/s/sharding_types.idl"
structs:
CollectionResharded:
description: "Event describing the details of a committing reshardCollection operation."
strict: false
@ -83,7 +80,8 @@ commands:
_shardsvrNotifyShardingEvent:
command_name: _shardsvrNotifyShardingEvent
cpp_name: ShardsvrNotifyShardingEventRequest
description: "Internal command to be invoked by the config server to notify a shard
description:
"Internal command to be invoked by the config server to notify a shard
of an event concerning the shard itself or the whole cluster."
namespace: ignored
api_version: ""
@ -93,9 +91,9 @@ commands:
eventType:
type: string
description: "The type of sharding event"
validator: { callback: "notify_sharding_event::validateEventType" }
validator:
{callback: "notify_sharding_event::validateEventType"}
details:
type: object_owned
description: "A descriptor of the sharding event
(to be serialized and interpreted based on the value of eventType)."


@ -43,7 +43,6 @@ types:
is_view: true
commands:
profile:
description: "Parser for the 'profile' command."
command_name: "profile"
@ -63,7 +62,8 @@ commands:
optional: true
filter:
type: ObjectOrUnset
description: "A query predicate that determines which ops are logged/profiled. This is
description:
"A query predicate that determines which ops are logged/profiled. This is
an alternative to slowms and sampleRate. The special value 'unset' removes
the filter."
optional: true
@ -78,5 +78,6 @@ commands:
fields:
filter:
type: ObjectOrUnset
description: "A query predicate that determines which ops are logged/profiled on a global
description:
"A query predicate that determines which ops are logged/profiled on a global
level. The special value 'unset' removes the filter."


@ -47,7 +47,8 @@ imports:
types:
BulkWriteReplyItem:
bson_serialization_type: object
description: "Structure used to report a single reply resulting from a batch write
description:
"Structure used to report a single reply resulting from a batch write
command."
cpp_type: "mongo::BulkWriteReplyItem"
serializer: "mongo::BulkWriteReplyItem::serialize"
@ -56,7 +57,6 @@ types:
is_view: false
structs:
BulkWriteInsertOp:
description: "A struct representing inserts in the 'bulkWrite' command."
strict: true
@ -64,7 +64,7 @@ structs:
insert:
description: "Insert command which references the namespace index in 'nsInfo'."
type: safeInt
validator: { gte: 0 }
validator: {gte: 0}
stability: stable
document:
description: "The document to insert into the given collection"
@ -78,21 +78,24 @@ structs:
update:
description: "Update command which references the namespace index in 'nsInfo'."
type: safeInt
validator: { gte: 0 }
validator: {gte: 0}
stability: stable
filter:
description: "The query that matches documents to update. Uses the same query
description:
"The query that matches documents to update. Uses the same query
selectors as used in the 'find' operation."
type: object
stability: stable
sort:
description: "Determines which document the operation modifies if the query selects
description:
"Determines which document the operation modifies if the query selects
multiple documents."
type: object
optional: true
stability: unstable
multi:
description: "If true, updates all documents that meet the query criteria. If false,
description:
"If true, updates all documents that meet the query criteria. If false,
limits the update to one document which meets the query criteria."
type: bool
default: false
@ -102,14 +105,16 @@ structs:
type: update_modification
stability: stable
upsert:
description: "If true, perform an insert if no documents match the query. If both
description:
"If true, perform an insert if no documents match the query. If both
upsert and multi are true and no documents match the query, the update
operation inserts only a single document."
type: bool
default: false
stability: stable
upsertSupplied:
description: "Only applicable when upsert is true. If set, and if no documents match
description:
"Only applicable when upsert is true. If set, and if no documents match
the query, the update subsystem will insert the document supplied as
'constants.new' rather than generating a new document from the update spec."
type: optionalBool
@ -125,7 +130,8 @@ structs:
default: mongo::BSONObj()
stability: stable
constants:
description: "Specifies constant values that can be referred to in the pipeline
description:
"Specifies constant values that can be referred to in the pipeline
performing a custom update."
type: object
optional: true
@ -141,7 +147,8 @@ structs:
optional: true
stability: internal
$_allowShardKeyUpdatesWithoutFullShardKeyInQuery:
description: "Set to true if shard key updates are allowed without the full shard
description:
"Set to true if shard key updates are allowed without the full shard
key in the query."
type: optionalBool
cpp_name: allowShardKeyUpdatesWithoutFullShardKeyInQuery
@ -155,15 +162,17 @@ structs:
description: "Delete command which references the namespace index in 'nsInfo'."
cpp_name: deleteCommand # `delete` is a C++ keyword.
type: safeInt
validator: { gte: 0 }
validator: {gte: 0}
stability: stable
filter:
description: "The query that matches documents to delete. Uses the same query
description:
"The query that matches documents to delete. Uses the same query
selectors as used in the 'find' operation."
type: object
stability: stable
multi:
description: "If true, deletes all documents that meet the query criteria. If false,
description:
"If true, deletes all documents that meet the query criteria. If false,
limits the delete to one document which meets the query criteria."
type: bool
default: false
@ -223,19 +232,22 @@ structs:
optional: true
stability: unstable
isTimeseriesNamespace:
description: "This flag is set to true when the command was originally sent to
description:
"This flag is set to true when the command was originally sent to
mongos on the time-series view, but got rewritten to target
time-series buckets namespace before being sent to shards."
type: optionalBool
stability: internal
shardVersion:
description: "The shard version of the collection. This is to be attached by
description:
"The shard version of the collection. This is to be attached by
mongos before being sent to a shard."
type: shard_version
optional: true
stability: internal
databaseVersion:
description: "The database version of the collection. This is to be attached by
description:
"The database version of the collection. This is to be attached by
mongos before being sent to a shard."
type: database_version
optional: true
@ -291,13 +303,15 @@ structs:
optional: true
stability: stable
retriedStmtIds:
description: "The statement numbers for the write statements that had already been
description:
"The statement numbers for the write statements that had already been
executed, thus were not executed by this command."
type: array<int>
optional: true
stability: unstable
writeConcernError:
description: "WriteConcernError returned when this command was run. In case multiple
description:
"WriteConcernError returned when this command was run. In case multiple
errors were returned from various shards, the errors are concatenated
in the error message."
type: BulkWriteWriteConcernError
@ -316,7 +330,8 @@ commands:
simple:
privilege:
resource_pattern: exact_namespace
action_type: [bypassDocumentValidation, find, insert, update, remove]
action_type:
[bypassDocumentValidation, find, insert, update, remove]
reply_type: BulkWriteCommandReply
fields:
ops:
@ -335,13 +350,15 @@ commands:
optional: true
stability: stable
bypassDocumentValidation:
description: "Enables the operation to bypass document validation. This lets you
description:
"Enables the operation to bypass document validation. This lets you
write documents that do not meet the validation requirements."
type: safeBool
default: false
stability: stable
ordered:
description: "If true, then when a write statement fails, the bulkWrite command returns
description:
"If true, then when a write statement fails, the bulkWrite command returns
without executing the remaining statements. If false, then statements
are allowed to be executed in parallel and if a statement fails,
continue with the remaining statements, if any."
@ -369,13 +386,15 @@ commands:
default: false
stability: stable
bypassEmptyTsReplacement:
description: "Only applicable for inserts and replacement updates. If set to true,
description:
"Only applicable for inserts and replacement updates. If set to true,
any empty timestamps (Timestamp(0,0)) in 'documents' or 'u' will not
be replaced by the current time and instead will be preserved as-is."
type: optionalBool
stability: unstable
$_originalQuery:
description: "The original write query. This is used for updateOne/deleteOne
description:
"The original write query. This is used for updateOne/deleteOne
without shard key during the write phase of the two phase protocol in
order to make sure the shard key query analysis stores the correct
client query."
@ -384,7 +403,8 @@ commands:
cpp_name: originalQuery
stability: internal
$_originalCollation:
description: "The original write query. This is used for updateOne/deleteOne
description:
"The original write query. This is used for updateOne/deleteOne
without shard key during the write phase of the two phase protocol in
order to make sure the shard key query analysis stores the correct
client collation."
@ -392,4 +412,3 @@ commands:
optional: true
cpp_name: originalCollation
stability: internal


@ -48,7 +48,7 @@ commands:
verbosity:
description: "The verbosity for explain command."
type: Verbosity
default : kExecAllPlans
default: kExecAllPlans
stability: stable
# Dummy reply type as we won't use it to parse explain reply.
reply_type: OkReply


@ -73,7 +73,6 @@ types:
# Deserializer uses getOwned.
is_view: false
commands:
mapReduce:
description: "The MapReduce command."
@ -84,11 +83,13 @@ commands:
strict: true
fields:
map:
description: "Javascript code to run as the map operation which associates a value
description:
"Javascript code to run as the map operation which associates a value
with a key and emits the key and value pair."
type: mapReduceJavascriptCodeType
reduce:
description: "Javascript code to run as the map operation which reduces all the
description:
"Javascript code to run as the map operation which reduces all the
values associated with a particular key to a single value."
type: mapReduceJavascriptCodeType
out:
@ -96,7 +97,8 @@ commands:
type: mapReduceOutOptionsType
cpp_name: outOptions
query:
description: "Query object in match language to use as a filter applied before the
description:
"Query object in match language to use as a filter applied before the
map step."
type: object
optional: true
@ -121,12 +123,14 @@ commands:
type: mapReduceGlobalVariableScopeType
optional: true
verbose:
description: "Specifies whether to include the timing information in the result
description:
"Specifies whether to include the timing information in the result
information."
type: bool
optional: true
bypassDocumentValidation:
description: "Causes the out portion of the operation to ignore the output
description:
"Causes the out portion of the operation to ignore the output
collection's document validation."
type: bool
optional: true


@ -106,7 +106,7 @@ commands:
strict: true
fields:
indexes:
description: 'Search indexes to be created'
description: "Search indexes to be created"
type: array<IndexDefinition>
optional: false


@ -66,7 +66,8 @@ commands:
# stayTemp is an internal attribute, which is intentionally undocumented and
# not part of the stable API.
stayTemp:
description: "If true, the original collection will remain temp if it was temp
description:
"If true, the original collection will remain temp if it was temp
before the rename."
type: bool
default: false


@ -46,9 +46,9 @@ commands:
description: "Size of the oplog in MB"
type: safeDouble
optional: true
validator: { gte: 990, lte: 1073741824 } # gte 990MB; lte 1PB (in MB)
validator: {gte: 990, lte: 1073741824} # gte 990MB; lte 1PB (in MB)
minRetentionHours:
description: "Minimum number of hours to preserve oplog entry."
type: safeDouble
optional: true
validator: { gte: 0 }
validator: {gte: 0}


@ -28,7 +28,6 @@
global:
cpp_namespace: "mongo"
imports:
- "mongo/db/basic_types.idl"
- "mongo/db/repl/read_concern_args.idl"
@ -43,12 +42,14 @@ structs:
RWConcernDefault: RWConcernDefault
fields:
inMemory:
description: "Whether the value came from the in-memory cache or it reflects the
description:
"Whether the value came from the in-memory cache or it reflects the
defaults which were persisted as of the time of invocation."
optional: true
type: bool
localUpdateWallClockTime:
description: "The wallclock time of when the default read or write concern was
description:
"The wallclock time of when the default read or write concern was
applied to the cache of the node, which returned the response. This
value is only informational and must not be used for any recency
comparisons."


@ -69,7 +69,8 @@ commands:
type: safeBool
optional: true
fromConfigServer:
description: "A boolean that indicates whether the command is being requested by a
description:
"A boolean that indicates whether the command is being requested by a
config server. Normally FCV upgrades between last-lts and anything
less than latest are not allowed, but in a sharded cluster, newly
started shard nodes start in lastLTS and this parameter is required in
@ -78,14 +79,16 @@ commands:
type: safeBool
optional: true
phase:
description: "An enum that indicates whether the the command is requesting the shard
description:
"An enum that indicates whether the the command is requesting the shard
to enter phase-1 (kStart) or phase-2 (kComplete) of the 2-phase setFCV
protocol. Only valid to be specified for shards. If not specified on a
shard, it will run the full setFCV sequence (both phases)."
type: SetFCVPhase
optional: true
changeTimestamp:
description: "Timestamp used to identify the 2-phase setFCV request. Both phases
description:
"Timestamp used to identify the 2-phase setFCV request. Both phases
(kStart and kComplete) must have the same timestamp for the entire
sequence, and every new sequence started must strictly have
incrementing timestamp."


@ -48,11 +48,13 @@ commands:
fields:
indexNames:
type: array<string>
description: "an array of all index names matching a single prior createIndexes
description:
"an array of all index names matching a single prior createIndexes
request."
commitQuorum:
type: CommitQuorum
description: "commitQuorum can be set to the same values as writeConcern.w and
description:
"commitQuorum can be set to the same values as writeConcern.w and
indicates how many and/or which replica set members must be ready to
commit the index build before the primary will proceed to commit the
index build."


@ -27,7 +27,7 @@
#
global:
cpp_namespace: 'mongo'
cpp_namespace: "mongo"
cpp_includes:
- "mongo/db/views/resolved_view.h"
@ -36,7 +36,8 @@ imports:
types:
resolved_view:
description: "custom IDL type that wraps the cpp_type ResolvedView, used by ShardsvrResolveViewReply.
description:
"custom IDL type that wraps the cpp_type ResolvedView, used by ShardsvrResolveViewReply.
The custom serialize and deserialize methods instruct IDL how to covert to/from C++ and BSON. "
bson_serialization_type: any
cpp_type: "mongo::ResolvedView"
@ -58,7 +59,8 @@ commands:
_shardsvrResolveView:
command_name: _shardsvrResolveView
cpp_name: ShardsvrResolveView
description: "Mongos issues this command on the primary shard to resolve the view for search index commands,
description:
"Mongos issues this command on the primary shard to resolve the view for search index commands,
which require the view's name and effectivePipeline included in the request"
namespace: ignored
api_version: ""
@ -69,5 +71,3 @@ commands:
description: "current operation namespace"
type: namespacestring
optional: false


@ -52,9 +52,9 @@ structs:
in shardsvrRunSearchIndexCommand.userCmd.
fields:
ok:
description: 'Result of the command execution.'
description: "Result of the command execution."
type: safeDouble
validator: { gte: 0.0, lte: 1.0 }
validator: {gte: 0.0, lte: 1.0}
optional: false
cursor:
description: "Mongot responds with this field only for $listSearchIndexes"
@ -89,9 +89,9 @@ structs:
type: SearchIndexManagerResponse
optional: true
ok:
description: 'Result of the command execution.'
description: "Result of the command execution."
type: safeDouble
validator: { gte: 0.0, lte: 1.0 }
validator: {gte: 0.0, lte: 1.0}
optional: false
$configTime:
description: The time component that represents the config time.
@ -112,7 +112,6 @@ structs:
type: logicalTime
optional: true
commands:
shardsvrRunSearchIndexCommand:
command_name: _shardsvrRunSearchIndexCommand


@ -77,4 +77,3 @@ commands:
description: "The profile mode."
type: ProfileMode
default: record


@ -33,11 +33,10 @@ global:
server_parameters:
enableTestCommands:
description: 'Enable test commands'
description: "Enable test commands"
set_at: startup
cpp_vartype: bool
cpp_varname: gEnableTestCommands
on_update: "onUpdateTestCommandsEnabled"
default: false
redact: false


@ -34,7 +34,8 @@ imports:
structs:
AdditionalParticipantInfo:
description: "Contains information about a participant added to the transaction by
description:
"Contains information about a participant added to the transaction by
another participant"
strict: false
fields:
@ -42,18 +43,21 @@ structs:
description: "The name of the shard that was added as a participant"
type: shard_id
readOnly:
description: "True if the shard has the transaction in progress but has not done a
description:
"True if the shard has the transaction in progress but has not done a
write for it"
optional: true
type: bool
TxnResponseMetadata:
description: "Contains fields a participant attaches in responses to successful transaction
description:
"Contains fields a participant attaches in responses to successful transaction
statements"
strict: false
fields:
readOnly:
description: "True if the shard has the transaction in progress but has not done a
description:
"True if the shard has the transaction in progress but has not done a
write for it"
optional: true
type: bool
@ -80,13 +84,15 @@ structs:
commitTimestamp:
type: timestamp
optional: true
description: "Specifies the timestamp at which the multi-document transaction should
description:
"Specifies the timestamp at which the multi-document transaction should
be committed. Required for prepared transactions, not permitted for
unprepared ones."
count:
type: long
optional: true
description: "The number of in-transaction operations applied in a transaction.
description:
"The number of in-transaction operations applied in a transaction.
This field is required only for unprepared transactions."
AbortTransactionOplogObject:
@ -108,13 +114,15 @@ commands:
none: true
fields:
commitTimestamp:
description: "Timestamp at which to commit the transaction. Required for prepared
description:
"Timestamp at which to commit the transaction. Required for prepared
transactions and illegal for non-prepared ones."
optional: true
type: timestamp
stability: stable
recoveryToken:
description: "A mongos that doesn't know about this transaction can attempt to make
description:
"A mongos that doesn't know about this transaction can attempt to make
progress on commit by processing using the info in the recoveryToken."
optional: true
type: TxnRecoveryToken
@ -132,7 +140,8 @@ commands:
none: true
fields:
recoveryToken:
description: "A mongos that doesn't know about this transaction can attempt to make
description:
"A mongos that doesn't know about this transaction can attempt to make
progress on abort by processing using the info in the recoveryToken."
optional: true
type: TxnRecoveryToken


@ -95,7 +95,7 @@ structs:
commitDelayMS:
type: int
default: 0
validator: { gte: 0 }
validator: {gte: 0}
commands:
createUser:
@ -441,4 +441,3 @@ commands:
description: "Drop temp collections when complete"
type: bool
default: false


@ -82,4 +82,3 @@ commands:
optional: true
apiParameters:
type: APIParamsForCmd


@ -39,7 +39,8 @@ commands:
voteCommitIndexBuild:
command_name: voteCommitIndexBuild
cpp_name: VoteCommitIndexBuild
description: "An internal mongod command pertaining to cross replica set index builds.
description:
"An internal mongod command pertaining to cross replica set index builds.
Can only be run on a primary node to signal that the node identified with 'hostAndPort'
is ready to commit the index build identified by the UUID in the command."
strict: false
@ -54,7 +55,8 @@ commands:
voteAbortIndexBuild:
command_name: voteAbortIndexBuild
cpp_name: VoteAbortIndexBuild
description: "An internal mongod command pertaining to cross replica set index builds.
description:
"An internal mongod command pertaining to cross replica set index builds.
Can only be run on primary node to request that an index build, indentified by UUID, be
aborted."
strict: false


@ -32,7 +32,6 @@ global:
imports:
- "mongo/db/basic_types.idl"
# XRAY is a instrumentation tracer that is part of LLVM.
# see https://llvm.org/docs/XRay.html for more information.
#


@ -31,18 +31,19 @@ global:
server_parameters:
enableTemporarilyUnavailableExceptions:
description: 'Enables the use of TemporarilyUnavailableExceptions. When disabled, reverts to
throwing WriteConflictException.'
set_at: [ startup, runtime ]
cpp_varname: 'gEnableTemporarilyUnavailableExceptions'
description:
"Enables the use of TemporarilyUnavailableExceptions. When disabled, reverts to
throwing WriteConflictException."
set_at: [startup, runtime]
cpp_varname: "gEnableTemporarilyUnavailableExceptions"
cpp_vartype: AtomicWord<bool>
default: true
redact: false
temporarilyUnavailableMaxRetries:
description: 'The number of times to retry a TemporarilyUnavailable error internally'
set_at: [ startup, runtime ]
cpp_varname: 'gTemporarilyUnavailableExceptionMaxRetryAttempts'
description: "The number of times to retry a TemporarilyUnavailable error internally"
set_at: [startup, runtime]
cpp_varname: "gTemporarilyUnavailableExceptionMaxRetryAttempts"
cpp_vartype: AtomicWord<long long>
default: 10
validator:
@ -50,11 +51,12 @@ server_parameters:
redact: false
temporarilyUnavailableBackoffBaseMs:
description: 'The base period of time to wait between each TemporarilyUnavailable retry
description:
"The base period of time to wait between each TemporarilyUnavailable retry
attempt. The backoff time is linear such that the Nth retry waits for N times
the base backoff period.'
set_at: [ startup, runtime ]
cpp_varname: 'gTemporarilyUnavailableExceptionRetryBackoffBaseMs'
the base backoff period."
set_at: [startup, runtime]
cpp_varname: "gTemporarilyUnavailableExceptionRetryBackoffBaseMs"
cpp_vartype: AtomicWord<long long>
default: 1000
validator:
@ -62,14 +64,15 @@ server_parameters:
redact: false
transactionTooLargeForCacheThreshold:
description: "Threshold on the proportion of total dirty cache bytes that the running
description:
"Threshold on the proportion of total dirty cache bytes that the running
transaction's dirty cache bytes can represent and still be considered
fulfillable on retry. If this threshold is exceeded, a
TransactionTooLargeForCache exception is thrown. Setting this parameter to 1.0
causes this check to be disabled, and TransactionTooLargeForCache exceptions
will not be thrown."
set_at: [ startup, runtime ]
cpp_varname: 'gTransactionTooLargeForCacheThreshold'
set_at: [startup, runtime]
cpp_varname: "gTransactionTooLargeForCacheThreshold"
cpp_vartype: AtomicWord<double>
default: 0.75
validator:
View File
@ -1,4 +1,3 @@
# Copyright (C) 2023-present MongoDB, Inc.
#
# This program is free software: you can redistribute it and/or modify
@ -34,7 +33,7 @@ server_parameters:
enableDetailedConnectionHealthMetricLogLines:
description: >-
Enables detailed log lines related to connection health in a cluster.
set_at: [ startup, runtime ]
set_at: [startup, runtime]
cpp_vartype: AtomicWord<bool>
cpp_varname: "gEnableDetailedConnectionHealthMetricLogLines"
default: true
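Together with the previous file, this hunk shows the formatted shape of a server_parameters entry: set_at as a flow sequence without inner padding, double-quoted cpp_varname, and a description either reflowed under its key or written as a folded scalar. A sketch of a complete entry; the parameter name and C++ variable are invented and do not exist in the server:

server_parameters:
    exampleRetryBudget:
        description: >-
            Hypothetical parameter shown only to illustrate the formatted layout
            of a server_parameters entry.
        set_at: [startup, runtime]
        cpp_varname: "gExampleRetryBudget"
        cpp_vartype: AtomicWord<long long>
        default: 3
        redact: false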
View File
@ -41,12 +41,12 @@ structs:
is_command_reply: true
fields:
numIndexesBefore:
description: 'Index count before create'
description: "Index count before create"
type: int
optional: true
stability: stable
numIndexesAfter:
description: 'Index count after create'
description: "Index count after create"
type: int
optional: true
stability: stable
@ -58,12 +58,12 @@ structs:
optional: true
stability: stable
commitQuorum:
description: 'Commit Quorum options used'
description: "Commit Quorum options used"
type: CommitQuorum
optional: true
stability: stable
note:
description: 'Optional warning/error related to createIndex'
description: "Optional warning/error related to createIndex"
type: string
optional: true
stability: stable
@ -75,16 +75,16 @@ structs:
strict: true
fields:
v:
description: 'Index spec version'
description: "Index spec version"
type: safeInt
optional: true
stability: stable
key:
description: 'Key to index on'
description: "Key to index on"
type: object_owned
stability: stable
name:
description: 'Descriptive name for the index'
description: "Descriptive name for the index"
type: string
stability: stable
ns:
@ -205,22 +205,23 @@ commands:
strict: true
fields:
v:
description: 'Index schema version'
description: "Index schema version"
type: safeInt
default: 2
stability: stable
indexes:
description: 'Indexes to be created'
description: "Indexes to be created"
# array<NewIndexSpec> but respect ignoreUnknownIndexOptions
type: array<object>
stability: stable
ignoreUnknownIndexOptions:
description: 'Ignore unknown options in index spec'
description: "Ignore unknown options in index spec"
type: safeBool
default: false
stability: unstable
commitQuorum:
description: "Dictates which or how many replica set members must be ready to commit
description:
"Dictates which or how many replica set members must be ready to commit
the index build before the primary will proceed to commit the index.
This minimizes replication lag by ensuring secondaries are ready for
commit and can quickly apply the commit on a finished index build"
@ -228,12 +229,14 @@ commands:
optional: true
stability: stable
returnOnStart:
description: "For a two-phase index build, whether the command should return upon
description:
"For a two-phase index build, whether the command should return upon
the index build starting rather than upon it committing/aborting."
type: optionalBool
stability: unstable
isTimeseriesNamespace:
description: "This flag is set to true when the command was originally sent to
description:
"This flag is set to true when the command was originally sent to
mongos on the time-series view, but got rewritten to target
time-series buckets namespace before being sent to shards."
type: bool
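Most of the createIndexes changes above are quote normalization: short single-quoted descriptions become double-quoted, and longer ones are reflowed as in the earlier files. For context, a sketch of how such field entries sit inside a full command definition; the command name, its C++ name, and its reply type are invented, while the individual keys (command_name, namespace, api_version, reply_type, strict, stability) all appear in the real files:

commands:
    exampleCount:
        description: "Hypothetical command shown only to illustrate the formatted layout."
        command_name: exampleCount
        cpp_name: ExampleCountCommand
        api_version: ""
        namespace: concatenate_with_db
        strict: true
        fields:
            filter:
                description: "Optional filter document"
                type: object
                optional: true
                stability: stable
        reply_type: ExampleCountReply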
View File
@ -58,7 +58,7 @@ structs:
optional: true
DBStats:
description: 'Reply from {dbStats: 1, scale: 1} command. Size fields are reduced by scale field.'
description: "Reply from {dbStats: 1, scale: 1} command. Size fields are reduced by scale field."
strict: false
is_command_reply: true
fields:
@ -131,14 +131,14 @@ commands:
command_name: collStats
command_alias: collstats
cpp_name: CollStatsCommand
api_version: ''
api_version: ""
namespace: concatenate_with_db
inline_chained_structs: true
chained_structs:
StorageStatsSpec: StorageStatsSpec
fields:
validationLevel:
description: 'Strictness of validation rules application.'
description: "Strictness of validation rules application."
type: ValidationLevel
optional: true
@ -153,7 +153,7 @@ commands:
command_name: dataSize
command_alias: datasize
cpp_name: DataSizeCommand
api_version: ''
api_version: ""
namespace: type
type: namespacestring
reply_type: DataSizeReply
@ -200,7 +200,7 @@ commands:
command_name: dbStats
command_alias: dbstats
cpp_name: DBStatsCommand
api_version: ''
api_version: ""
namespace: ignored
reply_type: DBStats
fields:
View File
@ -35,7 +35,8 @@ global:
structs:
DefaultMaxTimeMSParam:
description: "Cluster-wide default maxTimeMS used in query operations. When set to 0, operations
description:
"Cluster-wide default maxTimeMS used in query operations. When set to 0, operations
will not time out. If a query specifies an explicit 'maxTimeMS' value, it will
override this global default."
inline_chained_structs: true
@ -50,7 +51,8 @@ structs:
server_parameters:
defaultMaxTimeMS:
description: "Cluster-wide default maxTimeMS used in query operations. When set to 0, operations
description:
"Cluster-wide default maxTimeMS used in query operations. When set to 0, operations
will not time out. If a query specifies an explicit 'maxTimeMS' value, it will
override this global default."
set_at: cluster
View File
@ -40,7 +40,8 @@ structs:
is_command_reply: true
fields:
nIndexesWas:
description: Number of indexes on the collection at start of dropIndexes command
description:
Number of indexes on the collection at start of dropIndexes command
(mongod only)
type: int
optional: true
@ -56,7 +57,8 @@ structs:
strict: false
fields:
index:
description: An index name, or array of names, or "*" for all indexes, or an index
description:
An index name, or array of names, or "*" for all indexes, or an index
spec (an object).
type:
variant:
@ -65,7 +67,8 @@ structs:
- object
stability: stable
isTimeseriesNamespace:
description: "This flag is set to true when the command was originally sent to
description:
"This flag is set to true when the command was originally sent to
mongos on the time-series view, but got rewritten to target
time-series buckets namespace before being sent to shards."
type: optionalBool
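The index field above uses an IDL variant type, so a single field can accept several BSON shapes (an index name, an array of names, or an index spec document). The hunk only shows part of the alternative list, so the alternatives in this sketch are illustrative rather than copied from the file:

        index:
            description: "An index name, an array of names, or an index spec document."
            type:
                variant:
                    - string
                    - array<string>
                    - object
            stability: stable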
View File
@ -36,7 +36,7 @@ server_parameters:
batchedDeletesTargetStagedDocBytes:
description: "Threshold in bytes accounting for documents (not index entries) at which a batch of document deletions is committed. A value of zero means unlimited"
set_at: [startup, runtime]
cpp_vartype: 'AtomicWord<long long>'
cpp_vartype: "AtomicWord<long long>"
cpp_varname: gBatchedDeletesTargetStagedDocBytes
default: 2097152 # 2MB
validator:
@ -47,7 +47,7 @@ server_parameters:
batchedDeletesTargetBatchDocs:
description: "Threshold of documents at which a batch of document deletions is committed. A value of zero means unlimited"
set_at: [startup, runtime]
cpp_vartype: 'AtomicWord<long long>'
cpp_vartype: "AtomicWord<long long>"
cpp_varname: "gBatchedDeletesTargetBatchDocs"
default: 10
validator:
@ -57,7 +57,7 @@ server_parameters:
batchedDeletesTargetBatchTimeMS:
description: "Threshold in milliseconds of batch processing time at which a batch of document deletions is committed. A value of zero means unlimited"
set_at: [startup, runtime]
cpp_vartype: 'AtomicWord<long long>'
cpp_vartype: "AtomicWord<long long>"
cpp_varname: "gBatchedDeletesTargetBatchTimeMS"
default: 5
validator:
View File
@ -33,9 +33,10 @@ global:
server_parameters:
allowDiskUseByDefault:
description: "Allow queries which exceed their memory budget to spill to disk. This option can
be overridden at the per-query level."
set_at: [ startup, runtime ]
description: >-
Allow queries which exceed their memory budget to spill to disk. This option can
be overridden at the per-query level.
set_at: [startup, runtime]
cpp_varname: "allowDiskUseByDefault"
cpp_vartype: AtomicWord<bool>
default: true
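This hunk replaces a wrapped, double-quoted description with a YAML folded block scalar. With >- the line breaks inside the block are folded into single spaces and the trailing newline is stripped, so the two spellings below should yield the same string (the text itself is made up):

# Wrapped flow scalar: the line break inside the quotes is folded to a space.
description: "Allow the example behaviour. This option can
    be changed per query."

# Folded block scalar: same resulting string, with no quoting or escaping needed.
description: >-
    Allow the example behaviour. This option can
    be changed per query.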
View File
@ -30,7 +30,6 @@ global:
cpp_includes:
- "mongo/db/feature_compatibility_version_parser.h"
imports:
- "mongo/db/basic_types.idl"
@ -59,13 +58,15 @@ structs:
validator:
callback: "FeatureCompatibilityVersionParser::validatePreviousVersionField"
changeTimestamp:
description: "Timestamp used to identify the 2-phase setFCV request. Both phases (kStart and
description:
"Timestamp used to identify the 2-phase setFCV request. Both phases (kStart and
kComplete) must have the same timestamp for the entire sequence, and every new
sequence started must have a strictly incrementing timestamp."
type: timestamp
optional: true
isCleaningServerMetadata:
description: "Boolean indicating whether the setFCV downgrade request is in the middle of
description:
"Boolean indicating whether the setFCV downgrade request is in the middle of
cleaning up internal server metadata."
type: bool
optional: true

View File
commands:
TriggerRotateFTDC:
description: "This command triggeres a rotate of the FTDC file for the node that this command is
description:
"This command triggeres a rotate of the FTDC file for the node that this command is
called on."
command_name: rotateFTDC
strict: true
View File
@ -38,7 +38,7 @@ server_parameters:
diagnosticDataCollectionStatsNamespaces:
description: "Specifies a comma-separated list of collections to collect metrics on"
set_at: [startup, runtime]
cpp_vartype: 'synchronized_value<std::vector<std::string>>'
cpp_vartype: "synchronized_value<std::vector<std::string>>"
cpp_varname: "gDiagnosticDataCollectionStatsNamespaces"
validator:
callback: validateCollectionStatsNamespaces
View File
@ -61,7 +61,8 @@ server_parameters:
redact: false
diagnosticDataCollectionSampleTimeoutMillis:
description: "Determines the timeout that each FTDC collector will be waited on during every
description:
"Determines the timeout that each FTDC collector will be waited on during every
sample. This is only used for async FTDC collectors."
set_at: [startup, runtime]
cpp_varname: "ftdcStartupParams.sampleTimeoutMillis"
@ -71,7 +72,8 @@ server_parameters:
redact: false
diagnosticDataCollectionMinThreads:
description: "Determines the minimum number of async threads to keep-alive in the async FTDC
description:
"Determines the minimum number of async threads to keep-alive in the async FTDC
collector threadpool. Runtime updates require a join on the threadpool which could
cause temporary collection delay."
set_at: [startup, runtime]
@ -82,7 +84,8 @@ server_parameters:
redact: false
diagnosticDataCollectionMaxThreads:
description: "Determines the maximum number of async threads to scale to in the async FTDC
description:
"Determines the maximum number of async threads to scale to in the async FTDC
collector threadpool. Runtime updates require a join on the threadpool which could
cause temporary collection delay."
set_at: [startup, runtime]
@ -137,13 +140,13 @@ server_parameters:
diagnosticDataCollectionEnableLatencyHistograms:
description: "Enable the capture of opLatencies: { histograms: true } } in FTDC."
set_at: [startup, runtime]
cpp_vartype: 'AtomicWord<bool>'
cpp_vartype: "AtomicWord<bool>"
cpp_varname: gDiagnosticDataCollectionEnableLatencyHistograms
redact: false
diagnosticDataCollectionVerboseTCMalloc:
description: "Enable the capture of verbose tcmalloc in FTDC."
set_at: [startup, runtime]
cpp_vartype: 'AtomicWord<bool>'
cpp_vartype: "AtomicWord<bool>"
cpp_varname: gDiagnosticDataCollectionVerboseTCMalloc
redact: false
Some files were not shown because too many files have changed in this diff.