mirror of https://github.com/mongodb/mongo
5.0.31 release maintenance (#28904)
GitOrigin-RevId: 4b909383e10623daa63c0b83e0c348e01ca6264d
parent 71dbbb5ecd
commit e80fe1fe2b
@@ -2,8 +2,10 @@
* @10gen/server-release

# Exclude some test files and READMEs from the backport approvals
/jstests/**/*
/etc/backports_required_for_multiversion_tests.yml
/etc/*.suppressions
/README.md
**/README.md

# Exclude all the tests under "jstests" directories from backport approvals
**/jstests/**/*
SConstruct (20 lines changed)
@@ -1535,26 +1535,6 @@ if env.get('ENABLE_OOM_RETRY'):
    else:
        env['OOM_RETRY_ATTEMPTS'] = 10
        env['OOM_RETRY_MAX_DELAY_SECONDS'] = 120

        if env.ToolchainIs('clang', 'gcc'):
            env['OOM_RETRY_MESSAGES'] = [
                ': out of memory',
                'virtual memory exhausted: Cannot allocate memory',
                ': fatal error: Killed signal terminated program cc1',
                # TODO: SERVER-77322 remove this non memory related ICE.
                r'during IPA pass: cp.+g\+\+: internal compiler error',
                'ld terminated with signal 9',
            ]
        elif env.ToolchainIs('msvc'):
            env['OOM_RETRY_MESSAGES'] = [
                'LNK1102: out of memory',
                'C1060: compiler is out of heap space',
                'c1xx : fatal error C1063: INTERNAL COMPILER ERROR',
                r'LNK1171: unable to load mspdbcore\.dll',
                "LNK1201: error writing to program database ''",
            ]
            env['OOM_RETRY_RETURNCODES'] = [1102]

        env.Tool('oom_auto_retry')

if env['TARGET_ARCH']:
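The block removed above configured the automatic out-of-memory retry hook for compiler and linker jobs. For context, here is a minimal Python sketch of how such message patterns and retry limits might drive a retry loop; the function and names below are illustrative only, the real logic lives in the repo's oom_auto_retry SCons tool.

import re
import subprocess
import time

# Hypothetical illustration of how OOM_RETRY_MESSAGES / OOM_RETRY_ATTEMPTS might be
# consumed: re-run a build command when its output matches a known OOM signature.
OOM_MESSAGES = [': out of memory', 'ld terminated with signal 9']
MAX_ATTEMPTS = 10
MAX_DELAY_SECONDS = 120

def run_with_oom_retry(cmd):
    delay = 1
    for _ in range(MAX_ATTEMPTS):
        proc = subprocess.run(cmd, capture_output=True, text=True)
        output = proc.stdout + proc.stderr
        if proc.returncode == 0:
            return proc
        if not any(re.search(pattern, output) for pattern in OOM_MESSAGES):
            return proc  # failure was not OOM-related; do not retry
        time.sleep(min(delay, MAX_DELAY_SECONDS))
        delay *= 2
    return proc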
@@ -161,6 +161,16 @@ ALLOW_ANY_TYPE_LIST: List[str] = [
    'explain-param-useNewUpsert'
]

# Do not add user visible fields already released in earlier versions.
IGNORE_UNSTABLE_LIST: List[str] = [
    # The 'bypassEmptyTsReplacement' field is used by mongorestore and mongosync and is not
    # documented to users.
    'insert-param-bypassEmptyTsReplacement',
    'update-param-bypassEmptyTsReplacement',
    'delete-param-bypassEmptyTsReplacement',
    'findAndModify-param-bypassEmptyTsReplacement',
]

SKIPPED_FILES = ["unittest.idl"]
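As the later hunks show, this list is keyed by strings of the form "<command>-param-<field>" (or "-reply-" for reply fields). A minimal sketch of that lookup, using a hypothetical helper name not present in the change:

from typing import List

IGNORE_UNSTABLE_LIST: List[str] = [
    'insert-param-bypassEmptyTsReplacement',
    'update-param-bypassEmptyTsReplacement',
]

# Hypothetical helper mirroring the checks below: a newly-unstable field only
# triggers a compatibility error if its "<command>-<kind>-<field>" key is not
# in the ignore list.
def should_flag_unstable(cmd_name: str, field_name: str, kind: str,
                         old_unstable: bool, new_unstable: bool) -> bool:
    key = f"{cmd_name}-{kind}-{field_name}"  # e.g. "insert-param-bypassEmptyTsReplacement"
    return (not old_unstable) and new_unstable and key not in IGNORE_UNSTABLE_LIST

assert not should_flag_unstable("insert", "bypassEmptyTsReplacement", "param", False, True)
assert should_flag_unstable("update", "someOtherField", "param", False, True)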
@@ -270,7 +280,7 @@ def check_superset(ctxt: IDLCompatibilityContext, cmd_name: str, type_name: str,


def check_reply_field_type_recursive(ctxt: IDLCompatibilityContext,
                                     field_pair: FieldCompatibilityPair) -> None:
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-branches,too-many-locals
    """Check compatibility between old and new reply field type if old field type is a syntax.Type instance."""
    old_field = field_pair.old
    new_field = field_pair.new
@@ -305,7 +315,8 @@ def check_reply_field_type_recursive(ctxt: IDLCompatibilityContext,

    if "any" in old_field_type.bson_serialization_type:
        # If 'any' is not explicitly allowed as the bson_serialization_type.
        if allow_name not in ALLOW_ANY_TYPE_LIST:
        any_allow = allow_name in ALLOW_ANY_TYPE_LIST or old_field_type.name == 'optionalBool'
        if not any_allow:
            ctxt.add_reply_field_bson_any_not_allowed_error(
                cmd_name, field_name, old_field_type.name, old_field.idl_file_path)
            return
@@ -457,8 +468,9 @@ def check_reply_field(ctxt: IDLCompatibilityContext, old_field: syntax.Field,
                      new_idl_file_path: str):
    """Check compatibility between old and new reply field."""
    # pylint: disable=too-many-arguments
    if not old_field.unstable:
        if new_field.unstable:
    field_name: str = cmd_name + "-reply-" + new_field.name
    if not old_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
        if new_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
            ctxt.add_new_reply_field_unstable_error(cmd_name, new_field.name, new_idl_file_path)
        if new_field.optional and not old_field.optional:
            ctxt.add_new_reply_field_optional_error(cmd_name, new_field.name, new_idl_file_path)
@@ -572,7 +584,8 @@ def check_param_or_command_type_recursive(ctxt: IDLCompatibilityContext,

    if "any" in old_type.bson_serialization_type:
        # If 'any' is not explicitly allowed as the bson_serialization_type.
        if allow_name not in ALLOW_ANY_TYPE_LIST:
        any_allow = allow_name in ALLOW_ANY_TYPE_LIST or old_type.name == 'optionalBool'
        if not any_allow:
            ctxt.add_command_or_param_type_bson_any_not_allowed_error(
                cmd_name, old_type.name, old_field.idl_file_path, param_name, is_command_parameter)
            return
@@ -786,7 +799,8 @@ def check_command_param_or_type_struct_field(
        is_command_parameter: bool):
    """Check compatibility between the old and new command parameter or command type struct field."""
    # pylint: disable=too-many-arguments
    if not old_field.unstable and new_field.unstable:
    field_name: str = cmd_name + "-param-" + new_field.name
    if not old_field.unstable and new_field.unstable and field_name not in IGNORE_UNSTABLE_LIST:
        ctxt.add_new_param_or_command_type_field_unstable_error(
            cmd_name, old_field.name, old_idl_file_path, type_name, is_command_parameter)
    # If old field is unstable and new field is stable, the new field should either be optional or
@@ -1,5 +1,6 @@
"""Module for syncing a repo with Copybara and setting up configurations."""
import argparse
import fileinput
import subprocess
import os
import sys

@@ -92,7 +93,7 @@ def send_failure_message_to_slack(expansions):
    )


def main():
def main():  # pylint: disable=too-many-locals
    """Clone the Copybara repo, build its Docker image, and set up and run migrations."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--expansion-file", dest="expansion_file", type=str,
@@ -112,25 +113,56 @@ def main():
    # Read configurations
    expansions = read_config_file(args.expansion_file)

    access_token_copybara_syncer = get_installation_access_token(
        expansions["app_id_copybara_syncer"], expansions["private_key_copybara_syncer"],
        expansions["installation_id_copybara_syncer"])
    token_mongodb_mongo = get_installation_access_token(
        expansions["app_id_copybara_syncer"],
        expansions["private_key_copybara_syncer"],
        expansions["installation_id_copybara_syncer"],
    )
    token_10gen_mongo = get_installation_access_token(
        expansions["app_id_copybara_syncer_10gen"],
        expansions["private_key_copybara_syncer_10gen"],
        expansions["installation_id_copybara_syncer_10gen"],
    )

    tokens_map = {
        "https://github.com/mongodb/mongo.git": token_mongodb_mongo,
        "https://github.com/10gen/mongo.git": token_10gen_mongo,
    }

    # Create the mongodb-bot.gitconfig file as necessary.
    create_mongodb_bot_gitconfig()

    current_dir = os.getcwd()
    git_destination_url_with_token = f"https://x-access-token:{access_token_copybara_syncer}@github.com/mongodb/mongo.git"
    config_file = f"{current_dir}/copy.bara.sky"

    # Overwrite repo urls in copybara config in-place
    with fileinput.FileInput(config_file, inplace=True) as file:
        for line in file:
            token = None
            for repo, value in tokens_map.items():
                if repo in line:
                    token = value

            if token:
                print(
                    line.replace(
                        "https://github.com",
                        f"https://x-access-token:{token}@github.com",
                    ),
                    end="",
                )
            else:
                print(line, end="")

    # Set up the Docker command and execute it
    docker_cmd = [
        "docker run",
        "-v ~/.ssh:/root/.ssh",
        "-v ~/mongodb-bot.gitconfig:/root/.gitconfig",
        f'-v "{current_dir}/copy.bara.sky":/usr/src/app/copy.bara.sky',
        f'-v "{config_file}":/usr/src/app/copy.bara.sky',
        "-e COPYBARA_CONFIG='copy.bara.sky'",
        "-e COPYBARA_SUBCOMMAND='migrate'",
        f"-e COPYBARA_OPTIONS='-v --git-destination-url={git_destination_url_with_token}'",
        "-e COPYBARA_OPTIONS='-v'",
        "copybara copybara",
    ]
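The rewrite above injects a per-repository installation token into any config line that mentions that repository's URL. A small, self-contained illustration of the transformation; the token value is a placeholder, not a real credential:

line = 'destinationUrl = "https://github.com/mongodb/mongo.git"\n'
token = "<installation-access-token>"  # placeholder value
rewritten = line.replace("https://github.com",
                         f"https://x-access-token:{token}@github.com")
# destinationUrl = "https://x-access-token:<installation-access-token>@github.com/mongodb/mongo.git"
print(rewritten, end="")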
@@ -5,8 +5,8 @@
# sourceUrl = "/path/to/source"
# destinationUrl = "/path/to/dest"

sourceUrl = "git@github.com:10gen/mongo.git"
destinationUrl = "git@github.com:mongodb/mongo.git"
sourceUrl = "https://github.com/10gen/mongo.git"
destinationUrl = "https://github.com/mongodb/mongo.git"

core.workflow(
    name = "default",
@@ -25,7 +25,7 @@ core.workflow(
    mode = "ITERATIVE",
    transformations = [
        # (^.*?) - matches the first line (without the newline char)
        # \n - matches the first newline (or nothing at all if there is no newline). If there is no match then nopthing happens
        # \n - matches the first newline (or nothing at all if there is no newline). If there is no match then nothing happens
        # ((\n|.)*) - matches everything after
        # Overall, this copies only the first line of the commit rather than the body
        metadata.scrubber("(^.*?)\n((\n|.)*)", replacement = "$1"),
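The scrubber keeps only the first line (the subject) of each migrated commit message. The same pattern can be exercised with Python's re module purely for illustration; Copybara itself applies it via metadata.scrubber:

import re

commit_message = "SERVER-12345 Fix the thing\n\nLong body that should\nnot be copied downstream."
# Group 1 is the first line, group 2 everything after the first newline.
scrubbed = re.sub(r"(^.*?)\n((\n|.)*)", r"\1", commit_message)
assert scrubbed == "SERVER-12345 Fix the thing"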
@@ -1,14 +1,13 @@
# This configuration is for migrating code from one Git repository to another using Copybara.
# It selectively copies content, excluding specific paths and preserving authorship.
sourceUrl = "git@github.com:10gen/mongo.git"
destinationUrl = "git@github.com:10gen/mongo-copybara.git"
sourceUrl = "https://github.com/10gen/mongo.git"
destinationUrl = "https://github.com/10gen/mongo-copybara.git"

core.workflow(
    name = "default",
    origin = git.origin(
        url = sourceUrl,
        ref = "v5.0",
        # VersionSelector
    ),
    destination = git.destination(
        url = destinationUrl,
@@ -22,7 +21,11 @@ core.workflow(

    mode = "ITERATIVE",
    # Change the path here to the folder you want to publish publicly
    # transformations = [
    #     core.move("path/to/folder/you/want/exported", ""),
    # ],
    transformations = [
        # (^.*?) - matches the first line (without the newline char)
        # \n - matches the first newline (or nothing at all if there is no newline). If there is no match then nothing happens
        # ((\n|.)*) - matches everything after
        # Overall, this copies only the first line of the commit rather than the body
        metadata.scrubber("(^.*?)\n((\n|.)*)", replacement = "$1"),
    ],
)
@@ -163,6 +163,10 @@ last-continuous:
    ticket: SERVER-88750
  - test_file: jstests/core/bypass_empty_ts_replacement_timeseries.js
    ticket: SERVER-88750
  - test_file: jstests/sharding/refresh_sessions.js
    ticket: SERVER-94635
  - test_file: jstests/replsets/catchup_ignores_old_heartbeats.js
    ticket: SERVER-86674
  suites:
    change_streams_multiversion_passthrough: null
    change_streams_sharded_collections_multiversion_passthrough: null
@@ -390,6 +394,10 @@ last-lts:
    ticket: SERVER-88750
  - test_file: jstests/core/bypass_empty_ts_replacement_timeseries.js
    ticket: SERVER-88750
  - test_file: jstests/sharding/refresh_sessions.js
    ticket: SERVER-94635
  - test_file: jstests/replsets/catchup_ignores_old_heartbeats.js
    ticket: SERVER-86674
  suites:
    change_streams_multiversion_passthrough: null
    change_streams_sharded_collections_multiversion_passthrough: null
@@ -1360,7 +1360,7 @@ functions:
      params:
        provider: ec2
        distro: ${distro_id}
        timeout_teardown_secs: 604800 # 7 days
        timeout_teardown_secs: 86400 # 24 hours
        security_group_ids:
          - sg-097bff6dd0d1d31d0

@@ -5915,13 +5915,13 @@ tasks:
    task_names: >-
      powercycle_smoke_skip_compile
    num_tasks: 20
    exec_timeout_secs: 604800 # 7 days
    timeout_secs: 604800 # 7 days
    exec_timeout_secs: 86400 # 24 hours
    timeout_secs: 86400 # 24 hours
    set_up_retry_count: 1800
    run_powercycle_args: --sshAccessRetryCount=1800

- name: powercycle_sentinel
  exec_timeout_secs: 604800 # 7 days
  exec_timeout_secs: 86400 # 24 hours
  commands:
    - func: "run powercycle sentinel"
      vars:
@@ -6114,6 +6114,7 @@ tasks:

- name: sync_repo_with_copybara
  tags: []
  patchable: false
  commands:
    - command: manifest.load
    - *git_get_project
|
|
@ -11456,69 +11457,6 @@ buildvariants:
|
|||
- name: validate_commit_message
|
||||
- name: check_for_todos
|
||||
|
||||
- name: live-record
|
||||
display_name: "~ RHEL 8 Shared Library (with UndoDB live-record)"
|
||||
batchtime: *batchtime_one_week
|
||||
stepback: false
|
||||
run_on:
|
||||
- rhel80-medium
|
||||
expansions:
|
||||
compile_flags: --ssl MONGO_DISTMOD=rhel80 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_v3_gcc.vars --link-model=dynamic --use-diagnostic-latches=on
|
||||
multiversion_platform: rhel80
|
||||
multiversion_edition: enterprise
|
||||
has_packages: false
|
||||
scons_cache_scope: shared
|
||||
scons_cache_mode: all
|
||||
target_resmoke_time: 10
|
||||
max_sub_suites: 3
|
||||
large_distro_name: rhel80-medium
|
||||
num_scons_link_jobs_available: 0.99
|
||||
record_with: --recordWith /opt/undodb5/bin/live-record
|
||||
resmoke_jobs_factor: 0.3
|
||||
exec_timeout_secs: 28800 # 8 hours
|
||||
test_flags: --excludeWithAnyTags=requires_fast_memory,live_record_incompatible
|
||||
tasks:
|
||||
- name: compile_and_archive_dist_test_TG
|
||||
- name: build_variant_gen
|
||||
- name: .aggfuzzer
|
||||
- name: .aggregation
|
||||
# - name: audit
|
||||
# - name: .auth !.multiversion !.non_live_record
|
||||
# - name: .causally_consistent !.sharding
|
||||
# - name: .change_streams
|
||||
# - name: .misc_js !.non_live_record
|
||||
- name: .concurrency !.ubsan !.no_txns !.debug_only !.stepdowns !.non_live_record !.large
|
||||
# - name: .encrypt
|
||||
# - name: initial_sync_fuzzer_gen
|
||||
- name: .jscore .common
|
||||
# - name: jsCore_minimum_batch_size
|
||||
- name: jsCore_op_query
|
||||
- name: jsCore_txns_large_txns_format
|
||||
# - name: jsonSchema
|
||||
- name: .jstestfuzz !.flow_control !.stepdowns !.causal
|
||||
# - name: multiversion_sanity_check_gen
|
||||
# - name: mqlrun
|
||||
# - name: .multi_shard
|
||||
# - name: .query_fuzzer
|
||||
# - name: .read_write_concern
|
||||
# - name: .replica_sets !.encrypt !.auth !.non_live_record
|
||||
# - name: replica_sets_reconfig_jscore_passthrough_gen
|
||||
# - name: replica_sets_reconfig_jscore_stepdown_passthrough_gen
|
||||
- name: retryable_writes_jscore_passthrough_gen
|
||||
- name: retryable_writes_jscore_stepdown_passthrough
|
||||
# - name: .read_only
|
||||
# - name: .rollbackfuzzer
|
||||
- name: sasl
|
||||
# - name: search
|
||||
# - name: search_auth
|
||||
# - name: search_ssl
|
||||
# - name: session_jscore_passthrough
|
||||
- name: .sharding .jscore !.wo_snapshot !.multi_stmt
|
||||
# - name: .sharding .txns
|
||||
# - name: .sharding .common !.non_live_record
|
||||
- name: snmp
|
||||
# - name: .updatefuzzer
|
||||
|
||||
|
||||
###########################################
|
||||
# Copybara buildvariants #
|
||||
|
|
@@ -4,8 +4,12 @@ exec_timeout_secs: &exec_timeout_secs 21600
timeout_secs: &timeout_secs 7200

include:
  - filename: etc/system_perf_yml_components/tasks.yml
  - filename: etc/system_perf_yml_components/variants.yml
  - filename: evergreen/system_perf/5.0/variants.yml
    module: dsi
  - filename: evergreen/system_perf/shared_tasks.yml
    module: dsi
  - filename: evergreen/system_perf/5.0/genny_tasks.yml
    module: genny

## Parameters for parameterized builds (see https://github.com/evergreen-ci/evergreen/wiki/Parameterized-Builds)
parameters:
@@ -80,7 +84,7 @@ modules:
    owner: mongodb-labs
    repo: YCSB
    prefix: ${workdir}/src
    branch: production
    branch: main
  - name: py-tpcc
    owner: mongodb-labs
    repo: py-tpcc
|
|
@ -118,61 +122,6 @@ timeout:
|
|||
|
||||
|
||||
functions:
|
||||
###
|
||||
# Same in every DSI project
|
||||
f_dsi_pre_run:
|
||||
- command: manifest.load
|
||||
f_dsi_post_run:
|
||||
- command: shell.exec
|
||||
params:
|
||||
script: ./src/dsi/run-dsi post_run
|
||||
- command: perf.send
|
||||
params:
|
||||
file: ./build/CedarReports/cedar_report.json
|
||||
aws_key: ${terraform_key}
|
||||
aws_secret: ${terraform_secret}
|
||||
bucket: genny-metrics
|
||||
region: us-east-1
|
||||
prefix: ${task_id}_${execution}
|
||||
- command: attach.results
|
||||
params:
|
||||
file_location: ./build/EvergreenResultsJson/results.json
|
||||
- command: s3.put
|
||||
params:
|
||||
aws_key: ${aws_key}
|
||||
aws_secret: ${aws_secret}
|
||||
local_file: ./build/Artifacts/DSIArtifacts.tgz
|
||||
remote_file: ${project_dir}/${build_variant}/${revision}/${task_id}/${version_id}/logs/dsi-artifacts-${task_name}-${build_id}-${execution}.tgz
|
||||
bucket: mciuploads
|
||||
permissions: public-read
|
||||
content_type: application/x-gzip
|
||||
display_name: DSI Artifacts - Execution ${execution}
|
||||
- command: s3.put
|
||||
params:
|
||||
aws_key: ${aws_key}
|
||||
aws_secret: ${aws_secret}
|
||||
local_file: ./build/Documentation/index.html
|
||||
remote_file: ${project_dir}/${build_variant}/${revision}/${task_id}/${version_id}/logs/${task_name}-${build_id}-index.html
|
||||
bucket: mciuploads
|
||||
permissions: public-read
|
||||
content_type: text/html
|
||||
display_name: Documentation
|
||||
- command: s3.put
|
||||
params:
|
||||
aws_key: ${aws_key}
|
||||
aws_secret: ${aws_secret}
|
||||
local_file: bootstrap.yml
|
||||
remote_file: ${project_dir}/${build_variant}/${revision}/${task_id}/${version_id}/bootstrap-${task_name}-${build_id}-${execution}.yml
|
||||
bucket: mciuploads
|
||||
permissions: public-read
|
||||
content_type: text/plain
|
||||
display_name: Task Bootstrap Config
|
||||
f_dsi_timeout:
|
||||
- command: shell.exec
|
||||
params:
|
||||
script: ./src/dsi/run-dsi on_timeout
|
||||
###
|
||||
|
||||
f_other_post_ops:
|
||||
- command: shell.exec
|
||||
params:
|
||||
|
|
@ -363,14 +312,6 @@ tasks:
|
|||
- func: "compile mongodb"
|
||||
|
||||
buildvariants:
|
||||
- name: task_generation
|
||||
display_name: Task Generation
|
||||
modules: *modules
|
||||
run_on:
|
||||
- amazon2-build
|
||||
tasks:
|
||||
- name: schedule_global_auto_tasks
|
||||
|
||||
- &compile-amazon2
|
||||
name: compile-amazon2
|
||||
display_name: Compile
|
||||
|
|
|
|||
|
|
@ -1,977 +0,0 @@
|
|||
variables:
|
||||
_src_dir: &src_dir src/mongo
|
||||
|
||||
functions:
|
||||
f_dsi_run_workload: &dsi_run_func
|
||||
- command: timeout.update
|
||||
params:
|
||||
exec_timeout_secs: ${exec_timeout_secs_override}
|
||||
timeout_secs: ${timeout_secs_override}
|
||||
- command: git.get_project
|
||||
params:
|
||||
directory: *src_dir
|
||||
clone_depth: 1000
|
||||
revisions:
|
||||
dsi: ${dsi_rev}
|
||||
genny: ${genny_rev}
|
||||
linkbench: ${linkbench_rev}
|
||||
linkbench2: ${linkbench2_rev}
|
||||
tsbs: ${tsbs_rev}
|
||||
workloads: ${workloads_rev}
|
||||
mongo-perf: ${mongo-perf_rev}
|
||||
YCSB: ${YCSB_rev}
|
||||
py-tpcc: ${py-tpcc_rev}
|
||||
flamegraph: ${flamegraph_rev}
|
||||
# mongo: ${mongo_rev}
|
||||
- command: expansions.write
|
||||
params:
|
||||
file: ./expansions.yml
|
||||
redacted: true
|
||||
- command: shell.exec
|
||||
params:
|
||||
script: ./src/dsi/run-dsi run_workload
|
||||
- command: shell.exec
|
||||
type: system
|
||||
params:
|
||||
script: ./src/dsi/run-dsi determine_failure -m SYSTEM
|
||||
- command: shell.exec
|
||||
type: setup
|
||||
params:
|
||||
script: ./src/dsi/run-dsi determine_failure -m SETUP
|
||||
- command: shell.exec
|
||||
type: test
|
||||
params:
|
||||
script: ./src/dsi/run-dsi determine_failure -m TEST
|
||||
f_run_dsi_workload: *dsi_run_func # Do not use this function. It is deprecated.
|
||||
|
||||
## Schedule Tasks ##
|
||||
f_schedule_tasks:
|
||||
- command: git.get_project
|
||||
params:
|
||||
directory: *src_dir
|
||||
clone_depth: 1000
|
||||
revisions:
|
||||
dsi: ${dsi_rev}
|
||||
genny: ${genny_rev}
|
||||
linkbench: ${linkbench_rev}
|
||||
linkbench2: ${linkbench2_rev}
|
||||
tsbs: ${tsbs_rev}
|
||||
workloads: ${workloads_rev}
|
||||
mongo-perf: ${mongo-perf_rev}
|
||||
YCSB: ${YCSB_rev}
|
||||
py-tpcc: ${py-tpcc_rev}
|
||||
- command: expansions.write
|
||||
params:
|
||||
file: ./expansions.yml
|
||||
- command: shell.exec
|
||||
params:
|
||||
script: ./src/dsi/run-dsi schedule_tasks --tasks=${tasks}
|
||||
- command: generate.tasks
|
||||
params:
|
||||
files:
|
||||
- build/TaskJSON/Tasks.json
|
||||
|
||||
tasks:
|
||||
###
|
||||
# Same in every DSI project
|
||||
- name: schedule_global_auto_tasks
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_schedule_tasks
|
||||
vars:
|
||||
tasks: all_tasks
|
||||
- name: schedule_variant_auto_tasks
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_schedule_tasks
|
||||
vars:
|
||||
tasks: variant_tasks
|
||||
- name: schedule_patch_auto_tasks
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_schedule_tasks
|
||||
vars:
|
||||
tasks: patch_tasks
|
||||
- name: smoke_test
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: short
|
||||
- name: canaries_only
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: canaries
|
||||
- name: smoke_test_ssl
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: short
|
||||
mongodb_setup: replica-ssl
|
||||
infrastructure_provisioning: replica
|
||||
- name: smoke_test_standalone_auth
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: short
|
||||
mongodb_setup: standalone-auth
|
||||
infrastructure_provisioning: single
|
||||
- name: smoke_test_replset_auth
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: short
|
||||
mongodb_setup: replica-auth
|
||||
infrastructure_provisioning: replica
|
||||
- name: smoke_test_shard_lite_auth
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: short
|
||||
mongodb_setup: shard-lite-auth
|
||||
infrastructure_provisioning: shard-lite
|
||||
###
|
||||
|
||||
- name: linkbench
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "linkbench"
|
||||
|
||||
- name: linkbench_stepdowns
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "linkbench_stepdowns"
|
||||
|
||||
- name: linkbench_rolling_restarts
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "linkbench_rolling_restarts"
|
||||
|
||||
- name: linkbench_non_retryable_writes_stepdowns
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "linkbench_non_retryable_writes_stepdowns"
|
||||
|
||||
- name: linkbench_non_retryable_writes_rolling_restarts
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "linkbench_non_retryable_writes_rolling_restarts"
|
||||
|
||||
- name: linkbench2
|
||||
priority: 5
|
||||
exec_timeout_secs: 43200 # 12 hours
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "linkbench2"
|
||||
additional_tfvars: "tags: {expire-on-delta: 12}"
|
||||
|
||||
- name: tsbs_load
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "tsbs_load"
|
||||
|
||||
- name: tsbs_query
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "tsbs_query"
|
||||
|
||||
- name: tsbs_query_manual_bucketing
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "tsbs_query_manual_bucketing"
|
||||
|
||||
- name: tpcc
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "tpcc"
|
||||
|
||||
- name: tpcc_majority
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "tpcc_majority"
|
||||
|
||||
- name: industry_benchmarks
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb"
|
||||
|
||||
- name: ycsb_60GB
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb-60GB"
|
||||
|
||||
- name: industry_benchmarks_secondary_reads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb-secondary-reads"
|
||||
|
||||
- name: industry_benchmarks_wmajority
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb-wmajority"
|
||||
|
||||
- name: industry_benchmarks_stepdowns
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb_stepdowns"
|
||||
|
||||
- name: industry_benchmarks_rolling_restarts
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb_rolling_restarts"
|
||||
|
||||
- name: industry_benchmarks_non_retryable_writes_stepdowns
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb_non_retryable_writes_stepdowns"
|
||||
|
||||
- name: industry_benchmarks_non_retryable_writes_rolling_restarts
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb_non_retryable_writes_rolling_restarts"
|
||||
|
||||
- name: ycsb.2023-09
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb.2023-09"
|
||||
|
||||
- name: ycsb_w1.2023-09
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb-w1.2023-09"
|
||||
|
||||
- name: ycsb_60GB.2023-09
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb-60GB.2023-09"
|
||||
|
||||
- name: ycsb_60GB.long.2023-09
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb-60GB.long.2023-09"
|
||||
|
||||
- name: ycsb_secondary_reads.2023-09
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb-secondary-reads.2023-09"
|
||||
|
||||
- name: ycsb.load
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "ycsb.load"
|
||||
|
||||
- name: crud_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "crud_workloads"
|
||||
|
||||
- name: crud_workloads_majority
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "crud_workloads_majority"
|
||||
|
||||
- name: crud_workloads_w1
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "crud_workloads_w1.2023-02"
|
||||
|
||||
- name: cursor_manager
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "cursor_manager"
|
||||
|
||||
- name: mixed_workloads_genny_stepdowns
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "mixed_workloads_genny_stepdowns"
|
||||
|
||||
- name: mixed_workloads_genny_rolling_restarts
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "mixed_workloads_genny_rolling_restarts"
|
||||
|
||||
- name: misc_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "misc_workloads"
|
||||
|
||||
|
||||
- name: map_reduce_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "map_reduce_workloads"
|
||||
|
||||
- name: genny_canaries
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "genny_canaries"
|
||||
|
||||
- name: retryable_writes_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "retryable_writes"
|
||||
|
||||
- name: snapshot_reads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "snapshot_reads"
|
||||
|
||||
- name: secondary_reads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "secondary_reads"
|
||||
|
||||
- name: bestbuy_agg
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "bestbuy_agg"
|
||||
|
||||
- name: bestbuy_agg_merge_same_db
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "bestbuy_agg_merge_same_db"
|
||||
|
||||
- name: bestbuy_agg_merge_different_db
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "bestbuy_agg_merge_different_db"
|
||||
|
||||
- name: bestbuy_agg_merge_target_hashed
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "bestbuy_agg_merge_target_hashed"
|
||||
|
||||
- name: bestbuy_agg_merge_wordcount
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "bestbuy_agg_merge_wordcount"
|
||||
|
||||
- name: bestbuy_query
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "bestbuy_query"
|
||||
|
||||
- name: non_sharded_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "non_sharded"
|
||||
|
||||
- name: mongos_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "mongos"
|
||||
|
||||
- name: mongos_large_catalog_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "mongos_large_catalog"
|
||||
|
||||
- name: move_chunk_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "move_chunk"
|
||||
|
||||
- name: move_chunk_waiting_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "move_chunk_waiting"
|
||||
|
||||
- name: move_chunk_large_chunk_map_workloads
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "move_chunk_large_chunk_map"
|
||||
|
||||
- name: refine_shard_key_transaction_stress
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "refine_shard_key_transaction_stress"
|
||||
|
||||
- name: secondary_performance
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
# Unfortunately the dash/underscore style is different for mongodb_setup and test_control
|
||||
test_control: "secondary_performance"
|
||||
mongodb_setup: "secondary-performance"
|
||||
|
||||
- name: initialsync
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "initialsync"
|
||||
|
||||
|
||||
- name: initialsync-logkeeper
|
||||
priority: 5
|
||||
exec_timeout_secs: 216000 # 2.5 days
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
timeout_secs: 216000 # 2.5 days
|
||||
vars:
|
||||
test_control: "initialsync-logkeeper"
|
||||
|
||||
# The following two initial sync logkeeper automation tasks are only used in the commented-out
|
||||
# "Linux ReplSet Initial Sync LogKeeper Snapshot Update" variant below and are only intended to be
|
||||
# run in patch builds to update FCV for logkeeper datasets.
|
||||
|
||||
- name: initialsync-logkeeper-snapshot-update
|
||||
priority: 5
|
||||
exec_timeout_secs: 216000 # 2.5 days
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "initialsync-logkeeper-snapshot-update"
|
||||
|
||||
- name: initialsync-large
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "initialsync-large"
|
||||
|
||||
- name: change_streams_latency
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "change_streams_latency"
|
||||
|
||||
- name: change_streams_listen_throughput
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "change_streams_listen_throughput"
|
||||
|
||||
- name: change_streams_multi_mongos
|
||||
priority: 5
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: "change_streams_multi_mongos"
|
||||
|
||||
- name: genny_execution_UserAcquisition
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: auto_genny_workload
|
||||
auto_workload_path: ./src/genny/dist/etc/genny/workloads/execution/UserAcquisition.yml
|
||||
- name: genny_scale_InsertRemove
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: auto_genny_workload
|
||||
auto_workload_path: ./src/genny/dist/etc/genny/workloads/scale/InsertRemove.yml
|
||||
- name: query
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: query,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: query_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: query,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: views-query
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: query_identityview,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: views-aggregation
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: aggregation_identityview,
|
||||
include_filter_2: regression,
|
||||
exclude_filter: none,
|
||||
threads: "1",
|
||||
read_cmd: 'true'}
|
||||
- name: where
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: where,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: where_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: where,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: update
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: update,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: update_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: update,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: insert
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: insert,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: insert_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: insert,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: wildcard-index-read
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: wildcard_read,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: wildcard-index-read_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: wildcard_read,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: wildcard-index-write
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: wildcard_write,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: wildcard-index-write_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: wildcard_write,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: geo
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: geo,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: geo_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: geo,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: misc
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: misc_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: misc_custom_filter_default
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-custom-filter-default.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: misc_custom_filter_default_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-custom-filter-default.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: misc_custom_filter_slow_or_sample
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-custom-filter-slow-or-sample.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: misc_custom_filter_slow_or_sample_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-custom-filter-slow-or-sample.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: misc_custom_filter_complex
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-custom-filter-complex.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: misc_custom_filter_complex_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-custom-filter-complex.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: misc_custom_filter_whole_doc
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-custom-filter-whole-doc.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: misc_custom_filter_whole_doc_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-custom-filter-whole-doc.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: misc_slowms_everything
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-slowms-everything.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: misc_slowms_everything_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
mongodb_setup: mongo-perf-standalone-slowms-everything.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: command multi remove mixed,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: singleThreaded
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: single_threaded,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: none,
|
||||
threads: "1",
|
||||
read_cmd: 'false'}
|
||||
- name: singleThreaded_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: single_threaded,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: none,
|
||||
threads: "1",
|
||||
read_cmd: 'true'}
|
||||
- name: aggregation
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: aggregation,
|
||||
include_filter_2: regression,
|
||||
exclude_filter: js,
|
||||
threads: "1",
|
||||
read_cmd: 'false'}
|
||||
- name: aggregation_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: aggregation,
|
||||
include_filter_2: regression,
|
||||
exclude_filter: js,
|
||||
threads: "1",
|
||||
read_cmd: 'true'}
|
||||
- name: agg-query-comparison
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: agg_query_comparison,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'false'}
|
||||
- name: agg-query-comparison_read_commands
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: agg_query_comparison,
|
||||
include_filter_2: core regression,
|
||||
exclude_filter: single_threaded,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: pipeline-updates
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: pipeline-updates,
|
||||
include_filter_2: regression,
|
||||
exclude_filter: none,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
- name: javascript
|
||||
commands:
|
||||
- func: f_dsi_run_workload
|
||||
vars:
|
||||
test_control: mongo-perf.2023-02
|
||||
test_control_params: |
|
||||
{include_filter_1: js,
|
||||
include_filter_2: aggregation,
|
||||
exclude_filter: none,
|
||||
threads: "1 2 4 8",
|
||||
read_cmd: 'true'}
|
||||
|
|
@ -1,282 +0,0 @@
|
|||
variables:
|
||||
_modules: &modules
|
||||
- mongo-tools
|
||||
- dsi
|
||||
- genny
|
||||
- workloads
|
||||
- linkbench
|
||||
- linkbench2
|
||||
- tsbs
|
||||
- mongo-perf
|
||||
- YCSB
|
||||
- py-tpcc
|
||||
- PrivateWorkloads
|
||||
- flamegraph
|
||||
_project_dir: &project_dir dsi
|
||||
_compile_amazon2: &_compile_amazon2
|
||||
- name: compile
|
||||
variant: compile-amazon2
|
||||
- name: schedule_global_auto_tasks
|
||||
variant: task_generation
|
||||
_compile_amazon_linux2_arm64: &_compile_amazon_linux2_arm64
|
||||
- name: compile
|
||||
variant: compile-amazon-linux2-arm64
|
||||
- name: schedule_global_auto_tasks
|
||||
variant: task_generation
|
||||
|
||||
buildvariants:
|
||||
- name: perf-atlas-M60-real.arm.aws.2023-11
|
||||
display_name: PERF M60-Atlas ReplSet ARM AWS 2023-11
|
||||
modules: *modules
|
||||
expansions:
|
||||
mongodb_setup_release: 2022-11
|
||||
mongodb_setup: atlas
|
||||
canaries: none
|
||||
atlas_setup: M60-repl
|
||||
use_custom_build: true
|
||||
infrastructure_provisioning: workload_client_arm.2023-04
|
||||
infrastructure_provisioning_release: 2023-09
|
||||
workload_setup: 2022-11
|
||||
platform: linux
|
||||
project_dir: *project_dir
|
||||
storageEngine: wiredTiger
|
||||
compile_variant: "-arm64"
|
||||
run_on:
|
||||
- "rhel70-perf-atlas-large"
|
||||
depends_on:
|
||||
- name: compile
|
||||
variant: compile-amazon2
|
||||
- name: schedule_global_auto_tasks
|
||||
variant: task_generation
|
||||
- name: compile
|
||||
variant: compile-amazon-linux2-arm64
|
||||
- name: schedule_global_auto_tasks
|
||||
variant: task_generation
|
||||
tasks:
|
||||
- name: schedule_patch_auto_tasks
|
||||
- name: schedule_variant_auto_tasks
|
||||
- name: ycsb.2023-09
|
||||
- name: ycsb_60GB.2023-09
|
||||
- name: tpcc
|
||||
- name: tpcc_majority
|
||||
- name: linkbench
|
||||
- name: linkbench2
|
||||
|
||||
- name: perf-atlas-M60-real.intel.azure.2023-11
|
||||
display_name: PERF M60-Atlas ReplSet Intel Azure 2023-11
|
||||
modules: *modules
|
||||
expansions:
|
||||
mongodb_setup_release: 2022-11
|
||||
mongodb_setup: atlas
|
||||
canaries: none
|
||||
atlas_setup: M60-repl-azure
|
||||
use_custom_build_azure: true
|
||||
infrastructure_provisioning: workload_client_intel.2023-11
|
||||
infrastructure_provisioning_release: 2023-09
|
||||
workload_setup: 2022-11
|
||||
platform: linux
|
||||
project_dir: *project_dir
|
||||
storageEngine: wiredTiger
|
||||
run_on:
|
||||
- "rhel70-perf-atlas-large"
|
||||
depends_on: *_compile_amazon2
|
||||
tasks:
|
||||
- name: schedule_patch_auto_tasks
|
||||
- name: schedule_variant_auto_tasks
|
||||
- name: ycsb.2023-09
|
||||
- name: ycsb_60GB.2023-09
|
||||
- name: tpcc
|
||||
- name: tpcc_majority
|
||||
- name: linkbench
|
||||
- name: linkbench2
|
||||
|
||||
- name: perf-3-shard.arm.aws.2023-11
|
||||
display_name: PERF 3-Shard Cluster ARM AWS 2023-11
|
||||
modules: *modules
|
||||
expansions:
|
||||
mongodb_setup_release: 2022-11
|
||||
mongodb_setup: shard
|
||||
infrastructure_provisioning_release: 2023-09
|
||||
infrastructure_provisioning: shard
|
||||
workload_setup: 2022-11
|
||||
platform: linux
|
||||
project_dir: *project_dir
|
||||
authentication: enabled
|
||||
storageEngine: wiredTiger
|
||||
compile_variant: "-arm64"
|
||||
run_on:
|
||||
- "rhel70-perf-shard"
|
||||
depends_on: *_compile_amazon_linux2_arm64
|
||||
tasks:
|
||||
- name: schedule_patch_auto_tasks
|
||||
- name: schedule_variant_auto_tasks
|
||||
- name: ycsb.2023-09
|
||||
- name: ycsb_w1.2023-09
|
||||
- name: crud_workloads_majority
|
||||
- name: crud_workloads_w1
|
||||
- name: misc_workloads
|
||||
- name: map_reduce_workloads
|
||||
- name: smoke_test
|
||||
- name: mongos_workloads
|
||||
- name: mongos_large_catalog_workloads
|
||||
- name: change_streams_latency
|
||||
- name: change_streams_multi_mongos
|
||||
|
||||
- name: perf-3-node-replSet.arm.aws.2023-11
|
||||
display_name: PERF 3-Node ReplSet ARM AWS 2023-11
|
||||
modules: *modules
|
||||
expansions:
|
||||
mongodb_setup_release: 2022-11
|
||||
mongodb_setup: replica
|
||||
infrastructure_provisioning_release: 2023-09
|
||||
infrastructure_provisioning: replica
|
||||
workload_setup: 2022-11
|
||||
platform: linux
|
||||
project_dir: *project_dir
|
||||
authentication: enabled
|
||||
storageEngine: wiredTiger
|
||||
compile_variant: "-arm64"
|
||||
run_on:
|
||||
- "rhel70-perf-replset"
|
||||
depends_on: *_compile_amazon_linux2_arm64
|
||||
tasks: &3nodetasks
|
||||
- name: schedule_patch_auto_tasks
|
||||
- name: schedule_variant_auto_tasks
|
||||
- name: ycsb.2023-09
|
||||
- name: ycsb_w1.2023-09
|
||||
- name: ycsb_60GB.2023-09
|
||||
- name: ycsb.load
|
||||
- name: ycsb_60GB.long.2023-09
|
||||
- name: ycsb_secondary_reads.2023-09
|
||||
- name: crud_workloads_majority
|
||||
- name: crud_workloads_w1
|
||||
- name: misc_workloads
|
||||
- name: map_reduce_workloads
|
||||
- name: refine_shard_key_transaction_stress
|
||||
- name: smoke_test
|
||||
- name: secondary_performance # Uses a special 2 node mongodb setup
|
||||
- name: non_sharded_workloads
|
||||
- name: bestbuy_agg
|
||||
- name: bestbuy_agg_merge_different_db
|
||||
- name: bestbuy_agg_merge_same_db
|
||||
- name: bestbuy_agg_merge_wordcount
|
||||
- name: bestbuy_query
|
||||
- name: change_streams_latency
|
||||
- name: snapshot_reads
|
||||
- name: secondary_reads
|
||||
- name: tpcc
|
||||
- name: linkbench
|
||||
- name: linkbench2
|
||||
|
||||
- name: perf-3-node-replSet-intel.intel.aws.2023-11
|
||||
display_name: PERF 3-Node ReplSet Intel AWS 2023-11
|
||||
modules: *modules
|
||||
expansions:
|
||||
mongodb_setup_release: 2022-11
|
||||
mongodb_setup: replica
|
||||
infrastructure_provisioning_release: 2023-09
|
||||
infrastructure_provisioning: replica-intel.2023-11
|
||||
workload_setup: 2022-11
|
||||
platform: linux
|
||||
project_dir: *project_dir
|
||||
authentication: enabled
|
||||
storageEngine: wiredTiger
|
||||
run_on:
|
||||
- "rhel70-perf-replset"
|
||||
depends_on: *_compile_amazon2
|
||||
tasks: &3nodetasks
|
||||
- name: schedule_patch_auto_tasks
|
||||
- name: schedule_variant_auto_tasks
|
||||
- name: industry_benchmarks
|
||||
- name: ycsb.2023-09
|
||||
- name: ycsb_60GB.2023-09
|
||||
- name: ycsb_60GB.long.2023-09
|
||||
- name: crud_workloads_majority
|
||||
- name: smoke_test
|
||||
- name: linkbench
|
||||
- name: linkbench2
|
||||
|
||||
- name: perf-2-node-replSet-initialsync.arm.aws.2023-11
|
||||
display_name: PERF 2-Node ReplSet Initial Sync ARM AWS 2023-11
|
||||
modules: *modules
|
||||
expansions:
|
||||
mongodb_setup_release: 2022-11
|
||||
mongodb_setup: replica-2node
|
||||
infrastructure_provisioning_release: 2023-09
|
||||
infrastructure_provisioning: replica-2node
|
||||
workload_setup: 2022-11
|
||||
platform: linux
|
||||
authentication: disabled
|
||||
storageEngine: wiredTiger
|
||||
compile_variant: "-arm64"
|
||||
project_dir: *project_dir
|
||||
depends_on: *_compile_amazon_linux2_arm64
|
||||
run_on:
|
||||
- "rhel70-perf-replset"
|
||||
tasks:
|
||||
- name: schedule_patch_auto_tasks
|
||||
- name: schedule_variant_auto_tasks
|
||||
- name: initialsync-large
|
||||
|
||||
- &linux-microbenchmarks-standalone-arm
|
||||
name: perf-mongo-perf-standalone.arm.aws.2023-11
|
||||
display_name: PERF Monogo-Perf Standalone inMemory ARM AWS 2023-11
|
||||
cron: "0 0 * * 0" # Weekly, Sundays at 12 AM
|
||||
modules: *modules
|
||||
expansions: &standalone-arm-expansions
|
||||
mongodb_setup_release: 2022-11
|
||||
mongodb_setup: mongo-perf-standalone.2023-02
|
||||
infrastructure_provisioning_release: 2023-09
|
||||
infrastructure_provisioning: workload_client_mongod_combined.2023-01
|
||||
workload_setup: 2022-11
|
||||
use_scons_cache: true
|
||||
platform: linux
|
||||
canaries: none
|
||||
storageEngine: inMemory
|
||||
project_dir: *project_dir
|
||||
compile_variant: "-arm64"
|
||||
run_on:
|
||||
- "rhel70-perf-microbenchmarks"
|
||||
depends_on: *_compile_amazon_linux2_arm64
|
||||
tasks:
|
||||
- name: genny_scale_InsertRemove
|
||||
- name: genny_execution_UserAcquisition
|
||||
- name: aggregation
|
||||
- name: aggregation_read_commands
|
||||
- name: agg-query-comparison
|
||||
- name: agg-query-comparison_read_commands
|
||||
- name: query
|
||||
- name: query_read_commands
|
||||
- name: views-aggregation
|
||||
- name: views-query
|
||||
- name: where
|
||||
- name: where_read_commands
|
||||
- name: update
|
||||
- name: update_read_commands
|
||||
- name: insert
|
||||
- name: insert_read_commands
|
||||
- name: wildcard-index-read
|
||||
- name: wildcard-index-read_read_commands
|
||||
- name: wildcard-index-write
|
||||
- name: wildcard-index-write_read_commands
|
||||
- name: geo
|
||||
- name: geo_read_commands
|
||||
- name: misc
|
||||
- name: misc_read_commands
|
||||
- name: singleThreaded
|
||||
- name: singleThreaded_read_commands
|
||||
- name: pipeline-updates
|
||||
- name: javascript
|
||||
|
||||
- &linux-microbenchmarks-standalone-intel
|
||||
<<: *linux-microbenchmarks-standalone-arm
|
||||
name: perf-mongo-perf-standalone.intel.aws.2023-11
|
||||
display_name: PERF Mongo-Perf Standalone inMemory Intel AWS 2023-11
|
||||
cron: "0 0 * * 0" # Weekly, Sundays at 12 AM
|
||||
expansions: &standalone-intel-expansions
|
||||
<<: *standalone-arm-expansions
|
||||
infrastructure_provisioning: workload_client_mongod_combined_intel.2023-01
|
||||
compile_variant: ""
|
||||
run_on:
|
||||
- "rhel70-perf-microbenchmarks"
|
||||
depends_on: *_compile_amazon2
|
||||
|
|
@ -1,10 +1,9 @@
|
|||
set -o errexit
|
||||
|
||||
cd src
|
||||
git clone --branch=jepsen-mongodb-master --depth=1 git@github.com:10gen/jepsen.git jepsen-mongodb
|
||||
# "v0.2.0-jepsen-mongodb-master" is the latest jepsen version that works with v5.0,
|
||||
# because the JS engine on v5.0 does not support the "import" statement, which was added in later jepsen versions
|
||||
git clone --branch=v0.2.0-jepsen-mongodb-master --depth=1 git@github.com:10gen/jepsen.git jepsen-mongodb
|
||||
cd jepsen-mongodb
|
||||
branch=$(git symbolic-ref --short HEAD)
|
||||
commit=$(git show -s --pretty=format:"%h - %an, %ar: %s")
|
||||
echo "Git branch: $branch, commit: $commit"
|
||||
|
||||
lein install
|
||||
|
|
|
|||
|
|
@ -6,9 +6,35 @@ function setup_db_contrib_tool {
|
|||
export PATH="$PATH:$PIPX_BIN_DIR"
|
||||
export PIP_CACHE_DIR=${workdir}/pip_cache
|
||||
|
||||
python -m pip --disable-pip-version-check install "pip==21.0.1" "wheel==0.37.0" || exit 1
|
||||
for i in {1..5}; do
|
||||
python -m pip --disable-pip-version-check install "pip==21.0.1" "wheel==0.37.0" && RET=0 && break || RET=$? && sleep 1
|
||||
echo "Failed to install pip and wheel, retrying..."
|
||||
done
|
||||
|
||||
if [ $RET -ne 0 ]; then
|
||||
echo "Failed to install pip and wheel"
|
||||
exit $RET
|
||||
fi
|
||||
|
||||
for i in {1..5}; do
|
||||
# We force reinstall here because when we download the previous venv the shebang
|
||||
# in pipx still points to the old machine's python location.
|
||||
python -m pip --disable-pip-version-check install --force-reinstall --no-deps "pipx==1.2.0" || exit 1
|
||||
pipx install "db-contrib-tool==0.8.3" --pip-args="--no-cache-dir" || exit 1
|
||||
python -m pip --disable-pip-version-check install --force-reinstall --no-deps "pipx==1.2.0" && RET=0 && break || RET=$? && sleep 1
|
||||
echo "Failed to install pipx, retrying..."
|
||||
done
|
||||
|
||||
if [ $RET -ne 0 ]; then
|
||||
echo "Failed to install pipx"
|
||||
exit $RET
|
||||
fi
|
||||
|
||||
for i in {1..5}; do
|
||||
pipx install --force "db-contrib-tool==0.8.3" --pip-args="--no-cache-dir" && RET=0 && break || RET=$? && sleep 1
|
||||
echo "Failed to install db-contrib-tool, retrying..."
|
||||
done
|
||||
|
||||
if [ $RET -ne 0 ]; then
|
||||
echo "Failed to install db-contrib-tool"
|
||||
exit $RET
|
||||
fi
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,79 @@
|
|||
// SERVER-94839
|
||||
// Underflow in window_function memory trackers.
|
||||
|
||||
(function() {
|
||||
'use strict';
|
||||
|
||||
if (!db) {
|
||||
return;
|
||||
}
|
||||
|
||||
const pipeline = [
|
||||
{
|
||||
"$setWindowFields": {
|
||||
"partitionBy": "_id",
|
||||
"sortBy": {"_id": 1},
|
||||
"output": {
|
||||
"result": {
|
||||
"$min": {
|
||||
"$function": {
|
||||
"body": 'function(arg1, arg2){ return arg1; }',
|
||||
"args": [
|
||||
{"$objectToArray": {"str": "enhance dot-com Internal"}},
|
||||
],
|
||||
"lang": 'js'
|
||||
}
|
||||
},
|
||||
"window": {"documents": [9, 17]}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
];
|
||||
|
||||
const documentList = [
|
||||
{"_id": 0, "time": ISODate("2024-07-11T07:35:24.150Z"), "tag": {"s": 2}},
|
||||
{"_id": 7, "time": ISODate("2024-07-11T09:11:14.916Z"), "tag": {"s": 0}},
|
||||
{"_id": 14, "time": ISODate("2024-07-11T11:18:35.921Z"), "tag": {"s": 1}},
|
||||
{"_id": 15, "time": ISODate("2024-07-11T14:10:00.688Z"), "tag": {"s": 1}},
|
||||
{"_id": 16, "time": ISODate("2024-07-11T16:16:01.993Z"), "tag": {"s": 0}},
|
||||
{"_id": 23, "time": ISODate("2024-07-11T17:10:08.909Z"), "tag": {"s": 2}},
|
||||
{"_id": 24, "time": ISODate("2024-07-11T17:40:08.679Z"), "tag": {"s": 0}},
|
||||
{"_id": 32, "time": ISODate("2024-07-11T19:00:52.124Z"), "tag": {"s": 1}},
|
||||
{"_id": 33, "time": ISODate("2024-07-11T19:38:50.454Z"), "tag": {"s": 1}},
|
||||
{"_id": 34, "time": ISODate("2024-07-11T21:06:02.867Z"), "tag": {"s": 2}},
|
||||
{"_id": 35, "time": ISODate("2024-07-11T23:09:53.495Z"), "tag": {"s": 2}},
|
||||
{"_id": 38, "time": ISODate("2024-07-11T23:48:16.384Z"), "tag": {"s": 1}},
|
||||
{"_id": 39, "time": ISODate("2024-07-12T00:33:05.525Z"), "tag": {"s": 0}},
|
||||
{"_id": 40, "time": ISODate("2024-07-12T02:25:56.291Z"), "tag": {"s": 0}},
|
||||
{"_id": 41, "time": ISODate("2024-07-12T03:59:19.521Z"), "tag": {"s": 2}},
|
||||
{"_id": 42, "time": ISODate("2024-07-12T05:21:59.858Z"), "tag": {"s": 2}},
|
||||
{"_id": 43, "time": ISODate("2024-07-12T07:58:08.402Z"), "tag": {"s": 2}},
|
||||
{"_id": 49, "time": ISODate("2024-07-12T10:23:40.626Z"), "tag": {"s": 2}},
|
||||
{"_id": 50, "time": ISODate("2024-07-12T12:59:11.562Z"), "tag": {"s": 1}},
|
||||
{"_id": 52, "time": ISODate("2024-07-12T13:39:22.881Z"), "tag": {"s": 2}},
|
||||
{"_id": 59, "time": ISODate("2024-07-12T14:22:56.676Z"), "tag": {"s": 0}},
|
||||
{"_id": 60, "time": ISODate("2024-07-12T15:46:21.936Z"), "tag": {}},
|
||||
{"_id": 61, "time": ISODate("2024-07-12T17:51:43.398Z"), "tag": {"s": 1}},
|
||||
{"_id": 67, "time": ISODate("2024-07-12T18:57:08.266Z"), "tag": {}},
|
||||
{"_id": 73, "time": ISODate("2024-07-12T19:39:42.416Z"), "tag": {"s": 1}},
|
||||
{"_id": 74, "time": ISODate("2024-07-12T22:15:22.336Z"), "tag": {"s": 1}},
|
||||
{"_id": 75, "time": ISODate("2024-07-12T23:21:39.015Z"), "tag": {"s": 0}},
|
||||
{"_id": 78, "time": ISODate("2024-07-13T00:12:40.680Z"), "tag": {"s": 0}},
|
||||
{"_id": 79, "time": ISODate("2024-07-13T02:27:33.605Z"), "tag": {"s": 0}},
|
||||
{"_id": 87, "time": ISODate("2024-07-13T02:36:02.418Z"), "tag": {"s": 0}}
|
||||
];
|
||||
|
||||
const timeseriesParams = {
|
||||
timeField: 'time',
|
||||
metaField: 'tag',
|
||||
granularity: 'seconds',
|
||||
};
|
||||
|
||||
db.createCollection('test', {timeseries: timeseriesParams});
|
||||
db.test.insert(documentList);
|
||||
|
||||
// Simply test that the query can be fully executed and does not trigger a tripwire assertion.
|
||||
let res = db.test.aggregate(pipeline).toArray();
|
||||
assert(res.length >= 30);
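// Illustrative sketch only (not part of the original repro): a light sanity check on the
// shape of the output. Each result document should still carry its '_id'; the 'result'
// output field may be null or missing for empty windows, so we deliberately do not assert
// on its value here.
res.forEach(function(doc) {
    assert(doc.hasOwnProperty("_id"), tojson(doc));
});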
|
||||
})();
|
||||
|
|
@ -0,0 +1,205 @@
|
|||
// Each entry in the returned array contains a command whose noop write concern behavior needs to be
|
||||
// tested. Entries have the following structure:
|
||||
// {
|
||||
// req: <object>, // Command request object that will result in a noop
|
||||
// // write after the setup function is called.
|
||||
//
|
||||
// setupFunc: <function()>, // Function to run to ensure that the request is a
|
||||
// // noop.
|
||||
//
|
||||
// confirmFunc: <function(res)>, // Function to run after the command is run to ensure
|
||||
// // that it executed properly. Accepts the result of
|
||||
// // the noop request to validate it.
|
||||
// }
|
||||
function getNoopWriteCommands(coll) {
|
||||
const db = coll.getDB();
|
||||
const collName = coll.getName();
|
||||
const commands = [];
|
||||
|
||||
// 'applyOps' where the update has already been done.
|
||||
commands.push({
|
||||
req: {applyOps: [{op: "u", ns: coll.getFullName(), o: {_id: 1}, o2: {_id: 1}}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({_id: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.applied, 1);
|
||||
assert.eq(res.results[0], true);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({_id: 1}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
// 'update' where the document to update does not exist.
|
||||
commands.push({
|
||||
req: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorked(coll.update({a: 1}, {b: 2}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.n, 0);
|
||||
assert.eq(res.nModified, 0);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({b: 2}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
// 'update' where the update has already been done.
|
||||
commands.push({
|
||||
req: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 2}}}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorked(coll.update({a: 1}, {$set: {b: 2}}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.n, 1);
|
||||
assert.eq(res.nModified, 0);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({a: 1, b: 2}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
// 'delete' where the document to delete does not exist.
|
||||
commands.push({
|
||||
req: {delete: collName, deletes: [{q: {a: 1}, limit: 1}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorked(coll.remove({a: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.n, 0);
|
||||
assert.eq(coll.count({a: 1}), 0);
|
||||
}
|
||||
});
|
||||
|
||||
// 'createIndexes' where the index has already been created.
|
||||
    // Not all voting data-bearing nodes are up for this test, so the 'createIndexes' command can't
|
||||
    // succeed with the default index commitQuorum value "votingMembers". We therefore run createIndexes
|
||||
    // with commit quorum "majority".
|
||||
commands.push({
|
||||
req: {
|
||||
createIndexes: collName,
|
||||
indexes: [{key: {a: 1}, name: "a_1"}],
|
||||
commitQuorum: "majority"
|
||||
},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({
|
||||
createIndexes: collName,
|
||||
indexes: [{key: {a: 1}, name: "a_1"}],
|
||||
commitQuorum: "majority"
|
||||
}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
let details = res;
|
||||
if ("raw" in details) {
|
||||
const raw = details.raw;
|
||||
details = raw[Object.keys(raw)[0]];
|
||||
}
|
||||
assert.eq(details.numIndexesBefore, details.numIndexesAfter);
|
||||
assert.eq(details.note, 'all indexes already exist');
|
||||
}
|
||||
});
|
||||
|
||||
// 'findAndModify' where the document to update does not exist.
|
||||
commands.push({
|
||||
req: {findAndModify: collName, query: {a: 1}, update: {b: 2}},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(
|
||||
db.runCommand({findAndModify: collName, query: {a: 1}, update: {b: 2}}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.lastErrorObject.updatedExisting, false);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({b: 2}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
// 'findAndModify' where the update has already been done.
|
||||
commands.push({
|
||||
req: {findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(
|
||||
db.runCommand({findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.lastErrorObject.updatedExisting, true);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({a: 1, b: 2}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
// 'findAndModify' where the document to delete does not exist.
|
||||
commands.push({
|
||||
req: {findAndModify: collName, query: {a: 1}, remove: true},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorked(coll.remove({a: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.lastErrorObject.n, 0);
|
||||
}
|
||||
});
|
||||
|
||||
// 'dropDatabase' where the database has already been dropped.
|
||||
commands.push({
|
||||
req: {dropDatabase: 1},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({dropDatabase: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
}
|
||||
});
|
||||
|
||||
// 'drop' where the collection has already been dropped.
|
||||
commands.push({
|
||||
req: {drop: collName},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({drop: collName}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
|
||||
}
|
||||
});
|
||||
|
||||
// 'create' where the collection has already been created.
|
||||
commands.push({
|
||||
req: {create: collName},
|
||||
setupFunc: function() {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({create: collName}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists);
|
||||
}
|
||||
});
|
||||
|
||||
// 'insert' where the document with the same _id has already been inserted.
|
||||
commands.push({
|
||||
req: {insert: collName, documents: [{_id: 1}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({_id: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res);
|
||||
assert.eq(res.n, 0);
|
||||
assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
|
||||
assert.eq(coll.count({_id: 1}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
return commands;
|
||||
}
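// A minimal usage sketch (illustrative only; it mirrors how the replica set and sharding
// write concern tests in this commit consume the library). 'assertWriteConcernError' is
// assumed to come from jstests/libs/write_concern_util.js.
//
//   const commands = getNoopWriteCommands(db.getSiblingDB("testDB").testColl);
//   commands.forEach(function(cmd) {
//       cmd.req.writeConcern = {w: 3, wtimeout: 1000};  // expected to time out
//       cmd.setupFunc();
//       const res = db.getSiblingDB("testDB").runCommand(cmd.req);
//       assertWriteConcernError(res);
//       cmd.confirmFunc(res);
//   });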
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
/**
|
||||
* Test that queries eligible for OR-pushdown optimization do not crash the server when the
|
||||
* 'disableMatchExpressionOptimization' failpoint is enabled.
|
||||
*
|
||||
* Originally designed to reproduce SERVER-70597.
|
||||
*/
|
||||
(function() {
|
||||
"use strict";
|
||||
|
||||
const conn = MongoRunner.runMongod();
|
||||
const db = conn.getDB("test");
|
||||
|
||||
assert.commandWorked(
|
||||
db.adminCommand({configureFailPoint: "disableMatchExpressionOptimization", mode: "alwaysOn"}));
|
||||
|
||||
const coll = db.getCollection(jsTestName());
|
||||
coll.drop();
|
||||
assert.commandWorked(coll.createIndex({a: 1, b: 1}));
|
||||
|
||||
let docs = [];
|
||||
for (let a = 1; a <= 3; ++a) {
|
||||
for (let b = 1; b <= 3; ++b) {
|
||||
docs.push({a, b});
|
||||
}
|
||||
}
|
||||
assert.commandWorked(coll.insert(docs));
|
||||
|
||||
// This query has a nested $and, and a one-argument contained $or. Normally we canonicalize this
|
||||
// predicate by flattening the $and and unwrapping the $or. The OR-pushdown optimization assumes the
|
||||
// predicate has been canonicalized, but this assumption is broken by the failpoint.
|
||||
const results = coll.aggregate([
|
||||
{$match: {$and: [{$and: [{a: 2}]}, {$or: [{b: 3}]}]}},
|
||||
{$unset: "_id"},
|
||||
])
|
||||
.toArray();
|
||||
assert.eq(results, [{a: 2, b: 3}]);
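// Illustrative follow-up (an assumption, not part of the original test): with match
// expression optimization enabled, the nested predicate above canonicalizes to the flat
// form {a: 2, b: 3}, which should select the same single document.
const canonicalResults = coll.aggregate([
                                 {$match: {a: 2, b: 3}},
                                 {$unset: "_id"},
                             ])
                             .toArray();
assert.eq(canonicalResults, [{a: 2, b: 3}]);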
|
||||
|
||||
MongoRunner.stopMongod(conn);
|
||||
})();
|
||||
|
|
@ -22,14 +22,6 @@ function getNumCursorsLessThan30Seconds() {
|
|||
return db.serverStatus().metrics.cursor.lifespan.lessThan30Seconds;
|
||||
}
|
||||
|
||||
function getNumCursorsLessThan1Minute() {
|
||||
return db.serverStatus().metrics.cursor.lifespan.lessThan1Minute;
|
||||
}
|
||||
|
||||
function getNumCursorsLessThan10Minutes() {
|
||||
return db.serverStatus().metrics.cursor.lifespan.lessThan10Minutes;
|
||||
}
|
||||
|
||||
for (let i = 0; i < 40; i++) {
|
||||
coll.insert({a: i, b: "field b"});
|
||||
}
|
||||
|
|
@ -38,8 +30,6 @@ const initialNumCursorsLt1s = getNumCursorsLessThan1Second();
|
|||
const initialNumCursorsLt5s = getNumCursorsLessThan5Seconds();
|
||||
const initialNumCursorsLt15s = getNumCursorsLessThan15Seconds();
|
||||
const initialNumCursorsLt30s = getNumCursorsLessThan30Seconds();
|
||||
const initialNumCursorsLt1m = getNumCursorsLessThan1Minute();
|
||||
const initialNumCursorsLt10m = getNumCursorsLessThan10Minutes();
|
||||
|
||||
// Since we aren't guaranteed perfect timings, the checks in this test have been relaxed to window
|
||||
// sizes of 30s. For example, a cursor that is expected to die in under 5s may actually take longer
|
||||
|
|
@ -65,21 +55,4 @@ for (let i = 0; i < 3; i++) {
|
|||
}
|
||||
|
||||
assert.eq(cursorsDeadSinceStartLt30Seconds(), 4);
|
||||
|
||||
const cursorLt1Minute = coll.find().batchSize(2);
|
||||
const cursorLt10Minutes = coll.aggregate([], {cursor: {batchSize: 2}});
|
||||
cursorLt1Minute.next();
|
||||
cursorLt10Minutes.next();
|
||||
|
||||
sleep(31000); // Sleep for 31 s.
|
||||
while (cursorLt1Minute.hasNext()) {
|
||||
cursorLt1Minute.next();
|
||||
}
|
||||
assert.eq(getNumCursorsLessThan1Minute() - initialNumCursorsLt1m, 1);
|
||||
|
||||
sleep(30000); // Sleep another 30s, so the total should be greater than 1m and less than 10m.
|
||||
while (cursorLt10Minutes.hasNext()) {
|
||||
cursorLt10Minutes.next();
|
||||
}
|
||||
assert.eq(getNumCursorsLessThan10Minutes() - initialNumCursorsLt10m, 1);
|
||||
}());
|
||||
|
|
@ -0,0 +1,70 @@
|
|||
// Makes sure an old heartbeat that is being processed when primary catchup starts does not cause
|
||||
// primary catchup to think we're already caught up.
|
||||
|
||||
(function() {
|
||||
|
||||
load('jstests/libs/fail_point_util.js');
|
||||
load('jstests/replsets/rslib.js');
|
||||
|
||||
var name = TestData.testName;
|
||||
var rst = new ReplSetTest({
|
||||
name: name,
|
||||
nodes: 3,
|
||||
// We're not testing catchup takeover in this test, and in the case where primary catchup fails,
|
||||
// catchup takeover may cause an additional election and muddle the results. Setting
|
||||
// catchUpTakeoverDelayMillis to -1 disables catchup takeover.
|
||||
settings: {chainingAllowed: true, catchUpTakeoverDelayMillis: -1},
|
||||
nodeOptions: {
|
||||
"setParameter": {
|
||||
"logComponentVerbosity": tojsononeline({"replication": {"verbosity": 2}}),
|
||||
},
|
||||
},
|
||||
useBridge: true,
|
||||
waitForKeys: true
|
||||
});
|
||||
|
||||
rst.startSet();
|
||||
rst.initiate();
|
||||
rst.awaitSecondaryNodes();
|
||||
|
||||
var primary = rst.getPrimary();
|
||||
var primaryColl = primary.getDB("test").coll;
|
||||
|
||||
// The default WC is majority, and this test can't exercise catchup properly if majority writes are used.
|
||||
assert.commandWorked(primary.adminCommand(
|
||||
{setDefaultRWConcern: 1, defaultWriteConcern: {w: 1}, writeConcern: {w: "majority"}}));
|
||||
|
||||
assert(primary.host == rst.nodes[0].host);
|
||||
// Make node 1 (the node which will become the new primary) chain from node 2. Don't allow
|
||||
// node 1 to switch back.
|
||||
const forceSyncSource = configureFailPoint(
|
||||
rst.nodes[1], "forceSyncSourceCandidate", {"hostAndPort": rst.nodes[2].host});
|
||||
syncFrom(rst.nodes[2], rst.nodes[0], rst);
|
||||
syncFrom(rst.nodes[1], rst.nodes[2], rst);
|
||||
const RBIDBeforeStepUp = assert.commandWorked(primary.adminCommand({replSetGetRBID: 1}));
|
||||
|
||||
// Disconnect the primary from the node syncing from it.
|
||||
primary.disconnect(rst.nodes[2]);
|
||||
// Get a heartbeat from the original primary "stuck" in the new primary.
|
||||
const newPrimary = rst.nodes[1];
|
||||
let hbfp =
|
||||
configureFailPoint(newPrimary, "pauseInHandleHeartbeatResponse", {"target": primary.host});
|
||||
hbfp.wait();
|
||||
// Put the original primary ahead of the secondaries.
|
||||
assert.commandWorked(primaryColl.insert({_id: 1}));
|
||||
jsTestLog("Stepping up new primary");
|
||||
assert.commandWorked(newPrimary.adminCommand({replSetStepUp: 1}));
|
||||
// Allow the "stuck" heartbeat to proceed.
|
||||
hbfp.off();
|
||||
// The step-up command waits for the election to complete, but not catch-up. Reconnect the old
|
||||
// primary to the new primary's sync source to allow replication.
|
||||
primary.reconnect(rst.nodes[2]);
|
||||
rst.awaitReplication();
|
||||
// The new primary should still be primary.
|
||||
assert.eq(newPrimary.host, rst.getPrimary().host);
|
||||
// No rollbacks should have happened.
|
||||
const RBIDAfterStepUp = assert.commandWorked(primary.adminCommand({replSetGetRBID: 1}));
|
||||
assert.eq(RBIDBeforeStepUp.rbid, RBIDAfterStepUp.rbid);
|
||||
forceSyncSource.off();
|
||||
rst.stopSet();
|
||||
})();
|
||||
|
|
@ -8,7 +8,7 @@
|
|||
(function() {
|
||||
"use strict";
|
||||
load('jstests/libs/write_concern_util.js');
|
||||
load('jstests/noPassthrough/libs/index_build.js');
|
||||
load('jstests/libs/noop_write_commands.js');
|
||||
|
||||
var name = 'noop_writes_wait_for_write_concern';
|
||||
var replTest = new ReplSetTest({
|
||||
|
|
@ -28,183 +28,13 @@ var dbName = 'testDB';
|
|||
var db = primary.getDB(dbName);
|
||||
var collName = 'testColl';
|
||||
var coll = db[collName];
|
||||
const commands = getNoopWriteCommands(coll);
|
||||
|
||||
function dropTestCollection() {
|
||||
coll.drop();
|
||||
assert.eq(0, coll.find().itcount(), "test collection not empty");
|
||||
}
|
||||
|
||||
// Each entry in this array contains a command whose noop write concern behavior needs to be
|
||||
// tested. Entries have the following structure:
|
||||
// {
|
||||
// req: <object>, // Command request object that will result in a noop
|
||||
// // write after the setup function is called.
|
||||
//
|
||||
// setupFunc: <function()>, // Function to run to ensure that the request is a
|
||||
// // noop.
|
||||
//
|
||||
// confirmFunc: <function(res)>, // Function to run after the command is run to ensure
|
||||
// // that it executed properly. Accepts the result of
|
||||
// // the noop request to validate it.
|
||||
// }
|
||||
var commands = [];
|
||||
|
||||
commands.push({
|
||||
req: {applyOps: [{op: "u", ns: coll.getFullName(), o: {_id: 1}, o2: {_id: 1}}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({_id: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.applied, 1);
|
||||
assert.eq(res.results[0], true);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({_id: 1}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
// 'update' where the document to update does not exist.
|
||||
commands.push({
|
||||
req: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorked(coll.update({a: 1}, {b: 2}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.n, 0);
|
||||
assert.eq(res.nModified, 0);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({b: 2}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
// 'update' where the update has already been done.
|
||||
commands.push({
|
||||
req: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 2}}}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorked(coll.update({a: 1}, {$set: {b: 2}}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.n, 1);
|
||||
assert.eq(res.nModified, 0);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({a: 1, b: 2}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
commands.push({
|
||||
req: {delete: collName, deletes: [{q: {a: 1}, limit: 1}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorked(coll.remove({a: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.n, 0);
|
||||
assert.eq(coll.count({a: 1}), 0);
|
||||
}
|
||||
});
|
||||
|
||||
// All voting data bearing nodes are not up for this test. So 'createIndexes' command can't succeed
|
||||
// with the default index commitQuorum value "votingMembers". So, running createIndexes cmd using
|
||||
// commit quorum "majority".
|
||||
commands.push({
|
||||
req: {createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}], commitQuorum: "majority"},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({
|
||||
createIndexes: collName,
|
||||
indexes: [{key: {a: 1}, name: "a_1"}],
|
||||
commitQuorum: "majority"
|
||||
}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.numIndexesBefore, res.numIndexesAfter);
|
||||
assert.eq(res.note, 'all indexes already exist');
|
||||
}
|
||||
});
|
||||
|
||||
// 'findAndModify' where the document to update does not exist.
|
||||
commands.push({
|
||||
req: {findAndModify: collName, query: {a: 1}, update: {b: 2}},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(
|
||||
db.runCommand({findAndModify: collName, query: {a: 1}, update: {b: 2}}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.lastErrorObject.updatedExisting, false);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({b: 2}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
// 'findAndModify' where the update has already been done.
|
||||
commands.push({
|
||||
req: {findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(
|
||||
db.runCommand({findAndModify: collName, query: {a: 1}, update: {$set: {b: 2}}}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
assert.eq(res.lastErrorObject.updatedExisting, true);
|
||||
assert.eq(coll.find().itcount(), 1);
|
||||
assert.eq(coll.count({a: 1, b: 2}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
commands.push({
|
||||
req: {dropDatabase: 1},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({dropDatabase: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(res);
|
||||
}
|
||||
});
|
||||
|
||||
commands.push({
|
||||
req: {drop: collName},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({a: 1}));
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({drop: collName}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandFailedWithCode(res, ErrorCodes.NamespaceNotFound);
|
||||
}
|
||||
});
|
||||
|
||||
commands.push({
|
||||
req: {create: collName},
|
||||
setupFunc: function() {
|
||||
assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({create: collName}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists);
|
||||
}
|
||||
});
|
||||
|
||||
commands.push({
|
||||
req: {insert: collName, documents: [{_id: 1}]},
|
||||
setupFunc: function() {
|
||||
assert.commandWorked(coll.insert({_id: 1}));
|
||||
},
|
||||
confirmFunc: function(res) {
|
||||
assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res);
|
||||
assert.eq(res.n, 0);
|
||||
assert.eq(res.writeErrors[0].code, ErrorCodes.DuplicateKey);
|
||||
assert.eq(coll.count({_id: 1}), 1);
|
||||
}
|
||||
});
|
||||
|
||||
function testCommandWithWriteConcern(cmd) {
|
||||
// Provide a small wtimeout that we expect to time out.
|
||||
cmd.req.writeConcern = {w: 3, wtimeout: 1000};
|
||||
|
|
|
|||
|
|
@ -0,0 +1,88 @@
|
|||
// Sharding version of jstests/replsets/noop_writes_wait_for_write_concern.js.
|
||||
// @tags: [
|
||||
// multiversion_incompatible
|
||||
// ]
|
||||
|
||||
load('jstests/libs/write_concern_util.js');
|
||||
load('jstests/libs/noop_write_commands.js');
|
||||
|
||||
// Create a shard with 3 nodes and stop one of the secondaries. This will allow majority write
|
||||
// concern to be met, but w: 3 will always time out.
|
||||
var st = new ShardingTest({mongos: 1, shards: 1, rs: {nodes: 3}});
|
||||
const secondary = st.rs0.getSecondary();
|
||||
st.rs0.stop(secondary);
|
||||
|
||||
const mongos = st.s;
|
||||
var dbName = 'testDB';
|
||||
var db = mongos.getDB(dbName);
|
||||
var collName = 'testColl';
|
||||
var coll = db[collName];
|
||||
const commands = getNoopWriteCommands(coll);
|
||||
|
||||
function dropTestCollection() {
|
||||
coll.drop();
|
||||
assert.eq(0, coll.find().itcount(), "test collection not empty");
|
||||
}
|
||||
|
||||
function testCommandWithWriteConcern(cmd) {
|
||||
if ("applyOps" in cmd.req) {
|
||||
// applyOps is not available through mongos.
|
||||
return;
|
||||
}
|
||||
|
||||
if ("findAndModify" in cmd.req) {
|
||||
// TODO SERVER-80103: findAndModify does not return write concern errors in the presence of
|
||||
// other errors.
|
||||
return;
|
||||
}
|
||||
|
||||
if ("dropDatabase" in cmd.req || "drop" in cmd.req) {
|
||||
// TODO SERVER-80103: dropDatabase and dropCollection do not respect user supplied write
|
||||
// concern and instead always use majority.
|
||||
return;
|
||||
}
|
||||
|
||||
if ("create" in cmd.req) {
|
||||
// TODO SERVER-80103: create returns WriteConcernFailed as an ordinary error code instead of
|
||||
// using the writeConcernError field.
|
||||
return;
|
||||
}
|
||||
|
||||
// Provide a small wtimeout that we expect to time out.
|
||||
cmd.req.writeConcern = {w: 3, wtimeout: 1000};
|
||||
jsTest.log("Testing command: " + tojson(cmd.req));
|
||||
|
||||
dropTestCollection();
|
||||
|
||||
cmd.setupFunc();
|
||||
|
||||
    // We run the command on a different connection. If the command were run on the
|
||||
// same connection, then the client last op for the noop write would be set by the setup
|
||||
// operation. By using a fresh connection the client last op begins as null.
|
||||
// This test explicitly tests that write concern for noop writes works when the
|
||||
// client last op has not already been set by a duplicate operation.
|
||||
const shell2 = new Mongo(mongos.host);
|
||||
|
||||
// We check the error code of 'res' in the 'confirmFunc'.
|
||||
const res = "bulkWrite" in cmd.req ? shell2.adminCommand(cmd.req)
|
||||
: shell2.getDB(dbName).runCommand(cmd.req);
|
||||
|
||||
try {
|
||||
// Tests that the command receives a write concern error. If we don't wait for write
|
||||
// concern on noop writes then we won't get a write concern error.
|
||||
assertWriteConcernError(res);
|
||||
cmd.confirmFunc(res);
|
||||
} catch (e) {
|
||||
// Make sure that we print out the response.
|
||||
printjson(res);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
commands.forEach(function(cmd) {
|
||||
testCommandWithWriteConcern(cmd);
|
||||
});
|
||||
|
||||
// Restart the node so that consistency checks performed by st.stop() can succeed.
|
||||
st.rs0.restart(secondary);
|
||||
st.stop();
|
||||
|
|
@ -8,8 +8,15 @@ var sessionsDb = "config";
|
|||
var refresh = {refreshLogicalSessionCacheNow: 1};
|
||||
var startSession = {startSession: 1};
|
||||
|
||||
// Create a cluster with 2 shards.
|
||||
var cluster = new ShardingTest({shards: 2});
|
||||
var cluster = new ShardingTest({
|
||||
mongos: [{setParameter: {sessionWriteConcernTimeoutSystemMillis: 0, sessionMaxBatchSize: 500}}],
|
||||
shards: 2,
|
||||
rs: {setParameter: {sessionWriteConcernTimeoutSystemMillis: 0, sessionMaxBatchSize: 500}},
|
||||
other: {
|
||||
configOptions:
|
||||
{setParameter: {sessionWriteConcernTimeoutSystemMillis: 0, sessionMaxBatchSize: 500}}
|
||||
}
|
||||
});
|
||||
|
||||
// Test that we can refresh without any sessions, as a sanity check.
|
||||
{
|
||||
|
|
|
|||
|
|
@ -0,0 +1,69 @@
|
|||
/**
|
||||
* Checks that a replica set started with --shardsvr can process aggregations on views.
|
||||
* @tags: [
|
||||
* requires_persistence,
|
||||
* multiversion_incompatible,
|
||||
* ]
|
||||
*/
|
||||
(function() {
|
||||
'use strict';
|
||||
|
||||
load("jstests/sharding/libs/sharding_state_test.js");
|
||||
|
||||
const kRSName = 'rs';
|
||||
const kDbName = 'db';
|
||||
const kCollName = 'coll';
|
||||
const kViewName = 'collView';
|
||||
const kNDocs = 25;
|
||||
const nss = kDbName + '.' + kCollName;
|
||||
|
||||
const st = new ShardingTest({shards: 0, mongos: 2});
|
||||
|
||||
let rst = new ReplSetTest({name: kRSName, nodes: 1});
|
||||
rst.startSet();
|
||||
rst.initiate();
|
||||
|
||||
let primary = rst.getPrimary();
|
||||
let db = primary.getDB(kDbName);
|
||||
|
||||
assert.commandWorked(db.createCollection(kCollName));
|
||||
assert.commandWorked(db.getCollection(kCollName).createIndex({x: 1}));
|
||||
let coll = db.getCollection(kCollName);
|
||||
for (let i = 0; i < kNDocs; ++i) {
|
||||
assert.commandWorked(coll.insert({x: i}));
|
||||
}
|
||||
assert.commandWorked(db.createView(kViewName, kCollName, [{$match: {}}]));
|
||||
|
||||
primary.getDB('admin').shutdownServer();
|
||||
rst.restart(0, {shardsvr: ''});
|
||||
rst.awaitReplication();
|
||||
|
||||
db = rst.getPrimary().getDB(kDbName);
|
||||
let res = assert.commandWorked(db.runCommand({
|
||||
aggregate: kViewName,
|
||||
pipeline: [{$match: {}}, {$group: {_id: null, count: {$sum: 1}}}],
|
||||
cursor: {}
|
||||
}));
|
||||
assert.eq(kNDocs, res.cursor.firstBatch[0].count);
|
||||
|
||||
assert.commandWorked(st.s0.adminCommand({addShard: rst.getURL()}));
|
||||
res = assert.commandWorked(st.s0.getDB(kDbName).runCommand({
|
||||
aggregate: kViewName,
|
||||
pipeline: [{$match: {}}, {$group: {_id: null, count: {$sum: 1}}}],
|
||||
cursor: {}
|
||||
}));
|
||||
assert.eq(kNDocs, res.cursor.firstBatch[0].count);
|
||||
assert.eq(kNDocs, st.s1.getCollection(nss).countDocuments({}));
|
||||
|
||||
// Test initial sync.
|
||||
const staleMongos = st.s1;
|
||||
const mongos = st.s0;
|
||||
assert.commandWorked(mongos.adminCommand({enableSharding: kDbName}));
|
||||
assert.commandWorked(mongos.adminCommand({shardCollection: nss, key: {x: 1}}));
|
||||
ShardingStateTest.addReplSetNode({replSet: rst, serverTypeFlag: 'shardsvr'});
|
||||
staleMongos.setReadPref('secondary');
|
||||
assert.eq(kNDocs, staleMongos.getDB(kDbName).getCollection(kCollName).countDocuments({}));
|
||||
|
||||
st.stop();
|
||||
rst.stopSet();
|
||||
}());
|
||||
|
|
@ -27,7 +27,13 @@ function assertEqualObj(lhs, rhs, keysToIgnore) {
|
|||
}
|
||||
}
|
||||
|
||||
const st = new ShardingTest({shard: 2});
|
||||
const st = new ShardingTest({
|
||||
shard: 2,
|
||||
configOptions:
|
||||
{setParameter:
|
||||
{'reshardingCriticalSectionTimeoutMillis': 24 * 60 * 60 * 1000 /* 1 day */}}
|
||||
});
|
||||
|
||||
const configRSPrimary = st.configRS.getPrimary();
|
||||
|
||||
const dbName = "testDb";
|
||||
|
|
|
|||
|
|
@ -125,6 +125,10 @@ for (let i = 0; i < 3; i++) {
|
|||
lastUseValues[j] = sessionsCollectionArray[j].lastUse;
|
||||
}
|
||||
}
|
||||
|
||||
// Date_t has the granularity of milliseconds, so we have to make sure we don't run this loop
|
||||
// faster than that.
|
||||
sleep(10);
|
||||
}
|
||||
|
||||
// 3. Verify that letting sessions expire (simulated by manual deletion) will kill their
|
||||
|
|
|
|||
|
|
@ -32,6 +32,9 @@ import re
|
|||
|
||||
from typing import Callable, List, Dict
|
||||
|
||||
# Note: The auto-retry settings are prefixed w/ "OOM", but since it's an unconditional retry,
|
||||
# it's not really OOM-specific. We're keeping the OOM prefix to make the code change simpler.
|
||||
# (This custom retry logic will go away once the build is fully Bazelified).
|
||||
|
||||
def command_spawn_func(sh: str, escape: Callable[[str], str], cmd: str, args: List, env: Dict,
|
||||
target: List, source: List):
|
||||
|
|
@ -39,11 +42,6 @@ def command_spawn_func(sh: str, escape: Callable[[str], str], cmd: str, args: Li
|
|||
success = False
|
||||
|
||||
build_env = target[0].get_build_env()
|
||||
oom_messages = [
|
||||
re.compile(msg, re.MULTILINE | re.DOTALL)
|
||||
for msg in build_env.get('OOM_RETRY_MESSAGES', [])
|
||||
]
|
||||
oom_returncodes = [int(returncode) for returncode in build_env.get('OOM_RETRY_RETURNCODES', [])]
|
||||
max_retries = build_env.get('OOM_RETRY_ATTEMPTS', 10)
|
||||
oom_max_retry_delay = build_env.get('OOM_RETRY_MAX_DELAY_SECONDS', 120)
|
||||
|
||||
|
|
@ -63,12 +61,10 @@ def command_spawn_func(sh: str, escape: Callable[[str], str], cmd: str, args: Li
|
|||
except subprocess.CalledProcessError as exc:
|
||||
print(f"{os.path.basename(__file__)} captured error:")
|
||||
print(exc.stdout)
|
||||
if any([re.findall(oom_message, exc.stdout) for oom_message in oom_messages]) or any(
|
||||
[oom_returncode == exc.returncode for oom_returncode in oom_returncodes]):
|
||||
retries += 1
|
||||
retry_delay = int((time.time() - start_time) +
|
||||
oom_max_retry_delay * random.random())
|
||||
print(f"Ran out of memory while trying to build {target[0]}", )
|
||||
print(f"Failed while trying to build {target[0]}", )
|
||||
if retries <= max_retries:
|
||||
print(f"trying again in {retry_delay} seconds with retry attempt {retries}")
|
||||
time.sleep(retry_delay)
|
||||
|
|
|
|||
|
|
@ -47,7 +47,8 @@ void saslSetError(sasl_conn_t* conn, const std::string& msg) {
|
|||
}
|
||||
|
||||
SaslClientSession* createCyrusSaslClientSession(const std::string& mech) {
|
||||
if ((mech == "SCRAM-SHA-1") || (mech == "SCRAM-SHA-256") || mech == "MONGODB-AWS") {
|
||||
if ((mech == "SCRAM-SHA-1") || (mech == "SCRAM-SHA-256") || (mech == "PLAIN") ||
|
||||
mech == "MONGODB-AWS") {
|
||||
return new NativeSaslClientSession();
|
||||
}
|
||||
return new CyrusSaslClientSession();
|
||||
|
|
|
|||
|
|
@ -1442,6 +1442,7 @@ env.Library(
|
|||
target='sessions_collection',
|
||||
source=[
|
||||
'sessions_collection.cpp',
|
||||
'sessions_server_parameters.idl'
|
||||
],
|
||||
LIBDEPS=[
|
||||
'$BUILD_DIR/mongo/base',
|
||||
|
|
|
|||
|
|
@ -275,67 +275,6 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
TEST_F(CatalogReadCopyUpdateTest, ConcurrentCatalogWriteBatches) {
|
||||
// Start threads and perform write at the same time, record catalog instance observed
|
||||
constexpr int32_t NumThreads = 4;
|
||||
|
||||
unittest::Barrier barrier(NumThreads);
|
||||
std::array<const CollectionCatalog*, NumThreads> catalogInstancesObserved;
|
||||
std::array<NamespaceString, NumThreads> namespacesInserted;
|
||||
AtomicWord<int32_t> threadIndex{0};
|
||||
auto job = [&]() {
|
||||
// Determine a unique index for this worker, we use this to be able to write our results
|
||||
// without any synchronization.
|
||||
auto index = threadIndex.fetchAndAdd(1);
|
||||
|
||||
// Prepare a Collection instance that the writer will insert.
|
||||
NamespaceString nssToInsert("test", fmt::format("coll{}", index));
|
||||
auto collectionToInsert = std::make_shared<CollectionMock>(nssToInsert);
|
||||
namespacesInserted[index] = std::move(nssToInsert);
|
||||
|
||||
// Wait for all worker threads to reach this point before proceeding.
|
||||
barrier.countDownAndWait();
|
||||
|
||||
// The first thread that enters write() will begin copying the catalog instance, other
|
||||
// threads that enter while this copy is being made will be enqueued. When the thread
|
||||
// copying the catalog instance finishes the copy it will execute all writes using the same
|
||||
// writable catalog instance.
|
||||
//
|
||||
// To minimize the risk of this test being flaky we need to make the catalog copy slow
|
||||
// enough so the other threads properly enter the queue state. We achieve this by having a
|
||||
// large numbers of collections in the catalog.
|
||||
CollectionCatalog::write(getServiceContext(), [&](CollectionCatalog& writableCatalog) {
|
||||
catalogInstancesObserved[index] = &writableCatalog;
|
||||
|
||||
// Perform a write, we will later verify that all writes are observable even when
|
||||
// workers are batched together.
|
||||
writableCatalog.registerCollection(operationContext(), std::move(collectionToInsert));
|
||||
});
|
||||
};
|
||||
|
||||
std::array<stdx::thread, NumThreads> threads;
|
||||
for (auto&& thread : threads) {
|
||||
thread = stdx::thread(job);
|
||||
}
|
||||
for (auto&& thread : threads) {
|
||||
thread.join();
|
||||
}
|
||||
|
||||
// Verify that some batching was achieved where at least two threads observed the same catalog
|
||||
// instance. We do this by sorting the array, removing all duplicates and last verify that we
|
||||
// have less elements remaining than number of threads.
|
||||
std::sort(catalogInstancesObserved.begin(), catalogInstancesObserved.end());
|
||||
auto it = std::unique(catalogInstancesObserved.begin(), catalogInstancesObserved.end());
|
||||
ASSERT_LT(std::distance(catalogInstancesObserved.begin(), it), NumThreads);
|
||||
|
||||
// Check that all Collections we inserted are in the final Catalog instance, no overwrites
|
||||
// occured.
|
||||
auto catalog = CollectionCatalog::get(getServiceContext());
|
||||
for (auto&& nss : namespacesInserted) {
|
||||
ASSERT(catalog->lookupCollectionByNamespace(operationContext(), nss));
|
||||
}
|
||||
}
|
||||
|
||||
TEST_F(CatalogReadCopyUpdateTest, ConcurrentCatalogWriteBatchingMayThrow) {
|
||||
// Start threads and perform write that will throw at the same time
|
||||
constexpr int32_t NumThreads = 4;
|
||||
|
|
|
|||
|
|
@ -138,22 +138,6 @@ T DbMessage::readAndAdvance() {
|
|||
return t;
|
||||
}
|
||||
|
||||
namespace {
|
||||
template <typename Func>
|
||||
Message makeMessage(NetworkOp op, Func&& bodyBuilder) {
|
||||
BufBuilder b;
|
||||
b.skip(sizeof(MSGHEADER::Layout));
|
||||
|
||||
bodyBuilder(b);
|
||||
|
||||
const int size = b.len();
|
||||
auto out = Message(b.release());
|
||||
out.header().setOperation(op);
|
||||
out.header().setLen(size);
|
||||
return out;
|
||||
}
|
||||
} // namespace
|
||||
|
||||
Message makeInsertMessage(StringData ns, const BSONObj* objs, size_t count, int flags) {
|
||||
return makeMessage(dbInsert, [&](BufBuilder& b) {
|
||||
int reservedFlags = 0;
|
||||
|
|
|
|||
|
|
@ -512,4 +512,18 @@ DbResponse replyToQuery(int queryResultFlags,
|
|||
inline DbResponse replyToQuery(const BSONObj& obj, int queryResultFlags = 0) {
|
||||
return replyToQuery(queryResultFlags, obj.objdata(), obj.objsize(), /*nReturned*/ 1);
|
||||
}
|
||||
|
||||
template <typename Func>
|
||||
Message makeMessage(NetworkOp op, Func&& bodyBuilder) {
|
||||
BufBuilder b;
|
||||
b.skip(sizeof(MSGHEADER::Layout));
|
||||
|
||||
bodyBuilder(b);
|
||||
|
||||
const int size = b.len();
|
||||
auto out = Message(b.release());
|
||||
out.header().setOperation(op);
|
||||
out.header().setLen(size);
|
||||
return out;
|
||||
}
|
||||
} // namespace mongo
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@
|
|||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <sys/resource.h>
|
||||
#include <vector>
|
||||
|
||||
#include "mongo/base/status.h"
|
||||
|
|
@ -40,6 +41,8 @@
|
|||
#include "mongo/bson/bsonobjbuilder.h"
|
||||
#include "mongo/db/ftdc/collector.h"
|
||||
#include "mongo/db/ftdc/controller.h"
|
||||
#include "mongo/util/errno_util.h"
|
||||
#include "mongo/util/functional.h"
|
||||
#include "mongo/util/processinfo.h"
|
||||
#include "mongo/util/procparser.h"
|
||||
|
||||
|
|
@ -165,10 +168,66 @@ private:
|
|||
std::vector<StringData> _disksStringData;
|
||||
};
|
||||
|
||||
class SimpleFunctionCollector final : public FTDCCollectorInterface {
|
||||
public:
|
||||
SimpleFunctionCollector(StringData name,
|
||||
unique_function<void(OperationContext*, BSONObjBuilder&)> collectFn)
|
||||
: _name(name.toString()), _collectFn(std::move(collectFn)) {}
|
||||
|
||||
void collect(OperationContext* opCtx, BSONObjBuilder& builder) override {
|
||||
_collectFn(opCtx, builder);
|
||||
}
|
||||
|
||||
std::string name() const override {
|
||||
return _name;
|
||||
}
|
||||
|
||||
private:
|
||||
std::string _name;
|
||||
unique_function<void(OperationContext*, BSONObjBuilder&)> _collectFn;
|
||||
};
|
||||
|
||||
|
||||
void collectUlimit(int resource, StringData resourceName, BSONObjBuilder& builder) {
|
||||
|
||||
struct rlimit rlim;
|
||||
|
||||
BSONObjBuilder subObjBuilder(builder.subobjStart(resourceName));
|
||||
|
||||
if (!getrlimit(resource, &rlim)) {
|
||||
subObjBuilder.append("soft", static_cast<int64_t>(rlim.rlim_cur));
|
||||
subObjBuilder.append("hard", static_cast<int64_t>(rlim.rlim_max));
|
||||
} else {
|
||||
auto ec = lastSystemError();
|
||||
|
||||
subObjBuilder.append("error", errorMessage(ec));
|
||||
}
|
||||
}
|
||||
|
||||
void collectUlimits(OperationContext*, BSONObjBuilder& builder) {
|
||||
collectUlimit(RLIMIT_CPU, "cpuTime_secs"_sd, builder);
|
||||
collectUlimit(RLIMIT_FSIZE, "fileSize_blocks"_sd, builder);
|
||||
collectUlimit(RLIMIT_DATA, "dataSegSize_kb"_sd, builder);
|
||||
collectUlimit(RLIMIT_STACK, "stackSize_kb"_sd, builder);
|
||||
collectUlimit(RLIMIT_CORE, "coreFileSize_blocks"_sd, builder);
|
||||
collectUlimit(RLIMIT_RSS, "residentSize_kb"_sd, builder);
|
||||
collectUlimit(RLIMIT_NOFILE, "fileDescriptors"_sd, builder);
|
||||
collectUlimit(RLIMIT_AS, "addressSpace_kb"_sd, builder);
|
||||
collectUlimit(RLIMIT_NPROC, "processes"_sd, builder);
|
||||
collectUlimit(RLIMIT_MEMLOCK, "memLock_kb"_sd, builder);
|
||||
collectUlimit(RLIMIT_LOCKS, "fileLocks"_sd, builder);
|
||||
collectUlimit(RLIMIT_SIGPENDING, "pendingSignals"_sd, builder);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
|
||||
void installSystemMetricsCollector(FTDCController* controller) {
|
||||
controller->addPeriodicCollector(std::make_unique<LinuxSystemMetricsCollector>());
|
||||
|
||||
// Collect ULimits settings on rotation.
|
||||
controller->addOnRotateCollector(
|
||||
std::make_unique<SimpleFunctionCollector>("ulimits", collectUlimits));
|
||||
}
|
||||
|
||||
} // namespace mongo
|
||||
|
|
|
|||
|
|
@ -172,6 +172,7 @@ structs:
|
|||
any empty timestamps (Timestamp(0,0)) in 'documents' or 'u' will not
|
||||
be replaced by the current time and instead will be preserved as-is."
|
||||
type: optionalBool
|
||||
unstable: true
|
||||
|
||||
UpdateOpEntry:
|
||||
description: "Parser for the entries in the 'updates' array of an update command."
|
||||
|
|
@ -454,3 +455,4 @@ commands:
|
|||
empty timestamps (Timestamp(0, 0)) in the update will not be replaced
|
||||
by the current time and instead will be preserved as-is."
|
||||
type: optionalBool
|
||||
unstable: true
|
||||
|
|
|
|||
|
|
@ -51,11 +51,19 @@ public:
|
|||
virtual Value getValue() const = 0;
|
||||
virtual void reset() = 0;
|
||||
size_t getApproximateSize() {
|
||||
tassert(5414200, "_memUsageBytes not set for function", _memUsageBytes != 0);
|
||||
return _memUsageBytes;
|
||||
}
|
||||
|
||||
protected:
|
||||
// Decrease _memUsageBytes by size, but prevent it from underflowing.
|
||||
void decreaseMemUsageBytes(size_t size) {
|
||||
if (size <= _memUsageBytes) {
|
||||
_memUsageBytes -= size;
|
||||
} else {
|
||||
_memUsageBytes = 0;
|
||||
}
|
||||
}
|
||||
|
||||
ExpressionContext* _expCtx;
|
||||
size_t _memUsageBytes = 0;
|
||||
};
|
||||
|
|
|
|||
|
|
@ -59,7 +59,7 @@ public:
|
|||
auto iter = _values.find(std::move(value));
|
||||
tassert(
|
||||
5423800, "Can't remove from an empty WindowFunctionAddToSet", iter != _values.end());
|
||||
_memUsageBytes -= iter->getApproximateSize();
|
||||
decreaseMemUsageBytes(iter->getApproximateSize());
|
||||
_values.erase(iter);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -90,7 +90,7 @@ void WindowFunctionIntegral::add(Value value) {
|
|||
// track the values in the window because no removal will be made. 'pop_front()' whenever a new
|
||||
// value is added to the queue so as to save memory.
|
||||
if (!_values.empty() && isNonremovable) {
|
||||
_memUsageBytes -= _values.front().getApproximateSize();
|
||||
decreaseMemUsageBytes(_values.front().getApproximateSize());
|
||||
_values.pop_front();
|
||||
}
|
||||
_memUsageBytes += value.getApproximateSize();
|
||||
|
|
@ -109,7 +109,7 @@ void WindowFunctionIntegral::remove(Value value) {
|
|||
if (arr[0].isNaN() || arr[1].isNaN())
|
||||
_nanCount--;
|
||||
|
||||
_memUsageBytes -= value.getApproximateSize();
|
||||
decreaseMemUsageBytes(value.getApproximateSize());
|
||||
_values.pop_front();
|
||||
|
||||
// Update "_integral" if there are at least two values before removing the current value.
|
||||
|
|
|
|||
|
|
@ -66,7 +66,7 @@ public:
|
|||
// which is what we want, to satisfy "remove() undoes add() when called in FIFO order".
|
||||
auto iter = _values.find(std::move(value));
|
||||
tassert(5371400, "Can't remove from an empty WindowFunctionMinMax", iter != _values.end());
|
||||
_memUsageBytes -= iter->getApproximateSize();
|
||||
decreaseMemUsageBytes(iter->getApproximateSize());
|
||||
_values.erase(iter);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -63,7 +63,7 @@ public:
|
|||
"Attempted to remove an element other than the first element from WindowFunctionPush",
|
||||
_expCtx->getValueComparator().evaluate(valToRemove == value));
|
||||
_values.pop_front();
|
||||
_memUsageBytes -= value.getApproximateSize();
|
||||
decreaseMemUsageBytes(value.getApproximateSize());
|
||||
}
|
||||
|
||||
void reset() override {
|
||||
|
|
|
|||
|
|
@ -261,7 +261,8 @@ PlanEnumerator::PlanEnumerator(const PlanEnumeratorParams& params)
|
|||
_ixisect(params.intersect),
|
||||
_enumerateOrChildrenLockstep(params.enumerateOrChildrenLockstep),
|
||||
_orLimit(params.maxSolutionsPerOr),
|
||||
_intersectLimit(params.maxIntersectPerAnd) {}
|
||||
_intersectLimit(params.maxIntersectPerAnd),
|
||||
_disableOrPushdown(params.disableOrPushdown) {}
|
||||
|
||||
PlanEnumerator::~PlanEnumerator() {
|
||||
typedef stdx::unordered_map<MemoID, NodeAssignment*> MemoMap;
|
||||
|
|
@ -528,11 +529,15 @@ bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) {
|
|||
// preds to 'indexedPreds'. Adding the mandatory preds directly to 'indexedPreds' would lead
|
||||
// to problems such as pulling a predicate beneath an OR into a set joined by an AND.
|
||||
getIndexedPreds(node, childContext, &indexedPreds);
|
||||
// Pass in the indexed predicates as outside predicates when prepping the subnodes.
|
||||
// Pass in the indexed predicates as outside predicates when prepping the subnodes. But if
|
||||
// match expression optimization is disabled, skip this part: we don't want to do
|
||||
// OR-pushdown because it relies on the expression being canonicalized.
|
||||
auto childContextCopy = childContext;
|
||||
if (MONGO_likely(!_disableOrPushdown)) {
|
||||
for (auto pred : indexedPreds) {
|
||||
childContextCopy.outsidePreds[pred] = OutsidePredRoute{};
|
||||
}
|
||||
}
|
||||
if (!prepSubNodes(node, childContextCopy, &subnodes, &mandatorySubnodes)) {
|
||||
return false;
|
||||
}
|
||||
|
|
@ -835,6 +840,13 @@ void PlanEnumerator::assignPredicate(
|
|||
MatchExpression* pred,
|
||||
size_t position,
|
||||
OneIndexAssignment* indexAssignment) {
|
||||
if (MONGO_unlikely(_disableOrPushdown)) {
|
||||
// If match expression optimization is disabled, we also disable OR-pushdown,
|
||||
// so we should never get 'outsidePreds' here.
|
||||
tassert(7059700,
|
||||
"Tried to do OR-pushdown despite disableMatchExpressionOptimization",
|
||||
outsidePreds.empty());
|
||||
}
|
||||
if (outsidePreds.find(pred) != outsidePreds.end()) {
|
||||
OrPushdownTag::Destination dest;
|
||||
dest.route = outsidePreds.at(pred).route;
|
||||
|
|
|
|||
|
|
@ -44,7 +44,8 @@ namespace mongo {
|
|||
struct PlanEnumeratorParams {
|
||||
PlanEnumeratorParams()
|
||||
: maxSolutionsPerOr(internalQueryEnumerationMaxOrSolutions.load()),
|
||||
maxIntersectPerAnd(internalQueryEnumerationMaxIntersectPerAnd.load()) {}
|
||||
maxIntersectPerAnd(internalQueryEnumerationMaxIntersectPerAnd.load()),
|
||||
disableOrPushdown(disableMatchExpressionOptimization.shouldFail()) {}
|
||||
|
||||
// Do we provide solutions that use more indices than the minimum required to provide
|
||||
// an indexed solution?
|
||||
|
|
@ -69,6 +70,11 @@ struct PlanEnumeratorParams {
|
|||
// all-pairs approach, we could wind up creating a lot of enumeration possibilities for
|
||||
// certain inputs.
|
||||
size_t maxIntersectPerAnd;
|
||||
|
||||
// Whether to disable OR-pushdown optimization. OR-pushdown assumes that the expression has been
|
||||
    // simplified: for example, that single-child $or nodes are unwrapped. To avoid violating that assumption, when
|
||||
// the 'disableMatchExpressionOptimization' failpoint is set, we also disable OR-pushdown.
|
||||
bool disableOrPushdown;
|
||||
};
|
||||
|
||||
/**
|
||||
|
|
@ -594,6 +600,9 @@ private:
|
|||
|
||||
// How many things do we want from each AND?
|
||||
size_t _intersectLimit;
|
||||
|
||||
// Whether we should disable OR-pushdown optimization.
|
||||
const bool _disableOrPushdown;
|
||||
};
|
||||
|
||||
} // namespace mongo
|
||||
|
|
|
|||
|
|
@ -434,6 +434,39 @@ TEST_F(QueryPlannerTest, RootedOrOfAndDontCollapseDifferentBounds) {
|
|||
"bounds: {c: [[3,3,true,true]], d: [[4,4,true,true]]}}}]}}}}");
|
||||
}
|
||||
|
||||
TEST_F(QueryPlannerTest, DontCrashTryingToPushToSingleChildIndexedOr1) {
|
||||
FailPointEnableBlock failPoint("disableMatchExpressionOptimization");
|
||||
addIndex(BSON("indexed" << 1));
|
||||
runQuery(
|
||||
fromjson("{ $and : [\n"
|
||||
" { $and : [ { indexed : { $gt : 5 } },\n"
|
||||
" { unindexed : 42 } ] },\n"
|
||||
" { $or : [ { indexed: { $lt : 100 } } ] }\n"
|
||||
" ] }"));
|
||||
|
||||
assertNumSolutions(3U);
|
||||
}
|
||||
|
||||
TEST_F(QueryPlannerTest, DontCrashTryingToPushToSingleChildIndexedOr2) {
|
||||
// Test that queries with single-child $and, $or do not crash when match-expression optimization
|
||||
// is disabled. Normally these single-child nodes are eliminated, so when they are left in place
|
||||
// it can confuse OR-pushdown optimization.
|
||||
//
|
||||
// Originally designed to reproduce SERVER-70597, which would only happen when the
|
||||
// INDEX_INTERSECTION option is enabled.
|
||||
FailPointEnableBlock failPoint("disableMatchExpressionOptimization");
|
||||
addIndex(BSON("a" << 1 << "b" << 1));
|
||||
|
||||
params.options |= QueryPlannerParams::INDEX_INTERSECTION;
|
||||
runQuery(
|
||||
fromjson("{ $and : [\n"
|
||||
" { $and : [ { a : 2 } ] },\n"
|
||||
" { $or : [ { b : 3 } ] }\n"
|
||||
" ] }"));
|
||||
|
||||
assertNumSolutions(2U);
|
||||
}
|
||||
|
||||
// SERVER-13960: properly handle $or with a mix of exact and inexact predicates.
|
||||
TEST_F(QueryPlannerTest, OrInexactWithExact) {
|
||||
addIndex(BSON("name" << 1));
|
||||
|
|
|
|||
|
|
@ -76,6 +76,7 @@ MONGO_FAIL_POINT_DEFINE(blockHeartbeatStepdown);
|
|||
MONGO_FAIL_POINT_DEFINE(blockHeartbeatReconfigFinish);
|
||||
MONGO_FAIL_POINT_DEFINE(hangAfterTrackingNewHandleInHandleHeartbeatResponseForTest);
|
||||
MONGO_FAIL_POINT_DEFINE(waitForPostActionCompleteInHbReconfig);
|
||||
MONGO_FAIL_POINT_DEFINE(pauseInHandleHeartbeatResponse);
|
||||
|
||||
} // namespace
|
||||
|
||||
|
|
@ -180,6 +181,12 @@ void ReplicationCoordinatorImpl::handleHeartbeatResponse_forTest(BSONObj respons
|
|||
|
||||
void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
|
||||
const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) {
|
||||
pauseInHandleHeartbeatResponse.executeIf(
|
||||
[](const BSONObj& data) { pauseInHandleHeartbeatResponse.pauseWhileSet(); },
|
||||
[&cbData](const BSONObj& data) -> bool {
|
||||
StringData dtarget = data["target"].valueStringDataSafe();
|
||||
return dtarget == cbData.request.target.toString();
|
||||
});
|
||||
stdx::unique_lock<Latch> lk(_mutex);
|
||||
|
||||
// remove handle from queued heartbeats
|
||||
|
|
@ -190,7 +197,15 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse(
|
|||
Status responseStatus = cbData.response.status;
|
||||
const HostAndPort& target = cbData.request.target;
|
||||
|
||||
if (responseStatus == ErrorCodes::CallbackCanceled) {
|
||||
// It is possible that the callback was canceled after handleHeartbeatResponse was called but
|
||||
// before it got the lock above.
|
||||
//
|
||||
// In this case, the responseStatus will be OK and we can process the heartbeat. However, if
|
||||
// we do so, cancelling heartbeats no longer establishes a barrier after which all heartbeats
|
||||
// processed are "new" (sent subsequent to the cancel), which is something we care about for
|
||||
// catchup takeover. So if we detect this situation (by checking if the handle was canceled)
|
||||
// we will NOT process the 'stale' heartbeat.
|
||||
if (responseStatus == ErrorCodes::CallbackCanceled || cbData.myHandle.isCanceled()) {
|
||||
LOGV2_FOR_HEARTBEATS(4615619,
|
||||
2,
|
||||
"Received response to heartbeat (requestId: {requestId}) from "
|
||||
|
|
|
|||
|
|
@ -42,7 +42,9 @@
|
|||
#include "mongo/db/repl/replication_coordinator_impl.h"
|
||||
#include "mongo/db/repl/replication_coordinator_test_fixture.h"
|
||||
#include "mongo/db/repl/topology_version_observer.h"
|
||||
#include "mongo/logv2/log.h"
|
||||
#include "mongo/unittest/barrier.h"
|
||||
#include "mongo/unittest/log_test.h"
|
||||
#include "mongo/unittest/unittest.h"
|
||||
#include "mongo/util/assert_util.h"
|
||||
#include "mongo/util/clock_source.h"
|
||||
|
|
@ -118,6 +120,9 @@ protected:
|
|||
const Milliseconds sleepTime = Milliseconds(100);
|
||||
|
||||
std::unique_ptr<TopologyVersionObserver> observer;
|
||||
|
||||
unittest::MinimumLoggedSeverityGuard severityGuard{logv2::LogComponent::kDefault,
|
||||
logv2::LogSeverity::Debug(4)};
|
||||
};
|
||||
|
||||
|
||||
|
|
@ -140,11 +145,15 @@ TEST_F(TopologyVersionObserverTest, UpdateCache) {
|
|||
auto electionTimeoutWhen = getReplCoord()->getElectionTimeout_forTest();
|
||||
simulateSuccessfulV1ElectionWithoutExitingDrainMode(electionTimeoutWhen, opCtx.get());
|
||||
|
||||
auto sleepCounter = 0;
|
||||
// Wait for the observer to update its cache
|
||||
while (observer->getCached()->getTopologyVersion()->getCounter() ==
|
||||
cachedResponse->getTopologyVersion()->getCounter()) {
|
||||
sleepFor(sleepTime);
|
||||
// Make sure the test doesn't wait here for longer than 15 seconds.
|
||||
ASSERT_LTE(sleepCounter++, 150);
|
||||
}
|
||||
LOGV2(9326401, "Observer topology incremented after successful election");
|
||||
|
||||
auto newResponse = observer->getCached();
|
||||
ASSERT(newResponse && newResponse->getTopologyVersion());
|
||||
|
|
|
|||
|
|
@ -168,8 +168,11 @@ std::vector<BSONObj> autoSplitVector(OperationContext* opCtx,
|
|||
}
|
||||
}
|
||||
|
||||
BSONObj maxKeyInChunk;
|
||||
// Compare the first and last document belonging to the range; if they have the same shard
|
||||
// key value, no split point can be found.
|
||||
bool chunkCanBeSplit = true;
|
||||
{
|
||||
BSONObj maxKeyInChunk;
|
||||
auto backwardIdxScanner =
|
||||
InternalPlanner::indexScan(opCtx,
|
||||
&(*collection),
|
||||
|
|
@ -185,11 +188,11 @@ std::vector<BSONObj> autoSplitVector(OperationContext* opCtx,
|
|||
// Range is empty
|
||||
return {};
|
||||
}
|
||||
|
||||
chunkCanBeSplit = minKeyInOriginalChunk.woCompare(maxKeyInChunk) != 0;
|
||||
}
|
||||
|
||||
if (minKeyInOriginalChunk.woCompare(maxKeyInChunk) == 0) {
|
||||
// Range contains only documents with a single key value. So we cannot possibly find a
|
||||
// split point, and there is no need to scan any further.
|
||||
if (!chunkCanBeSplit) {
|
||||
LOGV2_WARNING(
|
||||
5865001,
|
||||
"Possible low cardinality key detected in range. Range contains only a single key.",
|
||||
|
|
|
|||
|
|
@ -338,6 +338,11 @@ CollectionShardingRuntime::_getCurrentMetadataIfKnown(
|
|||
std::shared_ptr<ScopedCollectionDescription::Impl>
|
||||
CollectionShardingRuntime::_getMetadataWithVersionCheckAt(
|
||||
OperationContext* opCtx, const boost::optional<mongo::LogicalTime>& atClusterTime) {
|
||||
// If this node is not part of a sharded cluster, or the ShardingState has not been recovered
|
||||
// yet, consider all collections as untracked.
|
||||
if (!ShardingState::get(opCtx)->enabled())
|
||||
return kUnshardedCollection;
|
||||
|
||||
const auto optReceivedShardVersion = getOperationReceivedVersion(opCtx, _nss);
|
||||
if (!optReceivedShardVersion)
|
||||
return kUnshardedCollection;
|
||||
|
|
|
|||
|
|
@ -214,7 +214,7 @@ StringMap<std::vector<ShardId>> buildTagsToShardIdsMap(OperationContext* opCtx,
|
|||
} // namespace
|
||||
|
||||
std::vector<BSONObj> InitialSplitPolicy::calculateHashedSplitPoints(
|
||||
const ShardKeyPattern& shardKeyPattern, BSONObj prefix, int numInitialChunks) {
|
||||
const ShardKeyPattern& shardKeyPattern, BSONObj prefix, size_t numInitialChunks) {
|
||||
invariant(shardKeyPattern.isHashedPattern());
|
||||
invariant(numInitialChunks > 0);
|
||||
|
||||
|
|
@ -259,7 +259,7 @@ std::vector<BSONObj> InitialSplitPolicy::calculateHashedSplitPoints(
|
|||
current += intervalSize / 2;
|
||||
}
|
||||
|
||||
for (int i = 0; i < (numInitialChunks - 1) / 2; i++) {
|
||||
for (size_t i = 0; i < (numInitialChunks - 1) / 2; i++) {
|
||||
splitPoints.push_back(buildSplitPoint(current));
|
||||
splitPoints.push_back(buildSplitPoint(-current));
|
||||
current += intervalSize;
|
||||
|
|
|
|||
|
|
@ -102,7 +102,7 @@ public:
|
|||
*/
|
||||
static std::vector<BSONObj> calculateHashedSplitPoints(const ShardKeyPattern& shardKeyPattern,
|
||||
BSONObj prefix,
|
||||
int numInitialChunks);
|
||||
size_t numInitialChunks);
|
||||
|
||||
/**
|
||||
* Produces the initial chunks that need to be written for an *empty* collection which is being
|
||||
|
|
|
|||
|
|
@ -642,7 +642,7 @@ void CreateCollectionCoordinator::_checkCommandArguments(OperationContext* opCtx
|
|||
const int maxNumInitialChunksForShards =
|
||||
Grid::get(opCtx)->shardRegistry()->getNumShardsNoReload() * shardutil::kMaxSplitPoints;
|
||||
const int maxNumInitialChunksTotal = 1000 * 1000; // Arbitrary limit to memory consumption
|
||||
int numChunks = _request.getNumInitialChunks().value();
|
||||
const auto numChunks = _request.getNumInitialChunks().value();
|
||||
uassert(ErrorCodes::InvalidOptions,
|
||||
str::stream() << "numInitialChunks cannot be more than either: "
|
||||
<< maxNumInitialChunksForShards << ", " << shardutil::kMaxSplitPoints
|
||||
|
|
|
|||
|
|
@ -293,7 +293,8 @@ void updateSessionRecord(OperationContext* opCtx,
|
|||
BSONObj o2Field,
|
||||
std::vector<StmtId> stmtIds,
|
||||
boost::optional<repl::OpTime> preImageOpTime,
|
||||
boost::optional<repl::OpTime> postImageOpTime) {
|
||||
boost::optional<repl::OpTime> postImageOpTime,
|
||||
NamespaceString sourceNss) {
|
||||
invariant(opCtx->getLogicalSessionId());
|
||||
invariant(opCtx->getTxnNumber());
|
||||
|
||||
|
|
@ -304,7 +305,7 @@ void updateSessionRecord(OperationContext* opCtx,
|
|||
oplogEntry.setOpType(repl::OpTypeEnum::kNoop);
|
||||
oplogEntry.setObject(BSON(SessionCatalogMigrationDestination::kSessionMigrateOplogTag << 1));
|
||||
oplogEntry.setObject2(std::move(o2Field));
|
||||
oplogEntry.setNss({});
|
||||
oplogEntry.setNss(std::move(sourceNss));
|
||||
oplogEntry.setSessionId(opCtx->getLogicalSessionId());
|
||||
oplogEntry.setTxnNumber(opCtx->getTxnNumber());
|
||||
oplogEntry.setStatementIds(stmtIds);
|
||||
|
|
|
|||
|
|
@ -154,7 +154,8 @@ void updateSessionRecord(OperationContext* opCtx,
|
|||
BSONObj o2Field,
|
||||
std::vector<StmtId> stmtIds,
|
||||
boost::optional<repl::OpTime> preImageOpTime,
|
||||
boost::optional<repl::OpTime> postImageOpTime);
|
||||
boost::optional<repl::OpTime> postImageOpTime,
|
||||
NamespaceString sourceNss);
|
||||
|
||||
/**
|
||||
* Calls and returns the value from the supplied lambda function.
|
||||
|
|
|
|||
|
|
@ -79,6 +79,7 @@ repl::OpTime ReshardingOplogSessionApplication::_logPrePostImage(
|
|||
|
||||
boost::optional<SharedSemiFuture<void>> ReshardingOplogSessionApplication::tryApplyOperation(
|
||||
OperationContext* opCtx, const repl::OplogEntry& op) const {
|
||||
auto sourceNss = op.getNss();
|
||||
auto lsid = *op.getSessionId();
|
||||
auto txnNumber = *op.getTxnNumber();
|
||||
bool isRetryableWrite = op.isCrudOpType();
|
||||
|
|
@ -105,7 +106,8 @@ boost::optional<SharedSemiFuture<void>> ReshardingOplogSessionApplication::tryAp
|
|||
std::move(o2Field),
|
||||
std::move(stmtIds),
|
||||
std::move(preImageOpTime),
|
||||
std::move(postImageOpTime));
|
||||
std::move(postImageOpTime),
|
||||
std::move(sourceNss));
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -172,7 +172,8 @@ boost::optional<SharedSemiFuture<void>> ReshardingTxnCloner::doOneRecord(
|
|||
TransactionParticipant::kDeadEndSentinel,
|
||||
{kIncompleteHistoryStmtId},
|
||||
boost::none /* preImageOpTime */,
|
||||
boost::none /* postImageOpTime */);
|
||||
boost::none /* postImageOpTime */,
|
||||
{});
|
||||
});
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -41,27 +41,14 @@
|
|||
#include "mongo/db/logical_session_id.h"
|
||||
#include "mongo/db/ops/write_ops.h"
|
||||
#include "mongo/db/repl/read_concern_args.h"
|
||||
#include "mongo/db/sessions_server_parameters_gen.h"
|
||||
#include "mongo/rpc/get_status_from_command_result.h"
|
||||
#include "mongo/s/write_ops/batched_command_response.h"
|
||||
#include "mongo/util/duration.h"
|
||||
|
||||
namespace mongo {
|
||||
namespace {
|
||||
|
||||
// This batch size is chosen to ensure that we don't form requests larger than the 16mb limit.
|
||||
// Especially for refreshes, the updates we send include the full user name (user@db), and user
|
||||
// names can be quite large (we enforce a max 10k limit for usernames used with sessions).
|
||||
//
|
||||
// At 1000 elements, a 16mb payload gives us a budget of 16000 bytes per user, which we should
|
||||
// comfortably be able to stay under, even with 10k user names.
|
||||
constexpr size_t kMaxBatchSize = 1000;
|
||||
|
||||
// Used to refresh or remove items from the session collection with write
|
||||
// concern majority
|
||||
const WriteConcernOptions kMajorityWriteConcern{WriteConcernOptions::kMajority,
|
||||
WriteConcernOptions::SyncMode::UNSET,
|
||||
WriteConcernOptions::kWriteConcernTimeoutSystem};
|
||||
|
||||
|
||||
BSONObj lsidQuery(const LogicalSessionId& lsid) {
|
||||
return BSON(LogicalSessionRecord::kIdFieldName << lsid.toBSON());
|
||||
}
|
||||
|
|
@ -104,7 +91,7 @@ void runBulkGeneric(TFactory makeT, AddLineFn addLine, SendFn sendBatch, const C
|
|||
for (const auto& item : items) {
|
||||
addLine(*thing, item);
|
||||
|
||||
if (++i >= kMaxBatchSize) {
|
||||
if (++i >= std::size_t(mongo::gSessionMaxBatchSize.load())) {
|
||||
sendLocalBatch();
|
||||
|
||||
setupBatch();
|
||||
|
|
@ -192,7 +179,14 @@ SessionsCollection::FindBatchFn SessionsCollection::makeFindFnForCommand(const N
|
|||
void SessionsCollection::_doRefresh(const NamespaceString& ns,
|
||||
const std::vector<LogicalSessionRecord>& sessions,
|
||||
SendBatchFn send) {
|
||||
auto init = [ns](BSONObjBuilder* batch) {
|
||||
// Used to refresh items from the session collection with write
|
||||
// concern majority
|
||||
const WriteConcernOptions kMajorityWriteConcern{
|
||||
WriteConcernOptions::kMajority,
|
||||
WriteConcernOptions::SyncMode::UNSET,
|
||||
Milliseconds(mongo::gSessionWriteConcernTimeoutSystemMillis.load())};
|
||||
|
||||
auto init = [ns, kMajorityWriteConcern](BSONObjBuilder* batch) {
|
||||
batch->append("update", ns.coll());
|
||||
batch->append("ordered", false);
|
||||
batch->append(WriteConcernOptions::kWriteConcernField, kMajorityWriteConcern.toBSON());
|
||||
|
|
@ -202,14 +196,20 @@ void SessionsCollection::_doRefresh(const NamespaceString& ns,
|
|||
entries->append(
|
||||
BSON("q" << lsidQuery(record) << "u" << updateQuery(record) << "upsert" << true));
|
||||
};
|
||||
|
||||
runBulkCmd("updates", init, add, send, sessions);
|
||||
}
|
||||
|
||||
void SessionsCollection::_doRemove(const NamespaceString& ns,
|
||||
const std::vector<LogicalSessionId>& sessions,
|
||||
SendBatchFn send) {
|
||||
auto init = [ns](BSONObjBuilder* batch) {
|
||||
// Used to remove items from the session collection with write
|
||||
// concern majority
|
||||
const WriteConcernOptions kMajorityWriteConcern{
|
||||
WriteConcernOptions::kMajority,
|
||||
WriteConcernOptions::SyncMode::UNSET,
|
||||
Milliseconds(mongo::gSessionWriteConcernTimeoutSystemMillis.load())};
|
||||
|
||||
auto init = [ns, kMajorityWriteConcern](BSONObjBuilder* batch) {
|
||||
batch->append("delete", ns.coll());
|
||||
batch->append("ordered", false);
|
||||
batch->append(WriteConcernOptions::kWriteConcernField, kMajorityWriteConcern.toBSON());
|
||||
|
|
|
|||
|
|
@ -0,0 +1,63 @@
|
|||
# Copyright (C) 2024-present MongoDB, Inc.
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the Server Side Public License, version 1,
|
||||
# as published by MongoDB, Inc.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# Server Side Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the Server Side Public License
|
||||
# along with this program. If not, see
|
||||
# <http://www.mongodb.com/licensing/server-side-public-license>.
|
||||
#
|
||||
# As a special exception, the copyright holders give permission to link the
|
||||
# code of portions of this program with the OpenSSL library under certain
|
||||
# conditions as described in each individual source file and distribute
|
||||
# linked combinations including the program with the OpenSSL library. You
|
||||
# must comply with the Server Side Public License in all respects for
|
||||
# all of the code used other than as permitted herein. If you modify file(s)
|
||||
# with this exception, you may extend this exception to your version of the
|
||||
# file(s), but you are not obligated to do so. If you do not wish to do so,
|
||||
# delete this exception statement from your version. If you delete this
|
||||
# exception statement from all source files in the program, then also delete
|
||||
# it in the license file.
|
||||
#
|
||||
|
||||
# Server parameters for configuring the refresh of the session collection.
|
||||
|
||||
global:
|
||||
cpp_namespace: "mongo"
|
||||
|
||||
imports:
|
||||
- "mongo/idl/basic_types.idl"
|
||||
|
||||
server_parameters:
|
||||
sessionWriteConcernTimeoutSystemMillis:
|
||||
description: Controls the write concern timeout (in milliseconds) for the refresh or removal of items from the session collection.
|
||||
set_at: [startup, runtime]
|
||||
cpp_vartype: AtomicWord<int>
|
||||
cpp_varname: gSessionWriteConcernTimeoutSystemMillis
|
||||
default: 60000
|
||||
validator:
|
||||
gte: 0
|
||||
redact: false
|
||||
|
||||
sessionMaxBatchSize:
|
||||
description: >-
|
||||
Controls the maximum batch size (number of elements) for the sessions' refresh.
|
||||
This batch size is chosen to ensure that we don't form requests larger than the 16mb limit.
|
||||
Especially for refreshes, the updates we send include the full user name (user@db), and user
|
||||
names can be quite large (we enforce a max 10k limit for usernames used with sessions).
|
||||
At a default of 1000 elements, a 16mb payload gives us a budget of 16000 bytes per user, which we should
|
||||
comfortably be able to stay under, even with 10k user names, so we do not form requests larger than the 16mb limit.
|
||||
set_at: [startup, runtime]
|
||||
cpp_vartype: AtomicWord<int>
|
||||
cpp_varname: gSessionMaxBatchSize
|
||||
default: 1000
|
||||
validator:
|
||||
gte: 100
|
||||
lte: 10000
|
||||
redact: false
|
||||
|
|
@ -33,8 +33,10 @@
|
|||
|
||||
#include "mongo/platform/basic.h"
|
||||
|
||||
#include "mongo/bson/util/builder.h"
|
||||
#include "mongo/client/dbclient_connection.h"
|
||||
#include "mongo/client/dbclient_rs.h"
|
||||
#include "mongo/db/dbmessage.h"
|
||||
#include "mongo/db/ops/write_ops.h"
|
||||
#include "mongo/db/query/cursor_response.h"
|
||||
#include "mongo/db/query/getmore_request.h"
|
||||
|
|
@ -1391,5 +1393,17 @@ TEST(OpMsg, HelloOkCanBeDisabled) {
|
|||
ASSERT(!isHelloOk);
|
||||
}
|
||||
|
||||
TEST(Message, LegacyInvalidNs) {
|
||||
auto conn = getIntegrationTestConnection();
|
||||
|
||||
auto msg = makeMessage(dbQuery, [&](BufBuilder& b) {
|
||||
b.appendNum(0);
|
||||
b.appendStrBytes("nonullbyte");
|
||||
});
|
||||
    // Since our request cannot be parsed, we don't receive a response from the server.
|
||||
Message ignore;
|
||||
ASSERT_THROWS(conn->call(msg, ignore), DBException);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace mongo
|
||||
|
|
|
|||
|
|
@ -674,7 +674,7 @@ void ChunkManagerTargeter::noteStaleShardResponse(OperationContext* opCtx,
|
|||
Grid::get(opCtx)
|
||||
->catalogCache()
|
||||
->invalidateShardOrEntireCollectionEntryForShardedCollection(
|
||||
_nss, staleInfo.getVersionWanted(), endpoint.shardName);
|
||||
_nss, boost::none, endpoint.shardName);
|
||||
}
|
||||
|
||||
_lastError = LastErrorType::kStaleShardVersion;
|
||||
|
|
|
|||
|
|
@ -288,9 +288,17 @@ Future<DbResponse> HandleRequest::run() {
|
|||
}
|
||||
|
||||
Future<DbResponse> ServiceEntryPointMongos::handleRequest(OperationContext* opCtx,
|
||||
const Message& message) noexcept {
|
||||
const Message& message) noexcept try {
|
||||
auto hr = std::make_shared<HandleRequest>(opCtx, message);
|
||||
return hr->run();
|
||||
} catch (const DBException& ex) {
|
||||
auto status = ex.toStatus();
|
||||
LOGV2(9431602, "Failed to handle request", "error"_attr = redact(status));
|
||||
return status;
|
||||
} catch (...) {
|
||||
auto error = exceptionToStatus();
|
||||
LOGV2_FATAL(
|
||||
9431601, "Request handling produced unhandled exception", "error"_attr = redact(error));
|
||||
}
|
||||
|
||||
void ServiceEntryPointMongos::onClientConnect(Client* client) {
|
||||
|
|
|
|||
|
|
@ -769,12 +769,7 @@ void BatchWriteOp::buildClientResponse(BatchedCommandResponse* batchResp) {
|
|||
}
|
||||
}
|
||||
|
||||
// Only return a write concern error if everything succeeded (unordered or ordered)
|
||||
// OR if something succeeded and we're unordered
|
||||
const bool orderedOps = _clientRequest.getWriteCommandRequestBase().getOrdered();
|
||||
const bool reportWCError =
|
||||
errOps.empty() || (!orderedOps && errOps.size() < _clientRequest.sizeWriteOps());
|
||||
if (!_wcErrors.empty() && reportWCError) {
|
||||
if (!_wcErrors.empty()) {
|
||||
WriteConcernErrorDetail* error = new WriteConcernErrorDetail;
|
||||
|
||||
// Generate the multi-error message below
|
||||
|
|
|
|||
|
|
@ -1029,9 +1029,7 @@ TEST_F(BatchWriteOpTest, MultiOpPartialSingleShardErrorOrdered) {
|
|||
// Tests of edge-case functionality, lifecycle is assumed to be behaving normally
|
||||
//
|
||||
|
||||
// Multi-op (unordered) error and write concern error test. We never report the write concern error
|
||||
// for single-doc batches, since the error means there's no write concern applied. Don't suppress
|
||||
// the error if ordered : false.
|
||||
// Multi-op (unordered) error and write concern error test.
|
||||
TEST_F(BatchWriteOpTest, MultiOpErrorAndWriteConcernErrorUnordered) {
|
||||
NamespaceString nss("foo.bar");
|
||||
ShardEndpoint endpoint(ShardId("shard"), ChunkVersion::IGNORED(), boost::none);
|
||||
|
|
@ -1074,8 +1072,7 @@ TEST_F(BatchWriteOpTest, MultiOpErrorAndWriteConcernErrorUnordered) {
|
|||
ASSERT(clientResponse.isWriteConcernErrorSet());
|
||||
}
|
||||
|
||||
// Single-op (ordered) error and write concern error test. Suppress the write concern error if
|
||||
// ordered and we also have an error
|
||||
// Single-op (ordered) error and write concern error test.
|
||||
TEST_F(BatchWriteOpTest, SingleOpErrorAndWriteConcernErrorOrdered) {
|
||||
NamespaceString nss("foo.bar");
|
||||
ShardEndpoint endpointA(ShardId("shardA"), ChunkVersion::IGNORED(), boost::none);
|
||||
|
|
@ -1121,14 +1118,14 @@ TEST_F(BatchWriteOpTest, SingleOpErrorAndWriteConcernErrorOrdered) {
|
|||
ASSERT(batchOp.isFinished());
|
||||
ASSERT(++targetedIt == targeted.end());
|
||||
|
||||
// Ordered doesn't report write concern error
|
||||
// Ordered reports write concern error.
|
||||
BatchedCommandResponse clientResponse;
|
||||
batchOp.buildClientResponse(&clientResponse);
|
||||
ASSERT(clientResponse.getOk());
|
||||
ASSERT_EQUALS(clientResponse.getN(), 1);
|
||||
ASSERT(clientResponse.isErrDetailsSet());
|
||||
ASSERT_EQUALS(clientResponse.sizeErrDetails(), 1u);
|
||||
ASSERT(!clientResponse.isWriteConcernErrorSet());
|
||||
ASSERT(clientResponse.isWriteConcernErrorSet());
|
||||
}
|
||||
|
||||
// Targeting failure on second op in batch op (ordered)
|
||||
|
|
|
|||
|
|
@ -427,7 +427,10 @@ public:
|
|||
/**
|
||||
* Get some details about the CPU
|
||||
*/
|
||||
static void getCpuInfo(int& procCount, std::string& freq, std::string& features) {
|
||||
static void getCpuInfo(int& procCount,
|
||||
std::string& modelString,
|
||||
std::string& freq,
|
||||
std::string& features) {
|
||||
|
||||
procCount = 0;
|
||||
|
||||
|
|
@ -439,6 +442,7 @@ public:
|
|||
{"features", [&](const std::string& value) { features = value; }},
|
||||
#else
|
||||
{"processor", [&](const std::string& value) { procCount++; }},
|
||||
{"model name", [&](const std::string& value) { modelString = value; }},
|
||||
{"cpu MHz", [&](const std::string& value) { freq = value; }},
|
||||
{"flags", [&](const std::string& value) { features = value; }},
|
||||
#endif
|
||||
|
|
@ -664,6 +668,11 @@ void ProcessInfo::getExtraInfo(BSONObjBuilder& info) {
|
|||
|
||||
appendNumber("voluntary_context_switches", ru.ru_nvcsw);
|
||||
appendNumber("involuntary_context_switches", ru.ru_nivcsw);
|
||||
|
||||
LinuxProc p(_pid);
|
||||
|
||||
    // Append the number of threads in use
|
||||
appendNumber("threads", p._nlwp);
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -710,13 +719,13 @@ unsigned long countNumaNodes() {
|
|||
void ProcessInfo::SystemInfo::collectSystemInfo() {
|
||||
utsname unameData;
|
||||
std::string distroName, distroVersion;
|
||||
std::string cpuFreq, cpuFeatures;
|
||||
std::string cpuString, cpuFreq, cpuFeatures;
|
||||
int cpuCount;
|
||||
int physicalCores;
|
||||
int cpuSockets;
|
||||
|
||||
std::string verSig = LinuxSysHelper::readLineFromFile("/proc/version_signature");
|
||||
LinuxSysHelper::getCpuInfo(cpuCount, cpuFreq, cpuFeatures);
|
||||
LinuxSysHelper::getCpuInfo(cpuCount, cpuString, cpuFreq, cpuFeatures);
|
||||
LinuxSysHelper::getNumPhysicalCores(physicalCores);
|
||||
cpuSockets = LinuxSysHelper::getNumCpuSockets();
|
||||
LinuxSysHelper::getLinuxDistro(distroName, distroVersion);
|
||||
|
|
@ -760,6 +769,7 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
|
|||
bExtra.append("versionSignature", verSig);
|
||||
|
||||
bExtra.append("kernelVersion", unameData.release);
|
||||
bExtra.append("cpuString", cpuString);
|
||||
bExtra.append("cpuFrequencyMHz", cpuFreq);
|
||||
bExtra.append("cpuFeatures", cpuFeatures);
|
||||
bExtra.append("pageSize", static_cast<long long>(pageSize));
|
||||
|
|
|
|||
|
|
@ -33,13 +33,30 @@
|
|||
#include <iostream>
|
||||
#include <vector>
|
||||
|
||||
#include "mongo/bson/bsonobj.h"
|
||||
#include "mongo/bson/bsonobjbuilder.h"
|
||||
#include "mongo/unittest/unittest.h"
|
||||
#include "mongo/util/processinfo.h"
|
||||
|
||||
using boost::optional;
|
||||
using mongo::ProcessInfo;
|
||||
|
||||
namespace mongo_test {
|
||||
namespace mongo {
|
||||
|
||||
namespace {
|
||||
using StringMap = std::map<std::string, uint64_t>;
|
||||
|
||||
StringMap toStringMap(BSONObj& obj) {
|
||||
StringMap map;
|
||||
|
||||
for (const auto& e : obj) {
|
||||
map[e.fieldName()] = e.numberLong();
|
||||
}
|
||||
|
||||
return map;
|
||||
}
|
||||
|
||||
#define ASSERT_KEY(_key) ASSERT_TRUE(stringMap.find(_key) != stringMap.end());
|
||||
|
||||
TEST(ProcessInfo, SysInfoIsInitialized) {
|
||||
ProcessInfo processInfo;
|
||||
if (processInfo.supported()) {
|
||||
|
|
@ -47,6 +64,20 @@ TEST(ProcessInfo, SysInfoIsInitialized) {
|
|||
}
|
||||
}
|
||||
|
||||
TEST(FTDCProcSysInfo, TestSysInfo) {
|
||||
auto sysInfo = ProcessInfo();
|
||||
BSONObjBuilder builder;
|
||||
sysInfo.appendSystemDetails(builder);
|
||||
|
||||
BSONObj obj = builder.obj();
|
||||
auto stringMap = toStringMap(obj);
|
||||
ASSERT_KEY("extra");
|
||||
|
||||
BSONObj extra = obj.getObjectField("extra");
|
||||
stringMap = toStringMap(extra);
|
||||
ASSERT_KEY("cpuString");
|
||||
}
|
||||
|
||||
TEST(ProcessInfo, GetNumAvailableCores) {
|
||||
#if defined(__APPLE__) || defined(__linux__) || (defined(__sun) && defined(__SVR4)) || \
|
||||
defined(_WIN32)
|
||||
|
|
@ -59,4 +90,5 @@ TEST(ProcessInfo, GetNumAvailableCores) {
|
|||
TEST(ProcessInfo, GetNumCoresReturnsNonZeroNumberOfProcessors) {
|
||||
ASSERT_GREATER_THAN(ProcessInfo::getNumCores(), 0u);
|
||||
}
|
||||
} // namespace mongo_test
|
||||
} // namespace
|
||||
} // namespace mongo
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@
|
|||
* it in the license file.
|
||||
*/
|
||||
|
||||
#include "scopeguard.h"
|
||||
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl
|
||||
|
||||
#include "mongo/platform/basic.h"
|
||||
|
|
@ -39,6 +40,7 @@
|
|||
|
||||
#include "mongo/logv2/log.h"
|
||||
#include "mongo/util/processinfo.h"
|
||||
#include "mongo/util/text.h"
|
||||
|
||||
namespace mongo {
|
||||
|
||||
|
|
@ -248,6 +250,43 @@ bool getFileVersion(const char* filePath, DWORD& fileVersionMS, DWORD& fileVersi
|
|||
return true;
|
||||
}
|
||||
|
||||
std::string getCpuString() {
|
||||
// get descriptive CPU string from registry
|
||||
HKEY hKey;
|
||||
LPCWSTR cpuKey = L"HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0";
|
||||
LPCWSTR valueName = L"ProcessorNameString";
|
||||
std::string cpuString;
|
||||
|
||||
// Open the CPU key in the Windows Registry
|
||||
if (RegOpenKeyEx(HKEY_LOCAL_MACHINE, cpuKey, 0, KEY_READ, &hKey) == ERROR_SUCCESS) {
|
||||
auto guard = makeGuard([hKey] { RegCloseKey(hKey); });
|
||||
WCHAR cpuModel[128];
|
||||
DWORD bufferSize = sizeof(cpuModel);
|
||||
|
||||
// Retrieve the value of ProcessorNameString
|
||||
if (RegQueryValueEx(hKey,
|
||||
valueName,
|
||||
nullptr,
|
||||
nullptr,
|
||||
reinterpret_cast<LPBYTE>(cpuModel),
|
||||
&bufferSize) == ERROR_SUCCESS) {
|
||||
cpuString = toUtf8String(cpuModel);
|
||||
} else {
|
||||
auto ec = lastSystemError();
|
||||
LOGV2_WARNING(7663101,
|
||||
"Failed to retrieve CPU model name from the registry",
|
||||
"error"_attr = errorMessage(ec));
|
||||
}
|
||||
|
||||
// Close the registry key
|
||||
} else {
|
||||
auto ec = lastSystemError();
|
||||
LOGV2_WARNING(
|
||||
7663102, "Failed to open CPU key in the registry", "error"_attr = errorMessage(ec));
|
||||
}
|
||||
return cpuString;
|
||||
}
|
||||
|
||||
void ProcessInfo::SystemInfo::collectSystemInfo() {
|
||||
BSONObjBuilder bExtra;
|
||||
std::stringstream verstr;
|
||||
|
|
@ -267,6 +306,12 @@ void ProcessInfo::SystemInfo::collectSystemInfo() {
|
|||
pageSize = static_cast<unsigned long long>(ntsysinfo.dwPageSize);
|
||||
bExtra.append("pageSize", static_cast<long long>(pageSize));
|
||||
|
||||
std::string cpuString = getCpuString();
|
||||
    if (!cpuString.empty()) {
|
||||
bExtra.append("cpuString", cpuString);
|
||||
}
|
||||
|
||||
|
||||
// get memory info
|
||||
mse.dwLength = sizeof(mse);
|
||||
if (GlobalMemoryStatusEx(&mse)) {
|
||||
|
|
|
|||
|
|
@ -40,6 +40,7 @@
|
|||
#include "mongo/bson/bsonobjbuilder.h"
|
||||
#include "mongo/logv2/log.h"
|
||||
#include "mongo/unittest/unittest.h"
|
||||
#include "mongo/util/processinfo.h"
|
||||
|
||||
namespace mongo {
|
||||
|
||||
|
|
|
|||