mirror of https://github.com/mongodb/mongo
SERVER-78589 Programmatically generate docker-compose.yml for sharded, repl, standalone suites
parent 4307c9d26f
commit 0c41da5707
@@ -240,12 +240,6 @@ default.profraw
/repo
/rpm/tmp

# antithesis image building assets
/buildscripts/antithesis/base_images/mongo_binaries/dist-test
/buildscripts/antithesis/base_images/workload/src
/buildscripts/antithesis/base_images/workload/mongo
/buildscripts/resmokeconfig/suites/antithesis_*.yml

# generated by gen_all_feature_flag_list.py
all_feature_flags.txt
@@ -280,8 +274,8 @@ bazel-out
bazel-testlogs
bazelisk

# generated configs for antithesis
antithesis/antithesis_config
# generated configs for external fixture suites
docker_compose/

# artifacts from antithesis docker base image builds
antithesis-dist-test
@@ -1,80 +0,0 @@
version: '3.0'

services:
{%- for i in range(num_configsvr) %}
  configsvr{{ i }}:
    container_name: configsvr{{ i }}
    hostname: configsvr{{ i }}
    image: mongo-binaries:{{ tag }}
    volumes:
      - ./logs/configsvr{{ i }}:/var/log/mongodb/
      - ./scripts:/scripts/
      - ./data/configsvr{{ i }}:/data/configdb/
    command: /bin/bash /scripts/configsvr{{ i }}_init.sh
    networks:
      antithesis-net:
        ipv4_address: 10.20.20.{{ get_and_increment_ip_address() }}
{%- endfor %}

{%- for s in range(num_shard) %}
{%- for n in range(num_node_per_shard) -%}
{% set i = s*num_node_per_shard+n %}
  mongod{{ i }}:
    container_name: mongod{{ i }}
    hostname: mongod{{ i }}
    image: mongo-binaries:{{ tag }}
    volumes:
      - ./logs/mongod{{ i }}:/var/log/mongodb/
      - ./scripts:/scripts/
      - ./data/mongod{{ i }}:/data/db/
    command: /bin/bash /scripts/mongod{{ i }}_init.sh
    networks:
      antithesis-net:
        ipv4_address: 10.20.20.{{ get_and_increment_ip_address() }}
{%- endfor %}
{%- endfor %}

{%- for m in range(num_mongos) %}
  mongos{{ m }}:
    container_name: mongos{{ m }}
    hostname: mongos{{ m }}
    image: mongo-binaries:{{ tag }}
    volumes:
      - ./logs/mongos{{ m }}:/var/log/mongodb/
      - ./scripts:/scripts/
    command: python3 /scripts/mongos{{ m }}_init.py
    depends_on:
{%- for i in range(num_configsvr) %}
      - configsvr{{ i }}
{%- endfor %}
{%- for s in range(num_shard) %}
{%- for n in range(num_node_per_shard) -%}
{% set i = s*num_node_per_shard+n %}
      - mongod{{ i }}
{%- endfor %}
{%- endfor %}
    networks:
      antithesis-net:
        ipv4_address: 10.20.20.{{ get_and_increment_ip_address() }}
{%- endfor %}
  workload:
    container_name: workload
    hostname: workload
    image: workload:{{ tag }}
    volumes:
      - ./logs/workload:/var/log/resmoke/
      - ./scripts:/scripts/
    command: python3 /scripts/workload_init.py
    networks:
      antithesis-net:
        ipv4_address: 10.20.20.130
    depends_on:
{%- for m in range(num_mongos) %}
      - mongos{{ m }}
{%- endfor %}

networks:
  antithesis-net:
    driver: bridge
    ipam:
      config:
        - subnet: 10.20.20.0/24
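For orientation (not part of the commit): a template like the one deleted above is rendered by passing node counts and an IP-allocation callback into jinja2. A minimal Python sketch, assuming the template is available at the path below and using illustrative values:

    # Illustrative sketch of rendering the docker-compose Jinja template.
    from jinja2 import Environment, FileSystemLoader

    env = Environment(loader=FileSystemLoader("antithesis/templates/"))
    template = env.get_template("docker_compose_template.yml.jinja")

    _next_ip = [1]
    def get_and_increment_ip_address():
        # Mirrors DockerClusterConfigWriter.get_and_increment_ip_address further below.
        _next_ip[0] += 1
        return _next_ip[0]

    print(template.render(num_configsvr=1, num_shard=2, num_node_per_shard=3,
                          num_mongos=1, tag="development",
                          get_and_increment_ip_address=get_and_increment_ip_address))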
@@ -1,6 +0,0 @@
FROM scratch
COPY docker-compose.yml /
ADD scripts /scripts
ADD logs /logs
ADD data /data
ADD debug /debug
@@ -1,156 +0,0 @@
"""Script to configure a sharded cluster in Antithesis from the mongos container."""
import json
import subprocess
import time
import datetime
from time import sleep

"""Util functions to assist in setting up a sharded cluster topology in Antithesis."""


def mongo_process_running(host, port):
    """Check to see if the process at the given host & port is running."""
    return subprocess.run(['mongo', '--host', host, '--port',
                           str(port), '--eval', '"db.stats()"'], check=True)


def retry_until_success(func, kwargs=None, wait_time=datetime.timedelta(seconds=1),
                        timeout_period=datetime.timedelta(seconds=30)):
    """
    Retry the function periodically until timeout.

    :param func: Void function that we are attempting to run.
    :param kwargs: Dictionary of keyword arguments for the function.
    :param wait_time: Number of seconds to wait before retrying function call.
    :param timeout_period: Number of seconds we allow function to run before raising TimeoutError.
    :return: None
    """
    kwargs = {} if kwargs is None else kwargs
    timeout = time.time() + timeout_period.seconds
    while True:
        if time.time() > timeout:
            raise TimeoutError(
                f"{func.__name__} called with {kwargs} timed out after {timeout_period.seconds} second(s).")
        try:
            func(**kwargs)
            break
        except:  # pylint: disable=bare-except
            print(f"Retrying {func.__name__} called with {kwargs} after {wait_time.seconds} second(s).")
            time.sleep(wait_time.seconds)


# Create Config
CONFIGSVR_CONFIG = {
    "_id": "config-rs",
    "configsvr": True,
    "members": [
        {%- for c in configsvr.nodes %}
        {%- set i = loop.index0 %}
        {"_id": {{ i }}, "host": "configsvr{{ i }}:{{ CONFIG_PORT }}"},
        {%- endfor %}
    ],
    "protocolVersion": 1,
    "settings": {
        {%- for key, value in get_replset_settings(configsvr).items() %}
        "{{ key }}": {{ value }},
        {%- endfor %}
    }
}

{% for c in configsvr.nodes -%}
{% set i = loop.index0 -%}
retry_until_success(mongo_process_running, {"host": "configsvr{{ i }}", "port": {{ CONFIG_PORT }}})
{% endfor -%}
retry_until_success(
    subprocess.run, {
        "args": [
            "mongo",
            "--host",
            "configsvr0",
            "--port",
            "{{ CONFIG_PORT }}",
            "--eval",
            f"rs.initiate({json.dumps(CONFIGSVR_CONFIG)})",
        ],
        "check": True,
    })

{%- for shard in shards %}
{% set s = loop.index0 %}
# Create Shard{{ s }}
SHARD{{ s }}_CONFIG = {
    "_id": "Shard{{ s }}",
    "members": [
        {%- for node in shard.nodes %}
        {%- set i = s*shard.num_nodes+loop.index0 %}
        {"_id": {{ loop.index0 }}, "host": "mongod{{ i }}:{{ MONGOD_PORT }}"},
        {%- endfor %}
    ],
    "protocolVersion": 1,
    "settings": {
        {%- for key, value in get_replset_settings(shard).items() %}
        "{{ key }}": {{ value }},
        {%- endfor %}
    }
}

{% for node in shard.nodes -%}
{%- set i = s*shard.num_nodes+loop.index0 -%}
retry_until_success(mongo_process_running, {"host": "mongod{{ i }}", "port": {{ MONGOD_PORT }}})
{% endfor -%}
retry_until_success(
    subprocess.run, {
        "args": [
            "mongo",
            "--host",
            "mongod{{ s*shard.num_nodes }}",
            "--port",
            "{{ MONGOD_PORT }}",
            "--eval",
            f"rs.initiate({json.dumps(SHARD{{ s }}_CONFIG)})",
        ],
        "check": True,
    })
{%- endfor %}

# Create Mongos
retry_until_success(
    subprocess.run, {
        "args": [
            {% for arg in mongos_args -%}
            "{{ arg }}",
            {% endfor -%}
            "--setParameter",
            "fassertOnLockTimeoutForStepUpDown=0",
            "--logpath",
            "/var/log/mongodb/mongodb.log",
            "--bind_ip",
            "0.0.0.0",
            "--fork"],
        "check": True,
    })

{%- for shard in shards %}
{% set s = loop.index0 %}
# Add Shard{{ s }} to cluster
retry_until_success(
    subprocess.run, {
        "args": [
            "mongo",
            "--host",
            "{{ mongos_name }}",
            "--port",
            "{{ MONGOS_PORT }}",
            "--eval",
            {%- set members = [] -%}
            {%- for node in shard.nodes -%}
            {{ members.append("mongod" + (s*shard.num_nodes+loop.index0)|string + ":" + MONGOD_PORT|string) or "" }}
            {%- endfor %}
            'sh.addShard("Shard{{ s }}/{{ members|join(',') }}")'
        ],
        "check": True,
    })
{%- endfor %}

print("{{ mongos_name }} setup completed successfully.")

while True:
    sleep(10)
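As a usage note (illustrative, not part of the commit): retry_until_success above polls a void function until it stops raising, so once the template is rendered, waiting for a config server looks roughly like this; host names and ports follow the template's constants:

    # Hypothetical rendered output for configsvr0 (CONFIG_PORT is 27019 in this topology).
    retry_until_success(mongo_process_running, {"host": "configsvr0", "port": 27019})

    # subprocess.run can be retried the same way, because check=True raises
    # CalledProcessError until the command finally succeeds.
    retry_until_success(
        subprocess.run,
        {"args": ["mongo", "--host", "configsvr0", "--port", "27019", "--eval", "rs.status()"],
         "check": True})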
@@ -1,4 +0,0 @@
{{ command }}

# this cryptic statement keeps the container running.
tail -f /dev/null
@@ -1,10 +0,0 @@
# Script to run the target antithesis suite

final_exit_code=0
sudo docker-compose down
sudo docker-compose up -d
sudo docker exec workload /bin/bash -c \
    "cd resmoke && . python3-venv/bin/activate && python3 buildscripts/resmoke.py run --suite {{ suite }} --sanityCheck"
final_exit_code=$(echo $?)
sudo docker-compose down
exit $final_exit_code
@@ -1,5 +0,0 @@
"""Script to initialize a workload container in Antithesis."""
from time import sleep

while True:
    sleep(10)
@@ -1,8 +1,10 @@
FROM ubuntu:22.04

EXPOSE 27017
EXPOSE 27018
EXPOSE 27019
# Expose a port range because the mongo binaries that are eventually started using this
# image will use the ports determined by resmoke. Resmoke determines the port to use
# for the mongo{d,s} processes by starting at 20000 and incrementing the port number
# for every new mongo{d,s} process.
EXPOSE 20000-20100

# prep the environment
RUN mkdir -p /data/db
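The comment above describes resmoke's port numbering; a minimal Python sketch of that allocation scheme (assumed, simplified behavior, not resmoke's actual PortAllocator implementation):

    # Simplified sketch: ports start at 20000 and increment per new mongo{d,s} process.
    BASE_PORT = 20000
    PORTS_PER_JOB = 250  # per-job range size is an assumption for illustration

    _next_offset = {}

    def next_fixture_port(job_num):
        """Return the next free port for this job, incrementing for every new process."""
        offset = _next_offset.get(job_num, 0)
        _next_offset[job_num] = offset + 1
        return BASE_PORT + job_num * PORTS_PER_JOB + offset

    assert next_fixture_port(0) == 20000
    assert next_fixture_port(0) == 20001  # increments for every new mongo{d,s} process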
@@ -5,7 +5,7 @@ EXPOSE 27018
EXPOSE 27019

RUN mkdir -p /scripts
RUN mkdir -p /var/log/resmoke
RUN mkdir -p /var/log/mongodb

RUN apt-get update
@@ -25,11 +25,6 @@ RUN python -m pip install --upgrade pip wheel
# -------------------
# Everything above this line should be common image setup

# copy resmoke, make the venv, and pip install
COPY src /resmoke

RUN bash -c "cd /resmoke && python3 -m venv python3-venv && . python3-venv/bin/activate && python3 -m pip install 'poetry==1.5.1' && python3 -m poetry install --no-root --sync"

# copy the mongo binary -- make sure it is executable
COPY mongo /usr/bin
RUN chmod +x /usr/bin/mongo
@@ -37,3 +32,22 @@ RUN chmod +x /usr/bin/mongo
COPY libvoidstar.so /usr/lib/libvoidstar.so

RUN /usr/bin/mongo --version

# Initialize the MongoDB repository
WORKDIR /mongo

# Copy mongodb python deps first
COPY src/pyproject.toml src/poetry.lock ./

# Install mongodb python deps with poetry
RUN python -m pip install 'poetry==1.5.1'
RUN python -m poetry install --no-root --sync

# Add the poetry venv to the $PATH so that it's activated by default
# (We use a symlink because the path to the poetry venv is unknown & generated at runtime)
RUN ln -s $(dirname $(dirname $(poetry run which python))) /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# copy mongo
COPY src /mongo
WORKDIR /mongo
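The `RUN ln -s ...` line above is a dense shell one-liner; an equivalent Python sketch (illustrative only, with a hypothetical venv path) makes the double-dirname explicit:

    import os
    import subprocess

    # `poetry run which python` prints something like
    # /root/.cache/pypoetry/virtualenvs/mongo-abc123-py3.10/bin/python (path is illustrative).
    python_path = subprocess.check_output(["poetry", "run", "which", "python"], text=True).strip()
    venv_dir = os.path.dirname(os.path.dirname(python_path))  # strip the trailing /bin/python
    os.symlink(venv_dir, "/opt/venv")  # stable path that can be baked into $PATH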
@@ -1,146 +0,0 @@
#!/usr/bin/env python3
"""Command line utility for generating suites for targeting antithesis."""

import os.path
import sys

import click
import yaml

# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

HOOKS_BLACKLIST = [
    "CleanEveryN",
    "ContinuousStepdown",
    "CheckOrphansDeleted",
    # TODO SERVER-70396 re-enable hook once the checkMetadata feature flag is removed
    # To check the feature flag we need to contact directly the config server that is not exposed in the ExternalFixture
    "CheckMetadataConsistencyInBackground",
]

_SUITES_PATH = os.path.join("buildscripts", "resmokeconfig", "suites")

MONGOS_PORT = 27017


def delete_archival(suite):
    """Remove archival for Antithesis environment."""
    suite.pop("archive", None)
    suite.get("executor", {}).pop("archive", None)


def make_hooks_compatible(suite):
    """Make hooks compatible in Antithesis environment."""
    if suite.get("executor", {}).get("hooks", None):
        # it's either a list of strings, or a list of dicts, each with key 'class'
        if isinstance(suite["executor"]["hooks"][0], str):
            suite["executor"]["hooks"] = ["AntithesisLogging"] + [
                hook for hook in suite["executor"]["hooks"] if hook not in HOOKS_BLACKLIST
            ]
        elif isinstance(suite["executor"]["hooks"][0], dict):
            suite["executor"]["hooks"] = [{"class": "AntithesisLogging"}] + [
                hook for hook in suite["executor"]["hooks"] if hook["class"] not in HOOKS_BLACKLIST
            ]
        else:
            raise RuntimeError('Unknown structure in hook. File a TIG ticket.')


def use_external_fixture(suite_name, suite):
    """Use external version of this fixture."""
    if suite.get("executor", {}).get("fixture", None):
        suite["executor"]["fixture"] = {
            "class": f"External{suite['executor']['fixture']['class']}",
            "original_suite_name": suite_name,
        }


def get_mongos_connection_url(suite):
    """
    Return the mongos connection URL for suite if Antithesis compatible.

    :param suite: Parsed YAML document for the suite we wish to connect to.
    :return: Connection url for the suite, or a warning if Antithesis incompatible.
    """
    if suite.get("executor", {}).get("fixture", {}).get("num_mongos", None):
        return "mongodb://" + ",".join(
            [f"mongos{i}:{MONGOS_PORT}" for i in range(suite['executor']['fixture']['num_mongos'])])
    else:
        return "ANTITHESIS_INCOMPATIBLE"


def update_test_data(suite):
    """Update TestData to be compatible with antithesis."""
    suite.setdefault("executor", {}).setdefault(
        "config", {}).setdefault("shell_options", {}).setdefault("global_vars", {}).setdefault(
            "TestData", {}).update({"useActionPermittedFile": False})


def update_shell(suite):
    """Update shell for when running in Antithesis."""
    suite.setdefault("executor", {}).setdefault("config", {}).setdefault("shell_options",
                                                                         {}).setdefault("eval", "")
    suite["executor"]["config"]["shell_options"]["eval"] += "jsTestLog = Function.prototype;"


def update_exclude_tags(suite):
    """Update the exclude tags to exclude antithesis incompatible tests."""
    suite.setdefault('selector', {})
    if not suite.get('selector').get('exclude_with_any_tags'):
        suite['selector']['exclude_with_any_tags'] = ["antithesis_incompatible"]
    else:
        suite['selector']['exclude_with_any_tags'].append('antithesis_incompatible')


def get_antithesis_suite_config(suite_name):
    """Modify suite in-place to be antithesis compatible."""
    with open(os.path.join(_SUITES_PATH, f"{suite_name}.yml")) as fstream:
        suite = yaml.safe_load(fstream)

    delete_archival(suite)
    make_hooks_compatible(suite)
    use_external_fixture(suite_name, suite)
    update_test_data(suite)
    update_shell(suite)
    update_exclude_tags(suite)

    return suite


@click.group()
def cli():
    """CLI Entry point."""
    pass


def _generate(suite_name: str) -> None:
    suite = get_antithesis_suite_config(suite_name)

    out = yaml.dump(suite)
    with open(os.path.join(_SUITES_PATH, f"antithesis_{suite_name}.yml"), "w") as fstream:
        fstream.write(
            "# this file was generated by buildscripts/antithesis_suite.py generate {}\n".format(
                suite_name))
        fstream.write("# Do not modify by hand\n")
        fstream.write(out)


@cli.command()
@click.argument('suite_name')
def generate(suite_name: str) -> None:
    """Generate a single suite."""
    _generate(suite_name)


@cli.command('generate-all')
def generate_all():
    """Generate all suites."""
    for path in os.listdir(_SUITES_PATH):
        if os.path.isfile(os.path.join(_SUITES_PATH, path)):
            suite = path.split(".")[0]
            _generate(suite)


if __name__ == "__main__":
    cli()
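A worked example of make_hooks_compatible from the deleted script above (illustrative inputs, derived from the function's two branches):

    suite = {"executor": {"hooks": ["CleanEveryN", "CheckReplDBHash"]}}
    make_hooks_compatible(suite)
    # AntithesisLogging is prepended and blacklisted hooks are dropped:
    assert suite["executor"]["hooks"] == ["AntithesisLogging", "CheckReplDBHash"]

    suite = {"executor": {"hooks": [{"class": "ContinuousStepdown"},
                                    {"class": "ValidateCollections"}]}}
    make_hooks_compatible(suite)
    assert suite["executor"]["hooks"] == [{"class": "AntithesisLogging"},
                                          {"class": "ValidateCollections"}]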
@@ -1,139 +0,0 @@
##########################################################
# THIS IS A GENERATED FILE -- DO NOT MODIFY.
# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
# AND REGENERATE THE MATRIX SUITES.
#
# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/antithesis_concurrency_sharded_with_stepdowns_and_balancer.yml
# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
##########################################################
antithesis: true
executor:
  config:
    shell_options:
      eval: jsTestLog = Function.prototype;
      global_vars:
        TestData:
          runningWithBalancer: true
          runningWithConfigStepdowns: true
          runningWithShardStepdowns: true
          useActionPermittedFile: false
  fixture:
    class: ExternalShardedClusterFixture
    original_suite_name: concurrency_sharded_with_stepdowns_and_balancer
  hooks:
  - class: AntithesisLogging
  - class: CheckShardFilteringMetadata
  - class: CheckReplDBHash
  - class: CheckRoutingTableConsistency
  - class: ValidateCollections
  - class: CleanupConcurrencyWorkloads
matrix_suite: true
selector:
  exclude_files:
  - jstests/concurrency/fsm_workloads/distinct.js
  - jstests/concurrency/fsm_workloads/distinct_noindex.js
  - jstests/concurrency/fsm_workloads/distinct_projection.js
  - jstests/concurrency/fsm_workloads/remove_where.js
  - jstests/concurrency/fsm_workloads/agg_match.js
  - jstests/concurrency/fsm_workloads/map_reduce_inline.js
  - jstests/concurrency/fsm_workloads/map_reduce_interrupt.js
  - jstests/concurrency/fsm_workloads/map_reduce_merge.js
  - jstests/concurrency/fsm_workloads/map_reduce_reduce.js
  - jstests/concurrency/fsm_workloads/map_reduce_replace.js
  - jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js
  - jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js
  - jstests/concurrency/fsm_workloads/auth_create_role.js
  - jstests/concurrency/fsm_workloads/auth_create_user.js
  - jstests/concurrency/fsm_workloads/auth_drop_role.js
  - jstests/concurrency/fsm_workloads/auth_drop_user.js
  - jstests/concurrency/fsm_workloads/agg_group_external.js
  - jstests/concurrency/fsm_workloads/agg_sort_external.js
  - jstests/concurrency/fsm_workloads/compact.js
  - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js
  - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js
  - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js
  - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js
  - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js
  - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js
  - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js
  - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js
  - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js
  - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js
  - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js
  - jstests/concurrency/fsm_workloads/remove_single_document.js
  - jstests/concurrency/fsm_workloads/update_where.js
  - jstests/concurrency/fsm_workloads/upsert_where.js
  - jstests/concurrency/fsm_workloads/yield_and_hashed.js
  - jstests/concurrency/fsm_workloads/yield_and_sorted.js
  - jstests/concurrency/fsm_workloads/sharded_base_partitioned.js
  - jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js
  - jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js
  - jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js
  - jstests/concurrency/fsm_workloads/kill_aggregation.js
  - jstests/concurrency/fsm_workloads/kill_rooted_or.js
  - jstests/concurrency/fsm_workloads/agg_base.js
  - jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js
  - jstests/concurrency/fsm_workloads/create_index_background.js
  - jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js
  - jstests/concurrency/fsm_workloads/create_index_background_wildcard.js
  - jstests/concurrency/fsm_workloads/globally_managed_cursors.js
  - jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js
  - jstests/concurrency/fsm_workloads/indexed_insert_text.js
  - jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js
  - jstests/concurrency/fsm_workloads/indexed_insert_upsert.js
  - jstests/concurrency/fsm_workloads/indexed_insert_where.js
  - jstests/concurrency/fsm_workloads/list_indexes.js
  - jstests/concurrency/fsm_workloads/query_stats_concurrent.js
  - jstests/concurrency/fsm_workloads/reindex.js
  - jstests/concurrency/fsm_workloads/reindex_background.js
  - jstests/concurrency/fsm_workloads/reindex_writeconflict.js
  - jstests/concurrency/fsm_workloads/remove_multiple_documents.js
  - jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js
  - jstests/concurrency/fsm_workloads/update_where.js
  - jstests/concurrency/fsm_workloads/yield.js
  - jstests/concurrency/fsm_workloads/yield_fetch.js
  - jstests/concurrency/fsm_workloads/yield_rooted_or.js
  - jstests/concurrency/fsm_workloads/yield_sort.js
  - jstests/concurrency/fsm_workloads/yield_sort_merge.js
  - jstests/concurrency/fsm_workloads/yield_text.js
  - jstests/concurrency/fsm_workloads/yield_with_drop.js
  - jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js
  - jstests/concurrency/fsm_workloads/update_and_bulk_insert.js
  - jstests/concurrency/fsm_workloads/update_check_index.js
  - jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js
  - jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js
  - jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js
  - jstests/concurrency/fsm_workloads/yield_id_hack.js
  - jstests/concurrency/fsm_workloads/agg_out.js
  - jstests/concurrency/fsm_workloads/agg_sort.js
  - jstests/concurrency/fsm_workloads/collmod.js
  - jstests/concurrency/fsm_workloads/collmod_separate_collections.js
  - jstests/concurrency/fsm_workloads/collmod_writeconflict.js
  - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js
  - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js
  - jstests/concurrency/fsm_workloads/invalidated_cursors.js
  - jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js
  - jstests/concurrency/fsm_workloads/view_catalog.js
  - jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js
  - jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js
  - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js
  - jstests/concurrency/fsm_workloads/drop_collection.js
  - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js
  - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js
  - jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js
  - jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js
  - jstests/concurrency/fsm_workloads/create_timeseries_collection.js
  - jstests/concurrency/fsm_workloads/map_reduce_drop.js
  exclude_with_any_tags:
  - assumes_balancer_off
  - requires_replication
  - requires_non_retryable_writes
  - uses_curop_agg_stage
  - requires_profiling
  - does_not_support_stepdowns
  - assumes_unsharded_collection
  - antithesis_incompatible
  roots:
  - jstests/concurrency/fsm_workloads/**/*.js
  - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js
test_kind: fsm_workload_test
@@ -1,4 +0,0 @@
base_suite: concurrency_sharded_with_stepdowns_and_balancer
antithesis: true
overrides:
- "antithesis.concurrency_sharded_with_stepdowns_and_balancer"
@@ -1,8 +0,0 @@
### Overrides for various antithesis suite external fixture configurations ###

- name: concurrency_sharded_with_stepdowns_and_balancer
  value:
    executor:
      fixture:
        class: ExternalShardedClusterFixture
        original_suite_name: concurrency_sharded_with_stepdowns_and_balancer
@@ -169,6 +169,18 @@ DEFAULTS = {
    "otel_parent_id": None,
    "otel_collector_endpoint": None,
    "otel_collector_file": None,

    # The images to build for an External System Under Test
    "docker_compose_build_images": None,

    # Where the `--dockerComposeBuildImages` is happening.
    "docker_compose_build_env": "local",

    # Tag to use for images built & used for an External System Under Test
    "docker_compose_tag": "development",

    # Whether or not this resmoke suite is running against an External System Under Test
    "external_sut": False,
}

_SuiteOptions = collections.namedtuple("_SuiteOptions", [
@@ -629,3 +641,15 @@ SYMBOLIZER_CLIENT_ID = None

# Sanity check
SANITY_CHECK = False

# The images to build for an External System Under Test
DOCKER_COMPOSE_BUILD_IMAGES = None

# Where the `--dockerComposeBuildImages` is happening.
DOCKER_COMPOSE_BUILD_ENV = "local"

# Tag to use for images built & used for an External System Under Test
DOCKER_COMPOSE_TAG = "development"

# Whether or not this resmoke suite is running against an External System Under Test
EXTERNAL_SUT = False
@@ -429,6 +429,14 @@ or explicitly pass --installDir to the run subcommand of buildscripts/resmoke.py
    _config.TRANSPORT_LAYER = config.pop("transport_layer")
    _config.USER_FRIENDLY_OUTPUT = config.pop("user_friendly_output")
    _config.SANITY_CHECK = config.pop("sanity_check")
    _config.DOCKER_COMPOSE_BUILD_IMAGES = config.pop("docker_compose_build_images")
    if _config.DOCKER_COMPOSE_BUILD_IMAGES is not None:
        _config.DOCKER_COMPOSE_BUILD_IMAGES = _config.DOCKER_COMPOSE_BUILD_IMAGES.split(",")
    _config.DOCKER_COMPOSE_BUILD_ENV = config.pop("docker_compose_build_env")
    _config.DOCKER_COMPOSE_TAG = config.pop("docker_compose_tag")
    # Always set this to True if we are building images for docker compose
    _config.EXTERNAL_SUT = config.pop(
        "external_sut") or _config.DOCKER_COMPOSE_BUILD_IMAGES is not None

    # Internal testing options.
    _config.INTERNAL_PARAMS = config.pop("internal_params")
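The split above turns the raw --dockerComposeBuildImages value into a list; a worked example with an illustrative flag value:

    raw = "workload,mongo-binaries,config"          # value of --dockerComposeBuildImages
    images = raw.split(",") if raw is not None else None
    assert images == ["workload", "mongo-binaries", "config"]
    # EXTERNAL_SUT is then forced on, since building images implies an external SUT.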
@@ -98,7 +98,12 @@ class Process(object):
        if env_vars is not None:
            self.env.update(env_vars)

        self.pid = None
        # If we are running against an External System Under Test & this is a `mongo{d,s}` process, we make this process a NOOP.
        # `mongo{d,s}` processes are not running locally for an External System Under Test.
        self.NOOP = _config.EXTERNAL_SUT and os.path.basename(self.args[0]) in ["mongod", "mongos"]

        # The `pid` attribute is assigned after the local process is started. If this process is a NOOP, we assign it a dummy value.
        self.pid = 1 if self.NOOP else None

        self._process = None
        self._recorder = None
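The NOOP flag above short-circuits all process management when the mongo{d,s} processes actually live in containers; a condensed, illustrative sketch of the pattern (not the real Process class):

    import os

    class ExternalAwareProcess:
        """Sketch: skip local process management for mongo{d,s} when the SUT is external."""

        def __init__(self, args, external_sut):
            self.args = args
            self.NOOP = external_sut and os.path.basename(args[0]) in ["mongod", "mongos"]
            self.pid = 1 if self.NOOP else None  # dummy pid for NOOP processes

        def start(self):
            if self.NOOP:
                return None  # the container topology already runs this process
            ...  # launch the process locally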
@@ -108,6 +113,8 @@ class Process(object):

    def start(self):
        """Start the process and the logger pipes for its stdout and stderr."""
        if self.NOOP:
            return None

        creation_flags = 0
        if sys.platform == "win32" and _JOB_OBJECT is not None:
@@ -175,6 +182,9 @@ class Process(object):

    def stop(self, mode=None):
        """Terminate the process."""
        if self.NOOP:
            return None

        if mode is None:
            mode = fixture_interface.TeardownMode.TERMINATE
@@ -244,10 +254,14 @@ class Process(object):

    def poll(self):
        """Poll."""
        if self.NOOP:
            return None
        return self._process.poll()

    def wait(self, timeout=None):
        """Wait until process has terminated and all output has been consumed by the logger pipes."""
        if self.NOOP:
            return None

        return_code = self._process.wait(timeout)
@@ -286,6 +300,8 @@ class Process(object):

    def pause(self):
        """Send the SIGSTOP signal to the process and wait for it to be stopped."""
        if self.NOOP:
            return None
        while True:
            self._process.send_signal(signal.SIGSTOP)
            mongod_process = psutil.Process(self.pid)
@@ -297,6 +313,8 @@ class Process(object):

    def resume(self):
        """Send the SIGCONT signal to the process."""
        if self.NOOP:
            return None
        self._process.send_signal(signal.SIGCONT)

    def __str__(self):
@@ -85,6 +85,18 @@ def mongod_program(logger, job_num, executable, process_kwargs, mongod_options):
    args = [executable]
    mongod_options = mongod_options.copy()

    if config.EXTERNAL_SUT:
        args[0] = os.path.basename(args[0])
        mongod_options["set_parameters"]["fassertOnLockTimeoutForStepUpDown"] = 0
        mongod_options["set_parameters"].pop("backtraceLogFile", None)
        mongod_options.update({
            "logpath": "/var/log/mongodb/mongodb.log",
            "dbpath": "/data/db",
            "bind_ip": "0.0.0.0",
            "oplogSize": "256",
            "wiredTigerCacheSizeGB": "1",
        })

    if "port" not in mongod_options:
        mongod_options["port"] = network.PortAllocator.next_fixture_port(job_num)
@@ -118,6 +130,11 @@ def mongos_program(logger, job_num, executable=None, process_kwargs=None, mongos

    mongos_options = mongos_options.copy()

    if config.EXTERNAL_SUT:
        args[0] = os.path.basename(args[0])
        mongos_options["set_parameters"]["fassertOnLockTimeoutForStepUpDown"] = 0
        mongos_options.update({"logpath": "/var/log/mongodb/mongodb.log", "bind_ip": "0.0.0.0"})

    if "port" not in mongos_options:
        mongos_options["port"] = network.PortAllocator.next_fixture_port(job_num)
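Concretely (with illustrative values), the EXTERNAL_SUT branch above rewrites a locally-resolved binary path and options so they make sense inside a container:

    mongod_options = {"set_parameters": {"backtraceLogFile": "/tmp/bt.log"}}
    args = ["/home/user/mongo/dist-test/bin/mongod"]  # local path is illustrative

    # args[0] becomes a bare binary name so the container's $PATH resolves it:
    args[0] = os.path.basename(args[0])               # -> "mongod"
    mongod_options["set_parameters"]["fassertOnLockTimeoutForStepUpDown"] = 0
    mongod_options["set_parameters"].pop("backtraceLogFile", None)
    mongod_options.update({"logpath": "/var/log/mongodb/mongodb.log",
                           "dbpath": "/data/db", "bind_ip": "0.0.0.0"})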
@@ -1,109 +0,0 @@
"""Generate a docker compose configuration and all necessary infrastructure."""

import os
import sys
from buildscripts.resmokelib.errors import InvalidMatrixSuiteError, RequiresForceRemove
from buildscripts.resmokelib.plugin import PluginInterface, Subcommand
from buildscripts.resmokelib.testing.docker_cluster_image_builder import DockerComposeImageBuilder

_HELP = """
Generate a docker compose configuration and all necessary infrastructure -- including base images.
"""
_COMMAND = "generate-docker-compose"


class GenerateDockerCompose(Subcommand):
    """Generate docker compose configuration and infrastructure."""

    def __init__(self, antithesis_suite_name, build_base_images, tag, in_evergreen):
        """
        Constructor for GenerateDockerCompose subcommand.

        :param antithesis_suite_name: The antithesis suite to generate a docker compose configuration for.
        :param build_base_images: Whether to build the base images or not.
        :param tag: The tag to use for the docker images and docker-compose file.
        :param in_evergreen: Whether this is running in Evergreen or not.
        """
        self._antithesis_suite_name = antithesis_suite_name
        self._build_base_images = build_base_images
        self._tag = tag
        self._in_evergreen = in_evergreen

    def execute(self) -> None:
        """
        Generate docker compose configuration and infrastructure.

        :return: None
        """
        try:
            image_builder = DockerComposeImageBuilder(self._tag, self._in_evergreen)
            if self._antithesis_suite_name:
                image_builder.build_config_image(self._antithesis_suite_name)
            if self._build_base_images:
                image_builder.build_base_images()
            if self._build_base_images and self._antithesis_suite_name:
                success_message = f"""
                Successfully generated docker compose configuration and built required base images.

                You can run the following command to verify that this docker compose configuration works:
                `cd antithesis/antithesis_config/{self._antithesis_suite_name} && bash run_suite.sh`
                """
                print(success_message)

        except RequiresForceRemove as exc:
            print(exc)
            sys.exit(2)
        except AssertionError as exc:
            print(exc)
            sys.exit(3)
        except InvalidMatrixSuiteError as exc:
            print(exc)
            sys.exit(4)
        except Exception as exc:
            raise Exception(
                "Something unexpected happened while building antithesis images.") from exc


class GenerateDockerComposePlugin(PluginInterface):
    """Generate docker compose configuration and infrastructure."""

    def add_subcommand(self, subparsers):
        """
        Add 'generate-docker-compose' subcommand.

        :param subparsers: argparse parser to add to
        :return: None
        """
        parser = subparsers.add_parser(_COMMAND, help=_HELP)
        parser.add_argument("-t", "--tag", dest="tag", metavar="TAG", default="local-development",
                            help="Build base images needed for the docker compose configuration.")
        parser.add_argument("-s", "--skip-base-image-build", dest="skip_base_image_build",
                            default=False, action="store_true",
                            help="Skip building images for the docker compose configuration.")
        parser.add_argument(
            "--in-evergreen", dest="in_evergreen", default=False, action="store_true",
            help="If this is running in Evergreen, certain artifacts are expected to already exist."
        )
        parser.add_argument(
            nargs="?", dest="antithesis_suite", metavar="SUITE", help=
            ("Antithesis Matrix Suite file from the resmokeconfig/matrix_suites/mappings directory."
             " Use the basename without the .yml extension. If empty, only base images will be built."
            ))

    def parse(self, subcommand, parser, parsed_args, **kwargs):
        """
        Return the GenerateDockerCompose subcommand for execution.

        :param subcommand: equivalent to parsed_args.command
        :param parser: parser used
        :param parsed_args: output of parsing
        :param kwargs: additional args
        :return: None or a Subcommand
        """
        if subcommand != _COMMAND:
            return None

        build_base_images = parsed_args.skip_base_image_build is False

        return GenerateDockerCompose(parsed_args.antithesis_suite, build_base_images,
                                     parsed_args.tag, parsed_args.in_evergreen)
@@ -159,16 +159,18 @@ def new_job_logger(test_kind, job_num) -> logging.Logger:
class FixtureLogger(logging.Logger):
    """Custom fixture logger."""

    def __init__(self, name, full_name):
    def __init__(self, name, full_name, external_sut_hostname=None):
        """Initialize fixture logger."""
        self.full_name = full_name
        self.external_sut_hostname = external_sut_hostname
        super().__init__(name)


def new_fixture_logger(fixture_class, job_num):
    """Create a logger for a particular fixture class."""
    full_name = "%s:job%d" % (fixture_class, job_num)
    logger = FixtureLogger(_shorten(full_name), full_name)
    external_sut_hostname = full_name.replace(":", "_").lower()
    logger = FixtureLogger(_shorten(full_name), full_name, external_sut_hostname)
    logger.parent = ROOT_FIXTURE_LOGGER
    _add_build_logger_handler(logger, job_num)
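The derived external_sut_hostname above just normalizes the logger's full name into something usable as a container hostname; a worked example with an illustrative name:

    full_name = "ShardedClusterFixture:job0"
    external_sut_hostname = full_name.replace(":", "_").lower()
    assert external_sut_hostname == "shardedclusterfixture_job0"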
@@ -179,7 +181,8 @@ def new_fixture_logger(fixture_class, job_num):
def new_fixture_node_logger(fixture_class, job_num, node_name):
    """Create a logger for a particular element in a multi-process fixture."""
    full_name = "%s:job%d:%s" % (fixture_class, job_num, node_name)
    logger = FixtureLogger(_shorten(full_name), full_name)
    external_sut_hostname = node_name.replace(":", "_")
    logger = FixtureLogger(_shorten(full_name), full_name, external_sut_hostname)
    logger.parent = _FIXTURE_LOGGER_REGISTRY[job_num]
    return logger
@@ -7,7 +7,6 @@ from buildscripts.resmokelib import configure_resmoke
from buildscripts.resmokelib.discovery import DiscoveryPlugin
from buildscripts.resmokelib.generate_fcv_constants import \
    GenerateFCVConstantsPlugin
from buildscripts.resmokelib.generate_docker_compose import GenerateDockerComposePlugin
from buildscripts.resmokelib.generate_fuzz_config import GenerateFuzzConfigPlugin
from buildscripts.resmokelib.hang_analyzer import HangAnalyzerPlugin
from buildscripts.resmokelib.hang_analyzer.core_analyzer import CoreAnalyzerPlugin
@@ -26,7 +25,6 @@ _PLUGINS = [
    DiscoveryPlugin(),
    MultiversionPlugin(),
    GenerateFuzzConfigPlugin(),
    GenerateDockerComposePlugin(),
]
@@ -33,6 +33,7 @@ from buildscripts.resmokelib.run import runtime_recorder
from buildscripts.resmokelib.run import list_tags
from buildscripts.resmokelib.run.runtime_recorder import compare_start_time
from buildscripts.resmokelib.suitesconfig import get_suite_files
from buildscripts.resmokelib.testing.docker_cluster_image_builder import build_images
from buildscripts.resmokelib.testing.suite import Suite
from buildscripts.resmokelib.utils.dictionary import get_dict_value
|
|||
try:
|
||||
executor = testing.executor.TestSuiteExecutor(
|
||||
self._exec_logger, suite, archive_instance=self._archive, **executor_config)
|
||||
executor.run()
|
||||
# If this is a "docker compose build", we just build the docker compose images for
|
||||
# this resmoke configuration and exit.
|
||||
if config.DOCKER_COMPOSE_BUILD_IMAGES:
|
||||
build_images(suite.get_name(), executor._jobs[0].fixture)
|
||||
suite.return_code = 0
|
||||
else:
|
||||
executor.run()
|
||||
except (errors.UserInterrupt, errors.LoggerRuntimeConfigError) as err:
|
||||
self._exec_logger.error("Encountered an error when running %ss of suite %s: %s",
|
||||
suite.test_kind, suite.get_display_name(), err)
|
||||
|
|
@@ -828,6 +835,35 @@ class RunPlugin(PluginInterface):
                " only tests which have at least one of the specified tags will be"
                " run."))

        parser.add_argument(
            "--dockerComposeBuildImages", dest="docker_compose_build_images",
            metavar="IMAGE1,IMAGE2,IMAGE3", help=
            ("Comma separated list of base images to build for running resmoke against an External System Under Test:"
             " (1) `workload`: Your mongo repo with a python development environment setup."
             " (2) `mongo-binaries`: The `mongo`, `mongod`, `mongos` binaries to run tests with."
             " (3) `config`: The target suite's `docker-compose.yml` file, startup scripts & configuration."
             " All three images are needed to successfully setup an External System Under Test."
             " This will not run any tests. It will just build the images and generate"
             " the `docker-compose.yml` configuration to set up the External System Under Test for the desired suite."
            ))

        parser.add_argument(
            "--dockerComposeBuildEnv", dest="docker_compose_build_env",
            choices=["local", "evergreen"], default="local", help=
            ("Set the environment where this `--dockerComposeBuildImages` is happening -- defaults to: `local`."
            ))

        parser.add_argument(
            "--dockerComposeTag", dest="docker_compose_tag", metavar="TAG", default="development",
            help=("The `tag` name to use for images built during a `--dockerComposeBuildImages`."))

        parser.add_argument(
            "--externalSUT", dest="external_sut", action="store_true", default=False, help=
            ("This option should only be used when running resmoke against an External System Under Test."
             " The External System Under Test should be setup via the command generated after"
             " running: `buildscripts/resmoke.py run --suite [suite_name] ... --dockerComposeBuildImages"
             " config,workload,mongo-binaries`."))

        parser.add_argument(
            "--sanityCheck", action="store_true", dest="sanity_check", help=
            "Truncate the test queue to 1 item, just in order to verify the suite is properly set up."
@@ -7,7 +7,7 @@ from threading import Lock
from typing import Dict, List

import yaml
from buildscripts.antithesis_suite import get_antithesis_suite_config
from buildscripts.resmokelib.utils.external_suite import make_external

import buildscripts.resmokelib.utils.filesystem as fs
from buildscripts.resmokelib.logging import loggers
@@ -275,17 +275,12 @@ class MatrixSuiteConfig(SuiteConfigInterface):
    def process_overrides(cls, suite, overrides, suite_name):
        """Provide override key-value pairs for a given matrix suite."""
        base_suite_name = suite["base_suite"]
        antithesis = suite.get("antithesis", None)
        override_names = suite.get("overrides", None)
        excludes_names = suite.get("excludes", None)
        eval_names = suite.get("eval", None)
        description = suite.get("description")

        if antithesis:
            base_suite = get_antithesis_suite_config(base_suite_name)
            base_suite["antithesis"] = True
        else:
            base_suite = ExplicitSuiteConfig.get_config_obj_no_verify(base_suite_name)
        base_suite = ExplicitSuiteConfig.get_config_obj_no_verify(base_suite_name)

        if base_suite is None:
            raise ValueError(f"Unknown base suite {base_suite_name} for matrix suite {suite_name}")
@@ -475,7 +470,13 @@ class SuiteFinder(object):
            raise errors.DuplicateSuiteDefinition(
                "Multiple definitions for suite '%s'" % suite_path)

        return matrix_suite or explicit_suite
        suite = matrix_suite or explicit_suite

        # If this is running against an External System Under Test, we need to make the suite compatible.
        if _config.EXTERNAL_SUT:
            make_external(suite)

        return suite


def get_suite(suite_name_or_path) -> _suite.Suite:
@ -1,321 +0,0 @@
|
|||
"""Create necessary Docker files and topology for a suite to be able to use in Antithesis."""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
|
||||
from buildscripts.resmokelib.core import programs
|
||||
from buildscripts.resmokelib.errors import InvalidMatrixSuiteError, RequiresForceRemove
|
||||
from buildscripts.resmokelib.suitesconfig import get_suite
|
||||
from buildscripts.resmokelib.testing.fixtures import _builder
|
||||
|
||||
MONGOS_PORT = 27017
|
||||
MONGOD_PORT = 27018
|
||||
CONFIG_PORT = 27019
|
||||
|
||||
|
||||
def get_antithesis_base_suite_fixture(antithesis_suite_name) -> None:
|
||||
"""
|
||||
Get the base suite fixture to use for generating docker compose configuration.
|
||||
|
||||
:param antithesis_suite_name: The antithesis suite to find the base suite fixture for.
|
||||
"""
|
||||
antithesis_suite = get_suite(antithesis_suite_name)
|
||||
if not antithesis_suite.is_matrix_suite() or not antithesis_suite.is_antithesis_suite():
|
||||
raise InvalidMatrixSuiteError(
|
||||
f"The specified suite is not an antithesis matrix suite: {antithesis_suite.get_name()}")
|
||||
|
||||
antithesis_fixture = antithesis_suite.get_executor_config()["fixture"]["class"]
|
||||
if antithesis_fixture != "ExternalShardedClusterFixture":
|
||||
raise InvalidMatrixSuiteError(
|
||||
"Generating docker compose infrastructure for this external fixture is not yet supported"
|
||||
)
|
||||
|
||||
antithesis_base_suite = antithesis_suite.get_executor_config()["fixture"]["original_suite_name"]
|
||||
base_suite_fixture = _builder.make_dummy_fixture(antithesis_base_suite)
|
||||
if base_suite_fixture.__class__.__name__ != "ShardedClusterFixture":
|
||||
raise InvalidMatrixSuiteError(
|
||||
"Generating docker compose infrastructure for this base suite fixture is not yet supported.{}"
|
||||
)
|
||||
|
||||
return base_suite_fixture
|
||||
|
||||
|
||||
class DockerClusterConfigWriter(object):
|
||||
"""Create necessary files and topology for a suite to run with Antithesis."""
|
||||
|
||||
def __init__(self, antithesis_suite_name, tag):
|
||||
"""
|
||||
Initialize the class with the specified fixture.
|
||||
|
||||
:param antithesis_suite_name: Suite we wish to generate files and topology for.
|
||||
:param tag: Tag to use for the docker compose configuration and/or base images.
|
||||
"""
|
||||
self.ip_address = 1
|
||||
self.antithesis_suite_name = antithesis_suite_name
|
||||
self.fixture = get_antithesis_base_suite_fixture(antithesis_suite_name)
|
||||
self.tag = tag
|
||||
self.build_context = os.path.join(os.getcwd(),
|
||||
f"antithesis/antithesis_config/{antithesis_suite_name}")
|
||||
self.jinja_env = Environment(
|
||||
loader=FileSystemLoader(os.path.join(os.getcwd(), "antithesis/templates/")))
|
||||
|
||||
def generate_docker_sharded_cluster_config(self):
|
||||
"""
|
||||
Generate all necessary files and topology for the suite fixture.
|
||||
|
||||
:return: None.
|
||||
"""
|
||||
# Create volume directory structure
|
||||
self.create_volume_directories()
|
||||
# Create configsvr init scripts
|
||||
self.create_configsvr_init()
|
||||
# Create mongod init scripts
|
||||
self.create_mongod_init()
|
||||
# Create mongos init scripts
|
||||
self.create_mongos_init()
|
||||
# Create workload init
|
||||
self.write_workload_init()
|
||||
# Create docker-compose
|
||||
self.write_docker_compose()
|
||||
# Create dockerfile
|
||||
self.write_dockerfile()
|
||||
# Create run suite script
|
||||
self.write_run_suite_script()
|
||||
|
||||
def write_docker_compose(self):
|
||||
"""
|
||||
Write the docker-compose.yml file utilizing information from the suite fixture.
|
||||
|
||||
:return: None.
|
||||
"""
|
||||
with open(os.path.join(self.build_context, "docker-compose.yml"), 'w') as file:
|
||||
template = self.jinja_env.get_template("docker_compose_template.yml.jinja")
|
||||
file.write(
|
||||
template.render(
|
||||
num_configsvr=self.fixture.configsvr_options.get("num_nodes", 1),
|
||||
num_shard=self.fixture.num_shards, num_node_per_shard=self.fixture.
|
||||
num_rs_nodes_per_shard, num_mongos=self.fixture.num_mongos, tag=self.tag,
|
||||
get_and_increment_ip_address=self.get_and_increment_ip_address) + "\n")
|
||||
|
||||
def write_workload_init(self):
|
||||
"""
|
||||
Write the workload_init.py file to be Antithesis compatible.
|
||||
|
||||
:return: None.
|
||||
"""
|
||||
with open(os.path.join(self.build_context, "scripts/workload_init.py"), 'w') as file:
|
||||
template = self.jinja_env.get_template("workload_init_template.py.jinja")
|
||||
file.write(template.render() + "\n")
|
||||
|
||||
def write_dockerfile(self):
|
||||
"""
|
||||
Write the Dockerfile for the suite.
|
||||
|
||||
:return: None.
|
||||
"""
|
||||
with open(os.path.join(self.build_context, "Dockerfile"), 'w') as file:
|
||||
template = self.jinja_env.get_template("dockerfile_template.jinja")
|
||||
file.write(template.render() + "\n")
|
||||
|
||||
def create_volume_directories(self):
|
||||
"""
|
||||
Create the necessary volume directories for the Docker topology.
|
||||
|
||||
:return: None.
|
||||
"""
|
||||
paths = [
|
||||
self.build_context,
|
||||
os.path.join(self.build_context, "scripts"),
|
||||
os.path.join(self.build_context, "logs"),
|
||||
os.path.join(self.build_context, "data"),
|
||||
os.path.join(self.build_context, "debug")
|
||||
]
|
||||
for p in paths:
|
||||
if os.path.exists(p):
|
||||
try:
|
||||
shutil.rmtree(p)
|
||||
except Exception as exc:
|
||||
exception_text = f"""
|
||||
Could not remove directory due to old artifacts from a previous run.
|
||||
|
||||
Please remove this directory and try again -- you may need to force remove:
|
||||
`{os.path.relpath(p)}`
|
||||
"""
|
||||
raise RequiresForceRemove(exception_text) from exc
|
||||
|
||||
os.makedirs(p)
|
||||
|
||||
def get_and_increment_ip_address(self):
|
||||
"""
|
||||
Increment and return ip_address attribute for this suite if it is between 0-24, else exit with error code 2.
|
||||
|
||||
:return: ip_address.
|
||||
"""
|
||||
if self.ip_address > 24:
|
||||
print(f"Exiting with code 2 -- ipv4_address exceeded 10.20.20.24: {self.ip_address}")
|
||||
sys.exit(2)
|
||||
self.ip_address += 1
|
||||
return self.ip_address
|
||||
|
||||
def create_configsvr_init(self):
|
||||
"""
|
||||
Create configsvr init scripts for all of the configsvr nodes for this suite fixture.
|
||||
|
||||
:return: None.
|
||||
"""
|
||||
for i, node in enumerate(self.fixture.configsvr.nodes):
|
||||
mongod_options = node.get_options()
|
||||
args = self.construct_mongod_args(mongod_options)
|
||||
self.write_node_init(f"configsvr{i}", args)
|
||||
|
||||
def create_mongod_init(self):
|
||||
"""
|
||||
Create mongod init scripts for all of the mongod nodes for this suite fixture.
|
||||
|
||||
:return: None.
|
||||
"""
|
||||
for s, shard in enumerate(self.fixture.shards):
|
||||
for i, node in enumerate(shard.nodes):
|
||||
mongod_options = node.get_options()
|
||||
args = self.construct_mongod_args(mongod_options)
|
||||
self.write_node_init(f"mongod{s*self.fixture.num_rs_nodes_per_shard+i}", args)
|
||||
|
||||
def write_node_init(self, node_name, args):
|
||||
"""
|
||||
Write init script for a node based on arguments and name provided.
|
||||
|
||||
:param node_name: String with the name of the node to write init script for.
|
||||
:param args: List of arguments for initiating the current node.
|
||||
:return: None.
|
||||
"""
|
||||
script_path = os.path.join(self.build_context, f"scripts/{node_name}_init.sh")
|
||||
with open(script_path, 'w') as file:
|
||||
template = self.jinja_env.get_template("node_init_template.sh.jinja")
|
||||
file.write(template.render(command=' '.join(args)) + "\n")
|
||||
|
||||
def construct_mongod_args(self, mongod_options):
|
||||
"""
|
||||
Return list of mongod args that are Antithesis compatible.
|
||||
|
||||
:param mongod_options: Dictionary of options that mongod is initiated with.
|
||||
:return: List of mongod args.
|
||||
"""
|
||||
d_args = ["mongod"]
|
||||
suite_set_parameters = mongod_options.get("set_parameters", {})
|
||||
self.update_mongod_for_antithesis(mongod_options, suite_set_parameters)
|
||||
programs._apply_set_parameters(d_args, suite_set_parameters)
|
||||
mongod_options.pop("set_parameters")
|
||||
programs._apply_kwargs(d_args, mongod_options)
|
||||
return d_args
|
||||
|
||||
def update_mongod_for_antithesis(self, mongod_options, suite_set_parameters):
|
||||
"""
|
||||
Add and remove certain options and params so mongod init is Antithesis compatible.
|
||||
|
||||
:param mongod_options: Dictionary of options that mongod is initiated with.
|
||||
:param suite_set_parameters: Dictionary of parameters that need to be set for mongod init.
|
||||
:return: None.
|
||||
"""
|
||||
suite_set_parameters["fassertOnLockTimeoutForStepUpDown"] = 0
|
||||
suite_set_parameters.pop("logComponentVerbosity", None)
|
||||
suite_set_parameters.pop("backtraceLogFile", None)
|
||||
mongod_options.pop("dbpath", None)
|
||||
if "configsvr" in mongod_options:
|
||||
mongod_options["port"] = CONFIG_PORT
|
||||
else:
|
||||
mongod_options["port"] = MONGOD_PORT
|
||||
mongod_options.update({
|
||||
"logpath": "/var/log/mongodb/mongodb.log", "bind_ip": "0.0.0.0", "oplogSize": "256",
|
||||
"wiredTigerCacheSizeGB": "1"
|
||||
})
|
||||
if "shardsvr" in mongod_options:
|
||||
s = int(re.search(r'\d+$', mongod_options["replSet"]).group())
|
||||
mongod_options["replSet"] = f"Shard{s}"
|
||||
|
||||
    def create_mongos_init(self):
        """
        Set up the creation of mongos init scripts.

        :return: None.
        """
        for m in range(self.fixture.num_mongos):
            mongos_options = self.fixture.mongos[m].get_options()
            args = self.construct_mongos_args(mongos_options)
            self.write_mongos_init(f"mongos{m}", args)

    def write_mongos_init(self, mongos_name, args):
        """
        Write the mongos init scripts utilizing information from the suite fixture.

        :param mongos_name: String with the name of the mongos to write the init script for.
        :param args: List of arguments that need to be set for mongos init.
        :return: None.
        """
        with open(os.path.join(self.build_context, f"scripts/{mongos_name}_init.py"), 'w') as file:
            template = self.jinja_env.get_template("mongos_init_template.py.jinja")
            file.write(
                template.render(mongos_name=mongos_name, configsvr=self.fixture.configsvr,
                                shards=self.fixture.shards, MONGOS_PORT=MONGOS_PORT,
                                MONGOD_PORT=MONGOD_PORT, CONFIG_PORT=CONFIG_PORT, mongos_args=args,
                                get_replset_settings=self.get_replset_settings) + "\n")

    def construct_mongos_args(self, mongos_options):
        """
        Return a list of mongos args that are Antithesis compatible.

        :param mongos_options: Dictionary of options that mongos is initiated with.
        :return: List of mongos args.
        """
        d_args = ["mongos"]
        self.update_mongos_for_antithesis(mongos_options)
        suite_set_parameters = mongos_options.get("set_parameters", {})
        programs._apply_set_parameters(d_args, suite_set_parameters)
        mongos_options.pop("set_parameters")
        programs._apply_kwargs(d_args, mongos_options)
        return d_args

    def update_mongos_for_antithesis(self, mongos_options):
        """
        Add and remove certain options and params so mongos init is Antithesis compatible.

        :param mongos_options: Dictionary of options that mongos is initiated with.
        :return: None.
        """
        members = [
            f"configsvr{i}:{CONFIG_PORT}"
            for i in range(self.fixture.configsvr_options.get("num_nodes", 1))
        ]
        mongos_options["configdb"] = f"config-rs/{','.join(members)}"
        mongos_options.pop("port", None)

    def get_replset_settings(self, replset):
        """
        Return a dictionary of settings for a specific replset that are Antithesis compatible.

        :param replset: Replset that contains config options.
        :return: Dictionary of settings.
        """
        settings = {}
        if replset.replset_config_options.get("settings"):
            replset_settings = replset.replset_config_options["settings"]
            settings = replset_settings.to_storable_dict()["object_value"]
        settings.update({
            "electionTimeoutMillis": 2000, "heartbeatTimeoutSecs": 1, "chainingAllowed": False
        })
        return settings

    def write_run_suite_script(self):
        """
        Write the `run_suite.sh` file which starts up the docker cluster and runs a sanity check.

        This ensures that the configuration for the suite works as expected.

        :return: None.
        """
        with open(os.path.join(self.build_context, "run_suite.sh"), 'w') as file:
            template = self.jinja_env.get_template("run_suite_template.sh.jinja")
            file.write(template.render(suite=self.antithesis_suite_name) + "\n")

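To make the values above concrete: with a two-node config server and no suite-supplied `settings`, the code produces a `configdb` string and replset settings like the following sketch (the `CONFIG_PORT` value is an assumption, not taken from this diff):

    # Illustrative output of `update_mongos_for_antithesis` and
    # `get_replset_settings`, assuming num_nodes=2 and CONFIG_PORT=27019.
    CONFIG_PORT = 27019  # assumed value for the sketch

    members = [f"configsvr{i}:{CONFIG_PORT}" for i in range(2)]
    configdb = f"config-rs/{','.join(members)}"
    print(configdb)  # -> "config-rs/configsvr0:27019,configsvr1:27019"

    # Tight timeouts and disabled chaining keep elections fast under fault injection.
    settings = {"electionTimeoutMillis": 2000, "heartbeatTimeoutSecs": 1, "chainingAllowed": False}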
@ -1,27 +1,55 @@
import os
from shlex import quote
import shutil
import subprocess
import sys

import git
import yaml
from buildscripts.resmokelib import config
from buildscripts.resmokelib.errors import RequiresForceRemove

from buildscripts.resmokelib.testing.docker_cluster_config_writer import DockerClusterConfigWriter

def build_images(suite_name, fixture_instance):
    """Build images needed to run the resmoke suite against docker containers."""
    image_builder = DockerComposeImageBuilder(suite_name, fixture_instance)
    if "config" in config.DOCKER_COMPOSE_BUILD_IMAGES:  # pylint: disable=unsupported-membership-test
        image_builder.build_config_image()
    if "mongo-binaries" in config.DOCKER_COMPOSE_BUILD_IMAGES:  # pylint: disable=unsupported-membership-test
        image_builder.build_mongo_binaries_image()
    if "workload" in config.DOCKER_COMPOSE_BUILD_IMAGES:  # pylint: disable=unsupported-membership-test
        image_builder.build_workload_image()
    if config.DOCKER_COMPOSE_BUILD_IMAGES:
        repro_command = f"""
        Built image(s): {config.DOCKER_COMPOSE_BUILD_IMAGES}

        SUCCESS - Run this suite against an External System Under Test (SUT) with the following command:
        `docker compose -f docker_compose/{suite_name}/docker-compose.yml run --rm workload buildscripts/resmoke.py run --suite {suite_name} --externalSUT`

        DISCLAIMER - Make sure you have built all images with the following command first:
        `buildscripts/resmoke.py run --suite {suite_name} --dockerComposeBuildImages workload,mongo-binaries,config`
        """
        print(repro_command)


class DockerComposeImageBuilder:
    """Build images needed to run a resmoke suite against a MongoDB Docker Container topology."""

    def __init__(self, tag, in_evergreen):
    def __init__(self, suite_name, suite_fixture):
        """
        Constructs a `DockerComposeImageBuilder` which can build images locally and in CI.

        :param tag: The tag to use for these images.
        :param in_evergreen: Whether this is running in Evergreen or not.
        :param suite_name: The name of the suite we are building images for.
        :param suite_fixture: The fixture to base the `docker-compose.yml` generation off of.
        """
        self.tag = tag
        self.in_evergreen = in_evergreen
        self.suite_name = suite_name
        self.suite_fixture = suite_fixture
        self.tag = config.DOCKER_COMPOSE_TAG
        self.in_evergreen = config.DOCKER_COMPOSE_BUILD_ENV == "evergreen"

        # Build context constants
        self.DOCKER_COMPOSE_BUILD_CONTEXT = f"docker_compose/{self.suite_name}"

        self.WORKLOAD_BUILD_CONTEXT = "buildscripts/antithesis/base_images/workload"
        self.WORKLOAD_DOCKERFILE = f"{self.WORKLOAD_BUILD_CONTEXT}/Dockerfile"

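With the new constructor, callers pass only the suite name and fixture; the tag and build environment come from resmoke's global `config`. A hedged usage sketch (the flag values and the `suite_fixture` object are illustrative, not taken from this diff):

    # Sketch: values that `--dockerComposeTag`, `--dockerComposeBuildEnv` and
    # `--dockerComposeBuildImages` would populate; set here only for illustration.
    from buildscripts.resmokelib import config

    config.DOCKER_COMPOSE_TAG = "latest"
    config.DOCKER_COMPOSE_BUILD_ENV = "local"
    config.DOCKER_COMPOSE_BUILD_IMAGES = ["config"]

    builder = DockerComposeImageBuilder("replica_sets_jscore_passthrough", suite_fixture)
    builder.build_config_image()  # writes docker_compose/replica_sets_jscore_passthrough/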
@ -29,7 +57,7 @@ class DockerComposeImageBuilder:
        self.MONGO_BINARIES_DOCKERFILE = f"{self.MONGO_BINARIES_BUILD_CONTEXT}/Dockerfile"

        # Artifact constants
        self.MONGODB_BINARIES_RELATIVE_DIR = "dist-test" if in_evergreen else "antithesis-dist-test"
        self.MONGODB_BINARIES_RELATIVE_DIR = "dist-test" if self.in_evergreen else "antithesis-dist-test"
        self.MONGO_BINARY = f"{self.MONGODB_BINARIES_RELATIVE_DIR}/bin/mongo"
        self.MONGOD_BINARY = f"{self.MONGODB_BINARIES_RELATIVE_DIR}/bin/mongod"
        self.MONGOS_BINARY = f"{self.MONGODB_BINARIES_RELATIVE_DIR}/bin/mongos"

@ -37,30 +65,118 @@
        self.LIBVOIDSTAR_PATH = "/usr/lib/libvoidstar.so"
        self.MONGODB_DEBUGSYMBOLS = "mongo-debugsymbols.tgz"

    def build_base_images(self):
        """
        Build the base images needed for the docker configuration.
        # IP address suffixes 1-24 are subject to fault injection, while suffixes 130+ are safe.
        self.next_available_fault_enabled_ip = 2
        self.next_available_fault_disabled_ip = 130

        :return: None.
    def _add_docker_compose_configuration_to_build_context(self, build_context) -> None:
        """
        self._fetch_mongodb_binaries()
        print("Building base images...")
        self._build_mongo_binaries_image()
        self._build_workload_image()
        print("Done building base images.")
        Create init scripts for all of the mongo{d,s} processes and a `docker-compose.yml` file.

    def build_config_image(self, antithesis_suite_name):
        """
        Build the antithesis config image containing the `docker-compose.yml` file and volumes for the suite.

        :param antithesis_suite_name: The antithesis suite to build a docker compose config image for.
        :param tag: Tag to use for the docker compose configuration and/or base images.
        :param build_context: Filepath where the configuration is going to be set up.
        """

        # Build out the directory structure and write the startup scripts for the config image at runtime
        print(f"Prepping antithesis config image build context for `{antithesis_suite_name}`...")
        config_image_writer = DockerClusterConfigWriter(antithesis_suite_name, self.tag)
        config_image_writer.generate_docker_sharded_cluster_config()
        def create_docker_compose_service(name, fault_injection, depends_on):
            """
            Create a service section of a docker-compose.yml for a service with this name.

            :param name: The name of this service.
            :param fault_injection: Whether or not this service should be subject to fault injection.
            :param depends_on: Any services that this service depends on to successfully run.
            """
            if fault_injection:
                ip_suffix = self.next_available_fault_enabled_ip
                self.next_available_fault_enabled_ip += 1
            else:
                ip_suffix = self.next_available_fault_disabled_ip
                self.next_available_fault_disabled_ip += 1
            return {
                "container_name": name, "hostname": name,
                "image": f'{"workload" if name == "workload" else "mongo-binaries"}:{self.tag}',
                "volumes": [
                    f"./logs/{name}:/var/log/mongodb/",
                    "./scripts:/scripts/",
                    f"./data/{name}:/data/db",
                ], "command": f"/bin/bash /scripts/{name}.sh", "networks": {
                    "antithesis-net": {"ipv4_address": f"10.20.20.{ip_suffix}"}
                }, "depends_on": depends_on
            }

        docker_compose_yml = {
            "version": "3.0", "services": {
                "workload":
                    create_docker_compose_service(
                        "workload", fault_injection=False, depends_on=[
                            process.logger.external_sut_hostname
                            for process in self.suite_fixture.all_processes()
                        ])
            }, "networks": {
                "antithesis-net": {
                    "driver": "bridge", "ipam": {"config": [{"subnet": "10.20.20.0/24"}]}
                }
            }
        }
        print("Writing workload init script...")
        with open(os.path.join(build_context, "scripts", "workload.sh"), "w") as workload_init:
            workload_init.write("tail -f /dev/null\n")

        print("Writing mongo{d,s} init scripts...")
        for process in self.suite_fixture.all_processes():
            # Add the `Process` as a service in the docker-compose.yml
            service_name = process.logger.external_sut_hostname
            docker_compose_yml["services"][service_name] = create_docker_compose_service(
                service_name, fault_injection=True, depends_on=[])

            # Write the `Process` args as an init script
            with open(os.path.join(build_context, "scripts", f"{service_name}.sh"), "w") as file:
                file.write(" ".join(map(quote, process.args)) + '\n')

        print("Writing `docker-compose.yml`...")
        with open(os.path.join(build_context, "docker-compose.yml"), "w") as docker_compose:
            docker_compose.write(yaml.dump(docker_compose_yml) + '\n')

        print("Writing Dockerfile...")
        with open(os.path.join(build_context, "Dockerfile"), "w") as dockerfile:
            dockerfile.write("FROM scratch\n")
            dockerfile.write("COPY docker-compose.yml /\n")
            dockerfile.write("ADD scripts /scripts\n")
            dockerfile.write("ADD logs /logs\n")
            dockerfile.write("ADD data /data\n")
            dockerfile.write("ADD debug /debug\n")

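For a concrete picture of the generated artifact, here is the dict a single `create_docker_compose_service("mongod0", fault_injection=True, depends_on=[])` call would contribute (service name, tag and IP suffix are illustrative), and how `yaml.dump` serializes it into the compose file:

    import yaml

    # Illustrative service entry for a fault-injected "mongod0" (assumed tag
    # "latest"; the first fault-enabled IP suffix is 2 per the counters above).
    service = {
        "container_name": "mongod0", "hostname": "mongod0",
        "image": "mongo-binaries:latest",
        "volumes": [
            "./logs/mongod0:/var/log/mongodb/",
            "./scripts:/scripts/",
            "./data/mongod0:/data/db",
        ],
        "command": "/bin/bash /scripts/mongod0.sh",
        "networks": {"antithesis-net": {"ipv4_address": "10.20.20.2"}},
        "depends_on": [],
    }
    print(yaml.dump({"services": {"mongod0": service}}))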
    def _initialize_docker_compose_build_context(self, build_context) -> None:
        """
        Remove the old docker compose build context and create a new one.

        :param build_context: Filepath where the configuration is going to be set up.
        """
        try:
            shutil.rmtree(build_context)
        except FileNotFoundError:
            # `shutil.rmtree` raises FileNotFoundError if the path does not exist. In that case continue as normal.
            pass
        except Exception as exc:
            exception_text = f"""
            Could not remove directory due to old artifacts from a previous run.

            Please remove this directory and try again -- you may need to force remove:
            `{os.path.relpath(build_context)}`
            """
            raise RequiresForceRemove(exception_text) from exc

        for volume in ["scripts", "logs", "data", "debug"]:
            os.makedirs(os.path.join(build_context, volume))

    def build_config_image(self):
        """
        Build the config image containing the `docker-compose.yml` file, init scripts and volumes for the suite.

        :return: None.
        """
        # Build out the directory structure and write the startup scripts for the config image
        print(f"Preparing antithesis config image build context for `{self.suite_name}`...")
        self._initialize_docker_compose_build_context(self.DOCKER_COMPOSE_BUILD_CONTEXT)
        self._add_docker_compose_configuration_to_build_context(self.DOCKER_COMPOSE_BUILD_CONTEXT)

        # Our official builds happen in Evergreen. Assert debug symbols are on the system.
        # If this is running locally, this is for development purposes only and debug symbols are not required.

@ -69,19 +185,17 @@ class DockerComposeImageBuilder:
        ), f"No debug symbols available at: {self.MONGODB_DEBUGSYMBOLS}"
        print("Running in Evergreen -- copying debug symbols to build context...")
        shutil.copy(self.MONGODB_DEBUGSYMBOLS,
                    os.path.join(config_image_writer.build_context, "debug"))
                    os.path.join(self.DOCKER_COMPOSE_BUILD_CONTEXT, "debug"))

        print(
            f"Done setting up antithesis config image build context for `{antithesis_suite_name}`..."
        )
        print(f"Done setting up antithesis config image build context for `{self.suite_name}`...")
        print("Building antithesis config image...")
        subprocess.run([
            "docker", "build", "-t", f"{antithesis_suite_name}:{self.tag}", "-f",
            f"{config_image_writer.build_context}/Dockerfile", config_image_writer.build_context
            "docker", "build", "-t", f"{self.suite_name}:{self.tag}", "-f",
            f"{self.DOCKER_COMPOSE_BUILD_CONTEXT}/Dockerfile", self.DOCKER_COMPOSE_BUILD_CONTEXT
        ], stdout=sys.stdout, stderr=sys.stderr, check=True)
        print("Done building antithesis config image.")

    def _build_workload_image(self):
    def build_workload_image(self):
        """
        Build the workload image.

@ -91,6 +205,7 @@ class DockerComposeImageBuilder:

        print("Prepping `workload` image build context...")
        # Set up build context
        self._fetch_mongodb_binaries()
        self._copy_mongo_binary_to_build_context(self.WORKLOAD_BUILD_CONTEXT)
        self._clone_mongo_repo_to_build_context(self.WORKLOAD_BUILD_CONTEXT)
        self._add_libvoidstar_to_build_context(self.WORKLOAD_BUILD_CONTEXT)

@ -103,15 +218,16 @@
        ], stdout=sys.stdout, stderr=sys.stderr, check=True)
        print("Done building workload image.")

    def _build_mongo_binaries_image(self):
    def build_mongo_binaries_image(self):
        """
        Build the mongo-binaries image.

        :return: None.
        """

        print("Prepping `mongo binaries` image build context...")
        # Set up build context
        print("Prepping `mongo binaries` image build context...")

        self._fetch_mongodb_binaries()
        self._copy_mongodb_binaries_to_build_context(self.MONGO_BINARIES_BUILD_CONTEXT)
        self._add_libvoidstar_to_build_context(self.MONGO_BINARIES_BUILD_CONTEXT)

@ -134,13 +250,10 @@
        """
        mongodb_binaries_destination = os.path.join(self.MONGODB_BINARIES_RELATIVE_DIR, "bin")

        if os.path.exists(mongodb_binaries_destination):
            print(f"\n\tFound existing MongoDB binaries at: {mongodb_binaries_destination}\n")
        # If local, fetch the binaries.
        if not self.in_evergreen:
            # Clean up any old artifacts in the build context.
            if os.path.exists(mongodb_binaries_destination):
                print("Removing old MongoDB binaries...")
                shutil.rmtree(mongodb_binaries_destination)

        elif not self.in_evergreen:
            # Ensure that `db-contrib-tool` is installed locally
            db_contrib_tool_error = """
            Could not find `db-contrib-tool` installation locally.

@ -164,7 +277,7 @@
        for required_binary in [self.MONGO_BINARY, self.MONGOD_BINARY, self.MONGOS_BINARY]:
            assert os.path.exists(
                required_binary
            ), f"Could not find Ubuntu 18.04 MongoDB binary at: {required_binary}"
            ), f"Could not find Ubuntu 22.04 MongoDB binary at: {required_binary}"

        # Our official builds happen in Evergreen.
        # We want to ensure the binaries are linked with `libvoidstar.so` during image build.

@ -223,7 +336,8 @@

        # Copy the mongo repo to the build context.
        # If this fails to clone, the `git` library will raise an exception.
        git.Repo("./").clone(mongo_repo_destination)
        active_branch = git.Repo("./").active_branch.name
        git.Repo.clone_from("./", mongo_repo_destination, branch=active_branch)
        print("Done cloning MongoDB repo to build context.")

    def _copy_mongodb_binaries_to_build_context(self, dir_path):

@ -18,7 +18,7 @@ from buildscripts.resmokelib.testing import hooks as _hooks
from buildscripts.resmokelib.testing import job as _job
from buildscripts.resmokelib.testing import report as _report
from buildscripts.resmokelib.testing import testcases
from buildscripts.resmokelib.testing import docker_cluster_config_writer as _docker_cluster_config_writer
from buildscripts.resmokelib.testing.docker_cluster_image_builder import DockerComposeImageBuilder, build_images
from buildscripts.resmokelib.testing.fixtures.interface import Fixture
from buildscripts.resmokelib.testing.hooks.interface import Hook
from buildscripts.resmokelib.testing.queue_element import QueueElemRepeatTime, queue_elem_factory, QueueElem

@ -152,3 +152,5 @@ class _FixtureConfig(object):
        self.MONGOS_SET_PARAMETERS = config.MONGOS_SET_PARAMETERS
        self.DBPATH_PREFIX = config.DBPATH_PREFIX
        self.DEFAULT_DBPATH_PREFIX = config.DEFAULT_DBPATH_PREFIX
        self.EXTERNAL_SUT = config.EXTERNAL_SUT
        self.DOCKER_COMPOSE_BUILD_IMAGES = config.DOCKER_COMPOSE_BUILD_IMAGES

@ -214,6 +214,58 @@ class Fixture(object, metaclass=registry.make_registry_metaclass(_FIXTURES)): #
        return "%r(%r, %r)" % (self.__class__.__name__, self.logger, self.job_num)


class DockerComposeException(Exception):
    """Exception to use when there is a failure in the docker compose interface."""

    pass


class _DockerComposeInterface:
    """
    Implement the `_all_mongo_d_s` method which returns all `mongo{d,s}` instances.

    Fixtures that use this interface can programmatically generate `docker-compose.yml` configurations
    by leveraging the `all_processes` method to access the startup args.
    """

    def _all_mongo_d_s(self) -> List[Fixture]:
        """
        Return a list of all mongo{d,s} `Fixture` instances in this fixture.

        :return: A list of `mongo{d,s}` `Fixture` instances.
        """
        raise NotImplementedError(
            "_all_mongo_d_s must be implemented by Fixture subclasses that support `docker-compose.yml` generation."
        )

    def all_processes(self) -> List['Process']:
        """
        Return a list of all `mongo{d,s}` `Process` instances in the fixture.

        :return: A list of mongo{d,s} processes for the current fixture.
        """
        if not self.config.DOCKER_COMPOSE_BUILD_IMAGES:
            raise DockerComposeException(
                "This method is reserved for `--dockerComposeBuildImages` only.")

        processes = []

        # If `mongo_d_s.EXTERNAL_SUT=True`, `mongo_d_s.setup()` will set up a dummy process
        # to extract args from instead of a real `mongo{d,s}`.
        for mongo_d_s in self._all_mongo_d_s():
            if mongo_d_s.__class__.__name__ == "MongoDFixture":
                mongo_d_s.setup()
                processes += [mongo_d_s.mongod]
            elif mongo_d_s.__class__.__name__ == "_MongoSFixture":
                mongo_d_s.setup()
                processes += [mongo_d_s.mongos]
            else:
                raise NotImplementedError(
                    f"Support for this class has not yet been added to docker compose: {mongo_d_s.__class__.__name__}"
                )
        return processes

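A minimal sketch of how a fixture adopts this interface; `MyPairFixture` and its `node_a`/`node_b` attributes are invented for illustration, while the real subclasses added in this commit (replica set, sharded cluster, standalone) follow the same pattern below:

    # Hypothetical fixture: enumerate the mongo{d,s} children and let the
    # inherited `all_processes` extract their startup args.
    class MyPairFixture(Fixture, _DockerComposeInterface):
        def _all_mongo_d_s(self):
            # Leaf fixtures each return [self]; concatenate the lists.
            return self.node_a._all_mongo_d_s() + self.node_b._all_mongo_d_s()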

class MultiClusterFixture(Fixture):
    """
    Base class for fixtures that may consist of multiple independent participant clusters.

@ -37,7 +37,7 @@ def compare_optime(optime1, optime2):
    return compare_timestamp(optime1["ts"], optime2["ts"])


class ReplicaSetFixture(interface.ReplFixture):
class ReplicaSetFixture(interface.ReplFixture, interface._DockerComposeInterface):
    """Fixture which provides JSTests with a replica set to run against."""

    def __init__(self, logger, job_num, fixturelib, mongod_executable=None, mongod_options=None,

@ -250,6 +250,10 @@ class ReplicaSetFixture(interface.ReplFixture):
        self._await_secondaries()
        self._await_newly_added_removals()

    def _all_mongo_d_s(self):
        """Return a list of all `mongo{d,s}` `Fixture` instances in this fixture."""
        return sum([node._all_mongo_d_s() for node in self.nodes], [])

    def pids(self):
        """:return: all pids owned by this fixture if any."""
        pids = []

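The `sum([...], [])` call above is a list-flattening idiom: each node contributes a small list, and `sum` with a `[]` start value concatenates them. A standalone illustration:

    # Flattening per-node lists the same way `_all_mongo_d_s` does.
    per_node_lists = [["mongod0"], ["mongod1"], ["mongod2"]]
    flattened = sum(per_node_lists, [])
    print(flattened)  # -> ['mongod0', 'mongod1', 'mongod2']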
@ -12,7 +12,7 @@ from buildscripts.resmokelib.testing.fixtures import external
from buildscripts.resmokelib.testing.fixtures import _builder


class ShardedClusterFixture(interface.Fixture):
class ShardedClusterFixture(interface.Fixture, interface._DockerComposeInterface):
    """Fixture which provides JSTests with a sharded cluster to run against."""

    _CONFIGSVR_REPLSET_NAME = "config-rs"

@ -100,6 +100,13 @@ class ShardedClusterFixture(interface.Fixture):
        for shard in self.shards:
            shard.setup()

    def _all_mongo_d_s(self):
        """Return a list of all `mongo{d,s}` `Fixture` instances in this fixture."""
        # When config_shard is None, we have an additional replset for the configsvr.
        all_nodes = [self.configsvr] if self.config_shard is None else []
        all_nodes += self.mongos + self.shards
        return sum([node._all_mongo_d_s() for node in all_nodes], [])

    def refresh_logical_session_cache(self, target):
        """Refresh the logical session cache with no timeout."""
        primary = target.get_primary().mongo_client()

@ -516,7 +523,7 @@ class ExternalShardedClusterFixture(external.ExternalFixture, ShardedClusterFixt
        return external.ExternalFixture.get_node_info(self)


class _MongoSFixture(interface.Fixture):
class _MongoSFixture(interface.Fixture, interface._DockerComposeInterface):
    """Fixture which provides JSTests with a mongos to connect to."""

    def __init__(self, logger, job_num, fixturelib, dbpath_prefix, mongos_executable=None,

@ -568,13 +575,9 @@ class _MongoSFixture(interface.Fixture):

        self.mongos = mongos

    def get_options(self):
        """Return the mongos options of this fixture."""
        launcher = MongosLauncher(self.fixturelib)
        _, mongos_options = launcher.launch_mongos_program(self.logger, self.job_num,
                                                           executable=self.mongos_executable,
                                                           mongos_options=self.mongos_options)
        return mongos_options
    def _all_mongo_d_s(self):
        """Return the standalone `mongos` `Fixture` instance."""
        return [self]

    def pids(self):
        """:return: pids owned by this fixture if any."""

@ -617,6 +620,12 @@ class _MongoSFixture(interface.Fixture):
        self.logger.info("Successfully contacted the mongos on port %d.", self.port)

    def _do_teardown(self, mode=None):
        if self.config.EXTERNAL_SUT:
            self.logger.info(
                "This is running against an External System Under Test setup with `docker-compose.yml` -- skipping teardown."
            )
            return

        if self.mongos is None:
            self.logger.warning("The mongos fixture has not been set up yet.")
            return  # Teardown is still a success even if nothing is running.

@ -654,7 +663,7 @@ class _MongoSFixture(interface.Fixture):

    def get_internal_connection_string(self):
        """Return the internal connection string."""
        return "localhost:%d" % self.port
        return f"{self.logger.external_sut_hostname if self.config.EXTERNAL_SUT else 'localhost'}:{self.port}"

    def get_driver_connection_url(self):
        """Return the driver connection URL."""

@ -14,7 +14,7 @@ import pymongo.errors
from buildscripts.resmokelib.testing.fixtures import interface


class MongoDFixture(interface.Fixture):
class MongoDFixture(interface.Fixture, interface._DockerComposeInterface):
    """Fixture which provides JSTests with a standalone mongod to run against."""

    def __init__(self, logger, job_num, fixturelib, mongod_executable=None, mongod_options=None,

@ -87,13 +87,9 @@ class MongoDFixture(interface.Fixture):

        self.mongod = mongod

    def get_options(self):
        """Return the mongod options of this fixture."""
        launcher = MongodLauncher(self.fixturelib)
        _, mongod_options = launcher.launch_mongod_program(self.logger, self.job_num,
                                                           executable=self.mongod_executable,
                                                           mongod_options=self.mongod_options)
        return mongod_options
    def _all_mongo_d_s(self):
        """Return the standalone `mongod` `Fixture` instance."""
        return [self]

    def pids(self):
        """:return: pids owned by this fixture if any."""

@ -135,6 +131,12 @@ class MongoDFixture(interface.Fixture):
        self.logger.info("Successfully contacted the mongod on port %d.", self.port)

    def _do_teardown(self, mode=None):
        if self.config.EXTERNAL_SUT:
            self.logger.info(
                "This is running against an External System Under Test setup with `docker-compose.yml` -- skipping teardown."
            )
            return

        if self.mongod is None:
            self.logger.warning("The mongod fixture has not been set up yet.")
            return  # Still a success even if nothing is running.

@ -186,7 +188,7 @@ class MongoDFixture(interface.Fixture):

    def get_internal_connection_string(self):
        """Return the internal connection string."""
        return "localhost:%d" % self.port
        return f"{self.logger.external_sut_hostname if self.config.EXTERNAL_SUT else 'localhost'}:{self.port}"

    def get_driver_connection_url(self):
        """Return the driver connection URL."""

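The connection-string change above is what lets the workload container reach `mongo{d,s}` services by compose hostname instead of `localhost`. A small sketch of the two behaviors (hostname and port are illustrative):

    # Mirrors the f-string used by both fixtures above.
    def internal_connection_string(external_sut, external_sut_hostname, port):
        return f"{external_sut_hostname if external_sut else 'localhost'}:{port}"

    print(internal_connection_string(False, "mongod0", 20000))  # -> "localhost:20000"
    print(internal_connection_string(True, "mongod0", 20000))   # -> "mongod0:20000"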
@ -427,9 +427,6 @@ class Suite(object):
    def is_matrix_suite(self):
        return "matrix_suite" in self.get_config()

    def is_antithesis_suite(self):
        return "antithesis" in self.get_config()

    def get_description(self):
        if "description" not in self.get_config():
            return None

@ -442,7 +439,6 @@ class Suite(object):
        NUM_JOBS_TO_START = "suite_num_jobs_to_start"
        NUM_TIMES_TO_REPEAT_TESTS = "suite_num_times_to_repeat_tests"
        IS_MATRIX_SUITE = "suite_is_matrix_suite"
        IS_ANTITHESIS_SUITE = "suite_is_antithesis_suite"
        KIND = "suite_kind"
        RETURN_CODE = "suite_return_code"
        RETURN_STATUS = "suite_return_status"

@ -455,7 +451,6 @@ class Suite(object):
            Suite.METRIC_NAMES.NUM_JOBS_TO_START: self.get_num_jobs_to_start(),
            Suite.METRIC_NAMES.NUM_TIMES_TO_REPEAT_TESTS: self.get_num_times_to_repeat_tests(),
            Suite.METRIC_NAMES.IS_MATRIX_SUITE: self.is_matrix_suite(),
            Suite.METRIC_NAMES.IS_ANTITHESIS_SUITE: self.is_antithesis_suite(),
        }
        # Note: '' and 0 are values we want to return, and both are falsey.
        if self.test_kind is not None:

@ -0,0 +1,93 @@
"""Utility for making resmoke suites external compatible."""

# The `make_external` util function in this file makes a suite
# compatible to run with an External System Under Test (SUT).
# For more info on External SUT, look into the `--externalSUT`
# flag in `resmoke.py run --help`. Currently, External SUT
# testing is only used in Antithesis, so some of these changes
# are specific to Antithesis testing with External SUTs.

from buildscripts.resmokelib import logging

INCOMPATIBLE_HOOKS = [
    "CleanEveryN",
    "ContinuousStepdown",
    "CheckOrphansDeleted",
    # TODO SERVER-70396 re-enable hook once the checkMetadata feature flag is removed.
    # To check the feature flag we would need to contact the config server directly, which is not exposed in the ExternalFixture.
    "CheckMetadataConsistencyInBackground",
]


def delete_archival(suite):
    """Remove archival for external suites."""
    logging.loggers.ROOT_EXECUTOR_LOGGER.warning(
        "`archive` is not supported for external suites and will be removed if it exists.")
    suite.pop("archive", None)
    suite.get("executor", {}).pop("archive", None)


def make_hooks_compatible(suite):
    """Make hooks compatible for external suites."""
    logging.loggers.ROOT_EXECUTOR_LOGGER.warning(
        "Some hooks are automatically disabled for external suites: %s", INCOMPATIBLE_HOOKS)
    logging.loggers.ROOT_EXECUTOR_LOGGER.warning(
        "The `AntithesisLogging` hook is automatically added for external suites.")
    if suite.get("executor", {}).get("hooks", None):
        # It's either a list of strings, or a list of dicts, each with key 'class'.
        if isinstance(suite["executor"]["hooks"][0], str):
            suite["executor"]["hooks"] = ["AntithesisLogging"] + [
                hook for hook in suite["executor"]["hooks"] if hook not in INCOMPATIBLE_HOOKS
            ]
        elif isinstance(suite["executor"]["hooks"][0], dict):
            suite["executor"]["hooks"] = [{"class": "AntithesisLogging"}] + [
                hook
                for hook in suite["executor"]["hooks"] if hook["class"] not in INCOMPATIBLE_HOOKS
            ]
        else:
            raise RuntimeError(
                f'Unknown structure in hook. Please reach out in #server-testing: {suite["executor"]["hooks"][0]}'
            )


def update_test_data(suite):
    """Update TestData to be compatible with external suites."""
    logging.loggers.ROOT_EXECUTOR_LOGGER.warning(
        "`useActionPermittedFile` is incompatible with external suites and will always be set to `False`."
    )
    suite.setdefault("executor", {}).setdefault(
        "config", {}).setdefault("shell_options", {}).setdefault("global_vars", {}).setdefault(
            "TestData", {}).update({"useActionPermittedFile": False})


def update_shell(suite):
    """Update the shell for when running external suites."""
    logging.loggers.ROOT_EXECUTOR_LOGGER.warning(
        "`jsTestLog` is a no-op on external suites to reduce logging.")
    suite.setdefault("executor", {}).setdefault("config", {}).setdefault("shell_options",
                                                                         {}).setdefault("eval", "")
    suite["executor"]["config"]["shell_options"]["eval"] += "jsTestLog = Function.prototype;"


def update_exclude_tags(suite):
    """Update the exclude tags to exclude tests that are incompatible with external suites."""
    logging.loggers.ROOT_EXECUTOR_LOGGER.warning(
        "Tests tagged `antithesis_incompatible` will be excluded for external suites.")
    suite.setdefault('selector', {})
    if not suite.get('selector').get('exclude_with_any_tags'):
        suite['selector']['exclude_with_any_tags'] = ["antithesis_incompatible"]
    else:
        suite['selector']['exclude_with_any_tags'].append('antithesis_incompatible')


def make_external(suite):
    """Modify the suite in-place to be external compatible."""
    logging.loggers.ROOT_EXECUTOR_LOGGER.warning(
        "This suite is being converted to an 'External Suite': %s", suite)
    delete_archival(suite)
    make_hooks_compatible(suite)
    update_test_data(suite)
    update_shell(suite)
    update_exclude_tags(suite)

    return suite

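An end-to-end sketch of what `make_external` does to a minimal suite dict (the input suite is invented for illustration; `ValidateCollections` stands in for any compatible hook):

    # Illustrative input for `make_external`.
    suite = {
        "archive": {"tests": True},
        "executor": {"hooks": ["CleanEveryN", "ValidateCollections"]},
    }
    make_external(suite)
    # After conversion (abridged):
    #   - "archive" is removed at both levels
    #   - hooks -> ["AntithesisLogging", "ValidateCollections"]
    #   - TestData.useActionPermittedFile -> False
    #   - shell_options "eval" gains "jsTestLog = Function.prototype;"
    #   - selector.exclude_with_any_tags includes "antithesis_incompatible"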
@ -2547,6 +2547,26 @@ functions:
        files:
          - wiki_page_location.json

  ### Helps with debugging docker compose generation failures ###
  "upload docker compose":
    - command: archive.targz_pack
      params:
        target: "docker_compose.tgz"
        source_dir: "src"
        include:
          - "docker_compose/**"
    - command: s3.put
      params:
        optional: true
        aws_key: ${aws_key}
        aws_secret: ${aws_secret}
        local_file: src/docker_compose.tgz
        remote_file: ${project}/${build_variant}/${revision}/docker-compose-${task_id}-${execution}.tgz
        bucket: mciuploads
        permissions: public-read
        content_type: application/gzip
        display_name: Docker Compose

  "attach local resmoke invocation":
    command: s3.put
    params:

@ -2596,6 +2616,7 @@ post:
  - func: "attach artifacts"
  - func: "save ec2 task artifacts"
  - func: "attach wiki page"
  - func: "upload docker compose"
  - func: "upload jstestfuzz minimized output"
  - func: "kill processes"
  - func: "save local client logs"

@ -2771,11 +2792,11 @@ tasks:
        - "./etc/evergreen_yml_components/**"
        - "./etc/repo_config.yaml"
        - "./etc/scons/**"
        - "docker_compose/**"
        - "buildscripts/**"
        - "jstests/**"
        - "patch_files.txt"
        - "evergreen/**"
        - "antithesis/**"
        - "src/**.idl"
        - "src/mongo/client/sdam/json_tests/sdam_tests/**"
        - "src/mongo/client/sdam/json_tests/server_selection_tests/**"

@ -8747,7 +8768,34 @@ tasks:
      - func: "do setup for antithesis"
      - func: "antithesis image build and push"
        vars:
          suite: antithesis_concurrency_sharded_with_stepdowns_and_balancer
          suite: concurrency_sharded_with_stepdowns_and_balancer

  - <<: *antithesis_task_template
    name: antithesis_concurrency_sharded_multi_stmt_txn_kill_primary
    commands:
      - func: "do setup"
      - func: "do setup for antithesis"
      - func: "antithesis image build and push"
        vars:
          suite: concurrency_sharded_multi_stmt_txn_kill_primary

  - <<: *antithesis_task_template
    name: antithesis_replica_sets_jscore_passthrough
    commands:
      - func: "do setup"
      - func: "do setup for antithesis"
      - func: "antithesis image build and push"
        vars:
          suite: replica_sets_jscore_passthrough

  - <<: *antithesis_task_template
    name: antithesis_core
    commands:
      - func: "do setup"
      - func: "do setup for antithesis"
      - func: "antithesis image build and push"
        vars:
          suite: core

  - <<: *task_template
    name: query_golden_classic

@ -17,21 +17,28 @@ if [ -n "${antithesis_image_tag:-}" ]; then
  tag=$antithesis_image_tag
fi

# Build Image
cd src
activate_venv
$python buildscripts/resmoke.py generate-docker-compose --in-evergreen --tag $tag ${suite}
# Clean up any leftover docker artifacts
sudo docker logout
sudo docker rm $(docker ps -a -q) --force || echo "No pre-existing containers"
sudo docker network prune --force

# Test Image
cd antithesis/antithesis_config/${suite}
bash run_suite.sh

# Push Image
# login, push, and logout
# Login
echo "${antithesis_repo_key}" > mongodb.key.json
cat mongodb.key.json | sudo docker login -u _json_key https://us-central1-docker.pkg.dev --password-stdin
rm mongodb.key.json

# Build Image
cd src
activate_venv
$python buildscripts/resmoke.py run --suite ${suite} --dockerComposeTag $tag --dockerComposeBuildImages workload,config,mongo-binaries --dockerComposeBuildEnv evergreen

# Test Image
docker-compose -f docker_compose/${suite}/docker-compose.yml up -d
echo "ALL RUNNING CONTAINERS: "
docker ps
docker exec workload buildscripts/resmoke.py run --suite ${suite} --sanityCheck --externalSUT

# Push Image
sudo docker tag "${suite}:$tag" "$antithesis_repo/${suite}:$tag"
sudo docker push "$antithesis_repo/${suite}:$tag"

@ -41,4 +48,5 @@ sudo docker push "$antithesis_repo/mongo-binaries:$tag"
sudo docker tag "workload:$tag" "$antithesis_repo/workload:$tag"
sudo docker push "$antithesis_repo/workload:$tag"

# Logout
sudo docker logout https://us-central1-docker.pkg.dev