mirror of https://github.com/mongodb/mongo
SERVER-23312 Format Python files with yapf
parent d62d631f0c
commit 36148ad8bb

@@ -3,4 +3,5 @@
based_on_style = pep8
column_limit = 100
indent_dictionary_value = True
split_before_named_assigns = False
each_dict_entry_on_separate_line = False
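
A minimal sketch of driving these same style knobs through yapf's Python API, assuming the yapf package is installed and that FormatCode accepts an inline brace-style config string (as yapf's command-line --style option does):

# Illustrative only: format a snippet with the options shown in .style.yapf above.
from yapf.yapflib.yapf_api import FormatCode

SOURCE = "def f(a,b):\n    return {'key':a,'other':b}\n"
STYLE = ("{based_on_style: pep8, column_limit: 100, indent_dictionary_value: true,"
         " split_before_named_assigns: false, each_dict_entry_on_separate_line: false}")

result = FormatCode(SOURCE, style_config=STYLE)
# Most yapf releases return a (formatted_source, changed) tuple; older ones return a string.
formatted = result[0] if isinstance(result, tuple) else result
print(formatted)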

@@ -1,10 +1,11 @@
# See https://www.pylint.org/
[MESSAGES CONTROL]
# C0301 - line-too-long - some of the type annotations are longer then 100 columns
# C0330 - bad-continuation - ignore conflicts produced by yapf formatting
# E0401 - import-error - ignore imports that fail to load
# I0011 - locally-disabled - ignore warnings about disable pylint checks
# R0903 - too-few-public-method - pylint does not always know best
# W0511 - fixme - ignore TODOs in comments
# W0611 - unused-import - typing module is needed for mypy

disable=fixme,import-error,line-too-long,locally-disabled,too-few-public-methods,unused-import
disable=bad-continuation,fixme,import-error,line-too-long,locally-disabled,too-few-public-methods,unused-import
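
A minimal sketch of running pylint with the same checks disabled as the updated disable line, assuming pylint is installed and runnable via python -m pylint (on newer pylint releases some of these checks, such as bad-continuation, no longer exist):

# Hypothetical wrapper: run pylint with the checks from the updated disable line turned off.
import subprocess
import sys

DISABLED = ("bad-continuation,fixme,import-error,line-too-long,"
            "locally-disabled,too-few-public-methods,unused-import")

def run_pylint(paths):
    """Return pylint's exit code for the given paths."""
    cmd = [sys.executable, "-m", "pylint", "--disable=" + DISABLED] + list(paths)
    return subprocess.call(cmd)

if __name__ == "__main__":
    sys.exit(run_pylint(sys.argv[1:] or ["buildscripts"]))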

@@ -2,11 +2,12 @@ import subprocess
import os
import sys
from optparse import OptionParser
""" This script aggregates several tracefiles into one tracefile
All but the last argument are input tracefiles or .txt files which list tracefiles.
The last argument is the tracefile to which the output will be written
"""
def aggregate(inputs, output):
"""Aggregates the tracefiles given in inputs to a tracefile given by output"""
args = ['lcov']

@@ -17,18 +18,20 @@ def aggregate(inputs, output):
args += ['-o', output]
print ' '.join(args)
return subprocess.call(args)
return subprocess.call(args)
def getfilesize(path):
if not os.path.isfile(path):
return 0
return os.path.getsize(path)
def main ():
def main():
inputs = []
usage = "usage: %prog input1.info input2.info ... output.info"
usage = "usage: %prog input1.info input2.info ... output.info"
parser = OptionParser(usage=usage)
(options, args) = parser.parse_args()

@@ -43,12 +46,12 @@ def main ():
inputs.append(path)
elif ext == '.txt':
inputs += [line.strip() for line in open(path)
if getfilesize(line.strip()) > 0]
inputs += [line.strip() for line in open(path) if getfilesize(line.strip()) > 0]
else:
return "unrecognized file type"
return aggregate(inputs, args[-1])
if __name__ == '__main__':
sys.exit(main())

@@ -1,5 +1,4 @@
#!/usr/bin/env python
"""AWS EC2 instance launcher and controller."""
from __future__ import print_function

@@ -21,16 +20,9 @@ class AwsEc2(object):
"""Class to support controlling AWS EC2 istances."""
InstanceStatus = collections.namedtuple("InstanceStatus", [
"instance_id",
"image_id",
"instance_type",
"state",
"private_ip_address",
"public_ip_address",
"private_dns_name",
"public_dns_name",
"tags"
])
"instance_id", "image_id", "instance_type", "state", "private_ip_address",
"public_ip_address", "private_dns_name", "public_dns_name", "tags"
])
def __init__(self):
try:

@@ -46,8 +38,7 @@ class AwsEc2(object):
"""Wait up to 'wait_time_secs' for instance to be in 'state'.
Return 0 if 'state' reached, 1 otherwise."""
if show_progress:
print("Waiting for instance {} to reach '{}' state".format(instance, state),
end="",
print("Waiting for instance {} to reach '{}' state".format(instance, state), end="",
file=sys.stdout)
reached_state = False
end_time = time.time() + wait_time_secs

@@ -83,8 +74,7 @@ class AwsEc2(object):
def control_instance(self, mode, image_id, wait_time_secs=0, show_progress=False):
"""Controls an AMI instance. Returns 0 & status information, if successful."""
if mode not in _MODES:
raise ValueError(
"Invalid mode '{}' specified, choose from {}.".format(mode, _MODES))
raise ValueError("Invalid mode '{}' specified, choose from {}.".format(mode, _MODES))
sys.stdout.flush()
instance = self.connection.Instance(image_id)

@@ -112,23 +102,17 @@ class AwsEc2(object):
ret = 0
if wait_time_secs > 0:
ret = self.wait_for_state(
instance=instance,
state=state,
wait_time_secs=wait_time_secs,
show_progress=show_progress)
ret = self.wait_for_state(instance=instance, state=state, wait_time_secs=wait_time_secs,
show_progress=show_progress)
try:
# Always provide status after executing command.
status = self.InstanceStatus(
getattr(instance, "instance_id", None),
getattr(instance, "image_id", None),
getattr(instance, "instance_type", None),
getattr(instance, "state", None),
getattr(instance, "instance_id", None), getattr(instance, "image_id", None),
getattr(instance, "instance_type", None), getattr(instance, "state", None),
getattr(instance, "private_ip_address", None),
getattr(instance, "public_ip_address", None),
getattr(instance, "private_dns_name", None),
getattr(instance, "public_dns_name", None),
getattr(instance, "tags", None))
getattr(instance, "public_dns_name", None), getattr(instance, "tags", None))
except botocore.exceptions.ClientError as err:
return 1, err.message

@@ -151,18 +135,9 @@ class AwsEc2(object):
time.sleep(i + 1)
instance.create_tags(Tags=tags)
def launch_instance(self,
ami,
instance_type,
block_devices=None,
key_name=None,
security_group_ids=None,
security_groups=None,
subnet_id=None,
tags=None,
wait_time_secs=0,
show_progress=False,
**kwargs):
def launch_instance(self, ami, instance_type, block_devices=None, key_name=None,
security_group_ids=None, security_groups=None, subnet_id=None, tags=None,
wait_time_secs=0, show_progress=False, **kwargs):
"""Launches and tags an AMI instance.
Returns the tuple (0, status_information), if successful."""

@@ -187,22 +162,15 @@ class AwsEc2(object):
kwargs["KeyName"] = key_name
try:
instances = self.connection.create_instances(
ImageId=ami,
InstanceType=instance_type,
MaxCount=1,
MinCount=1,
**kwargs)
instances = self.connection.create_instances(ImageId=ami, InstanceType=instance_type,
MaxCount=1, MinCount=1, **kwargs)
except (botocore.exceptions.ClientError, botocore.exceptions.ParamValidationError) as err:
return 1, err.message
instance = instances[0]
if wait_time_secs > 0:
self.wait_for_state(
instance=instance,
state="running",
wait_time_secs=wait_time_secs,
show_progress=show_progress)
self.wait_for_state(instance=instance, state="running", wait_time_secs=wait_time_secs,
show_progress=show_progress)
self.tag_instance(instance.instance_id, tags)

@@ -218,93 +186,60 @@ def main():
control_options = optparse.OptionGroup(parser, "Control options")
create_options = optparse.OptionGroup(parser, "Create options")
parser.add_option("--mode",
dest="mode",
choices=_MODES,
default="status",
help="Operations to perform on an EC2 instance, choose one of"
" '{}', defaults to '%default'.".format(", ".join(_MODES)))
parser.add_option("--mode", dest="mode", choices=_MODES, default="status",
help=("Operations to perform on an EC2 instance, choose one of"
" '{}', defaults to '%default'.".format(", ".join(_MODES))))
control_options.add_option("--imageId",
dest="image_id",
default=None,
control_options.add_option("--imageId", dest="image_id", default=None,
help="EC2 image_id to perform operation on [REQUIRED for control].")
control_options.add_option("--waitTimeSecs",
dest="wait_time_secs",
type=int,
default=5 * 60,
help="Time to wait for EC2 instance to reach it's new state,"
" defaults to '%default'.")
control_options.add_option("--waitTimeSecs", dest="wait_time_secs", type=int, default=5 * 60,
help=("Time to wait for EC2 instance to reach it's new state,"
" defaults to '%default'."))
create_options.add_option("--ami",
dest="ami",
default=None,
create_options.add_option("--ami", dest="ami", default=None,
help="EC2 AMI to launch [REQUIRED for create].")
create_options.add_option("--blockDevice",
dest="block_devices",
metavar="DEVICE-NAME DEVICE-SIZE-GB",
action="append",
default=[],
create_options.add_option("--blockDevice", dest="block_devices",
metavar="DEVICE-NAME DEVICE-SIZE-GB", action="append", default=[],
nargs=2,
help="EBS device name and volume size in GiB."
" More than one device can be attached, by specifying"
" this option more than once."
" The device will be deleted on termination of the instance.")
help=("EBS device name and volume size in GiB."
" More than one device can be attached, by specifying"
" this option more than once."
" The device will be deleted on termination of the instance."))
create_options.add_option("--instanceType",
dest="instance_type",
default="t1.micro",
create_options.add_option("--instanceType", dest="instance_type", default="t1.micro",
help="EC2 instance type to launch, defaults to '%default'.")
create_options.add_option("--keyName",
dest="key_name",
default=None,
create_options.add_option("--keyName", dest="key_name", default=None,
help="EC2 key name [REQUIRED for create].")
create_options.add_option("--securityGroupIds",
dest="security_group_ids",
action="append",
create_options.add_option("--securityGroupIds", dest="security_group_ids", action="append",
default=[],
help="EC2 security group ids. More than one security group id can be"
" added, by specifying this option more than once.")
help=("EC2 security group ids. More than one security group id can be"
" added, by specifying this option more than once."))
create_options.add_option("--securityGroup",
dest="security_groups",
action="append",
create_options.add_option("--securityGroup", dest="security_groups", action="append",
default=[],
help="EC2 security group. More than one security group can be added,"
" by specifying this option more than once.")
help=("EC2 security group. More than one security group can be added,"
" by specifying this option more than once."))
create_options.add_option("--subnetId",
dest="subnet_id",
default=None,
create_options.add_option("--subnetId", dest="subnet_id", default=None,
help="EC2 subnet id to use in VPC.")
create_options.add_option("--tagExpireHours",
dest="tag_expire_hours",
type=int,
default=2,
create_options.add_option("--tagExpireHours", dest="tag_expire_hours", type=int, default=2,
help="EC2 tag expire time in hours, defaults to '%default'.")
create_options.add_option("--tagName",
dest="tag_name",
default="",
create_options.add_option("--tagName", dest="tag_name", default="",
help="EC2 tag and instance name.")
create_options.add_option("--tagOwner",
dest="tag_owner",
default="",
help="EC2 tag owner.")
create_options.add_option("--tagOwner", dest="tag_owner", default="", help="EC2 tag owner.")
create_options.add_option("--extraArgs",
dest="extra_args",
metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
default=None,
help="EC2 create instance keyword args. The argument is specified as"
" bracketed YAML - i.e. JSON with support for single quoted"
" and unquoted keys. Example, '{DryRun: True}'")
create_options.add_option(
"--extraArgs", dest="extra_args", metavar="{key1: value1, key2: value2, ..., keyN: valueN}",
default=None, help=("EC2 create instance keyword args. The argument is specified as"
" bracketed YAML - i.e. JSON with support for single quoted"
" and unquoted keys. Example, '{DryRun: True}'"))
parser.add_option_group(control_options)
parser.add_option_group(create_options)

@@ -331,34 +266,25 @@ def main():
# The 'expire-on' key is a UTC time.
expire_dt = datetime.datetime.utcnow() + datetime.timedelta(hours=options.tag_expire_hours)
tags = [{"Key": "expire-on", "Value": expire_dt.strftime("%Y-%m-%d %H:%M:%S")},
{"Key": "Name", "Value": options.tag_name},
{"Key": "owner", "Value": options.tag_owner}]
{"Key": "Name",
"Value": options.tag_name}, {"Key": "owner", "Value": options.tag_owner}]
my_kwargs = {}
if options.extra_args is not None:
my_kwargs = yaml.safe_load(options.extra_args)
(ret_code, instance_status) = aws_ec2.launch_instance(
ami=options.ami,
instance_type=options.instance_type,
block_devices=block_devices,
key_name=options.key_name,
security_group_ids=options.security_group_ids,
security_groups=options.security_groups,
subnet_id=options.subnet_id,
tags=tags,
wait_time_secs=options.wait_time_secs,
show_progress=True,
**my_kwargs)
ami=options.ami, instance_type=options.instance_type, block_devices=block_devices,
key_name=options.key_name, security_group_ids=options.security_group_ids,
security_groups=options.security_groups, subnet_id=options.subnet_id, tags=tags,
wait_time_secs=options.wait_time_secs, show_progress=True, **my_kwargs)
else:
if not getattr(options, "image_id", None):
parser.print_help()
parser.error("Missing required control option")
(ret_code, instance_status) = aws_ec2.control_instance(
mode=options.mode,
image_id=options.image_id,
wait_time_secs=options.wait_time_secs,
mode=options.mode, image_id=options.image_id, wait_time_secs=options.wait_time_secs,
show_progress=True)
print("Return code: {}, Instance status:".format(ret_code))

@@ -370,5 +296,6 @@ def main():
sys.exit(ret_code)
if __name__ == "__main__":
main()

@@ -1,5 +1,4 @@
#!/usr/bin/env python
"""
Command line utility for determining what jstests have been added or modified
"""

@@ -19,14 +18,12 @@ import sys
import urlparse
import yaml
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from buildscripts import resmokelib
from buildscripts.ciconfig import evergreen
API_SERVER_DEFAULT = "https://evergreen.mongodb.com"

@@ -43,12 +40,12 @@ def parse_command_line():
help="The base commit to compare to for determining changes.")
parser.add_option("--buildVariant", dest="buildvariant",
help="The buildvariant the tasks will execute on. \
Required when generating the JSON file with test executor information")
help=("The buildvariant the tasks will execute on. Required when"
" generating the JSON file with test executor information"))
parser.add_option("--checkEvergreen", dest="check_evergreen", action="store_true",
help="Checks Evergreen for the last commit that was scheduled. \
This way all the tests that haven't been burned in will be run.")
help=("Checks Evergreen for the last commit that was scheduled."
" This way all the tests that haven't been burned in will be run."))
parser.add_option("--noExec", dest="no_exec", action="store_true",
help="Do not run resmoke loop on new tests.")

@@ -64,18 +61,10 @@ def parse_command_line():
# The executor_file and suite_files defaults are required to make the
# suite resolver work correctly.
parser.set_defaults(base_commit=None,
branch="master",
buildvariant=None,
check_evergreen=False,
evergreen_file="etc/evergreen.yml",
selector_file="etc/burn_in_tests.yml",
max_revisions=25,
no_exec=False,
executor_file=None,
report_file="report.json",
suite_files="with_server",
test_list_file=None,
parser.set_defaults(base_commit=None, branch="master", buildvariant=None, check_evergreen=False,
evergreen_file="etc/evergreen.yml", selector_file="etc/burn_in_tests.yml",
max_revisions=25, no_exec=False, executor_file=None,
report_file="report.json", suite_files="with_server", test_list_file=None,
test_list_outfile=None)
# This disables argument parsing on the first unrecognized parameter. This allows us to pass

@@ -96,7 +85,8 @@ def read_evg_config():
file_list = [
"./.evergreen.yml",
os.path.expanduser("~/.evergreen.yml"),
os.path.expanduser("~/cli_bin/.evergreen.yml")]
os.path.expanduser("~/cli_bin/.evergreen.yml")
]
for filename in file_list:
if os.path.isfile(filename):

@@ -153,8 +143,8 @@ def find_changed_tests(branch_name, base_commit, max_revisions, buildvariant, ch
# The current commit will be activated in Evergreen; we use --skip to start at the
# previous commit when trying to find the most recent preceding commit that has been
# activated.
revs_to_check = callo(["git", "rev-list", base_commit,
"--max-count=200", "--skip=1"]).splitlines()
revs_to_check = callo(["git", "rev-list", base_commit, "--max-count=200",
"--skip=1"]).splitlines()
last_activated = find_last_activated_task(revs_to_check, buildvariant, branch_name)
if last_activated is None:
# When the current commit is the first time 'buildvariant' has run, there won't be a

@@ -210,8 +200,8 @@ def find_exclude_tests(selector_file):
try:
js_test = yml['selector']['js_test']
except KeyError:
raise Exception("The selector file " + selector_file +
" is missing the 'selector.js_test' key")
raise Exception(
"The selector file " + selector_file + " is missing the 'selector.js_test' key")
return (resmokelib.utils.default_if_none(js_test.get("exclude_suites"), []),
resmokelib.utils.default_if_none(js_test.get("exclude_tasks"), []),

@@ -299,10 +289,7 @@ def create_task_list(evergreen_conf, buildvariant, suites, exclude_tasks):
for task_name, task_arg in variant_task_args.items():
# Find the resmoke_args for matching suite names.
if re.compile('--suites=' + suite + '(?:\s+|$)').match(task_arg):
tasks_to_run[task_name] = {
"resmoke_args": task_arg,
"tests": suites[suite]
}
tasks_to_run[task_name] = {"resmoke_args": task_arg, "tests": suites[suite]}
return tasks_to_run

@@ -371,11 +358,8 @@ def main():
"\t", "\n\t".join(sorted(evergreen_conf.variant_names))
sys.exit(1)
changed_tests = find_changed_tests(values.branch,
values.base_commit,
values.max_revisions,
values.buildvariant,
values.check_evergreen)
changed_tests = find_changed_tests(values.branch, values.base_commit, values.max_revisions,
values.buildvariant, values.check_evergreen)
exclude_suites, exclude_tasks, exclude_tests = find_exclude_tests(values.selector_file)
changed_tests = filter_tests(changed_tests, exclude_tests)
# If there are no changed tests, exit cleanly.

@@ -385,12 +369,9 @@ def main():
_write_report_file({}, values.test_list_outfile)
sys.exit(0)
suites = resmokelib.suitesconfig.get_suites(
suite_files=values.suite_files.split(","),
test_files=changed_tests)
suite_files=values.suite_files.split(","), test_files=changed_tests)
tests_by_executor = create_executor_list(suites, exclude_suites)
tests_by_task = create_task_list(evergreen_conf,
values.buildvariant,
tests_by_executor,
tests_by_task = create_task_list(evergreen_conf, values.buildvariant, tests_by_executor,
exclude_tasks)
if values.test_list_outfile is not None:
_write_report_file(tests_by_task, values.test_list_outfile)

@@ -85,17 +85,17 @@ def generate_bypass_expansions(project, build_variant, revision, build_id):
# With compile bypass we need to update the URL to point to the correct name of the base commit
# binaries.
expansions["mongo_binaries"] = (archive_name("{}/{}/{}/binaries/mongo-{}".format(
project, build_variant, revision, build_id)))
project, build_variant, revision, build_id)))
# With compile bypass we need to update the URL to point to the correct name of the base commit
# debug symbols.
expansions["mongo_debugsymbols"] = (archive_name("{}/{}/{}/debugsymbols/debugsymbols-{}".format(
project, build_variant, revision, build_id)))
project, build_variant, revision, build_id)))
# With compile bypass we need to update the URL to point to the correct name of the base commit
# mongo shell.
expansions["mongo_shell"] = (archive_name("{}/{}/{}/binaries/mongo-shell-{}".format(
project, build_variant, revision, build_id)))
project, build_variant, revision, build_id)))
# Enable bypass compile
expansions["bypass_compile"] = True

@@ -155,9 +155,8 @@ def should_bypass_compile():
if os.path.isdir(filename):
continue
if (filename in requires_compile_files
or any(filename.startswith(directory)
for directory in requires_compile_directories)):
if (filename in requires_compile_files or any(
filename.startswith(directory) for directory in requires_compile_directories)):
print("Compile bypass disabled after detecting {} as being modified because"
" it is a file known to affect compilation.".format(filename))
return False

@@ -173,28 +172,21 @@ def should_bypass_compile():
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--project",
required=True,
parser.add_argument("--project", required=True,
help="The Evergreen project. e.g mongodb-mongo-master")
parser.add_argument("--buildVariant",
required=True,
parser.add_argument("--buildVariant", required=True,
help="The build variant. e.g enterprise-rhel-62-64-bit")
parser.add_argument("--revision",
required=True,
help="The base commit hash.")
parser.add_argument("--revision", required=True, help="The base commit hash.")
parser.add_argument("--patchFile",
required=True,
parser.add_argument("--patchFile", required=True,
help="A list of all files modified in patch build.")
parser.add_argument("--outFile",
required=True,
parser.add_argument("--outFile", required=True,
help="The YAML file to write out the macro expansions.")
parser.add_argument("--jsonArtifact",
required=True,
parser.add_argument("--jsonArtifact", required=True,
help="The JSON file to write out the metadata of files to attach to task.")
return parser.parse_args()

@@ -224,7 +216,7 @@ def main():
api_server = "{url.scheme}://{url.netloc}".format(
url=urlparse(evg_config.get("api_server_host")))
revision_url = "{}/rest/v1/projects/{}/revisions/{}".format(api_server, args.project,
args.revision)
args.revision)
revisions = requests_get_json(revision_url)
match = None

@@ -240,7 +232,7 @@ def main():
break
else:
print("Could not find build id for revision {} on project {}."
" Default compile bypass to false.".format(args.revision, args.project))
" Default compile bypass to false.".format(args.revision, args.project))
return
# Generate the compile task id.

@@ -270,16 +262,20 @@ def main():
return
# Need to extract certain files from the pre-existing artifacts.tgz.
extract_files = [executable_name("dbtest"), executable_name("mongobridge"),
"build/integration_tests.txt"]
extract_files = [
executable_name("dbtest"),
executable_name("mongobridge"),
"build/integration_tests.txt",
]
with tarfile.open(filename, "r:gz") as tar:
# The repo/ directory contains files needed by the package task. May
# need to add other files that would otherwise be generated by SCons
# if we did not bypass compile.
subdir = [tarinfo for tarinfo in tar.getmembers()
if tarinfo.name.startswith("build/integration_tests/")
or tarinfo.name.startswith("repo/")
or tarinfo.name in extract_files]
subdir = [
tarinfo for tarinfo in tar.getmembers()
if tarinfo.name.startswith("build/integration_tests/")
or tarinfo.name.startswith("repo/") or tarinfo.name in extract_files
]
print("Extracting the following files from {0}...\n{1}".format(
filename, "\n".join(tarinfo.name for tarinfo in subdir)))
tar.extractall(members=subdir)

@@ -318,8 +314,9 @@ def main():
# Need to apply these expansions for bypassing SCons.
expansions = generate_bypass_expansions(args.project, args.buildVariant, args.revision,
build_id)
build_id)
write_out_bypass_compile_expansions(args.outFile, **expansions)
if __name__ == "__main__":
main()

@@ -0,0 +1 @@
"""Empty."""

@@ -20,8 +20,10 @@ class EvergreenProjectConfig(object):
self.path = path
self.tasks = [Task(task_dict) for task_dict in self._conf["tasks"]]
self._tasks_by_name = {task.name: task for task in self.tasks}
self.variants = [Variant(variant_dict, self._tasks_by_name)
for variant_dict in self._conf["buildvariants"]]
self.variants = [
Variant(variant_dict, self._tasks_by_name)
for variant_dict in self._conf["buildvariants"]
]
self._variants_by_name = {variant.name: variant for variant in self.variants}
self.distro_names = set()
for variant in self.variants:

@@ -106,8 +108,10 @@ class Variant(object):
def __init__(self, conf_dict, task_map):
self.raw = conf_dict
run_on = self.run_on
self.tasks = [VariantTask(task_map.get(t["name"]), t.get("distros", run_on), self)
for t in conf_dict["tasks"]]
self.tasks = [
VariantTask(task_map.get(t["name"]), t.get("distros", run_on), self)
for t in conf_dict["tasks"]
]
self.distro_names = set(run_on)
for task in self.tasks:
self.distro_names.update(task.run_on)

@@ -176,6 +180,7 @@ class Variant(object):
class VariantTask(Task):
"""Represent a task definition in the context of a build variant."""
def __init__(self, task, run_on, variant):
Task.__init__(self, task.raw)
self.run_on = run_on

@@ -199,7 +204,6 @@ class VariantTask(Task):
class ResmokeArgs(object):
@staticmethod
def get_arg(resmoke_args, name):
"""Return the value of the option --'name' in the 'resmoke_args' string or

@@ -14,7 +14,9 @@ import yaml
def _represent_dict_order(self, data):
return self.represent_mapping("tag:yaml.org,2002:map", data.items())
yaml.add_representer(collections.OrderedDict, _represent_dict_order)
# End setup

@@ -108,11 +110,8 @@ class TagsConfig(object):
"""
with open(filename, "w") as fstream:
if preamble:
print(textwrap.fill(preamble,
width=100,
initial_indent="# ",
subsequent_indent="# "),
file=fstream)
print(textwrap.fill(preamble, width=100, initial_indent="# ",
subsequent_indent="# "), file=fstream)
# We use yaml.safe_dump() in order avoid having strings being written to the file as
# "!!python/unicode ..." and instead have them written as plain 'str' instances.

@@ -138,4 +137,3 @@ def setdefault(doc, key, default):
else:
doc[key] = default
return default

@@ -52,7 +52,9 @@ CLANG_FORMAT_HTTP_LINUX_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/
CLANG_FORMAT_HTTP_DARWIN_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/clang%2Bllvm-3.8.0-x86_64-apple-darwin.tar.xz"
# Path in the tarball to the clang-format binary
CLANG_FORMAT_SOURCE_TAR_BASE = string.Template("clang+llvm-$version-$tar_path/bin/" + CLANG_FORMAT_PROGNAME)
CLANG_FORMAT_SOURCE_TAR_BASE = string.Template(
"clang+llvm-$version-$tar_path/bin/" + CLANG_FORMAT_PROGNAME)
##############################################################################
def callo(args):

@@ -60,18 +62,18 @@ def callo(args):
"""
return subprocess.check_output(args)
def get_tar_path(version, tar_path):
""" Get the path to clang-format in the llvm tarball
"""
return CLANG_FORMAT_SOURCE_TAR_BASE.substitute(
version=version,
tar_path=tar_path)
return CLANG_FORMAT_SOURCE_TAR_BASE.substitute(version=version, tar_path=tar_path)
def extract_clang_format(tar_path):
# Extract just the clang-format binary
# On OSX, we shell out to tar because tarfile doesn't support xz compression
if sys.platform == 'darwin':
subprocess.call(['tar', '-xzf', tar_path, '*clang-format*'])
subprocess.call(['tar', '-xzf', tar_path, '*clang-format*'])
# Otherwise we use tarfile because some versions of tar don't support wildcards without
# a special flag
else:

@@ -81,6 +83,7 @@ def extract_clang_format(tar_path):
tarfp.extract(name)
tarfp.close()
def get_clang_format_from_cache_and_extract(url, tarball_ext):
"""Get clang-format from mongodb's cache
and extract the tarball

@@ -89,8 +92,8 @@ def get_clang_format_from_cache_and_extract(url, tarball_ext):
temp_tar_file = os.path.join(dest_dir, "temp.tar" + tarball_ext)
# Download from file
print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION,
url, temp_tar_file))
print("Downloading clang-format %s from %s, saving to %s" % (CLANG_FORMAT_VERSION, url,
temp_tar_file))
# Retry download up to 5 times.
num_tries = 5

@@ -98,7 +101,7 @@ def get_clang_format_from_cache_and_extract(url, tarball_ext):
try:
resp = urllib2.urlopen(url)
with open(temp_tar_file, 'wb') as f:
f.write(resp.read())
f.write(resp.read())
break
except urllib2.URLError:
if attempt == num_tries - 1:

@@ -107,6 +110,7 @@ def get_clang_format_from_cache_and_extract(url, tarball_ext):
extract_clang_format(temp_tar_file)
def get_clang_format_from_darwin_cache(dest_file):
"""Download clang-format from llvm.org, unpack the tarball,
and put clang-format in the specified place

@@ -116,6 +120,7 @@ def get_clang_format_from_darwin_cache(dest_file):
# Destination Path
shutil.move(get_tar_path(CLANG_FORMAT_VERSION, "x86_64-apple-darwin"), dest_file)
def get_clang_format_from_linux_cache(dest_file):
"""Get clang-format from mongodb's cache
"""

@@ -124,10 +129,12 @@ def get_clang_format_from_linux_cache(dest_file):
# Destination Path
shutil.move("build/bin/clang-format", dest_file)
class ClangFormat(object):
"""Class encapsulates finding a suitable copy of clang-format,
and linting/formating an individual file
"""
def __init__(self, path, cache_dir):
self.path = None
clang_format_progname_ext = ""

@@ -154,10 +161,10 @@ class ClangFormat(object):
# Check for various versions staring with binaries with version specific suffixes in the
# user's path
programs = [
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION,
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_SHORT_VERSION,
CLANG_FORMAT_PROGNAME,
]
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION,
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_SHORT_VERSION,
CLANG_FORMAT_PROGNAME,
]
if sys.platform == "win32":
for i in range(len(programs)):

@@ -178,7 +185,7 @@ class ClangFormat(object):
programfiles = [
os.environ["ProgramFiles"],
os.environ["ProgramFiles(x86)"],
]
]
for programfile in programfiles:
win32bin = os.path.join(programfile, "LLVM\\bin\\clang-format.exe")

@@ -191,7 +198,9 @@ class ClangFormat(object):
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
self.path = os.path.join(cache_dir, CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION + clang_format_progname_ext)
self.path = os.path.join(
cache_dir,
CLANG_FORMAT_PROGNAME + "-" + CLANG_FORMAT_VERSION + clang_format_progname_ext)
# Download a new version if the cache is empty or stale
if not os.path.isfile(self.path) or not self._validate_version():

@@ -201,7 +210,7 @@ class ClangFormat(object):
get_clang_format_from_darwin_cache(self.path)
else:
print("ERROR: clang-format.py does not support downloading clang-format " +
" on this platform, please install clang-format " + CLANG_FORMAT_VERSION)
" on this platform, please install clang-format " + CLANG_FORMAT_VERSION)
# Validate we have the correct version
# We only can fail here if the user specified a clang-format binary and it is the wrong

@@ -220,8 +229,8 @@ class ClangFormat(object):
if CLANG_FORMAT_VERSION in cf_version:
return True
print("WARNING: clang-format found in path, but incorrect version found at " +
self.path + " with version: " + cf_version)
print("WARNING: clang-format found in path, but incorrect version found at " + self.path +
" with version: " + cf_version)
return False

@@ -243,8 +252,8 @@ class ClangFormat(object):
# Take a lock to ensure diffs do not get mixed when printed to the screen
with self.print_lock:
print("ERROR: Found diff for " + file_name)
print("To fix formatting errors, run %s --style=file -i %s" %
(self.path, file_name))
print("To fix formatting errors, run %s --style=file -i %s" % (self.path,
file_name))
for line in result:
print(line.rstrip())

@@ -275,8 +284,10 @@ class ClangFormat(object):
return formatted
files_re = re.compile('\\.(h|hpp|ipp|cpp|js)$')
def is_interesting_file(file_name):
""""Return true if this file should be checked
"""

@@ -284,16 +295,19 @@ def is_interesting_file(file_name):
and not file_name.startswith("src/third_party/")
and not file_name.startswith("src/mongo/gotools/")) and files_re.search(file_name)
def get_list_from_lines(lines):
""""Convert a string containing a series of lines into a list of strings
"""
return [line.rstrip() for line in lines.splitlines()]
def _get_build_dir():
"""Get the location of the scons' build directory in case we need to download clang-format
"""
return os.path.join(git.get_base_dir(), "build")
def _lint_files(clang_format, files):
"""Lint a list of files with clang-format
"""

@@ -305,6 +319,7 @@ def _lint_files(clang_format, files):
print("ERROR: Code Style does not match coding style")
sys.exit(1)
def lint_patch(clang_format, infile):
"""Lint patch command entry point
"""

@@ -314,6 +329,7 @@ def lint_patch(clang_format, infile):
if files:
_lint_files(clang_format, files)
def lint(clang_format):
"""Lint files command entry point
"""

@@ -323,6 +339,7 @@ def lint(clang_format):
return True
def lint_all(clang_format):
"""Lint files command entry point based on working tree
"""

@@ -332,18 +349,20 @@ def lint_all(clang_format):
return True
def _format_files(clang_format, files):
"""Format a list of files with clang-format
"""
clang_format = ClangFormat(clang_format, _get_build_dir())
format_clean = parallel.parallel_process([os.path.abspath(f) for f in files],
clang_format.format)
clang_format.format)
if not format_clean:
print("ERROR: failed to format files")
sys.exit(1)
def format_func(clang_format):
"""Format files command entry point
"""

@@ -351,6 +370,7 @@ def format_func(clang_format):
_format_files(clang_format, files)
def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reformat):
"""Reformat a branch made before a clang-format run
"""

@@ -367,15 +387,16 @@ def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reforma
# Validate that user passes valid commits
if not repo.is_commit(commit_prior_to_reformat):
raise ValueError("Commit Prior to Reformat '%s' is not a valid commit in this repo" %
commit_prior_to_reformat)
commit_prior_to_reformat)
if not repo.is_commit(commit_after_reformat):
raise ValueError("Commit After Reformat '%s' is not a valid commit in this repo" %
commit_after_reformat)
raise ValueError(
"Commit After Reformat '%s' is not a valid commit in this repo" % commit_after_reformat)
if not repo.is_ancestor(commit_prior_to_reformat, commit_after_reformat):
raise ValueError(("Commit Prior to Reformat '%s' is not a valid ancestor of Commit After" +
" Reformat '%s' in this repo") % (commit_prior_to_reformat, commit_after_reformat))
" Reformat '%s' in this repo") % (commit_prior_to_reformat,
commit_after_reformat))
# Validate the user is on a local branch that has the right merge base
if repo.is_detached():

@@ -383,27 +404,36 @@ def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reforma
# Validate the user has no pending changes
if repo.is_working_tree_dirty():
raise ValueError("Your working tree has pending changes. You must have a clean working tree before proceeding.")
raise ValueError(
"Your working tree has pending changes. You must have a clean working tree before proceeding."
)
merge_base = repo.get_merge_base(commit_prior_to_reformat)
if not merge_base == commit_prior_to_reformat:
raise ValueError("Please rebase to '%s' and resolve all conflicts before running this script" % (commit_prior_to_reformat))
raise ValueError(
"Please rebase to '%s' and resolve all conflicts before running this script" %
(commit_prior_to_reformat))
# We assume the target branch is master, it could be a different branch if needed for testing
merge_base = repo.get_merge_base("master")
if not merge_base == commit_prior_to_reformat:
raise ValueError("This branch appears to already have advanced too far through the merge process")
raise ValueError(
"This branch appears to already have advanced too far through the merge process")
# Everything looks good so lets start going through all the commits
branch_name = repo.get_branch_name()
new_branch = "%s-reformatted" % branch_name
if repo.does_branch_exist(new_branch):
raise ValueError("The branch '%s' already exists. Please delete the branch '%s', or rename the current branch." % (new_branch, new_branch))
raise ValueError(
"The branch '%s' already exists. Please delete the branch '%s', or rename the current branch."
% (new_branch, new_branch))
commits = get_list_from_lines(repo.log(["--reverse", "--pretty=format:%H", "%s..HEAD" % commit_prior_to_reformat]))
commits = get_list_from_lines(
repo.log(["--reverse", "--pretty=format:%H",
"%s..HEAD" % commit_prior_to_reformat]))
previous_commit_base = commit_after_reformat

@@ -423,8 +453,8 @@ def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reforma
# Format each file needed if it was not deleted
if not os.path.exists(commit_file):
print("Skipping file '%s' since it has been deleted in commit '%s'" % (
commit_file, commit_hash))
print("Skipping file '%s' since it has been deleted in commit '%s'" % (commit_file,
commit_hash))
deleted_files.append(commit_file)
continue

@@ -432,11 +462,11 @@ def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reforma
clang_format.format(commit_file)
else:
print("Skipping file '%s' since it is not a file clang_format should format" %
commit_file)
commit_file)
# Check if anything needed reformatting, and if so amend the commit
if not repo.is_working_tree_dirty():
print ("Commit %s needed no reformatting" % commit_hash)
print("Commit %s needed no reformatting" % commit_hash)
else:
repo.commit(["--all", "--amend", "--no-edit"])

@@ -448,8 +478,8 @@ def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reforma
repo.checkout(["--quiet", previous_commit_base])
# Copy each file from the reformatted commit on top of the post reformat
diff_files = get_list_from_lines(repo.diff(["%s~..%s" % (previous_commit, previous_commit),
"--name-only"]))
diff_files = get_list_from_lines(
repo.diff(["%s~..%s" % (previous_commit, previous_commit), "--name-only"]))
for diff_file in diff_files:
# If the file was deleted in the commit we are reformatting, we need to delete it again

@@ -478,7 +508,8 @@ def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reforma
repo.checkout(["-b", new_branch])
print("reformat-branch is done running.\n")
print("A copy of your branch has been made named '%s', and formatted with clang-format.\n" % new_branch)
print("A copy of your branch has been made named '%s', and formatted with clang-format.\n" %
new_branch)
print("The original branch has been left unchanged.")
print("The next step is to rebase the new branch on 'master'.")

@@ -486,7 +517,10 @@ def reformat_branch(clang_format, commit_prior_to_reformat, commit_after_reforma
def usage():
"""Print usage
"""
print("clang-format.py supports 5 commands [ lint, lint-all, lint-patch, format, reformat-branch].")
print(
"clang-format.py supports 5 commands [ lint, lint-all, lint-patch, format, reformat-branch]."
)
def main():
"""Main entry point

@@ -510,7 +544,9 @@ def main():
elif command == "reformat-branch":
if len(args) < 3:
print("ERROR: reformat-branch takes two parameters: commit_prior_to_reformat commit_after_reformat")
print(
"ERROR: reformat-branch takes two parameters: commit_prior_to_reformat commit_after_reformat"
)
return
reformat_branch(options.clang_format, args[2], args[3])

@@ -519,5 +555,6 @@ def main():
else:
usage()
if __name__ == "__main__":
main()

@@ -1,5 +1,4 @@
#!/usr/bin/env python
"""
Collect system resource information on processes running in Evergreen on a given interval.
"""

@@ -16,29 +15,23 @@ import time
from bson.json_util import dumps
import requests
# Get relative imports to work when the package is not installed on the PYTHONPATH.
if __name__ == "__main__" and __package__ is None:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from buildscripts.resmokelib import utils
def main():
usage = "usage: %prog [options]"
parser = optparse.OptionParser(description=__doc__, usage=usage)
parser.add_option("-i", "--interval",
dest="interval",
default=5,
type="int",
parser.add_option("-i", "--interval", dest="interval", default=5, type="int",
help="Collect system resource information every <interval> seconds. "
"Default is every 5 seconds.")
parser.add_option("-o", "--output-file",
dest="outfile",
default="-",
"Default is every 5 seconds.")
parser.add_option("-o", "--output-file", dest="outfile", default="-",
help="If '-', then the file is written to stdout."
" Any other value is treated as the output file name. By default,"
" output is written to stdout.")
" Any other value is treated as the output file name. By default,"
" output is written to stdout.")
(options, _) = parser.parse_args()

@@ -1,5 +1,4 @@
#!/usr/bin/env python
"""
Combines JSON report files used in Evergreen
"""

@@ -53,15 +52,11 @@ def check_error(input_count, output_count):
def main():
usage = "usage: %prog [options] report1.json report2.json ..."
parser = OptionParser(description=__doc__, usage=usage)
parser.add_option("-o", "--output-file",
dest="outfile",
default="-",
help="If '-', then the combined report file is written to stdout."
" Any other value is treated as the output file name. By default,"
" output is written to stdout.")
parser.add_option("-x", "--no-report-exit",
dest="report_exit",
default=True,
parser.add_option("-o", "--output-file", dest="outfile", default="-",
help=("If '-', then the combined report file is written to stdout."
" Any other value is treated as the output file name. By default,"
" output is written to stdout."))
parser.add_option("-x", "--no-report-exit", dest="report_exit", default=True,
action="store_false",
help="Do not exit with a non-zero code if any test in the report fails.")

@@ -1,5 +1,4 @@
#!/usr/bin/env python
"""Produces a report of all assertions in the MongoDB server codebase.
Parses .cpp files for assertions and verifies assertion codes are distinct.

@@ -19,49 +18,50 @@ except ImportError:
print("*** Run 'pip2 install --user regex' to speed up error code checking")
import re
ASSERT_NAMES = [ "uassert" , "massert", "fassert", "fassertFailed" ]
ASSERT_NAMES = ["uassert", "massert", "fassert", "fassertFailed"]
MINIMUM_CODE = 10000
codes = []
# Each AssertLocation identifies the C++ source location of an assertion
AssertLocation = namedtuple( "AssertLocation", ['sourceFile', 'byteOffset', 'lines', 'code'] )
AssertLocation = namedtuple("AssertLocation", ['sourceFile', 'byteOffset', 'lines', 'code'])
list_files = False
# Of historical interest only
def assignErrorCodes():
cur = MINIMUM_CODE
for root in ASSERT_NAMES:
for x in utils.getAllSourceFiles():
print( x )
print(x)
didAnything = False
fixed = ""
for line in open( x ):
s = line.partition( root + "(" )
if s[1] == "" or line.startswith( "#define " + root):
for line in open(x):
s = line.partition(root + "(")
if s[1] == "" or line.startswith("#define " + root):
fixed += line
continue
fixed += s[0] + root + "( " + str( cur ) + " , " + s[2]
fixed += s[0] + root + "( " + str(cur) + " , " + s[2]
cur = cur + 1
didAnything = True
if didAnything:
out = open( x , 'w' )
out.write( fixed )
out = open(x, 'w')
out.write(fixed)
out.close()
def parseSourceFiles( callback ):
def parseSourceFiles(callback):
"""Walks MongoDB sourcefiles and invokes callback for each AssertLocation found."""
quick = ["assert", "Exception", "ErrorCodes::Error"]
patterns = [
re.compile( r"(?:u|m(?:sg)?)asser(?:t|ted)(?:NoTrace)?\s*\(\s*(\d+)", re.MULTILINE ) ,
re.compile( r"(?:DB|Assertion)Exception\s*[({]\s*(\d+)", re.MULTILINE ),
re.compile( r"fassert(?:Failed)?(?:WithStatus)?(?:NoTrace)?(?:StatusOK)?\s*\(\s*(\d+)",
re.MULTILINE ),
re.compile( r"ErrorCodes::Error\s*[({]\s*(\d+)", re.MULTILINE )
re.compile(r"(?:u|m(?:sg)?)asser(?:t|ted)(?:NoTrace)?\s*\(\s*(\d+)", re.MULTILINE),
re.compile(r"(?:DB|Assertion)Exception\s*[({]\s*(\d+)", re.MULTILINE),
re.compile(r"fassert(?:Failed)?(?:WithStatus)?(?:NoTrace)?(?:StatusOK)?\s*\(\s*(\d+)",
re.MULTILINE),
re.compile(r"ErrorCodes::Error\s*[({]\s*(\d+)", re.MULTILINE)
]
for sourceFile in utils.getAllSourceFiles(prefix='src/mongo/'):

@@ -83,12 +83,11 @@ def parseSourceFiles( callback ):
# Note that this will include the text of the full match but will report the
# position of the beginning of the code portion rather than the beginning of the
# match. This is to position editors on the spot that needs to change.
thisLoc = AssertLocation(sourceFile,
codeOffset,
text[match.start():match.end()],
code)
thisLoc = AssertLocation(sourceFile, codeOffset,
text[match.start():match.end()], code)
callback(thisLoc)
callback( thisLoc )
# Converts an absolute position in a file into a line number.
def getLineAndColumnForPosition(loc, _file_cache={}):

@@ -105,7 +104,8 @@ def getLineAndColumnForPosition(loc, _file_cache={}):
column = loc.byteOffset - _file_cache[loc.sourceFile][line - 1] + 1
return (line, column)
def isTerminated( lines ):
def isTerminated(lines):
"""Given .cpp/.h source lines as text, determine if assert is terminated."""
x = " ".join(lines)
return ';' in x \

@@ -121,8 +121,7 @@ def getNextCode():
if not len(codes) > 0:
readErrorCodes()
highest = reduce( lambda x, y: max(int(x), int(y)),
(loc.code for loc in codes) )
highest = reduce(lambda x, y: max(int(x), int(y)), (loc.code for loc in codes))
return highest + 1

@@ -130,7 +129,7 @@ def checkErrorCodes():
"""SConstruct expects a boolean response from this function.
"""
(codes, errors) = readErrorCodes()
return len( errors ) == 0
return len(errors) == 0
def readErrorCodes():

@@ -142,8 +141,8 @@ def readErrorCodes():
dups = defaultdict(list)
# define callback
def checkDups( assertLoc ):
codes.append( assertLoc )
def checkDups(assertLoc):
codes.append(assertLoc)
code = assertLoc.code
if not code in seen:

@@ -151,32 +150,32 @@ def readErrorCodes():
else:
if not code in dups:
# on first duplicate, add original to dups, errors
dups[code].append( seen[code] )
errors.append( seen[code] )
dups[code].append(seen[code])
errors.append(seen[code])
dups[code].append( assertLoc )
errors.append( assertLoc )
dups[code].append(assertLoc)
errors.append(assertLoc)
parseSourceFiles( checkDups )
parseSourceFiles(checkDups)
if seen.has_key("0"):
code = "0"
bad = seen[code]
errors.append( bad )
errors.append(bad)
line, col = getLineAndColumnForPosition(bad)
print( "ZERO_CODE:" )
print( " %s:%d:%d:%s" % (bad.sourceFile, line, col, bad.lines) )
print("ZERO_CODE:")
print(" %s:%d:%d:%s" % (bad.sourceFile, line, col, bad.lines))
for code, locations in dups.items():
print( "DUPLICATE IDS: %s" % code )
print("DUPLICATE IDS: %s" % code)
for loc in locations:
line, col = getLineAndColumnForPosition(loc)
print( " %s:%d:%d:%s" % (loc.sourceFile, line, col, loc.lines) )
print(" %s:%d:%d:%s" % (loc.sourceFile, line, col, loc.lines))
return (codes, errors)
def replaceBadCodes( errors, nextCode ):
def replaceBadCodes(errors, nextCode):
"""Modifies C++ source files to replace invalid assertion codes.
For now, we only modify zero codes.

@@ -189,8 +188,7 @@ def replaceBadCodes( errors, nextCode ):
for loc in skip_errors:
line, col = getLineAndColumnForPosition(loc)
print ("SKIPPING NONZERO code=%s: %s:%d:%d"
% (loc.code, loc.sourceFile, line, col))
print("SKIPPING NONZERO code=%s: %s:%d:%d" % (loc.code, loc.sourceFile, line, col))
# Dedupe, sort, and reverse so we don't have to update offsets as we go.
for assertLoc in reversed(sorted(set(zero_errors))):

@@ -209,14 +207,14 @@ def replaceBadCodes( errors, nextCode ):
f.seek(0)
f.write(text[:byteOffset])
f.write(str(nextCode))
f.write(text[byteOffset+1:])
f.write(text[byteOffset + 1:])
f.seek(0)
print "LINE_%d_AFTER :%s" % (lineNum, f.readlines()[ln].rstrip())
nextCode += 1
def getBestMessage( lines , codeStr ):
def getBestMessage(lines, codeStr):
"""Extracts message from one AssertionLocation.lines entry
Args:

@@ -225,7 +223,7 @@ def getBestMessage( lines , codeStr ):
"""
line = lines if isinstance(lines, str) else " ".join(lines)
err = line.partition( codeStr )[2]
err = line.partition(codeStr)[2]
if not err:
return ""

@@ -249,16 +247,14 @@ def getBestMessage( lines , codeStr ):
return err.strip()
def main():
parser = OptionParser(description=__doc__.strip())
parser.add_option("--fix", dest="replace",
action="store_true", default=False,
parser.add_option("--fix", dest="replace", action="store_true", default=False,
help="Fix zero codes in source files [default: %default]")
parser.add_option("-q", "--quiet", dest="quiet",
action="store_true", default=False,
parser.add_option("-q", "--quiet", dest="quiet", action="store_true", default=False,
help="Suppress output on success [default: %default]")
parser.add_option("--list-files", dest="list_files",
action="store_true", default=False,
parser.add_option("--list-files", dest="list_files", action="store_true", default=False,
help="Print the name of each file as it is scanned [default: %default]")
(options, args) = parser.parse_args()
@ -52,11 +52,13 @@ ESLINT_HTTP_DARWIN_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/eslin

# Path in the tarball to the ESLint binary.
ESLINT_SOURCE_TAR_BASE = string.Template(ESLINT_PROGNAME + "-$platform-$arch")


def callo(args):
"""Call a program, and capture its output
"""
return subprocess.check_output(args)


def extract_eslint(tar_path, target_file):
tarfp = tarfile.open(tar_path)
for name in tarfp.getnames():

@ -64,6 +66,7 @@ def extract_eslint(tar_path, target_file):
tarfp.extract(name)
tarfp.close()


def get_eslint_from_cache(dest_file, platform, arch):
"""Get ESLint binary from mongodb's cache
"""

@ -79,8 +82,7 @@ def get_eslint_from_cache(dest_file, platform, arch):
temp_tar_file = os.path.join(dest_dir, "temp.tar.gz")

# Download the file
print("Downloading ESLint %s from %s, saving to %s" % (ESLINT_VERSION,
url, temp_tar_file))
print("Downloading ESLint %s from %s, saving to %s" % (ESLINT_VERSION, url, temp_tar_file))
urllib.urlretrieve(url, temp_tar_file)

eslint_distfile = ESLINT_SOURCE_TAR_BASE.substitute(platform=platform, arch=arch)

@ -91,6 +93,7 @@ def get_eslint_from_cache(dest_file, platform, arch):
class ESLint(object):
"""Class encapsulates finding a suitable copy of ESLint, and linting an individual file
"""

def __init__(self, path, cache_dir):
eslint_progname = ESLINT_PROGNAME

@ -155,8 +158,8 @@ class ESLint(object):
return True

if warn:
print("WARNING: eslint found in path, but incorrect version found at " +
self.path + " with version: " + esl_version)
print("WARNING: eslint found in path, but incorrect version found at " + self.path +
" with version: " + esl_version)
return False

def _lint(self, file_name, print_diff):

@ -189,17 +192,20 @@ class ESLint(object):
"""
return not subprocess.call([self.path, "--fix", file_name])


def is_interesting_file(file_name):
""""Return true if this file should be checked
"""
return ((file_name.startswith("src/mongo") or file_name.startswith("jstests"))
and file_name.endswith(".js"))


def _get_build_dir():
"""Get the location of the scons build directory in case we need to download ESLint
"""
return os.path.join(git.get_base_dir(), "build")


def _lint_files(eslint, files):
"""Lint a list of files with ESLint
"""

@ -214,6 +220,7 @@ def _lint_files(eslint, files):

return True


def lint_patch(eslint, infile):
"""Lint patch command entry point
"""

@ -224,6 +231,7 @@ def lint_patch(eslint, infile):
return _lint_files(eslint, files)
return True


def lint(eslint, dirmode, glob):
"""Lint files command entry point
"""

@ -236,6 +244,7 @@ def lint(eslint, dirmode, glob):

return True


def _autofix_files(eslint, files):
"""Auto-fix the specified files with ESLint.
"""

@ -247,6 +256,7 @@ def _autofix_files(eslint, files):
print("ERROR: failed to auto-fix files")
return False


def autofix_func(eslint, dirmode, glob):
"""Auto-fix files command entry point
"""

@ -268,11 +278,16 @@ def main():
"provided patch file (for upload.py). "\
"fix runs ESLint with --fix on provided patterns "\
"or files under jstests/ and src/mongo."
epilog ="*Unless you specify -d a separate ESLint process will be launched for every file"
epilog = "*Unless you specify -d a separate ESLint process will be launched for every file"
parser = OptionParser()
parser = OptionParser(usage=usage, description=description, epilog=epilog)
parser.add_option("-e", "--eslint", type="string", dest="eslint",
help="Fully qualified path to eslint executable",)
parser.add_option(
"-e",
"--eslint",
type="string",
dest="eslint",
help="Fully qualified path to eslint executable",
)
parser.add_option("-d", "--dirmode", action="store_true", default=True, dest="dirmode",
help="Considers the glob patterns as directories and runs ESLint process " \
"against each pattern",)

@ -301,5 +316,7 @@ def main():
parser.print_help()

sys.exit(0 if success else 1)


if __name__ == "__main__":
main()

@ -1,5 +1,4 @@
#!/usr/bin/env python

"""
Command line utility for executing MongoDB tests in Evergreen.
"""

@ -17,7 +16,6 @@ if __name__ == "__main__" and __package__ is None:
from buildscripts import resmoke
from buildscripts import resmokelib


_TagInfo = collections.namedtuple("_TagInfo", ["tag_name", "evergreen_aware", "suite_options"])


@ -27,23 +25,18 @@ class Main(resmoke.Main):
additional options for running unreliable tests in Evergreen.
"""

UNRELIABLE_TAG = _TagInfo(tag_name="unreliable",
evergreen_aware=True,
UNRELIABLE_TAG = _TagInfo(tag_name="unreliable", evergreen_aware=True,
suite_options=resmokelib.config.SuiteOptions.ALL_INHERITED._replace(
report_failure_status="silentfail"))

RESOURCE_INTENSIVE_TAG = _TagInfo(
tag_name="resource_intensive",
evergreen_aware=False,
tag_name="resource_intensive", evergreen_aware=False,
suite_options=resmokelib.config.SuiteOptions.ALL_INHERITED._replace(num_jobs=1))

RETRY_ON_FAILURE_TAG = _TagInfo(
tag_name="retry_on_failure",
evergreen_aware=True,
tag_name="retry_on_failure", evergreen_aware=True,
suite_options=resmokelib.config.SuiteOptions.ALL_INHERITED._replace(
fail_fast=False,
num_repeats=2,
report_failure_status="silentfail"))
fail_fast=False, num_repeats=2, report_failure_status="silentfail"))

def _make_evergreen_aware_tags(self, tag_name):
"""

@ -61,11 +54,11 @@ class Main(resmoke.Main):
if resmokelib.config.EVERGREEN_DISTRO_ID is not None:
tags_format.append("{tag_name}|{task_name}|{variant_name}|{distro_id}")

return [tag.format(tag_name=tag_name,
task_name=resmokelib.config.EVERGREEN_TASK_NAME,
variant_name=resmokelib.config.EVERGREEN_VARIANT_NAME,
distro_id=resmokelib.config.EVERGREEN_DISTRO_ID)
for tag in tags_format]
return [
tag.format(tag_name=tag_name, task_name=resmokelib.config.EVERGREEN_TASK_NAME,
variant_name=resmokelib.config.EVERGREEN_VARIANT_NAME,
distro_id=resmokelib.config.EVERGREEN_DISTRO_ID) for tag in tags_format
]

@classmethod
def _make_tag_combinations(cls):

@ -77,31 +70,28 @@ class Main(resmoke.Main):
combinations = []

if resmokelib.config.EVERGREEN_PATCH_BUILD:
combinations.append((
"unreliable and resource intensive",
((cls.UNRELIABLE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG, True))))
combinations.append((
"unreliable and not resource intensive",
((cls.UNRELIABLE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG, False))))
combinations.append((
"reliable and resource intensive",
((cls.UNRELIABLE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG, True))))
combinations.append((
"reliable and not resource intensive",
((cls.UNRELIABLE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG, False))))
combinations.append(("unreliable and resource intensive",
((cls.UNRELIABLE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG, True))))
combinations.append(("unreliable and not resource intensive",
((cls.UNRELIABLE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG, False))))
combinations.append(("reliable and resource intensive",
((cls.UNRELIABLE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG, True))))
combinations.append(("reliable and not resource intensive",
((cls.UNRELIABLE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG,
False))))
else:
combinations.append((
"retry on failure and resource intensive",
((cls.RETRY_ON_FAILURE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG, True))))
combinations.append((
"retry on failure and not resource intensive",
((cls.RETRY_ON_FAILURE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG, False))))
combinations.append((
"run once and resource intensive",
((cls.RETRY_ON_FAILURE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG, True))))
combinations.append((
"run once and not resource intensive",
((cls.RETRY_ON_FAILURE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG, False))))
combinations.append(("retry on failure and resource intensive",
((cls.RETRY_ON_FAILURE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG,
True))))
combinations.append(("retry on failure and not resource intensive",
((cls.RETRY_ON_FAILURE_TAG, True), (cls.RESOURCE_INTENSIVE_TAG,
False))))
combinations.append(("run once and resource intensive",
((cls.RETRY_ON_FAILURE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG,
True))))
combinations.append(("run once and not resource intensive",
((cls.RETRY_ON_FAILURE_TAG, False), (cls.RESOURCE_INTENSIVE_TAG,
False))))

return combinations

@ -1,5 +1,4 @@
#!/usr/bin/env python

"""Script to retrieve the etc/test_lifecycle.yml tag file from the metadata repository that
corresponds to the current repository.

@ -26,7 +25,6 @@ if __name__ == "__main__" and __package__ is None:

from buildscripts import git


LOGGER = logging.getLogger(__name__)


@ -77,8 +75,8 @@ class MetadataRepository(object):

def get_lifecycle_file_content(self, metadata_revision):
"""Return the content of the test lifecycle file as it was at the given revision."""
return self._repository.git_cat_file(["blob", "%s:%s" % (metadata_revision,
self._lifecycle_file)])
return self._repository.git_cat_file(
["blob", "%s:%s" % (metadata_revision, self._lifecycle_file)])


def _clone_repository(url, branch):

@ -116,8 +114,8 @@ def fetch_test_lifecycle(metadata_repo_url, references_file, lifecycle_file, pro
project: the Evergreen project name.
revision: the current repository revision.
"""
metadata_repo = MetadataRepository(_clone_repository(metadata_repo_url, project),
references_file, lifecycle_file)
metadata_repo = MetadataRepository(
_clone_repository(metadata_repo_url, project), references_file, lifecycle_file)
mongo_repo = git.Repository(os.getcwd())
metadata_revision = _get_metadata_revision(metadata_repo, mongo_repo, project, revision)
if metadata_revision:

@ -133,49 +131,39 @@ def main():
Utility to fetch the etc/test_lifecycle.yml file corresponding to a given revision from
the mongo-test-metadata repository.
"""
parser = optparse.OptionParser(description=textwrap.dedent(main.__doc__),
usage="Usage: %prog [options] evergreen-project")
parser = optparse.OptionParser(
description=textwrap.dedent(main.__doc__), usage="Usage: %prog [options] evergreen-project")

parser.add_option("--revision", dest="revision",
metavar="<revision>",
default="HEAD",
parser.add_option("--revision", dest="revision", metavar="<revision>", default="HEAD",
help=("The project revision for which to retrieve the test lifecycle tags"
" file."))

parser.add_option("--metadataRepo", dest="metadata_repo_url",
metavar="<metadata-repo-url>",
parser.add_option("--metadataRepo", dest="metadata_repo_url", metavar="<metadata-repo-url>",
default="git@github.com:mongodb/mongo-test-metadata.git",
help=("The URL to the metadata repository that contains the test lifecycle"
" tags file."))

parser.add_option("--lifecycleFile", dest="lifecycle_file",
metavar="<lifecycle-file>",
parser.add_option("--lifecycleFile", dest="lifecycle_file", metavar="<lifecycle-file>",
default="etc/test_lifecycle.yml",
help=("The path to the test lifecycle tags file, relative to the root of the"
" metadata repository. Defaults to '%default'."))

parser.add_option("--referencesFile", dest="references_file",
metavar="<references-file>",
parser.add_option("--referencesFile", dest="references_file", metavar="<references-file>",
default="references.yml",
help=("The path to the metadata references file, relative to the root of the"
" metadata repository. Defaults to '%default'."))

parser.add_option("--destinationFile", dest="destination_file",
metavar="<destination-file>",
parser.add_option("--destinationFile", dest="destination_file", metavar="<destination-file>",
default="etc/test_lifecycle.yml",
help=("The path where the lifecycle file should be available when this script"
" completes successfully. This path is absolute or relative to the"
" current working directory. Defaults to '%default'."))

parser.add_option("--logLevel", dest="log_level",
metavar="<log-level>",
choices=["DEBUG", "INFO", "WARNING", "ERROR"],
default="INFO",
parser.add_option("--logLevel", dest="log_level", metavar="<log-level>",
choices=["DEBUG", "INFO", "WARNING", "ERROR"], default="INFO",
help="The log level: DEBUG, INFO, WARNING or ERROR. Defaults to '%default'.")

parser.add_option("--logFile", dest="log_file",
metavar="<log-file>",
default=None,
parser.add_option("--logFile", dest="log_file", metavar="<log-file>", default=None,
help=("The destination file for the logs. If not set the script will log to"
" the standard output"))

@ -187,14 +175,12 @@ def main():
parser.error("Must specify an Evergreen project")
evergreen_project = args[0]

logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s",
level=options.log_level, filename=options.log_file)
logging.basicConfig(format="%(asctime)s %(levelname)s %(message)s", level=options.log_level,
filename=options.log_file)

lifecycle_file_content = fetch_test_lifecycle(options.metadata_repo_url,
options.references_file,
options.lifecycle_file,
evergreen_project,
options.revision)
options.references_file, options.lifecycle_file,
evergreen_project, options.revision)
if not lifecycle_file_content:
LOGGER.error("Failed to fetch the test lifecycle tag file.")
sys.exit(1)

@ -62,6 +62,7 @@ class DumpGlobalServiceContext(gdb.Command):
def invoke(self, arg, _from_tty):
gdb.execute("print *('mongo::(anonymous namespace)::globalServiceContext')")


# Register command
DumpGlobalServiceContext()

@ -92,6 +93,7 @@ class MongoDBDumpLocks(gdb.Command):
except gdb.error as gdberr:
print("Ignoring error '%s' in dump_mongod_locks" % str(gdberr))


# Register command
MongoDBDumpLocks()

@ -113,6 +115,7 @@ class BtIfActive(gdb.Command):
else:
gdb.execute("bt")


# Register command
BtIfActive()

@ -204,6 +207,7 @@ class MongoDBUniqueStack(gdb.Command):
print(stack['output'])
print() # leave extra blank line after each thread stack


# Register command
MongoDBUniqueStack()

@ -263,6 +267,7 @@ class MongoDBHelp(gdb.Command):
for key in mongo_commands:
print("%s - %s" % (key, mongo_commands[key]))


# Register command
MongoDBHelp()

@ -131,8 +131,8 @@ class Graph(object):
|
|||
color = ""
|
||||
if nodes and node_key in nodes:
|
||||
color = "color = red"
|
||||
sb.append(' "{}" [label="{}" {}]'.format(
|
||||
node_key, self.nodes[node_key]['node'], color))
|
||||
sb.append(' "{}" [label="{}" {}]'.format(node_key, self.nodes[node_key]['node'],
|
||||
color))
|
||||
sb.append("}")
|
||||
return "\n".join(sb)
|
||||
|
||||
|
|
@ -222,8 +222,8 @@ def find_mutex_holder(graph, thread_dict, show):
|
|||
# Use the thread LWP as a substitute for showing output or generating the graph.
|
||||
if mutex_holder not in thread_dict:
|
||||
print("Warning: Mutex at {} held by thread with LWP {}"
|
||||
" not found in thread_dict. Using LWP to track thread.".format(mutex_value,
|
||||
mutex_holder))
|
||||
" not found in thread_dict. Using LWP to track thread.".format(
|
||||
mutex_value, mutex_holder))
|
||||
mutex_holder_id = mutex_holder
|
||||
else:
|
||||
mutex_holder_id = thread_dict[mutex_holder]
|
||||
|
|
@ -232,14 +232,11 @@ def find_mutex_holder(graph, thread_dict, show):
|
|||
mutex_waiter_id = thread_dict[mutex_waiter_lwpid]
|
||||
if show:
|
||||
print("Mutex at {} held by thread 0x{:x} (LWP {})"
|
||||
" waited on by thread 0x{:x} (LWP {})".format(mutex_value,
|
||||
mutex_holder_id,
|
||||
mutex_holder,
|
||||
mutex_waiter_id,
|
||||
mutex_waiter_lwpid))
|
||||
" waited on by thread 0x{:x} (LWP {})".format(
|
||||
mutex_value, mutex_holder_id, mutex_holder, mutex_waiter_id, mutex_waiter_lwpid))
|
||||
if graph:
|
||||
graph.add_edge(Thread(mutex_waiter_id, mutex_waiter_lwpid),
|
||||
Lock(long(mutex_value), "Mutex"))
|
||||
graph.add_edge(
|
||||
Thread(mutex_waiter_id, mutex_waiter_lwpid), Lock(long(mutex_value), "Mutex"))
|
||||
graph.add_edge(Lock(long(mutex_value), "Mutex"), Thread(mutex_holder_id, mutex_holder))
|
||||
|
||||
|
||||
|
|
@ -268,11 +265,11 @@ def find_lock_manager_holders(graph, thread_dict, show):
|
|||
if show:
|
||||
print("MongoDB Lock at {} ({}) held by thread id 0x{:x} (LWP {})".format(
|
||||
lock_head, lock_request["mode"], lock_thread_id, lock_thread_lwpid) +
|
||||
" waited on by thread 0x{:x} (LWP {})".format(thread_dict[lwpid], lwpid))
|
||||
" waited on by thread 0x{:x} (LWP {})".format(thread_dict[lwpid], lwpid))
|
||||
if graph:
|
||||
graph.add_edge(Thread(thread_dict[lwpid], lwpid), Lock(long(lock_head), "MongoDB lock"))
|
||||
graph.add_edge(Lock(long(lock_head), "MongoDB lock"),
|
||||
Thread(lock_thread_id, lock_thread_lwpid))
|
||||
graph.add_edge(
|
||||
Lock(long(lock_head), "MongoDB lock"), Thread(lock_thread_id, lock_thread_lwpid))
|
||||
lock_request_ptr = lock_request["next"]
|
||||
|
||||
|
||||
|
|
@ -311,6 +308,7 @@ def get_threads_info(graph=None):
|
|||
|
||||
class MongoDBShowLocks(gdb.Command):
|
||||
"""Show MongoDB locks & pthread mutexes"""
|
||||
|
||||
def __init__(self):
|
||||
register_mongo_command(self, "mongodb-show-locks", gdb.COMMAND_DATA)
|
||||
|
||||
|
|
@ -325,11 +323,13 @@ class MongoDBShowLocks(gdb.Command):
|
|||
except gdb.error as err:
|
||||
print("Ignoring GDB error '%s' in mongodb_show_locks" % str(err))
|
||||
|
||||
|
||||
MongoDBShowLocks()
|
||||
|
||||
|
||||
class MongoDBWaitsForGraph(gdb.Command):
|
||||
"""Create MongoDB WaitsFor lock graph [graph_file]"""
|
||||
|
||||
def __init__(self):
|
||||
register_mongo_command(self, "mongodb-waitsfor-graph", gdb.COMMAND_DATA)
|
||||
|
||||
|
|
|
|||
|
|
@ -51,6 +51,7 @@ class StatusPrinter(object):
|
|||
|
||||
class StatusWithPrinter:
|
||||
"""Pretty-printer for mongo::StatusWith<>"""
|
||||
|
||||
def __init__(self, val):
|
||||
self.val = val
|
||||
|
||||
|
|
@ -190,8 +191,7 @@ class DecorablePrinter:
|
|||
return 'map'
|
||||
|
||||
def to_string(self):
|
||||
return "Decorable<%s> with %s elems " % (self.val.type.template_argument(0),
|
||||
self.count)
|
||||
return "Decorable<%s> with %s elems " % (self.val.type.template_argument(0), self.count)
|
||||
|
||||
def children(self):
|
||||
decorationData = get_unique_ptr(self.val["_decorations"]["_decorationData"])
|
||||
|
|
@ -205,7 +205,7 @@ class DecorablePrinter:
|
|||
# TODO: abstract out navigating a std::function
|
||||
type_name = str(descriptor["constructor"]["_M_functor"]["_M_unused"]["_M_object"])
|
||||
type_name = type_name[0:len(type_name) - 1]
|
||||
type_name = type_name[0: type_name.rindex(">")]
|
||||
type_name = type_name[0:type_name.rindex(">")]
|
||||
type_name = type_name[type_name.index("constructAt<"):].replace("constructAt<", "")
|
||||
|
||||
# If the type is a pointer type, strip the * at the end.
|
||||
|
|
@ -287,8 +287,8 @@ class MongoPrettyPrinterCollection(gdb.printing.PrettyPrinter):
|
|||
if index == -1 or index + 1 == len(lookup_tag):
|
||||
for printer in self.subprinters:
|
||||
if printer.enabled and (
|
||||
(printer.is_template and lookup_tag.find(printer.prefix) == 0) or
|
||||
(not printer.is_template and lookup_tag == printer.prefix)):
|
||||
(printer.is_template and lookup_tag.find(printer.prefix) == 0) or
|
||||
(not printer.is_template and lookup_tag == printer.prefix)):
|
||||
return printer.printer(val)
|
||||
|
||||
return None
|
||||
|
|
@ -301,9 +301,11 @@ def build_pretty_printer():
|
|||
pp.add('Status', 'mongo::Status', False, StatusPrinter)
|
||||
pp.add('StatusWith', 'mongo::StatusWith', True, StatusWithPrinter)
|
||||
pp.add('StringData', 'mongo::StringData', False, StringDataPrinter)
|
||||
pp.add('UnorderedFastKeyTable', 'mongo::UnorderedFastKeyTable', True, UnorderedFastKeyTablePrinter)
|
||||
pp.add('UnorderedFastKeyTable', 'mongo::UnorderedFastKeyTable', True,
|
||||
UnorderedFastKeyTablePrinter)
|
||||
return pp
|
||||
|
||||
|
||||
###################################################################################################
|
||||
#
|
||||
# Setup
|
||||
|
|
@ -311,9 +313,6 @@ def build_pretty_printer():
|
|||
###################################################################################################
|
||||
|
||||
# Register pretty-printers, replace existing mongo printers
|
||||
gdb.printing.register_pretty_printer(
|
||||
gdb.current_objfile(),
|
||||
build_pretty_printer(),
|
||||
True)
|
||||
gdb.printing.register_pretty_printer(gdb.current_objfile(), build_pretty_printer(), True)
|
||||
|
||||
print("MongoDB GDB pretty-printers loaded")
|
||||
|
|
|
|||
|
|
@ -96,10 +96,12 @@ def generate_scons_cache_expansions():
|
|||
|
||||
# Patches are read only
|
||||
if os.getenv("IS_PATCH"):
|
||||
expansions["scons_cache_args"] = "--cache={0} --cache-dir='{1}' --cache-readonly".format(
|
||||
scons_cache_mode, default_cache_path)
|
||||
expansions[
|
||||
"scons_cache_args"] = "--cache={0} --cache-dir='{1}' --cache-readonly".format(
|
||||
scons_cache_mode, default_cache_path)
|
||||
else:
|
||||
expansions["scons_cache_args"] = "--cache={0} --cache-dir='{1}'".format(scons_cache_mode, default_cache_path)
|
||||
expansions["scons_cache_args"] = "--cache={0} --cache-dir='{1}'".format(
|
||||
scons_cache_mode, default_cache_path)
|
||||
|
||||
# Local shared cache - host-based
|
||||
elif os.getenv("SCONS_CACHE_SCOPE") == "local":
|
||||
|
|
@ -111,7 +113,8 @@ def generate_scons_cache_expansions():
|
|||
|
||||
default_cache_path = os.path.join(default_cache_path_base, system_uuid)
|
||||
expansions["scons_cache_path"] = default_cache_path
|
||||
expansions["scons_cache_args"] = "--cache={0} --cache-dir='{1}'".format(scons_cache_mode, default_cache_path)
|
||||
expansions["scons_cache_args"] = "--cache={0} --cache-dir='{1}'".format(
|
||||
scons_cache_mode, default_cache_path)
|
||||
# No cache
|
||||
else:
|
||||
# Anything else is 'none'
|
||||
|
|
|
|||
|
|
@ -21,18 +21,17 @@ if os.name == "posix" and sys.version_info[0] == 2:
|
|||
import warnings
|
||||
warnings.warn(("Falling back to using the subprocess module because subprocess32 isn't"
|
||||
" available. When using the subprocess module, a child process may trigger"
|
||||
" an invalid free(). See SERVER-22219 for more details."),
|
||||
RuntimeWarning)
|
||||
" an invalid free(). See SERVER-22219 for more details."), RuntimeWarning)
|
||||
import subprocess
|
||||
else:
|
||||
import subprocess
|
||||
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Repository(object):
|
||||
"""Represent a local git repository."""
|
||||
|
||||
def __init__(self, directory):
|
||||
self.directory = directory
|
||||
|
||||
|
|
@ -94,8 +93,7 @@ class Repository(object):
|
|||
|
||||
def get_origin_url(self):
|
||||
"""Return the URL of the origin repository."""
|
||||
return self._callgito(
|
||||
"config", ["--local", "--get", "remote.origin.url"]).rstrip()
|
||||
return self._callgito("config", ["--local", "--get", "remote.origin.url"]).rstrip()
|
||||
|
||||
def get_branch_name(self):
|
||||
"""
|
||||
|
|
@ -126,8 +124,7 @@ class Repository(object):
|
|||
"""Return True if the specified parent hash an ancestor of child hash."""
|
||||
# If the common point between parent_revision and child_revision is
|
||||
# parent_revision, then parent_revision is an ancestor of child_revision.
|
||||
merge_base = self._callgito("merge-base", [parent_revision,
|
||||
child_revision]).rstrip()
|
||||
merge_base = self._callgito("merge-base", [parent_revision, child_revision]).rstrip()
|
||||
return parent_revision == merge_base
|
||||
|
||||
def is_commit(self, revision):
|
||||
|
|
@ -253,8 +250,9 @@ class GitException(Exception):
|
|||
element) that were run, if any.
|
||||
stderr: the error output of the git command.
|
||||
"""
|
||||
def __init__(self, message, returncode=None, cmd=None, process_args=None,
|
||||
stdout=None, stderr=None):
|
||||
|
||||
def __init__(self, message, returncode=None, cmd=None, process_args=None, stdout=None,
|
||||
stderr=None):
|
||||
Exception.__init__(self, message)
|
||||
self.returncode = returncode
|
||||
self.cmd = cmd
|
||||
|
|
@ -284,7 +282,6 @@ class GitCommandResult(object):
|
|||
def check_returncode(self):
|
||||
"""Raise GitException if the exit code is non-zero."""
|
||||
if self.returncode:
|
||||
raise GitException(
|
||||
"Command '{0}' failed with code '{1}'".format(" ".join(self.process_args),
|
||||
self.returncode),
|
||||
self.returncode, self.cmd, self.process_args, self.stdout, self.stderr)
|
||||
raise GitException("Command '{0}' failed with code '{1}'".format(
|
||||
" ".join(self.process_args), self.returncode), self.returncode, self.cmd,
|
||||
self.process_args, self.stdout, self.stderr)
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
"""Hang Analyzer
|
||||
|
||||
A prototype hang analyzer for Evergreen integration to help investigate test timeouts
|
||||
|
|
@ -34,7 +33,6 @@ if _is_windows:
|
|||
import win32event
|
||||
import win32api
|
||||
|
||||
|
||||
# Get relative imports to work when the package is not installed on the PYTHONPATH.
|
||||
if __name__ == "__main__" and __package__ is None:
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
|
@ -99,7 +97,6 @@ def get_process_logger(debugger_output, pid, process_name):
|
|||
|
||||
|
||||
class WindowsDumper(object):
|
||||
|
||||
def __find_debugger(self, logger, debugger):
|
||||
"""Finds the installed debugger"""
|
||||
# We are looking for c:\Program Files (x86)\Windows Kits\8.1\Debuggers\x64
|
||||
|
|
@ -115,7 +112,7 @@ class WindowsDumper(object):
|
|||
for i in range(0, 2):
|
||||
pathToTest = os.path.join(rootDir, "Windows Kits", "8." + str(i), "Debuggers", "x64")
|
||||
logger.info("Checking for debugger in %s" % pathToTest)
|
||||
if(os.path.exists(pathToTest)):
|
||||
if (os.path.exists(pathToTest)):
|
||||
return os.path.join(pathToTest, debugger)
|
||||
|
||||
return None
|
||||
|
|
@ -129,15 +126,12 @@ class WindowsDumper(object):
|
|||
root_logger.warning("Debugger %s not found, skipping dumping of %d" % (debugger, pid))
|
||||
return
|
||||
|
||||
root_logger.info("Debugger %s, analyzing %s process with PID %d" % (dbg,
|
||||
process_name,
|
||||
pid))
|
||||
root_logger.info("Debugger %s, analyzing %s process with PID %d" % (dbg, process_name, pid))
|
||||
|
||||
dump_command = ""
|
||||
if take_dump:
|
||||
# Dump to file, dump_<process name>.<pid>.mdmp
|
||||
dump_file = "dump_%s.%d.%s" % (os.path.splitext(process_name)[0],
|
||||
pid,
|
||||
dump_file = "dump_%s.%d.%s" % (os.path.splitext(process_name)[0], pid,
|
||||
self.get_dump_ext())
|
||||
dump_command = ".dump /ma %s" % dump_file
|
||||
root_logger.info("Dumping core to %s" % dump_file)
|
||||
|
|
@ -146,14 +140,14 @@ class WindowsDumper(object):
|
|||
".symfix", # Fixup symbol path
|
||||
".symopt +0x10", # Enable line loading (off by default in CDB, on by default in WinDBG)
|
||||
".reload", # Reload symbols
|
||||
"!peb", # Dump current exe, & environment variables
|
||||
"lm", # Dump loaded modules
|
||||
"!peb", # Dump current exe, & environment variables
|
||||
"lm", # Dump loaded modules
|
||||
dump_command,
|
||||
"!uniqstack -pn", # Dump All unique Threads with function arguments
|
||||
"!cs -l", # Dump all locked critical sections
|
||||
"!cs -l", # Dump all locked critical sections
|
||||
".detach", # Detach
|
||||
"q" # Quit
|
||||
]
|
||||
"q" # Quit
|
||||
]
|
||||
|
||||
call([dbg, '-c', ";".join(cmds), '-p', str(pid)], logger)
|
||||
|
||||
|
|
@ -164,7 +158,6 @@ class WindowsDumper(object):
|
|||
|
||||
|
||||
class WindowsProcessList(object):
|
||||
|
||||
def __find_ps(self):
|
||||
"""Finds tasklist """
|
||||
return os.path.join(os.environ["WINDIR"], "system32", "tasklist.exe")
|
||||
|
|
@ -187,7 +180,6 @@ class WindowsProcessList(object):
|
|||
|
||||
# LLDB dumper is for MacOS X
|
||||
class LLDBDumper(object):
|
||||
|
||||
def __find_debugger(self, debugger):
|
||||
"""Finds the installed debugger"""
|
||||
return find_program(debugger, ['/usr/bin'])
|
||||
|
|
@ -200,9 +192,7 @@ class LLDBDumper(object):
|
|||
root_logger.warning("Debugger %s not found, skipping dumping of %d" % (debugger, pid))
|
||||
return
|
||||
|
||||
root_logger.info("Debugger %s, analyzing %s process with PID %d" % (dbg,
|
||||
process_name,
|
||||
pid))
|
||||
root_logger.info("Debugger %s, analyzing %s process with PID %d" % (dbg, process_name, pid))
|
||||
|
||||
lldb_version = callo([dbg, "--version"], logger)
|
||||
|
||||
|
|
@ -236,7 +226,7 @@ class LLDBDumper(object):
|
|||
dump_command,
|
||||
"settings set interpreter.prompt-on-quit false",
|
||||
"quit",
|
||||
]
|
||||
]
|
||||
|
||||
tf = tempfile.NamedTemporaryFile()
|
||||
|
||||
|
|
@ -257,7 +247,6 @@ class LLDBDumper(object):
|
|||
|
||||
|
||||
class DarwinProcessList(object):
|
||||
|
||||
def __find_ps(self):
|
||||
"""Finds ps"""
|
||||
return find_program('ps', ['/bin'])
|
||||
|
|
@ -280,7 +269,6 @@ class DarwinProcessList(object):
|
|||
|
||||
# GDB dumper is for Linux & Solaris
|
||||
class GDBDumper(object):
|
||||
|
||||
def __find_debugger(self, debugger):
|
||||
"""Finds the installed debugger"""
|
||||
return find_program(debugger, ['/opt/mongodbtoolchain/gdb/bin', '/usr/bin'])
|
||||
|
|
@ -293,9 +281,7 @@ class GDBDumper(object):
|
|||
logger.warning("Debugger %s not found, skipping dumping of %d" % (debugger, pid))
|
||||
return
|
||||
|
||||
root_logger.info("Debugger %s, analyzing %s process with PID %d" % (dbg,
|
||||
process_name,
|
||||
pid))
|
||||
root_logger.info("Debugger %s, analyzing %s process with PID %d" % (dbg, process_name, pid))
|
||||
|
||||
dump_command = ""
|
||||
if take_dump:
|
||||
|
|
@ -348,26 +334,23 @@ class GDBDumper(object):
|
|||
'set logging on',
|
||||
'thread apply all bt',
|
||||
'set logging off',
|
||||
]
|
||||
]
|
||||
|
||||
cmds = [
|
||||
"set interactive-mode off",
|
||||
"set print thread-events off", # Python calls to gdb.parse_and_eval may cause threads
|
||||
# to start and finish. This suppresses those messages
|
||||
# from appearing in the return output.
|
||||
"file %s" % process_name, # Solaris must load the process to read the symbols.
|
||||
"set print thread-events off", # Suppress GDB messages of threads starting/finishing.
|
||||
"file %s" % process_name, # Solaris must load the process to read the symbols.
|
||||
"attach %d" % pid,
|
||||
"info sharedlibrary",
|
||||
"info threads", # Dump a simple list of commands to get the thread name
|
||||
"info threads", # Dump a simple list of commands to get the thread name
|
||||
"set python print-stack full",
|
||||
] + raw_stacks_commands + [
|
||||
] + raw_stacks_commands + [
|
||||
source_mongo,
|
||||
source_mongo_printers,
|
||||
source_mongo_lock,
|
||||
mongodb_uniqstack,
|
||||
"set scheduler-locking on", # Lock the scheduler, before running any of the
|
||||
# following commands, which executes code in the
|
||||
# attached process.
|
||||
# Lock the scheduler, before running commands, which execute code in the attached process.
|
||||
"set scheduler-locking on",
|
||||
dump_command,
|
||||
mongodb_dump_locks,
|
||||
mongodb_show_locks,
|
||||
|
|
@ -375,11 +358,10 @@ class GDBDumper(object):
|
|||
mongodb_javascript_stack,
|
||||
"set confirm off",
|
||||
"quit",
|
||||
]
|
||||
]
|
||||
|
||||
call([dbg, "--quiet", "--nx"] +
|
||||
list(itertools.chain.from_iterable([['-ex', b] for b in cmds])),
|
||||
logger)
|
||||
list(itertools.chain.from_iterable([['-ex', b] for b in cmds])), logger)
|
||||
|
||||
root_logger.info("Done analyzing %s process with PID %d" % (process_name, pid))
|
||||
|
||||
|
|
@ -396,7 +378,6 @@ class GDBDumper(object):
|
|||
|
||||
|
||||
class LinuxProcessList(object):
|
||||
|
||||
def __find_ps(self):
|
||||
"""Finds ps"""
|
||||
return find_program('ps', ['/bin', '/usr/bin'])
|
||||
|
|
@ -420,7 +401,6 @@ class LinuxProcessList(object):
|
|||
|
||||
|
||||
class SolarisProcessList(object):
|
||||
|
||||
def __find_ps(self):
|
||||
"""Finds ps"""
|
||||
return find_program('ps', ['/bin', '/usr/bin'])
|
||||
|
|
@ -443,7 +423,6 @@ class SolarisProcessList(object):
|
|||
|
||||
# jstack is a JDK utility
|
||||
class JstackDumper(object):
|
||||
|
||||
def __find_debugger(self, debugger):
|
||||
"""Finds the installed jstack debugger"""
|
||||
return find_program(debugger, ['/usr/bin'])
|
||||
|
|
@ -457,8 +436,7 @@ class JstackDumper(object):
|
|||
logger.warning("Debugger %s not found, skipping dumping of %d" % (debugger, pid))
|
||||
return
|
||||
|
||||
root_logger.info("Debugger %s, analyzing %s process with PID %d" % (jstack,
|
||||
process_name,
|
||||
root_logger.info("Debugger %s, analyzing %s process with PID %d" % (jstack, process_name,
|
||||
pid))
|
||||
|
||||
call([jstack, "-l", str(pid)], logger)
|
||||
|
|
@ -468,7 +446,6 @@ class JstackDumper(object):
|
|||
|
||||
# jstack is a JDK utility
|
||||
class JstackWindowsDumper(object):
|
||||
|
||||
def dump_info(self, root_logger, logger, pid, process_name):
|
||||
"""Dump java thread stack traces to the logger"""
|
||||
|
||||
|
|
@ -520,9 +497,7 @@ def signal_event_object(logger, pid):
|
|||
try:
|
||||
desired_access = win32event.EVENT_MODIFY_STATE
|
||||
inherit_handle = False
|
||||
task_timeout_handle = win32event.OpenEvent(desired_access,
|
||||
inherit_handle,
|
||||
event_name)
|
||||
task_timeout_handle = win32event.OpenEvent(desired_access, inherit_handle, event_name)
|
||||
except win32event.error as err:
|
||||
logger.info("Exception from win32event.OpenEvent with error: %s" % err)
|
||||
return
|
||||
|
|
@ -555,8 +530,7 @@ def signal_process(logger, pid, signalnum):
|
|||
def pname_match(match_type, pname, interesting_processes):
|
||||
pname = os.path.splitext(pname)[0]
|
||||
for ip in interesting_processes:
|
||||
if (match_type == 'exact' and pname == ip or
|
||||
match_type == 'contains' and ip in pname):
|
||||
if (match_type == 'exact' and pname == ip or match_type == 'contains' and ip in pname):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
|
@ -601,46 +575,32 @@ def main():
|
|||
process_ids = []
|
||||
|
||||
parser = OptionParser(description=__doc__)
|
||||
parser.add_option('-m', '--process-match',
|
||||
dest='process_match',
|
||||
choices=['contains', 'exact'],
|
||||
parser.add_option('-m', '--process-match', dest='process_match', choices=['contains', 'exact'],
|
||||
default='contains',
|
||||
help="Type of match for process names (-p & -g), specify 'contains', or"
|
||||
" 'exact'. Note that the process name match performs the following"
|
||||
" conversions: change all process names to lowecase, strip off the file"
|
||||
" extension, like '.exe' on Windows. Default is 'contains'.")
|
||||
parser.add_option('-p', '--process-names',
|
||||
dest='process_names',
|
||||
help=("Type of match for process names (-p & -g), specify 'contains', or"
|
||||
" 'exact'. Note that the process name match performs the following"
|
||||
" conversions: change all process names to lowecase, strip off the file"
|
||||
" extension, like '.exe' on Windows. Default is 'contains'."))
|
||||
parser.add_option('-p', '--process-names', dest='process_names',
|
||||
help='Comma separated list of process names to analyze')
|
||||
parser.add_option('-g', '--go-process-names',
|
||||
dest='go_process_names',
|
||||
parser.add_option('-g', '--go-process-names', dest='go_process_names',
|
||||
help='Comma separated list of go process names to analyze')
|
||||
parser.add_option('-d', '--process-ids',
|
||||
dest='process_ids',
|
||||
default=None,
|
||||
parser.add_option('-d', '--process-ids', dest='process_ids', default=None,
|
||||
help='Comma separated list of process ids (PID) to analyze, overrides -p &'
|
||||
' -g')
|
||||
parser.add_option('-c', '--dump-core',
|
||||
dest='dump_core',
|
||||
action="store_true",
|
||||
default=False,
|
||||
' -g')
|
||||
parser.add_option('-c', '--dump-core', dest='dump_core', action="store_true", default=False,
|
||||
help='Dump core file for each analyzed process')
|
||||
parser.add_option('-s', '--max-core-dumps-size',
|
||||
dest='max_core_dumps_size',
|
||||
default=10000,
|
||||
parser.add_option('-s', '--max-core-dumps-size', dest='max_core_dumps_size', default=10000,
|
||||
help='Maximum total size of core dumps to keep in megabytes')
|
||||
parser.add_option('-o', '--debugger-output',
|
||||
dest='debugger_output',
|
||||
action="append",
|
||||
choices=['file', 'stdout'],
|
||||
default=None,
|
||||
help="If 'stdout', then the debugger's output is written to the Python"
|
||||
" process's stdout. If 'file', then the debugger's output is written"
|
||||
" to a file named debugger_<process>_<pid>.log for each process it"
|
||||
" attaches to. This option can be specified multiple times on the"
|
||||
" command line to have the debugger's output written to multiple"
|
||||
" locations. By default, the debugger's output is written only to the"
|
||||
" Python process's stdout.")
|
||||
parser.add_option('-o', '--debugger-output', dest='debugger_output', action="append",
|
||||
choices=['file', 'stdout'], default=None,
|
||||
help=("If 'stdout', then the debugger's output is written to the Python"
|
||||
" process's stdout. If 'file', then the debugger's output is written"
|
||||
" to a file named debugger_<process>_<pid>.log for each process it"
|
||||
" attaches to. This option can be specified multiple times on the"
|
||||
" command line to have the debugger's output written to multiple"
|
||||
" locations. By default, the debugger's output is written only to the"
|
||||
" Python process's stdout."))
|
||||
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
|
|
@ -680,12 +640,12 @@ def main():
|
|||
running_pids = set([pid for (pid, pname) in all_processes])
|
||||
missing_pids = set(process_ids) - running_pids
|
||||
if missing_pids:
|
||||
root_logger.warning("The following requested process ids are not running %s" %
|
||||
list(missing_pids))
|
||||
root_logger.warning(
|
||||
"The following requested process ids are not running %s" % list(missing_pids))
|
||||
else:
|
||||
processes = [(pid, pname) for (pid, pname) in all_processes
|
||||
if pname_match(options.process_match, pname, interesting_processes) and
|
||||
pid != os.getpid()]
|
||||
if pname_match(options.process_match, pname, interesting_processes)
|
||||
and pid != os.getpid()]
|
||||
|
||||
root_logger.info("Found %d interesting processes %s" % (len(processes), processes))
|
||||
|
||||
|
|
@ -708,16 +668,12 @@ def main():
|
|||
trapped_exceptions = []
|
||||
|
||||
# Dump all processes, except python & java.
|
||||
for (pid, process_name) in [(p, pn) for (p, pn) in processes
|
||||
if not re.match("^(java|python)", pn)]:
|
||||
for (pid,
|
||||
process_name) in [(p, pn) for (p, pn) in processes if not re.match("^(java|python)", pn)]:
|
||||
process_logger = get_process_logger(options.debugger_output, pid, process_name)
|
||||
try:
|
||||
dbg.dump_info(
|
||||
root_logger,
|
||||
process_logger,
|
||||
pid,
|
||||
process_name,
|
||||
options.dump_core and check_dump_quota(max_dump_size_bytes, dbg.get_dump_ext()))
|
||||
dbg.dump_info(root_logger, process_logger, pid, process_name, options.dump_core
|
||||
and check_dump_quota(max_dump_size_bytes, dbg.get_dump_ext()))
|
||||
except Exception as err:
|
||||
root_logger.info("Error encountered when invoking debugger %s" % err)
|
||||
trapped_exceptions.append(traceback.format_exc())
|
||||
|
|
@ -736,8 +692,8 @@ def main():
|
|||
# TerminateProcess.
|
||||
# Note: The stacktrace output may be captured elsewhere (i.e. resmoke).
|
||||
for (pid, process_name) in [(p, pn) for (p, pn) in processes if pn in go_processes]:
|
||||
root_logger.info("Sending signal SIGABRT to go process %s with PID %d" %
|
||||
(process_name, pid))
|
||||
root_logger.info("Sending signal SIGABRT to go process %s with PID %d" % (process_name,
|
||||
pid))
|
||||
signal_process(root_logger, pid, signal.SIGABRT)
|
||||
|
||||
root_logger.info("Done analyzing all processes for hangs")
|
||||
|
|
@ -747,5 +703,6 @@ def main():
|
|||
if trapped_exceptions:
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
|||
|
|
@ -35,8 +35,8 @@ class IDLBoundSpec(object):
|
|||
def __init__(self, spec, error_collection):
|
||||
# type: (IDLAST, errors.ParserErrorCollection) -> None
|
||||
"""Must specify either an IDL document or errors, not both."""
|
||||
assert (spec is None and error_collection is not None) or (spec is not None and
|
||||
error_collection is None)
|
||||
assert (spec is None and error_collection is not None) or (spec is not None
|
||||
and error_collection is None)
|
||||
self.spec = spec
|
||||
self.errors = error_collection
|
||||
|
||||
|
|
|
|||
|
|
@ -26,77 +26,29 @@ from typing import Dict, List
|
|||
# scalar: True if the type is not an array or object
|
||||
# bson_type_enum: The BSONType enum value for the given type
|
||||
_BSON_TYPE_INFORMATION = {
|
||||
"double": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'NumberDouble'
|
||||
},
|
||||
"string": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'String'
|
||||
},
|
||||
"object": {
|
||||
'scalar': False,
|
||||
'bson_type_enum': 'Object'
|
||||
},
|
||||
"double": {'scalar': True, 'bson_type_enum': 'NumberDouble'},
|
||||
"string": {'scalar': True, 'bson_type_enum': 'String'},
|
||||
"object": {'scalar': False, 'bson_type_enum': 'Object'},
|
||||
# TODO: add support: "array" : { 'scalar' : False, 'bson_type_enum' : 'Array'},
|
||||
"bindata": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'BinData'
|
||||
},
|
||||
"undefined": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'Undefined'
|
||||
},
|
||||
"objectid": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'jstOID'
|
||||
},
|
||||
"bool": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'Bool'
|
||||
},
|
||||
"date": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'Date'
|
||||
},
|
||||
"null": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'jstNULL'
|
||||
},
|
||||
"regex": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'RegEx'
|
||||
},
|
||||
"int": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'NumberInt'
|
||||
},
|
||||
"timestamp": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'bsonTimestamp'
|
||||
},
|
||||
"long": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'NumberLong'
|
||||
},
|
||||
"decimal": {
|
||||
'scalar': True,
|
||||
'bson_type_enum': 'NumberDecimal'
|
||||
},
|
||||
"bindata": {'scalar': True, 'bson_type_enum': 'BinData'},
|
||||
"undefined": {'scalar': True, 'bson_type_enum': 'Undefined'},
|
||||
"objectid": {'scalar': True, 'bson_type_enum': 'jstOID'},
|
||||
"bool": {'scalar': True, 'bson_type_enum': 'Bool'},
|
||||
"date": {'scalar': True, 'bson_type_enum': 'Date'},
|
||||
"null": {'scalar': True, 'bson_type_enum': 'jstNULL'},
|
||||
"regex": {'scalar': True, 'bson_type_enum': 'RegEx'},
|
||||
"int": {'scalar': True, 'bson_type_enum': 'NumberInt'},
|
||||
"timestamp": {'scalar': True, 'bson_type_enum': 'bsonTimestamp'},
|
||||
"long": {'scalar': True, 'bson_type_enum': 'NumberLong'},
|
||||
"decimal": {'scalar': True, 'bson_type_enum': 'NumberDecimal'},
|
||||
}
|
||||
|
||||
# Dictionary of BinData subtype type Information
|
||||
# scalar: True if the type is not an array or object
|
||||
# bindata_enum: The BinDataType enum value for the given type
|
||||
_BINDATA_SUBTYPE = {
|
||||
"generic": {
|
||||
'scalar': True,
|
||||
'bindata_enum': 'BinDataGeneral'
|
||||
},
|
||||
"function": {
|
||||
'scalar': True,
|
||||
'bindata_enum': 'Function'
|
||||
},
|
||||
"generic": {'scalar': True, 'bindata_enum': 'BinDataGeneral'},
|
||||
"function": {'scalar': True, 'bindata_enum': 'Function'},
|
||||
# Also simply known as type 2, deprecated, and requires special handling
|
||||
#"binary": {
|
||||
# 'scalar': False,
|
||||
|
|
@ -107,14 +59,8 @@ _BINDATA_SUBTYPE = {
|
|||
# 'scalar': False,
|
||||
# 'bindata_enum': 'bdtUUID'
|
||||
# },
|
||||
"uuid": {
|
||||
'scalar': True,
|
||||
'bindata_enum': 'newUUID'
|
||||
},
|
||||
"md5": {
|
||||
'scalar': True,
|
||||
'bindata_enum': 'MD5Type'
|
||||
},
|
||||
"uuid": {'scalar': True, 'bindata_enum': 'newUUID'},
|
||||
"md5": {'scalar': True, 'bindata_enum': 'MD5Type'},
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -266,10 +266,8 @@ class _CppTypeView(CppTypeBase):
|
|||
|
||||
def get_setter_body(self, member_name):
|
||||
# type: (unicode) -> unicode
|
||||
return common.template_args(
|
||||
'${member_name} = ${value};',
|
||||
member_name=member_name,
|
||||
value=self.get_transform_to_storage_type("value"))
|
||||
return common.template_args('${member_name} = ${value};', member_name=member_name,
|
||||
value=self.get_transform_to_storage_type("value"))
|
||||
|
||||
def get_transform_to_getter_type(self, expression):
|
||||
# type: (unicode) -> Optional[unicode]
|
||||
|
|
@ -279,7 +277,8 @@ class _CppTypeView(CppTypeBase):
|
|||
# type: (unicode) -> Optional[unicode]
|
||||
return common.template_args(
|
||||
'${expression}.toString()',
|
||||
expression=expression, )
|
||||
expression=expression,
|
||||
)
|
||||
|
||||
|
||||
class _CppTypeVector(CppTypeBase):
|
||||
|
|
@ -325,10 +324,8 @@ class _CppTypeVector(CppTypeBase):
|
|||
|
||||
def get_setter_body(self, member_name):
|
||||
# type: (unicode) -> unicode
|
||||
return common.template_args(
|
||||
'${member_name} = ${value};',
|
||||
member_name=member_name,
|
||||
value=self.get_transform_to_storage_type("value"))
|
||||
return common.template_args('${member_name} = ${value};', member_name=member_name,
|
||||
value=self.get_transform_to_storage_type("value"))
|
||||
|
||||
def get_transform_to_getter_type(self, expression):
|
||||
# type: (unicode) -> Optional[unicode]
|
||||
|
|
@ -432,8 +429,8 @@ class _CppTypeArray(_CppTypeDelegating):
|
|||
# type: (unicode) -> unicode
|
||||
convert = self.get_transform_to_storage_type("value")
|
||||
if convert:
|
||||
return common.template_args(
|
||||
'${member_name} = ${convert};', member_name=member_name, convert=convert)
|
||||
return common.template_args('${member_name} = ${convert};', member_name=member_name,
|
||||
convert=convert)
|
||||
else:
|
||||
return self._base.get_setter_body(member_name)
|
||||
|
||||
|
|
@ -442,7 +439,8 @@ class _CppTypeArray(_CppTypeDelegating):
|
|||
if self._base.get_storage_type() != self._base.get_getter_setter_type():
|
||||
return common.template_args(
|
||||
'transformVector(${expression})',
|
||||
expression=expression, )
|
||||
expression=expression,
|
||||
)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
|
@ -451,7 +449,8 @@ class _CppTypeArray(_CppTypeDelegating):
|
|||
if self._base.get_storage_type() != self._base.get_getter_setter_type():
|
||||
return common.template_args(
|
||||
'transformVector(${expression})',
|
||||
expression=expression, )
|
||||
expression=expression,
|
||||
)
|
||||
else:
|
||||
return None
|
||||
|
||||
|
|
@ -497,15 +496,12 @@ class _CppTypeOptional(_CppTypeDelegating):
|
|||
} else {
|
||||
return boost::none;
|
||||
}
|
||||
"""),
|
||||
member_name=member_name,
|
||||
convert=convert)
|
||||
"""), member_name=member_name, convert=convert)
|
||||
elif self.is_view_type():
|
||||
# For optionals around view types, do an explicit construction
|
||||
return common.template_args(
|
||||
'return ${param_type}{${member_name}};',
|
||||
param_type=self.get_getter_setter_type(),
|
||||
member_name=member_name)
|
||||
return common.template_args('return ${param_type}{${member_name}};',
|
||||
param_type=self.get_getter_setter_type(),
|
||||
member_name=member_name)
|
||||
else:
|
||||
return common.template_args('return ${member_name};', member_name=member_name)
|
||||
|
||||
|
|
@ -520,9 +516,7 @@ class _CppTypeOptional(_CppTypeDelegating):
|
|||
} else {
|
||||
${member_name} = boost::none;
|
||||
}
|
||||
"""),
|
||||
member_name=member_name,
|
||||
convert=convert)
|
||||
"""), member_name=member_name, convert=convert)
|
||||
else:
|
||||
return self._base.get_setter_body(member_name)
|
||||
|
||||
|
|
@ -590,11 +584,11 @@ def _call_method_or_global_function(expression, method_name):
|
|||
"""
|
||||
short_method_name = writer.get_method_name(method_name)
|
||||
if writer.is_function(method_name):
|
||||
return common.template_args(
|
||||
'${method_name}(${expression})', expression=expression, method_name=short_method_name)
|
||||
return common.template_args('${method_name}(${expression})', expression=expression,
|
||||
method_name=short_method_name)
|
||||
|
||||
return common.template_args(
|
||||
'${expression}.${method_name}()', expression=expression, method_name=short_method_name)
|
||||
return common.template_args('${expression}.${method_name}()', expression=expression,
|
||||
method_name=short_method_name)
|
||||
|
||||
|
||||
class _CommonBsonCppTypeBase(BsonCppTypeBase):
|
||||
|
|
@ -607,10 +601,9 @@ class _CommonBsonCppTypeBase(BsonCppTypeBase):
|
|||
|
||||
def gen_deserializer_expression(self, indented_writer, object_instance):
|
||||
# type: (writer.IndentedTextWriter, unicode) -> unicode
|
||||
return common.template_args(
|
||||
'${object_instance}.${method_name}()',
|
||||
object_instance=object_instance,
|
||||
method_name=self._deserialize_method_name)
|
||||
return common.template_args('${object_instance}.${method_name}()',
|
||||
object_instance=object_instance,
|
||||
method_name=self._deserialize_method_name)
|
||||
|
||||
def has_serializer(self):
|
||||
# type: () -> bool
|
||||
|
|
@ -633,9 +626,8 @@ class _ObjectBsonCppTypeBase(BsonCppTypeBase):
|
|||
if self._field.deserializer:
|
||||
# Call a method like: Class::method(const BSONObj& value)
|
||||
indented_writer.write_line(
|
||||
common.template_args(
|
||||
'const BSONObj localObject = ${object_instance}.Obj();',
|
||||
object_instance=object_instance))
|
||||
common.template_args('const BSONObj localObject = ${object_instance}.Obj();',
|
||||
object_instance=object_instance))
|
||||
return "localObject"
|
||||
|
||||
else:
|
||||
|
|
@ -650,10 +642,8 @@ class _ObjectBsonCppTypeBase(BsonCppTypeBase):
|
|||
# type: (writer.IndentedTextWriter, unicode) -> unicode
|
||||
method_name = writer.get_method_name(self._field.serializer)
|
||||
indented_writer.write_line(
|
||||
common.template_args(
|
||||
'const BSONObj localObject = ${expression}.${method_name}();',
|
||||
expression=expression,
|
||||
method_name=method_name))
|
||||
common.template_args('const BSONObj localObject = ${expression}.${method_name}();',
|
||||
expression=expression, method_name=method_name))
|
||||
return "localObject"
|
||||
|
||||
|
||||
|
|
@ -667,11 +657,11 @@ class _BinDataBsonCppTypeBase(BsonCppTypeBase):
|
|||
def gen_deserializer_expression(self, indented_writer, object_instance):
|
||||
# type: (writer.IndentedTextWriter, unicode) -> unicode
|
||||
if self._field.bindata_subtype == 'uuid':
|
||||
return common.template_args(
|
||||
'${object_instance}.uuid()', object_instance=object_instance)
|
||||
return common.template_args('${object_instance}.uuid()',
|
||||
object_instance=object_instance)
|
||||
else:
|
||||
return common.template_args(
|
||||
'${object_instance}._binDataVector()', object_instance=object_instance)
|
||||
return common.template_args('${object_instance}._binDataVector()',
|
||||
object_instance=object_instance)
|
||||
|
||||
def has_serializer(self):
|
||||
# type: () -> bool
|
||||
|
|
@ -682,14 +672,12 @@ class _BinDataBsonCppTypeBase(BsonCppTypeBase):
|
|||
if self._field.serializer:
|
||||
method_name = writer.get_method_name(self._field.serializer)
|
||||
indented_writer.write_line(
|
||||
common.template_args(
|
||||
'ConstDataRange tempCDR = ${expression}.${method_name}();',
|
||||
expression=expression,
|
||||
method_name=method_name))
|
||||
common.template_args('ConstDataRange tempCDR = ${expression}.${method_name}();',
|
||||
expression=expression, method_name=method_name))
|
||||
else:
|
||||
indented_writer.write_line(
|
||||
common.template_args(
|
||||
'ConstDataRange tempCDR = makeCDR(${expression});', expression=expression))
|
||||
common.template_args('ConstDataRange tempCDR = makeCDR(${expression});',
|
||||
expression=expression))
|
||||
|
||||
return common.template_args(
|
||||
'BSONBinData(tempCDR.data(), tempCDR.length(), ${bindata_subtype})',
|
||||
|
|
|
|||
|
|
@ -60,8 +60,8 @@ class EnumTypeInfoBase(object):
def _get_enum_deserializer_name(self):
# type: () -> unicode
"""Return the name of deserializer function without prefix."""
return common.template_args(
"${enum_name}_parse", enum_name=common.title_case(self._enum.name))
return common.template_args("${enum_name}_parse", enum_name=common.title_case(
self._enum.name))

def get_enum_deserializer_name(self):
# type: () -> unicode

@ -72,8 +72,8 @@ class EnumTypeInfoBase(object):
def _get_enum_serializer_name(self):
# type: () -> unicode
"""Return the name of serializer function without prefix."""
return common.template_args(
"${enum_name}_serializer", enum_name=common.title_case(self._enum.name))
return common.template_args("${enum_name}_serializer", enum_name=common.title_case(
self._enum.name))

def get_enum_serializer_name(self):
# type: () -> unicode

@ -137,8 +137,7 @@ class _EnumTypeInt(EnumTypeInfoBase):
# type: () -> unicode
return common.template_args(
"${enum_name} ${function_name}(const IDLParserErrorContext& ctxt, std::int32_t value)",
enum_name=self.get_cpp_type_name(),
function_name=self._get_enum_deserializer_name())
enum_name=self.get_cpp_type_name(), function_name=self._get_enum_deserializer_name())

def gen_deserializer_definition(self, indented_writer):
# type: (writer.IndentedTextWriter) -> None

@ -168,10 +167,9 @@ class _EnumTypeInt(EnumTypeInfoBase):
def get_serializer_declaration(self):
# type: () -> unicode
"""Get the serializer function declaration minus trailing semicolon."""
return common.template_args(
"std::int32_t ${function_name}(${enum_name} value)",
enum_name=self.get_cpp_type_name(),
function_name=self._get_enum_serializer_name())
return common.template_args("std::int32_t ${function_name}(${enum_name} value)",
enum_name=self.get_cpp_type_name(),
function_name=self._get_enum_serializer_name())

def gen_serializer_definition(self, indented_writer):
# type: (writer.IndentedTextWriter) -> None

@ -189,8 +187,8 @@ class _EnumTypeInt(EnumTypeInfoBase):
def _get_constant_enum_name(idl_enum, enum_value):
# type: (Union[syntax.Enum,ast.Enum], Union[syntax.EnumValue,ast.EnumValue]) -> unicode
"""Return the C++ name for a string constant of string enum value."""
return common.template_args(
'k${enum_name}_${name}', enum_name=common.title_case(idl_enum.name), name=enum_value.name)
return common.template_args('k${enum_name}_${name}', enum_name=common.title_case(idl_enum.name),
name=enum_value.name)

class _EnumTypeString(EnumTypeInfoBase):

@ -204,8 +202,8 @@ class _EnumTypeString(EnumTypeInfoBase):
def get_cpp_type_name(self):
# type: () -> unicode
return common.template_args(
"${enum_name}Enum", enum_name=common.title_case(self._enum.name))
return common.template_args("${enum_name}Enum", enum_name=common.title_case(
self._enum.name))

def get_bson_types(self):
# type: () -> List[unicode]

@ -219,8 +217,7 @@ class _EnumTypeString(EnumTypeInfoBase):
# type: () -> unicode
return common.template_args(
"${enum_name} ${function_name}(const IDLParserErrorContext& ctxt, StringData value)",
enum_name=self.get_cpp_type_name(),
function_name=self._get_enum_deserializer_name())
enum_name=self.get_cpp_type_name(), function_name=self._get_enum_deserializer_name())

def gen_deserializer_definition(self, indented_writer):
# type: (writer.IndentedTextWriter) -> None

@ -234,17 +231,16 @@ class _EnumTypeString(EnumTypeInfoBase):
with writer.NamespaceScopeBlock(indented_writer, ['']):
for enum_value in self._enum.values:
indented_writer.write_line(
common.template_args(
'constexpr StringData ${constant_name} = "${value}"_sd;',
constant_name=_get_constant_enum_name(self._enum, enum_value),
value=enum_value.value))
common.template_args('constexpr StringData ${constant_name} = "${value}"_sd;',
constant_name=_get_constant_enum_name(
self._enum, enum_value), value=enum_value.value))
indented_writer.write_empty_line()

with writer.TemplateContext(indented_writer, template_params):
with writer.IndentedScopedBlock(indented_writer, "${function_name} {", "}"):
for enum_value in self._enum.values:
predicate = 'if (value == %s) {' % (_get_constant_enum_name(self._enum,
enum_value))
predicate = 'if (value == %s) {' % (
_get_constant_enum_name(self._enum, enum_value))
with writer.IndentedScopedBlock(indented_writer, predicate, "}"):
indented_writer.write_template('return ${enum_name}::%s;' %
(enum_value.name))

@ -254,10 +250,9 @@ class _EnumTypeString(EnumTypeInfoBase):
def get_serializer_declaration(self):
# type: () -> unicode
"""Get the serializer function declaration minus trailing semicolon."""
return common.template_args(
"StringData ${function_name}(${enum_name} value)",
enum_name=self.get_cpp_type_name(),
function_name=self._get_enum_serializer_name())
return common.template_args("StringData ${function_name}(${enum_name} value)",
enum_name=self.get_cpp_type_name(),
function_name=self._get_enum_serializer_name())

def gen_serializer_definition(self, indented_writer):
# type: (writer.IndentedTextWriter) -> None

@ -273,8 +268,8 @@ class _EnumTypeString(EnumTypeInfoBase):
with writer.IndentedScopedBlock(indented_writer,
'if (value == ${enum_name}::%s) {' %
(enum_value.name), "}"):
indented_writer.write_line('return %s;' % (_get_constant_enum_name(
self._enum, enum_value)))
indented_writer.write_line(
'return %s;' % (_get_constant_enum_name(self._enum, enum_value)))

indented_writer.write_line('MONGO_UNREACHABLE;')
indented_writer.write_line('return StringData();')
@ -210,9 +210,10 @@ class ParserContext(object):
def add_unknown_root_node_error(self, node):
# type: (yaml.nodes.Node) -> None
"""Add an error about an unknown YAML root node."""
self._add_node_error(node, ERROR_ID_UNKNOWN_ROOT, (
"Unrecognized IDL specification root level node '%s', only " +
" (global, import, types, commands, and structs) are accepted") % (node.value))
self._add_node_error(node, ERROR_ID_UNKNOWN_ROOT,
("Unrecognized IDL specification root level node '%s', only " +
" (global, import, types, commands, and structs) are accepted") %
(node.value))

def add_unknown_node_error(self, node, name):
# type: (yaml.nodes.Node, unicode) -> None

@ -287,9 +288,9 @@ class ParserContext(object):
return False

if not (node.value == "true" or node.value == "false"):
self._add_node_error(node, ERROR_ID_IS_NODE_VALID_BOOL,
"Illegal bool value for '%s', expected either 'true' or 'false'." %
node_name)
self._add_node_error(
node, ERROR_ID_IS_NODE_VALID_BOOL,
"Illegal bool value for '%s', expected either 'true' or 'false'." % node_name)
return False

return True

@ -331,16 +332,16 @@ class ParserContext(object):
"""Add an error about a YAML node missing a required child."""
# pylint: disable=invalid-name
self._add_node_error(node, ERROR_ID_MISSING_REQUIRED_FIELD,
"IDL node '%s' is missing required scalar '%s'" %
(node_parent, node_name))
"IDL node '%s' is missing required scalar '%s'" % (node_parent,
node_name))

def add_missing_ast_required_field_error(self, location, ast_type, ast_parent, ast_name):
# type: (common.SourceLocation, unicode, unicode, unicode) -> None
"""Add an error about a AST node missing a required child."""
# pylint: disable=invalid-name
self._add_error(location, ERROR_ID_MISSING_AST_REQUIRED_FIELD,
"%s '%s' is missing required scalar '%s'" %
(ast_type, ast_parent, ast_name))
"%s '%s' is missing required scalar '%s'" % (ast_type, ast_parent,
ast_name))

def add_array_not_valid_error(self, location, ast_type, name):
# type: (common.SourceLocation, unicode, unicode) -> None

@ -352,8 +353,8 @@ class ParserContext(object):
# type: (common.SourceLocation, unicode, unicode, unicode) -> None
"""Add an error about a bad bson type."""
self._add_error(location, ERROR_ID_BAD_BSON_TYPE,
"BSON Type '%s' is not recognized for %s '%s'." %
(bson_type_name, ast_type, ast_parent))
"BSON Type '%s' is not recognized for %s '%s'." % (bson_type_name, ast_type,
ast_parent))

def add_bad_bson_scalar_type_error(self, location, ast_type, ast_parent, bson_type_name):
# type: (common.SourceLocation, unicode, unicode, unicode) -> None

@ -390,9 +391,10 @@ class ParserContext(object):
# type: (common.SourceLocation, unicode, unicode) -> None
"""Add an error about field must be empty for ignored fields."""
# pylint: disable=invalid-name
self._add_error(location, ERROR_ID_FIELD_MUST_BE_EMPTY_FOR_IGNORED, (
"Field '%s' cannot contain a value for property '%s' when a field is marked as ignored")
% (name, field_name))
self._add_error(
location, ERROR_ID_FIELD_MUST_BE_EMPTY_FOR_IGNORED,
("Field '%s' cannot contain a value for property '%s' when a field is marked as ignored"
) % (name, field_name))

def add_struct_field_must_be_empty_error(self, location, name, field_name):
# type: (common.SourceLocation, unicode, unicode) -> None

@ -407,27 +409,31 @@ class ParserContext(object):
# type: (common.SourceLocation, unicode, unicode, unicode) -> None
# pylint: disable=invalid-name
"""Add an error about field must be empty for fields of type struct."""
self._add_error(location, ERROR_ID_CUSTOM_SCALAR_SERIALIZATION_NOT_SUPPORTED, (
"Custom serialization for a scalar is only supported for 'string'. The %s '%s' cannot" +
" use bson type '%s', use a bson_serialization_type of 'any' instead.") %
(ast_type, ast_parent, bson_type_name))
self._add_error(
location, ERROR_ID_CUSTOM_SCALAR_SERIALIZATION_NOT_SUPPORTED,
("Custom serialization for a scalar is only supported for 'string'. The %s '%s' cannot"
+ " use bson type '%s', use a bson_serialization_type of 'any' instead.") %
(ast_type, ast_parent, bson_type_name))

def add_bad_any_type_use_error(self, location, bson_type, ast_type, ast_parent):
# type: (common.SourceLocation, unicode, unicode, unicode) -> None
# pylint: disable=invalid-name
"""Add an error about any being used in a list of bson types."""
self._add_error(location, ERROR_ID_BAD_ANY_TYPE_USE, (
"The BSON Type '%s' is not allowed in a list of bson serialization types for" +
"%s '%s'. It must be only a single bson type.") % (bson_type, ast_type, ast_parent))
self._add_error(
location, ERROR_ID_BAD_ANY_TYPE_USE,
("The BSON Type '%s' is not allowed in a list of bson serialization types for" +
"%s '%s'. It must be only a single bson type.") % (bson_type, ast_type, ast_parent))

def add_bad_cpp_numeric_type_use_error(self, location, ast_type, ast_parent, cpp_type):
# type: (common.SourceLocation, unicode, unicode, unicode) -> None
# pylint: disable=invalid-name
"""Add an error about any being used in a list of bson types."""
self._add_error(location, ERROR_ID_BAD_NUMERIC_CPP_TYPE, (
"The C++ numeric type '%s' is not allowed for %s '%s'. Only 'std::int32_t'," +
" 'std::uint32_t', 'std::uint64_t', and 'std::int64_t' are supported.") %
(cpp_type, ast_type, ast_parent))
self._add_error(
location, ERROR_ID_BAD_NUMERIC_CPP_TYPE,
("The C++ numeric type '%s' is not allowed for %s '%s'. Only 'std::int32_t'," +
" 'std::uint32_t', 'std::uint64_t', and 'std::int64_t' are supported.") % (cpp_type,
ast_type,
ast_parent))

def add_bad_array_type_name_error(self, location, field_name, type_name):
# type: (common.SourceLocation, unicode, unicode) -> None

@ -555,9 +561,10 @@ class ParserContext(object):
# type: (common.SourceLocation, unicode, unicode) -> None
"""Add an error about field must be empty for fields of type enum."""
# pylint: disable=invalid-name
self._add_error(location, ERROR_ID_FIELD_MUST_BE_EMPTY_FOR_ENUM, (
"Field '%s' cannot contain a value for property '%s' when a field's type is a enum") %
(name, field_name))
self._add_error(
location, ERROR_ID_FIELD_MUST_BE_EMPTY_FOR_ENUM,
("Field '%s' cannot contain a value for property '%s' when a field's type is a enum") %
(name, field_name))

def add_bad_command_namespace_error(self, location, command_name, command_namespace,
valid_commands):

@ -571,9 +578,10 @@ class ParserContext(object):
def add_bad_command_as_field_error(self, location, command_name):
# type: (common.SourceLocation, unicode) -> None
"""Add an error about using a command for a field."""
self._add_error(location, ERROR_ID_FIELD_NO_COMMAND,
("Command '%s' cannot be used as a field type'. Commands must be top-level"
+ " types due to their serialization rules.") % (command_name))
self._add_error(
location, ERROR_ID_FIELD_NO_COMMAND,
("Command '%s' cannot be used as a field type'. Commands must be top-level" +
" types due to their serialization rules.") % (command_name))

def add_bad_array_of_chain(self, location, field_name):
# type: (common.SourceLocation, unicode) -> None

@ -585,9 +593,10 @@ class ParserContext(object):
# type: (common.SourceLocation, unicode) -> None
"""Add an error about a field being optional and having a default value."""
# pylint: disable=invalid-name
self._add_error(location, ERROR_ID_ILLEGAL_FIELD_DEFAULT_AND_OPTIONAL, (
"Field '%s' can only be marked as optional or have a default value," + " not both.") %
(field_name))
self._add_error(
location, ERROR_ID_ILLEGAL_FIELD_DEFAULT_AND_OPTIONAL,
("Field '%s' can only be marked as optional or have a default value," + " not both.") %
(field_name))

def add_bad_struct_field_as_doc_sequence_error(self, location, struct_name, field_name):
# type: (common.SourceLocation, unicode, unicode) -> None

@ -637,8 +646,8 @@ class ParserContext(object):
except ValueError as value_error:
self._add_node_error(node, ERROR_ID_IS_NODE_VALID_INT,
"Illegal integer value for '%s', message '%s'." %
(node_name, value_error))
"Illegal integer value for '%s', message '%s'." % (node_name,
value_error))
return False

return True
@ -72,8 +72,8 @@ def _is_required_serializer_field(field):
def _get_field_constant_name(field):
# type: (ast.Field) -> unicode
"""Get the C++ string constant name for a field."""
return common.template_args(
'k${constant_name}FieldName', constant_name=common.title_case(field.cpp_name))
return common.template_args('k${constant_name}FieldName', constant_name=common.title_case(
field.cpp_name))

def _access_member(field):

@ -188,8 +188,8 @@ class _SlowFieldUsageChecker(_FieldUsageCheckerBase):
(_get_field_constant_name(field))
with writer.IndentedScopedBlock(self._writer, pred, '}'):
if field.default:
self._writer.write_line('%s = %s;' %
(_get_field_member_name(field), field.default))
self._writer.write_line('%s = %s;' % (_get_field_member_name(field),
field.default))
else:
self._writer.write_line('ctxt.throwMissingField(%s);' %
(_get_field_constant_name(field)))

@ -221,8 +221,8 @@ class _FastFieldUsageChecker(_FieldUsageCheckerBase):
if field.chained:
continue

self._writer.write_line('const size_t %s = %d;' %
(_gen_field_usage_constant(field), bit_id))
self._writer.write_line('const size_t %s = %d;' % (_gen_field_usage_constant(field),
bit_id))
bit_id += 1

def add_store(self, field_name):

@ -255,12 +255,13 @@ class _FastFieldUsageChecker(_FieldUsageCheckerBase):
(_gen_field_usage_constant(field)), '}'):
if field.default:
if field.chained_struct_field:
self._writer.write_line('%s.%s(%s);' % (
_get_field_member_name(field.chained_struct_field),
_get_field_member_setter_name(field), field.default))
else:
self._writer.write_line(
'%s = %s;' % (_get_field_member_name(field), field.default))
'%s.%s(%s);' %
(_get_field_member_name(field.chained_struct_field),
_get_field_member_setter_name(field), field.default))
else:
self._writer.write_line('%s = %s;' % (_get_field_member_name(field),
field.default))
else:
self._writer.write_line('ctxt.throwMissingField(%s);' %
(_get_field_constant_name(field)))

@ -452,9 +453,9 @@ class _CppHeaderFileWriter(_CppFileWriterBase):
if field.chained_struct_field:
self._writer.write_template(
'${const_type} ${param_type} ${method_name}() const { return %s.%s(); }' % (
(_get_field_member_name(field.chained_struct_field),
_get_field_member_getter_name(field))))
'${const_type} ${param_type} ${method_name}() const { return %s.%s(); }' %
((_get_field_member_name(field.chained_struct_field),
_get_field_member_getter_name(field))))

elif cpp_type_info.disable_xvalue():
self._writer.write_template(

@ -492,8 +493,8 @@ class _CppHeaderFileWriter(_CppFileWriterBase):
}

with self._with_template(template_params):
self._writer.write_template('void ${method_name}(${param_type} value) & ' +
'{ ${body} ${post_body} }')
self._writer.write_template(
'void ${method_name}(${param_type} value) & ' + '{ ${body} ${post_body} }')

self._writer.write_empty_line()

@ -524,16 +525,14 @@ class _CppHeaderFileWriter(_CppFileWriterBase):
for field in _get_all_fields(struct):
self._writer.write_line(
common.template_args(
'static constexpr auto ${constant_name} = "${field_name}"_sd;',
constant_name=_get_field_constant_name(field),
field_name=field.name))
common.template_args('static constexpr auto ${constant_name} = "${field_name}"_sd;',
constant_name=_get_field_constant_name(field),
field_name=field.name))

if isinstance(struct, ast.Command):
self._writer.write_line(
common.template_args(
'static constexpr auto kCommandName = "${struct_name}"_sd;',
struct_name=struct.name))
common.template_args('static constexpr auto kCommandName = "${struct_name}"_sd;',
struct_name=struct.name))

def gen_enum_functions(self, idl_enum):
# type: (ast.Enum) -> None

@ -553,10 +552,8 @@ class _CppHeaderFileWriter(_CppFileWriterBase):
'};'):
for enum_value in idl_enum.values:
self._writer.write_line(
common.template_args(
'${name} ${value},',
name=enum_value.name,
value=enum_type_info.get_cpp_value_assignment(enum_value)))
common.template_args('${name} ${value},', name=enum_value.name,
value=enum_type_info.get_cpp_value_assignment(enum_value)))

def gen_op_msg_request_methods(self, command):
# type: (ast.Command) -> None

@ -608,24 +605,21 @@ class _CppHeaderFileWriter(_CppFileWriterBase):
"""Generate comparison operators definitions for the type."""
# pylint: disable=invalid-name

sorted_fields = sorted(
[
field for field in struct.fields
if (not field.ignore) and field.comparison_order != -1
],
key=lambda f: f.comparison_order)
sorted_fields = sorted([
field for field in struct.fields if (not field.ignore) and field.comparison_order != -1
], key=lambda f: f.comparison_order)
fields = [_get_field_member_name(field) for field in sorted_fields]

for rel_op in ['==', '!=', '<']:
decl = common.template_args(
"inline bool operator${rel_op}(const ${class_name}& left, const ${class_name}& right) {",
rel_op=rel_op,
class_name=common.title_case(struct.name))
rel_op=rel_op, class_name=common.title_case(struct.name))

with self._block(decl, "}"):
self._writer.write_line('return std::tie(%s) %s std::tie(%s);' % (','.join(
["left.%s" % (field) for field in fields]), rel_op, ','.join(
["right.%s" % (field) for field in fields])))
self._writer.write_line('return std::tie(%s) %s std::tie(%s);' %
(','.join(["left.%s" % (field) for field in fields]),
rel_op,
','.join(["right.%s" % (field) for field in fields])))

self.write_empty_line()
@ -794,15 +788,11 @@ class _CppSourceFileWriter(_CppFileWriterBase):
if field.enum_type:
self._writer.write_line('IDLParserErrorContext tempContext(%s, &ctxt);' %
(_get_field_constant_name(field)))
return common.template_args(
"${method_name}(tempContext, ${expression})",
method_name=method_name,
expression=expression)
return common.template_args("${method_name}(tempContext, ${expression})",
method_name=method_name, expression=expression)
else:
return common.template_args(
"${method_name}(${expression})",
method_name=method_name,
expression=expression)
return common.template_args("${method_name}(${expression})",
method_name=method_name, expression=expression)
else:
# BSONObjects are allowed to be pass through without deserialization
assert field.bson_serialization_type == ['object']

@ -901,8 +891,8 @@ class _CppSourceFileWriter(_CppFileWriterBase):
(_get_field_member_name(field.chained_struct_field),
_get_field_member_setter_name(field), object_value))
else:
self._writer.write_line('%s = %s;' %
(_get_field_member_name(field), object_value))
self._writer.write_line('%s = %s;' % (_get_field_member_name(field),
object_value))

def gen_doc_sequence_deserializer(self, field):
# type: (ast.Field) -> None

@ -980,8 +970,8 @@ class _CppSourceFileWriter(_CppFileWriterBase):
# Serialize has fields third
# Add _has{FIELD} bool members to ensure fields are set before serialization.
for field in struct.fields:
if _is_required_serializer_field(field) and not (field.name == "$db" and
initializes_db_name):
if _is_required_serializer_field(field) and not (field.name == "$db"
and initializes_db_name):
initializers.append('%s(false)' % _get_has_field_member_name(field))

if initializes_db_name:

@ -1152,8 +1142,7 @@ class _CppSourceFileWriter(_CppFileWriterBase):
struct_type_info = struct_types.get_struct_info(struct)

self.get_bson_deserializer_static_common(
struct,
struct_type_info.get_op_msg_request_deserializer_static_method(),
struct, struct_type_info.get_op_msg_request_deserializer_static_method(),
struct_type_info.get_op_msg_request_deserializer_method())

func_def = struct_type_info.get_op_msg_request_deserializer_method().get_definition()

@ -1227,8 +1216,8 @@ class _CppSourceFileWriter(_CppFileWriterBase):
template_params['expression'] = expression
self._writer.write_template('arrayBuilder.append(${expression});')
else:
expression = bson_cpp_type.gen_serializer_expression(self._writer,
_access_member(field))
expression = bson_cpp_type.gen_serializer_expression(
self._writer, _access_member(field))
template_params['expression'] = expression
self._writer.write_template('builder->append(${field_name}, ${expression});')

@ -1304,8 +1293,8 @@ class _CppSourceFileWriter(_CppFileWriterBase):
# Is this a scalar bson C++ type?
bson_cpp_type = cpp_types.get_bson_cpp_type(field)

needs_custom_serializer = field.serializer or (bson_cpp_type and
bson_cpp_type.has_serializer())
needs_custom_serializer = field.serializer or (bson_cpp_type
and bson_cpp_type.has_serializer())

optional_block_start = None
if field.optional:

@ -1323,8 +1312,8 @@ class _CppSourceFileWriter(_CppFileWriterBase):
# Generate default serialization using BSONObjBuilder::append
# Note: BSONObjBuilder::append has overrides for std::vector also
self._writer.write_line(
'builder->append(%s, %s);' %
(_get_field_constant_name(field), _access_member(field)))
'builder->append(%s, %s);' % (_get_field_constant_name(field),
_access_member(field)))
else:
self._gen_serializer_method_struct(field)

@ -1474,16 +1463,14 @@ class _CppSourceFileWriter(_CppFileWriterBase):
for field in _get_all_fields(struct):
self._writer.write_line(
common.template_args(
'constexpr StringData ${class_name}::${constant_name};',
class_name=common.title_case(struct.cpp_name),
constant_name=_get_field_constant_name(field)))
common.template_args('constexpr StringData ${class_name}::${constant_name};',
class_name=common.title_case(struct.cpp_name),
constant_name=_get_field_constant_name(field)))

if isinstance(struct, ast.Command):
self._writer.write_line(
common.template_args(
'constexpr StringData ${class_name}::kCommandName;',
class_name=common.title_case(struct.cpp_name)))
common.template_args('constexpr StringData ${class_name}::kCommandName;',
class_name=common.title_case(struct.cpp_name)))

def gen_enum_definition(self, idl_enum):
# type: (ast.Enum) -> None

@ -1511,13 +1498,12 @@ class _CppSourceFileWriter(_CppFileWriterBase):
for field in sorted_fields:
self._writer.write_line(
common.template_args(
'${class_name}::${constant_name},',
class_name=common.title_case(struct.cpp_name),
constant_name=_get_field_constant_name(field)))
'${class_name}::${constant_name},', class_name=common.title_case(
struct.cpp_name), constant_name=_get_field_constant_name(field)))

self._writer.write_line(
common.template_args(
'${class_name}::kCommandName,', class_name=common.title_case(struct.cpp_name)))
common.template_args('${class_name}::kCommandName,', class_name=common.title_case(
struct.cpp_name)))

def generate(self, spec, header_file_name):
# type: (ast.IDLAST, unicode) -> None
@ -97,8 +97,8 @@ def _generic_parser(
syntax_node.__dict__[first_name] = ctxt.get_list(second_node)
elif rule_desc.node_type == "mapping":
if ctxt.is_mapping_node(second_node, first_name):
syntax_node.__dict__[first_name] = rule_desc.mapping_parser_func(ctxt,
second_node)
syntax_node.__dict__[first_name] = rule_desc.mapping_parser_func(
ctxt, second_node)
else:
raise errors.IDLError("Unknown node_type '%s' for parser rule" %
(rule_desc.node_type))

@ -177,15 +177,16 @@ def _parse_type(ctxt, spec, name, node):
idltype = syntax.Type(ctxt.file_name, node.start_mark.line, node.start_mark.column)
idltype.name = name

_generic_parser(ctxt, node, "type", idltype, {
"description": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"cpp_type": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"bson_serialization_type": _RuleDesc('scalar_or_sequence', _RuleDesc.REQUIRED),
"bindata_subtype": _RuleDesc('scalar'),
"serializer": _RuleDesc('scalar'),
"deserializer": _RuleDesc('scalar'),
"default": _RuleDesc('scalar'),
})
_generic_parser(
ctxt, node, "type", idltype, {
"description": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"cpp_type": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"bson_serialization_type": _RuleDesc('scalar_or_sequence', _RuleDesc.REQUIRED),
"bindata_subtype": _RuleDesc('scalar'),
"serializer": _RuleDesc('scalar'),
"deserializer": _RuleDesc('scalar'),
"default": _RuleDesc('scalar'),
})

spec.symbols.add_type(ctxt, idltype)

@ -196,16 +197,17 @@ def _parse_field(ctxt, name, node):
field = syntax.Field(ctxt.file_name, node.start_mark.line, node.start_mark.column)
field.name = name

_generic_parser(ctxt, node, "field", field, {
"description": _RuleDesc('scalar'),
"cpp_name": _RuleDesc('scalar'),
"type": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"ignore": _RuleDesc("bool_scalar"),
"optional": _RuleDesc("bool_scalar"),
"default": _RuleDesc('scalar'),
"supports_doc_sequence": _RuleDesc("bool_scalar"),
"comparison_order": _RuleDesc("int_scalar"),
})
_generic_parser(
ctxt, node, "field", field, {
"description": _RuleDesc('scalar'),
"cpp_name": _RuleDesc('scalar'),
"type": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"ignore": _RuleDesc("bool_scalar"),
"optional": _RuleDesc("bool_scalar"),
"default": _RuleDesc('scalar'),
"supports_doc_sequence": _RuleDesc("bool_scalar"),
"comparison_order": _RuleDesc("int_scalar"),
})

return field

@ -336,16 +338,17 @@ def _parse_struct(ctxt, spec, name, node):
struct = syntax.Struct(ctxt.file_name, node.start_mark.line, node.start_mark.column)
struct.name = name

_generic_parser(ctxt, node, "struct", struct, {
"description": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"fields": _RuleDesc('mapping', mapping_parser_func=_parse_fields),
"chained_types": _RuleDesc('mapping', mapping_parser_func=_parse_chained_types),
"chained_structs": _RuleDesc('mapping', mapping_parser_func=_parse_chained_structs),
"strict": _RuleDesc("bool_scalar"),
"inline_chained_structs": _RuleDesc("bool_scalar"),
"immutable": _RuleDesc('bool_scalar'),
"generate_comparison_operators": _RuleDesc("bool_scalar"),
})
_generic_parser(
ctxt, node, "struct", struct, {
"description": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"fields": _RuleDesc('mapping', mapping_parser_func=_parse_fields),
"chained_types": _RuleDesc('mapping', mapping_parser_func=_parse_chained_types),
"chained_structs": _RuleDesc('mapping', mapping_parser_func=_parse_chained_structs),
"strict": _RuleDesc("bool_scalar"),
"inline_chained_structs": _RuleDesc("bool_scalar"),
"immutable": _RuleDesc('bool_scalar'),
"generate_comparison_operators": _RuleDesc("bool_scalar"),
})

# TODO: SHOULD WE ALLOW STRUCTS ONLY WITH CHAINED STUFF and no fields???
if struct.fields is None and struct.chained_types is None and struct.chained_structs is None:

@ -392,11 +395,12 @@ def _parse_enum(ctxt, spec, name, node):
idl_enum = syntax.Enum(ctxt.file_name, node.start_mark.line, node.start_mark.column)
idl_enum.name = name

_generic_parser(ctxt, node, "enum", idl_enum, {
"description": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"type": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"values": _RuleDesc('mapping', mapping_parser_func=_parse_enum_values),
})
_generic_parser(
ctxt, node, "enum", idl_enum, {
"description": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"type": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"values": _RuleDesc('mapping', mapping_parser_func=_parse_enum_values),
})

if idl_enum.values is None:
ctxt.add_empty_enum_error(node, idl_enum.name)

@ -413,19 +417,20 @@ def _parse_command(ctxt, spec, name, node):
command = syntax.Command(ctxt.file_name, node.start_mark.line, node.start_mark.column)
command.name = name

_generic_parser(ctxt, node, "command", command, {
"description": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"chained_types": _RuleDesc('mapping', mapping_parser_func=_parse_chained_types),
"chained_structs": _RuleDesc('mapping', mapping_parser_func=_parse_chained_structs),
"fields": _RuleDesc('mapping', mapping_parser_func=_parse_fields),
"namespace": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"cpp_name": _RuleDesc('scalar'),
"type": _RuleDesc('scalar'),
"strict": _RuleDesc("bool_scalar"),
"inline_chained_structs": _RuleDesc("bool_scalar"),
"immutable": _RuleDesc('bool_scalar'),
"generate_comparison_operators": _RuleDesc("bool_scalar"),
})
_generic_parser(
ctxt, node, "command", command, {
"description": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"chained_types": _RuleDesc('mapping', mapping_parser_func=_parse_chained_types),
"chained_structs": _RuleDesc('mapping', mapping_parser_func=_parse_chained_structs),
"fields": _RuleDesc('mapping', mapping_parser_func=_parse_fields),
"namespace": _RuleDesc('scalar', _RuleDesc.REQUIRED),
"cpp_name": _RuleDesc('scalar'),
"type": _RuleDesc('scalar'),
"strict": _RuleDesc("bool_scalar"),
"inline_chained_structs": _RuleDesc("bool_scalar"),
"immutable": _RuleDesc('bool_scalar'),
"generate_comparison_operators": _RuleDesc("bool_scalar"),
})

# TODO: support the first argument as UUID depending on outcome of Catalog Versioning changes.
valid_commands = [
@ -44,13 +44,7 @@ class ArgumentInfo(object):
class MethodInfo(object):
"""Class that encapslates information about a method and how to declare, define, and call it."""

def __init__(self,
class_name,
method_name,
args,
return_type=None,
static=False,
const=False,
def __init__(self, class_name, method_name, args, return_type=None, static=False, const=False,
explicit=False):
# type: (unicode, unicode, List[unicode], unicode, bool, bool, bool) -> None
# pylint: disable=too-many-arguments

@ -84,11 +78,8 @@ class MethodInfo(object):
return common.template_args(
"${pre_modifiers}${return_type}${method_name}(${args})${post_modifiers};",
pre_modifiers=pre_modifiers,
return_type=return_type_str,
method_name=self.method_name,
args=', '.join([str(arg) for arg in self.args]),
post_modifiers=post_modifiers)
pre_modifiers=pre_modifiers, return_type=return_type_str, method_name=self.method_name,
args=', '.join([str(arg) for arg in self.args]), post_modifiers=post_modifiers)

def get_definition(self):
# type: () -> unicode

@ -105,12 +96,9 @@ class MethodInfo(object):
return common.template_args(
"${pre_modifiers}${return_type}${class_name}::${method_name}(${args})${post_modifiers}",
pre_modifiers=pre_modifiers,
return_type=return_type_str,
class_name=self.class_name,
method_name=self.method_name,
args=', '.join([str(arg) for arg in self.args]),
post_modifiers=post_modifiers)
pre_modifiers=pre_modifiers, return_type=return_type_str, class_name=self.class_name,
method_name=self.method_name, args=', '.join(
[str(arg) for arg in self.args]), post_modifiers=post_modifiers)

def get_call(self, obj):
# type: (Optional[unicode]) -> unicode

@ -119,11 +107,11 @@ class MethodInfo(object):
args = ', '.join([arg.name for arg in self.args])

if obj:
return common.template_args(
"${obj}.${method_name}(${args});", obj=obj, method_name=self.method_name, args=args)
return common.template_args("${obj}.${method_name}(${args});", obj=obj,
method_name=self.method_name, args=args)

return common.template_args(
"${method_name}(${args});", method_name=self.method_name, args=args)
return common.template_args("${method_name}(${args});", method_name=self.method_name,
args=args)

class StructTypeInfoBase(object):

@ -223,11 +211,9 @@ class _StructTypeInfo(StructTypeInfoBase):
def get_deserializer_static_method(self):
# type: () -> MethodInfo
class_name = common.title_case(self._struct.cpp_name)
return MethodInfo(
class_name,
'parse', ['const IDLParserErrorContext& ctxt', 'const BSONObj& bsonObject'],
class_name,
static=True)
return MethodInfo(class_name, 'parse',
['const IDLParserErrorContext& ctxt', 'const BSONObj& bsonObject'],
class_name, static=True)

def get_deserializer_method(self):
# type: () -> MethodInfo

@ -238,10 +224,8 @@ class _StructTypeInfo(StructTypeInfoBase):
def get_serializer_method(self):
# type: () -> MethodInfo
return MethodInfo(
common.title_case(self._struct.cpp_name),
'serialize', ['BSONObjBuilder* builder'],
'void',
const=True)
common.title_case(self._struct.cpp_name), 'serialize', ['BSONObjBuilder* builder'],
'void', const=True)

def get_to_bson_method(self):
# type: () -> MethodInfo

@ -290,19 +274,15 @@ class _CommandBaseTypeInfo(_StructTypeInfo):
def get_op_msg_request_serializer_method(self):
# type: () -> Optional[MethodInfo]
return MethodInfo(
common.title_case(self._struct.cpp_name),
'serialize', ['const BSONObj& commandPassthroughFields'],
'OpMsgRequest',
const=True)
common.title_case(self._struct.cpp_name), 'serialize',
['const BSONObj& commandPassthroughFields'], 'OpMsgRequest', const=True)

def get_op_msg_request_deserializer_static_method(self):
# type: () -> Optional[MethodInfo]
class_name = common.title_case(self._struct.cpp_name)
return MethodInfo(
class_name,
'parse', ['const IDLParserErrorContext& ctxt', 'const OpMsgRequest& request'],
class_name,
static=True)
return MethodInfo(class_name, 'parse',
['const IDLParserErrorContext& ctxt', 'const OpMsgRequest& request'],
class_name, static=True)

def get_op_msg_request_deserializer_method(self):
# type: () -> Optional[MethodInfo]

@ -324,19 +304,16 @@ class _IgnoredCommandTypeInfo(_CommandBaseTypeInfo):
def get_serializer_method(self):
# type: () -> MethodInfo
return MethodInfo(
common.title_case(self._struct.cpp_name),
'serialize', ['const BSONObj& commandPassthroughFields', 'BSONObjBuilder* builder'],
'void',
common.title_case(self._struct.cpp_name), 'serialize',
['const BSONObj& commandPassthroughFields', 'BSONObjBuilder* builder'], 'void',
const=True)

def get_to_bson_method(self):
# type: () -> MethodInfo
# Commands that require namespaces require it as a parameter to serialize()
return MethodInfo(
common.title_case(self._struct.cpp_name),
'toBSON', ['const BSONObj& commandPassthroughFields'],
'BSONObj',
const=True)
common.title_case(self._struct.cpp_name), 'toBSON',
['const BSONObj& commandPassthroughFields'], 'BSONObj', const=True)

def get_deserializer_static_method(self):
# type: () -> MethodInfo

@ -388,18 +365,15 @@ class _CommandFromType(_CommandBaseTypeInfo):
def get_serializer_method(self):
# type: () -> MethodInfo
return MethodInfo(
common.title_case(self._struct.cpp_name),
'serialize', ['const BSONObj& commandPassthroughFields', 'BSONObjBuilder* builder'],
'void',
common.title_case(self._struct.cpp_name), 'serialize',
['const BSONObj& commandPassthroughFields', 'BSONObjBuilder* builder'], 'void',
const=True)

def get_to_bson_method(self):
# type: () -> MethodInfo
return MethodInfo(
common.title_case(self._struct.cpp_name),
'toBSON', ['const BSONObj& commandPassthroughFields'],
'BSONObj',
const=True)
common.title_case(self._struct.cpp_name), 'toBSON',
['const BSONObj& commandPassthroughFields'], 'BSONObj', const=True)

def get_deserializer_method(self):
# type: () -> MethodInfo

@ -443,18 +417,15 @@ class _CommandWithNamespaceTypeInfo(_CommandBaseTypeInfo):
def get_serializer_method(self):
# type: () -> MethodInfo
return MethodInfo(
common.title_case(self._struct.cpp_name),
'serialize', ['const BSONObj& commandPassthroughFields', 'BSONObjBuilder* builder'],
'void',
common.title_case(self._struct.cpp_name), 'serialize',
['const BSONObj& commandPassthroughFields', 'BSONObjBuilder* builder'], 'void',
const=True)

def get_to_bson_method(self):
# type: () -> MethodInfo
return MethodInfo(
common.title_case(self._struct.cpp_name),
'toBSON', ['const BSONObj& commandPassthroughFields'],
'BSONObj',
const=True)
common.title_case(self._struct.cpp_name), 'toBSON',
['const BSONObj& commandPassthroughFields'], 'BSONObj', const=True)

def get_deserializer_method(self):
# type: () -> MethodInfo

@ -480,8 +451,8 @@ class _CommandWithNamespaceTypeInfo(_CommandBaseTypeInfo):
# type: (writer.IndentedTextWriter, unicode, unicode) -> None
# TODO: should the name of the first element be validated??
indented_writer.write_line('invariant(_nss.isEmpty());')
indented_writer.write_line('_nss = ctxt.parseNSCollectionRequired(%s, %s);' %
(db_name, element))
indented_writer.write_line('_nss = ctxt.parseNSCollectionRequired(%s, %s);' % (db_name,
element))

def get_struct_info(struct):
@ -35,8 +35,8 @@ class IDLParsedSpec(object):
def __init__(self, spec, error_collection):
# type: (IDLSpec, errors.ParserErrorCollection) -> None
"""Must specify either an IDL document or errors, not both."""
assert (spec is None and error_collection is not None) or (spec is not None and
error_collection is None)
assert (spec is None and error_collection is not None) or (spec is not None
and error_collection is None)
self.spec = spec
self.errors = error_collection

@ -81,8 +81,8 @@ def _zip_scalar(items, obj):
def _item_and_type(dic):
# type: (Dict[Any, List[Any]]) -> Iterator[Tuple[Any, Any]]
"""Return an Iterator of (key, value) pairs from a dictionary."""
return itertools.chain.from_iterable((_zip_scalar(value, key)
for (key, value) in dic.viewitems()))
return itertools.chain.from_iterable(
(_zip_scalar(value, key) for (key, value) in dic.viewitems()))

class SymbolTable(object):

@ -35,26 +35,18 @@ def main():
parser.add_argument('--header', type=str, help="IDL output header file")

parser.add_argument(
'-i',
'--include',
type=str,
action="append",
help="Directory to search for IDL import files")
parser.add_argument('-i', '--include', type=str, action="append",
help="Directory to search for IDL import files")

parser.add_argument('-v', '--verbose', action='count', help="Enable verbose tracing")

parser.add_argument('--base_dir', type=str, help="IDL output relative base directory")

parser.add_argument(
'--write-dependencies',
action='store_true',
help='only print out a list of dependent imports')
parser.add_argument('--write-dependencies', action='store_true',
help='only print out a list of dependent imports')

parser.add_argument(
'--target_arch',
type=str,
help="IDL target archiecture (amd64, s390x). defaults to current machine")
parser.add_argument('--target_arch', type=str,
help="IDL target archiecture (amd64, s390x). defaults to current machine")

args = parser.parse_args()
@ -48,7 +48,8 @@ class TestGenerator(testcase.IDLTestcase):
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
src_dir = os.path.join(
base_dir,
'src', )
'src',
)
idl_dir = os.path.join(src_dir, 'mongo', 'idl')

args = idl.compiler.CompilerArgs()
@ -236,8 +236,7 @@ class TestImport(testcase.IDLTestcase):
strict: false
fields:
foo: string
"""),
resolver=resolver)
"""), resolver=resolver)

# Test nested import
self.assert_bind(

@ -256,8 +255,7 @@ class TestImport(testcase.IDLTestcase):
foo: string
foo1: int
foo2: double
"""),
resolver=resolver)
"""), resolver=resolver)

# Test diamond import
self.assert_bind(

@ -278,8 +276,7 @@ class TestImport(testcase.IDLTestcase):
foo1: int
foo2: double
foo3: bool
"""),
resolver=resolver)
"""), resolver=resolver)

# Test cycle import
self.assert_bind(

@ -297,8 +294,7 @@ class TestImport(testcase.IDLTestcase):
fields:
foo: string
foo1: bool
"""),
resolver=resolver)
"""), resolver=resolver)

# Test self cycle import
self.assert_bind(

@ -315,8 +311,7 @@ class TestImport(testcase.IDLTestcase):
strict: false
fields:
foo: string
"""),
resolver=resolver)
"""), resolver=resolver)

def test_import_negative(self):
# type: () -> None

@ -373,9 +368,7 @@ class TestImport(testcase.IDLTestcase):
textwrap.dedent("""
imports:
- "notfound.idl"
"""),
idl.errors.ERROR_ID_BAD_IMPORT,
resolver=resolver)
"""), idl.errors.ERROR_ID_BAD_IMPORT, resolver=resolver)

# Duplicate types
self.assert_parse_fail(

@ -388,9 +381,7 @@ class TestImport(testcase.IDLTestcase):
description: foo
cpp_type: foo
bson_serialization_type: string
"""),
idl.errors.ERROR_ID_DUPLICATE_SYMBOL,
resolver=resolver)
"""), idl.errors.ERROR_ID_DUPLICATE_SYMBOL, resolver=resolver)

# Duplicate structs
self.assert_parse_fail(

@ -403,9 +394,7 @@ class TestImport(testcase.IDLTestcase):
description: foo
fields:
foo1: string
"""),
idl.errors.ERROR_ID_DUPLICATE_SYMBOL,
resolver=resolver)
"""), idl.errors.ERROR_ID_DUPLICATE_SYMBOL, resolver=resolver)

# Duplicate struct and type
self.assert_parse_fail(

@ -418,9 +407,7 @@ class TestImport(testcase.IDLTestcase):
description: foo
fields:
foo1: string
"""),
idl.errors.ERROR_ID_DUPLICATE_SYMBOL,
resolver=resolver)
"""), idl.errors.ERROR_ID_DUPLICATE_SYMBOL, resolver=resolver)

# Duplicate type and struct
self.assert_parse_fail(

@ -433,9 +420,7 @@ class TestImport(testcase.IDLTestcase):
description: foo
cpp_type: foo
bson_serialization_type: string
"""),
idl.errors.ERROR_ID_DUPLICATE_SYMBOL,
resolver=resolver)
"""), idl.errors.ERROR_ID_DUPLICATE_SYMBOL, resolver=resolver)

# Duplicate enums
self.assert_parse_fail(

@ -450,9 +435,7 @@ class TestImport(testcase.IDLTestcase):
values:
a0: 0
b1: 1
"""),
idl.errors.ERROR_ID_DUPLICATE_SYMBOL,
resolver=resolver)
"""), idl.errors.ERROR_ID_DUPLICATE_SYMBOL, resolver=resolver)

# Import a file with errors
self.assert_parse_fail(

@ -466,9 +449,7 @@ class TestImport(testcase.IDLTestcase):
description: foo
cpp_type: foo
bson_serialization_type: string
"""),
idl.errors.ERROR_ID_MISSING_REQUIRED_FIELD,
resolver=resolver)
"""), idl.errors.ERROR_ID_MISSING_REQUIRED_FIELD, resolver=resolver)

if __name__ == '__main__':
@ -223,9 +223,7 @@ class TestParser(testcase.IDLTestcase):
textwrap.dedent("""
types:
- foo:
"""),
idl.errors.ERROR_ID_IS_NODE_TYPE,
multiple=True)
"""), idl.errors.ERROR_ID_IS_NODE_TYPE, multiple=True)

# test list instead of scalar
self.assert_parse_fail(

@ -233,9 +231,7 @@ class TestParser(testcase.IDLTestcase):
types:
foo:
- bar
"""),
idl.errors.ERROR_ID_IS_NODE_TYPE,
multiple=True)
"""), idl.errors.ERROR_ID_IS_NODE_TYPE, multiple=True)

# test map instead of scalar
self.assert_parse_fail(

@ -244,9 +240,7 @@ class TestParser(testcase.IDLTestcase):
foo:
description:
foo: bar
"""),
idl.errors.ERROR_ID_IS_NODE_TYPE,
multiple=True)
"""), idl.errors.ERROR_ID_IS_NODE_TYPE, multiple=True)

# test missing bson_serialization_type field
self.assert_parse_fail(

@ -723,9 +717,7 @@ class TestParser(testcase.IDLTestcase):
textwrap.dedent("""
enums:
- foo:
"""),
idl.errors.ERROR_ID_IS_NODE_TYPE,
multiple=True)
"""), idl.errors.ERROR_ID_IS_NODE_TYPE, multiple=True)

# test list instead of scalar
self.assert_parse_fail(

@ -733,9 +725,7 @@ class TestParser(testcase.IDLTestcase):
enums:
foo:
- bar
"""),
idl.errors.ERROR_ID_IS_NODE_TYPE,
multiple=True)
"""), idl.errors.ERROR_ID_IS_NODE_TYPE, multiple=True)

# test missing type field
self.assert_parse_fail(
@ -12,24 +12,16 @@ class JiraClient(object):
FIXED_RESOLUTION_NAME = "Fixed"
WONT_FIX_RESOLUTION_NAME = "Won't Fix"

def __init__(self,
server,
username=None,
password=None,
access_token=None,
access_token_secret=None,
consumer_key=None,
key_cert=None):
def __init__(self, server, username=None, password=None, access_token=None,
access_token_secret=None, consumer_key=None, key_cert=None):
"""Initialize the JiraClient with the server URL and user credentials."""
opts = {"server": server, "verify": True}
basic_auth = None
oauth_dict = None
if access_token and access_token_secret and consumer_key and key_cert:
oauth_dict = {
"access_token": access_token,
"access_token_secret": access_token_secret,
"consumer_key": consumer_key,
"key_cert": key_cert
"access_token": access_token, "access_token_secret": access_token_secret,
"consumer_key": consumer_key, "key_cert": key_cert
}
elif username and password:
basic_auth = (username, password)

@ -37,18 +29,17 @@ class JiraClient(object):
raise TypeError("Must specify Basic Auth (using arguments username & password)"
" or OAuth (using arguments access_token, access_token_secret,"
" consumer_key & key_cert_file) credentials")
self._jira = jira.JIRA(
options=opts, basic_auth=basic_auth, oauth=oauth_dict, validate=True)
self._jira = jira.JIRA(options=opts, basic_auth=basic_auth, oauth=oauth_dict, validate=True)

self._transitions = {}
self._resolutions = {}

def create_issue(self, project, summary, description, labels=None):
"""Create an issue."""
fields = {"project": project,
"issuetype": {"name": "Task"},
"summary": summary,
"description": description}
fields = {
"project": project, "issuetype": {"name": "Task"}, "summary": summary,
"description": description
}
new_issue = self._jira.create_issue(fields=fields)
if labels:
new_issue.update(fields={"labels": labels})
@ -1,10 +1,10 @@
|
|||
|
||||
import sys
|
||||
import codecs
|
||||
|
||||
import cpplint
|
||||
import utils
|
||||
|
||||
|
||||
class CheckForConfigH:
|
||||
def __init__(self):
|
||||
self.found_configh = False
|
||||
|
|
@ -20,112 +20,107 @@ class CheckForConfigH:
|
|||
error(filename, line_num, 'build/config_h_include', 5,
|
||||
'MONGO_CONFIG define used without prior inclusion of config.h.')
|
||||
|
||||
def run_lint( paths, nudgeOn=False ):
|
||||
|
||||
def run_lint(paths, nudgeOn=False):
|
||||
# errors are as of 10/14
|
||||
# idea is not to let it any new type of error
|
||||
# as we knock one out, we should remove line
|
||||
# note: not all of these are things we want, so please check first
|
||||
|
||||
nudge = [] # things we'd like to turn on sson, so don't make worse
|
||||
later = [] # things that are unlikely anytime soon, so meh
|
||||
never = [] # things we totally disagree with
|
||||
nudge = [] # things we'd like to turn on sson, so don't make worse
|
||||
later = [] # things that are unlikely anytime soon, so meh
|
||||
never = [] # things we totally disagree with
|
||||
|
||||
nudge.append( '-build/c++11' ) # errors found: 6
|
||||
never.append( '-build/header_guard' ) # errors found: 345
|
||||
nudge.append( '-build/include' ) # errors found: 924
|
||||
nudge.append( '-build/include_order' ) # errors found: 511
|
||||
nudge.append( '-build/include_what_you_use' ) # errors found: 986
|
||||
nudge.append( '-build/namespaces' ) # errors found: 131
|
||||
never.append( '-readability/braces' ) # errors found: 880
|
||||
later.append( '-readability/casting' ) # errors found: 748
|
||||
nudge.append( '-readability/check' ) # errors found: 7
|
||||
nudge.append( '-readability/fn_size' ) # errors found: 1
|
||||
nudge.append( '-readability/function' ) # errors found: 49
|
||||
nudge.append( '-readability/inheritance' ) # errors found: 7
|
||||
nudge.append( '-readability/multiline_comment' ) # errors found: 1
|
||||
later.append( '-readability/namespace' ) # errors found: 876
|
||||
later.append( '-readability/streams' ) # errors found: 72
|
||||
later.append( '-readability/todo' ) # errors found: 309
|
||||
nudge.append( '-runtime/arrays' ) # errors found: 5
|
||||
later.append( '-runtime/explicit' ) # errors found: 322
|
||||
never.append( '-runtime/indentation_namespace') # errors found: 4601
|
||||
later.append( '-runtime/int' ) # errors found: 1420
|
||||
later.append( '-runtime/printf' ) # errors found: 29
|
||||
nudge.append( '-runtime/references' ) # errors found: 1338
|
||||
nudge.append( '-runtime/string' ) # errors found: 6
|
||||
nudge.append( '-runtime/threadsafe_fn' ) # errors found: 46
|
||||
never.append( '-whitespace/blank_line' ) # errors found: 2080
|
||||
never.append( '-whitespace/braces' ) # errors found: 962
|
||||
later.append( '-whitespace/comma' ) # errors found: 621
|
||||
later.append( '-whitespace/comments' ) # errors found: 2189
|
||||
nudge.append( '-whitespace/empty_loop_body' ) # errors found: 19
|
||||
later.append( '-whitespace/end_of_line' ) # errors found: 4340
|
||||
later.append( '-whitespace/line_length' ) # errors found: 14500
|
||||
never.append( '-whitespace/indent' ) # errors found: 4108
|
||||
later.append( '-whitespace/newline' ) # errors found: 1520
|
||||
nudge.append( '-whitespace/operators' ) # errors found: 2297
|
||||
never.append( '-whitespace/parens' ) # errors found: 49058
|
||||
nudge.append( '-whitespace/semicolon' ) # errors found: 121
|
||||
nudge.append( '-whitespace/tab' ) # errors found: 233
|
||||
nudge.append('-build/c++11') # errors found: 6
|
||||
never.append('-build/header_guard') # errors found: 345
|
||||
nudge.append('-build/include') # errors found: 924
|
||||
nudge.append('-build/include_order') # errors found: 511
|
||||
nudge.append('-build/include_what_you_use') # errors found: 986
|
||||
nudge.append('-build/namespaces') # errors found: 131
|
||||
never.append('-readability/braces') # errors found: 880
|
||||
later.append('-readability/casting') # errors found: 748
|
||||
nudge.append('-readability/check') # errors found: 7
|
||||
nudge.append('-readability/fn_size') # errors found: 1
|
||||
nudge.append('-readability/function') # errors found: 49
|
||||
nudge.append('-readability/inheritance') # errors found: 7
|
||||
nudge.append('-readability/multiline_comment') # errors found: 1
|
||||
later.append('-readability/namespace') # errors found: 876
|
||||
later.append('-readability/streams') # errors found: 72
|
||||
later.append('-readability/todo') # errors found: 309
|
||||
nudge.append('-runtime/arrays') # errors found: 5
|
||||
later.append('-runtime/explicit') # errors found: 322
|
||||
never.append('-runtime/indentation_namespace') # errors found: 4601
|
||||
later.append('-runtime/int') # errors found: 1420
|
||||
later.append('-runtime/printf') # errors found: 29
|
||||
nudge.append('-runtime/references') # errors found: 1338
|
||||
nudge.append('-runtime/string') # errors found: 6
|
||||
nudge.append('-runtime/threadsafe_fn') # errors found: 46
|
||||
never.append('-whitespace/blank_line') # errors found: 2080
|
||||
never.append('-whitespace/braces') # errors found: 962
|
||||
later.append('-whitespace/comma') # errors found: 621
|
||||
later.append('-whitespace/comments') # errors found: 2189
|
||||
nudge.append('-whitespace/empty_loop_body') # errors found: 19
|
||||
later.append('-whitespace/end_of_line') # errors found: 4340
|
||||
later.append('-whitespace/line_length') # errors found: 14500
|
||||
never.append('-whitespace/indent') # errors found: 4108
|
||||
later.append('-whitespace/newline') # errors found: 1520
|
||||
nudge.append('-whitespace/operators') # errors found: 2297
|
||||
never.append('-whitespace/parens') # errors found: 49058
|
||||
nudge.append('-whitespace/semicolon') # errors found: 121
|
||||
nudge.append('-whitespace/tab') # errors found: 233
|
||||
|
||||
filters = later + never
|
||||
if not nudgeOn:
|
||||
filters = filters + nudge
|
||||
|
||||
|
||||
sourceFiles = []
|
||||
for x in paths:
|
||||
utils.getAllSourceFiles( sourceFiles, x )
|
||||
utils.getAllSourceFiles(sourceFiles, x)
|
||||
|
||||
|
||||
args = ["--linelength=100",
|
||||
"--filter=" + ",".join( filters ),
|
||||
"--counting=detailed" ] + sourceFiles
|
||||
filenames = cpplint.ParseArguments( args )
|
||||
args = ["--linelength=100", "--filter=" + ",".join(filters), "--counting=detailed"
|
||||
] + sourceFiles
|
||||
filenames = cpplint.ParseArguments(args)
|
||||
|
||||
def _ourIsTestFilename(fn):
|
||||
if fn.find( "dbtests" ) >= 0:
|
||||
if fn.find("dbtests") >= 0:
|
||||
return True
|
||||
if fn.endswith( "_test.cpp" ):
|
||||
if fn.endswith("_test.cpp"):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
cpplint._IsTestFilename = _ourIsTestFilename
|
||||
|
||||
# Change stderr to write with replacement characters so we don't die
|
||||
# if we try to print something containing non-ASCII characters.
|
||||
sys.stderr = codecs.StreamReaderWriter(sys.stderr,
|
||||
codecs.getreader('utf8'),
|
||||
codecs.getwriter('utf8'),
|
||||
'replace')
|
||||
sys.stderr = codecs.StreamReaderWriter(sys.stderr, codecs.getreader('utf8'),
|
||||
codecs.getwriter('utf8'), 'replace')
|
||||
cpplint._cpplint_state.ResetErrorCounts()
|
||||
for filename in filenames:
|
||||
config_h_check_obj = CheckForConfigH()
|
||||
cpplint.ProcessFile(filename,
|
||||
cpplint._cpplint_state.verbose_level,
|
||||
extra_check_functions=[config_h_check_obj])
|
||||
cpplint.ProcessFile(filename, cpplint._cpplint_state.verbose_level,
|
||||
extra_check_functions=[config_h_check_obj])
|
||||
cpplint._cpplint_state.PrintErrorCounts()
|
||||
|
||||
|
||||
return cpplint._cpplint_state.error_count == 0
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
paths = []
|
||||
nudge = False
|
||||
|
||||
|
||||
for arg in sys.argv[1:]:
|
||||
if arg.startswith( "--" ):
|
||||
if arg.startswith("--"):
|
||||
arg = arg[2:]
|
||||
if arg == "nudge":
|
||||
nudge = True
|
||||
continue
|
||||
else:
|
||||
print( "unknown arg [%s]" % arg )
|
||||
print("unknown arg [%s]" % arg)
|
||||
sys.exit(-1)
|
||||
paths.append( arg )
|
||||
paths.append(arg)
|
||||
|
||||
if len(paths) == 0:
|
||||
paths.append( "src/mongo/" )
|
||||
paths.append("src/mongo/")
|
||||
|
||||
if not run_lint( paths, nudge ):
|
||||
if not run_lint(paths, nudge):
|
||||
sys.exit(-1)
|
||||
@@ -13,7 +13,7 @@ class YapfLinter(base.LinterBase):
|
|||
def __init__(self):
|
||||
# type: () -> None
|
||||
"""Create a yapf linter."""
|
||||
super(YapfLinter, self).__init__("yapf", "yapf 0.16.0")
|
||||
super(YapfLinter, self).__init__("yapf", "yapf 0.21.0")
|
||||
|
||||
def get_lint_version_cmd_args(self):
|
||||
# type: () -> List[str]
|
||||
@@ -1,5 +1,4 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
'''Helper script for constructing an archive (zip or tar) from a list of files.
|
||||
|
||||
The output format (tar, tgz, zip) is determined from the file name, unless the user specifies
|
||||
@@ -35,6 +34,7 @@ import zipfile
|
|||
import tempfile
|
||||
from subprocess import (Popen, PIPE, STDOUT)
|
||||
|
||||
|
||||
def main(argv):
|
||||
args = []
|
||||
for arg in argv[1:]:
|
||||
@@ -54,6 +54,7 @@ def main(argv):
|
|||
else:
|
||||
raise ValueError('Unsupported archive format "%s"' % opts.archive_format)
|
||||
|
||||
|
||||
def delete_directory(dir):
|
||||
'''Recursively deletes a directory and its contents.
|
||||
'''
|
||||
@@ -62,6 +63,7 @@ def delete_directory(dir):
|
|||
except Exception:
|
||||
pass
|
||||
|
||||
|
||||
def make_tar_archive(opts):
|
||||
'''Given the parsed options, generates the 'opt.output_filename'
|
||||
tarball containing all the files in 'opt.input_filename' renamed
|
||||
@@ -81,10 +83,7 @@ def make_tar_archive(opts):
|
|||
tar_options += "z"
|
||||
|
||||
# clean and create a temp directory to copy files to
|
||||
enclosing_archive_directory = tempfile.mkdtemp(
|
||||
prefix='archive_',
|
||||
dir=os.path.abspath('build')
|
||||
)
|
||||
enclosing_archive_directory = tempfile.mkdtemp(prefix='archive_', dir=os.path.abspath('build'))
|
||||
output_tarfile = os.path.join(os.getcwd(), opts.output_filename)
|
||||
|
||||
tar_command = ["tar", tar_options, output_tarfile]
|
||||
@@ -111,6 +110,7 @@ def make_tar_archive(opts):
|
|||
# delete temp directory
|
||||
delete_directory(enclosing_archive_directory)
|
||||
|
||||
|
||||
def make_zip_archive(opts):
|
||||
'''Given the parsed options, generates the 'opt.output_filename'
|
||||
zipfile containing all the files in 'opt.input_filename' renamed
|
||||
@@ -122,8 +122,8 @@ def make_zip_archive(opts):
|
|||
archive = open_zip_archive_for_write(opts.output_filename)
|
||||
try:
|
||||
for input_filename in opts.input_filenames:
|
||||
archive.add(input_filename, arcname=get_preferred_filename(input_filename,
|
||||
opts.transformations))
|
||||
archive.add(input_filename, arcname=get_preferred_filename(
|
||||
input_filename, opts.transformations))
|
||||
finally:
|
||||
archive.close()
|
||||
|
||||
@@ -132,10 +132,10 @@ def parse_options(args):
|
|||
parser = optparse.OptionParser()
|
||||
parser.add_option('-o', dest='output_filename', default=None,
|
||||
help='Name of the archive to output.', metavar='FILE')
|
||||
parser.add_option('--format', dest='archive_format', default=None,
|
||||
choices=('zip', 'tar', 'tgz'),
|
||||
help='Format of archive to create. '
|
||||
'If omitted, use the suffix of the output filename to decide.')
|
||||
parser.add_option('--format', dest='archive_format', default=None, choices=('zip', 'tar',
|
||||
'tgz'),
|
||||
help=('Format of archive to create. '
|
||||
'If omitted, use the suffix of the output filename to decide.'))
|
||||
parser.add_option('--transform', action='append', dest='transformations', default=[])
|
||||
|
||||
(opts, input_filenames) = parser.parse_args(args)
|
||||
@@ -158,28 +158,33 @@ def parse_options(args):
|
|||
elif opts.output_filename.endswith('.tar'):
|
||||
opts.archive_format = 'tar'
|
||||
else:
|
||||
parser.error('Could not deduce archive format from output filename "%s"' %
|
||||
opts.output_filename)
|
||||
parser.error(
|
||||
'Could not deduce archive format from output filename "%s"' % opts.output_filename)
|
||||
|
||||
try:
|
||||
opts.transformations = [
|
||||
xform.replace(os.path.altsep or os.path.sep, os.path.sep).split('=', 1)
|
||||
for xform in opts.transformations]
|
||||
for xform in opts.transformations
|
||||
]
|
||||
except Exception, e:
|
||||
parser.error(e)
|
||||
|
||||
return opts
|
||||
|
||||
|
||||
def open_zip_archive_for_write(filename):
|
||||
'''Open a zip archive for writing and return it.
|
||||
'''
|
||||
|
||||
# Infuriatingly, Zipfile calls the "add" method "write", but they're otherwise identical,
|
||||
# for our purposes. WrappedZipFile is a minimal adapter class.
|
||||
class WrappedZipFile(zipfile.ZipFile):
|
||||
def add(self, filename, arcname):
|
||||
return self.write(filename, arcname)
|
||||
|
||||
return WrappedZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
|
||||
|
||||
|
||||
def get_preferred_filename(input_filename, transformations):
|
||||
'''Does a prefix subsitution on 'input_filename' for the
|
||||
first matching transformation in 'transformations' and
|
||||
@@ -192,6 +197,7 @@ def get_preferred_filename(input_filename, transformations):
|
|||
return replace + input_filename[len(match):]
|
||||
return input_filename
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(sys.argv)
|
||||
sys.exit(0)
|
||||
@@ -33,6 +33,7 @@ VCXPROJ_FOOTER = r"""
|
|||
</Project>
|
||||
"""
|
||||
|
||||
|
||||
def get_defines(args):
|
||||
"""Parse a compiler argument list looking for defines"""
|
||||
ret = set()
|
||||
@@ -41,6 +42,7 @@ def get_defines(args):
|
|||
ret.add(arg[2:])
|
||||
return ret
|
||||
|
||||
|
||||
def get_includes(args):
|
||||
"""Parse a compiler argument list looking for includes"""
|
||||
ret = set()
|
||||
@@ -49,8 +51,10 @@ def get_includes(args):
|
|||
ret.add(arg[2:])
|
||||
return ret
|
||||
|
||||
|
||||
class ProjFileGenerator(object):
|
||||
"""Generate a .vcxproj and .vcxprof.filters file"""
|
||||
|
||||
def __init__(self, target):
|
||||
# we handle DEBUG in the vcxproj header:
|
||||
self.common_defines = set()
|
||||
@@ -75,8 +79,8 @@ class ProjFileGenerator(object):
|
|||
with open('buildscripts/vcxproj.header', 'r') as header_file:
|
||||
header_str = header_file.read()
|
||||
header_str = header_str.replace("%_TARGET_%", self.target)
|
||||
header_str = header_str.replace("%AdditionalIncludeDirectories%",
|
||||
';'.join(sorted(self.includes)))
|
||||
header_str = header_str.replace("%AdditionalIncludeDirectories%", ';'.join(
|
||||
sorted(self.includes)))
|
||||
self.vcxproj.write(header_str)
|
||||
|
||||
common_defines = self.all_defines
|
||||
@@ -84,19 +88,18 @@ class ProjFileGenerator(object):
|
|||
common_defines = common_defines.intersection(c['defines'])
|
||||
|
||||
self.vcxproj.write("<!-- common_defines -->\n")
|
||||
self.vcxproj.write("<ItemDefinitionGroup><ClCompile><PreprocessorDefinitions>"
|
||||
+ ';'.join(common_defines) + ";%(PreprocessorDefinitions)\n")
|
||||
self.vcxproj.write("<ItemDefinitionGroup><ClCompile><PreprocessorDefinitions>" +
|
||||
';'.join(common_defines) + ";%(PreprocessorDefinitions)\n")
|
||||
self.vcxproj.write("</PreprocessorDefinitions></ClCompile></ItemDefinitionGroup>\n")
|
||||
|
||||
self.vcxproj.write(" <ItemGroup>\n")
|
||||
for command in self.compiles:
|
||||
defines = command["defines"].difference(common_defines)
|
||||
if len(defines) > 0:
|
||||
self.vcxproj.write(" <ClCompile Include=\"" + command["file"] +
|
||||
"\"><PreprocessorDefinitions>" +
|
||||
';'.join(defines) +
|
||||
";%(PreprocessorDefinitions)" +
|
||||
"</PreprocessorDefinitions></ClCompile>\n")
|
||||
self.vcxproj.write(
|
||||
" <ClCompile Include=\"" + command["file"] + "\"><PreprocessorDefinitions>" +
|
||||
';'.join(defines) + ";%(PreprocessorDefinitions)" +
|
||||
"</PreprocessorDefinitions></ClCompile>\n")
|
||||
else:
|
||||
self.vcxproj.write(" <ClCompile Include=\"" + command["file"] + "\" />\n")
|
||||
self.vcxproj.write(" </ItemGroup>\n")
|
||||
@@ -141,7 +144,7 @@ class ProjFileGenerator(object):
|
|||
for arg in get_includes(args):
|
||||
self.includes.add(arg)
|
||||
|
||||
self.compiles.append({"file" : file_name, "defines" : file_defines})
|
||||
self.compiles.append({"file": file_name, "defines": file_defines})
|
||||
|
||||
def __is_header(self, name):
|
||||
"""Is this a header file?"""
|
||||
@@ -167,7 +170,7 @@ class ProjFileGenerator(object):
|
|||
for directory in dirs:
|
||||
if not os.path.exists(directory):
|
||||
print(("Warning: skipping include file scan for directory '%s'" +
|
||||
" because it does not exist.") % str(directory))
|
||||
" because it does not exist.") % str(directory))
|
||||
continue
|
||||
|
||||
# Get all the header files
|
||||
@@ -239,6 +242,7 @@ class ProjFileGenerator(object):
|
|||
self.vcxproj.write(" <None Include='%s' />\n" % file_name)
|
||||
self.vcxproj.write(" </ItemGroup>\n")
|
||||
|
||||
|
||||
def main():
|
||||
if len(sys.argv) != 2:
|
||||
print r"Usage: python buildscripts\make_vcxproj.py FILE_NAME"
|
||||
@@ -253,4 +257,5 @@ def main():
|
|||
command_str = command["command"]
|
||||
projfile.parse_line(command_str)
|
||||
|
||||
|
||||
main()
|
||||
@@ -26,12 +26,13 @@ MongoDB SConscript files do.
|
|||
from __future__ import print_function
|
||||
|
||||
__all__ = ('discover_modules', 'discover_module_directories', 'configure_modules',
|
||||
'register_module_test')
|
||||
'register_module_test')
|
||||
|
||||
import imp
|
||||
import inspect
|
||||
import os
|
||||
|
||||
|
||||
def discover_modules(module_root, allowed_modules):
|
||||
"""Scans module_root for subdirectories that look like MongoDB modules.
|
||||
|
||||
@@ -71,6 +72,7 @@ def discover_modules(module_root, allowed_modules):
|
|||
|
||||
return found_modules
|
||||
|
||||
|
||||
def discover_module_directories(module_root, allowed_modules):
|
||||
"""Scans module_root for subdirectories that look like MongoDB modules.
|
||||
|
||||
@@ -101,6 +103,7 @@ def discover_module_directories(module_root, allowed_modules):
|
|||
|
||||
return found_modules
|
||||
|
||||
|
||||
def configure_modules(modules, conf):
|
||||
""" Run the configure() function in the build.py python modules for each module in "modules"
|
||||
(as created by discover_modules).
|
||||
@@ -114,6 +117,7 @@ def configure_modules(modules, conf):
|
|||
root = os.path.dirname(module.__file__)
|
||||
module.configure(conf, conf.env)
|
||||
|
||||
|
||||
def get_module_sconscripts(modules):
|
||||
sconscripts = []
|
||||
for m in modules:
|
||||
@@ -121,6 +125,7 @@ def get_module_sconscripts(modules):
|
|||
sconscripts.append(os.path.join(module_dir_path, 'SConscript'))
|
||||
return sconscripts
|
||||
|
||||
|
||||
def __get_src_relative_path(path):
|
||||
"""Return a path relative to ./src.
|
||||
|
||||
@@ -135,6 +140,7 @@ def __get_src_relative_path(path):
|
|||
result = path[len(src_dir) + 1:]
|
||||
return result
|
||||
|
||||
|
||||
def __get_module_path(module_frame_depth):
|
||||
"""Return the path to the MongoDB module whose build.py is executing "module_frame_depth" frames
|
||||
above this function, relative to the "src" directory.
|
||||
@@ -142,6 +148,7 @@ def __get_module_path(module_frame_depth):
|
|||
module_filename = inspect.stack()[module_frame_depth + 1][1]
|
||||
return os.path.dirname(__get_src_relative_path(module_filename))
|
||||
|
||||
|
||||
def __get_module_src_path(module_frame_depth):
|
||||
"""Return the path relative to the SConstruct file of the MongoDB module's source tree.
|
||||
|
||||
@@ -150,6 +157,7 @@ def __get_module_src_path(module_frame_depth):
|
|||
"""
|
||||
return os.path.join('src', __get_module_path(module_frame_depth + 1))
|
||||
|
||||
|
||||
def __get_module_build_path(module_frame_depth):
|
||||
"""Return the path relative to the SConstruct file of the MongoDB module's build tree.
|
||||
|
||||
@@ -158,6 +166,7 @@ def __get_module_build_path(module_frame_depth):
|
|||
"""
|
||||
return os.path.join('$BUILD_DIR', __get_module_path(module_frame_depth + 1))
|
||||
|
||||
|
||||
def get_current_module_src_path():
|
||||
"""Return the path relative to the SConstruct file of the current MongoDB module's source tree.
|
||||
|
||||
@@ -165,6 +174,7 @@ def get_current_module_src_path():
|
|||
"""
|
||||
return __get_module_src_path(1)
|
||||
|
||||
|
||||
def get_current_module_build_path():
|
||||
"""Return the path relative to the SConstruct file of the current MongoDB module's build tree.
|
||||
|
||||
@@ -173,6 +183,7 @@ def get_current_module_build_path():
|
|||
|
||||
return __get_module_build_path(1)
|
||||
|
||||
|
||||
def get_current_module_libdep_name(libdep_rel_path):
|
||||
"""Return a $BUILD_DIR relative path to a "libdep_rel_path", where "libdep_rel_path"
|
||||
is specified relative to the MongoDB module's build.py file.
|
||||
@@ -23,6 +23,7 @@ import os
|
|||
import subprocess
|
||||
import sys
|
||||
|
||||
|
||||
def symbolize_frames(trace_doc, dbg_path_resolver, symbolizer_path=None, dsym_hint=None):
|
||||
"""Given a trace_doc in MongoDB stack dump format, returns a list of symbolized stack frames.
|
||||
"""
|
||||
@@ -36,7 +37,7 @@ def symbolize_frames(trace_doc, dbg_path_resolver, symbolizer_path=None, dsym_hi
|
|||
"""Makes a map from binary load address to description of library from the somap, which is
|
||||
a list of dictionaries describing individual loaded libraries.
|
||||
"""
|
||||
return { so_entry["b"] : so_entry for so_entry in somap_list if so_entry.has_key("b") }
|
||||
return {so_entry["b"]: so_entry for so_entry in somap_list if so_entry.has_key("b")}
|
||||
|
||||
base_addr_map = make_base_addr_map(trace_doc["processInfo"]["somap"])
|
||||
|
||||
@@ -57,21 +58,17 @@ def symbolize_frames(trace_doc, dbg_path_resolver, symbolizer_path=None, dsym_hi
|
|||
# address of instructions that cause signals (such as segfaults and divide-by-zero) which
|
||||
# are already correct, but there doesn't seem to be a reliable way to detect that case.
|
||||
addr -= 1
|
||||
frames.append(dict(path=dbg_path_resolver.get_dbg_file(soinfo),
|
||||
buildId=soinfo.get("buildId", None),
|
||||
offset=frame["o"],
|
||||
addr=addr,
|
||||
symbol=frame.get("s", None)))
|
||||
frames.append(
|
||||
dict(
|
||||
path=dbg_path_resolver.get_dbg_file(soinfo), buildId=soinfo.get("buildId", None),
|
||||
offset=frame["o"], addr=addr, symbol=frame.get("s", None)))
|
||||
|
||||
symbolizer_args = [symbolizer_path]
|
||||
for dh in dsym_hint:
|
||||
symbolizer_args.append("-dsym-hint=%s" %dh)
|
||||
symbolizer_process = subprocess.Popen(
|
||||
args=symbolizer_args,
|
||||
close_fds=True,
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=open("/dev/null"))
|
||||
symbolizer_args.append("-dsym-hint=%s" % dh)
|
||||
symbolizer_process = subprocess.Popen(args=symbolizer_args, close_fds=True,
|
||||
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
|
||||
stderr=open("/dev/null"))
|
||||
|
||||
def extract_symbols(stdin):
|
||||
"""Extracts symbol information from the output of llvm-symbolizer.
|
||||
@@ -93,7 +90,7 @@ def symbolize_frames(trace_doc, dbg_path_resolver, symbolizer_path=None, dsym_hi
|
|||
if line == "\n":
|
||||
break
|
||||
if step == 0:
|
||||
result.append({"fn" : line.strip()})
|
||||
result.append({"fn": line.strip()})
|
||||
step = 1
|
||||
else:
|
||||
file_name, line, column = line.strip().rsplit(':', 3)
|
||||
@@ -111,6 +108,7 @@ def symbolize_frames(trace_doc, dbg_path_resolver, symbolizer_path=None, dsym_hi
|
|||
symbolizer_process.wait()
|
||||
return frames
|
||||
|
||||
|
||||
class path_dbg_file_resolver(object):
|
||||
def __init__(self, bin_path_guess):
|
||||
self._bin_path_guess = bin_path_guess
|
||||
@@ -118,6 +116,7 @@ class path_dbg_file_resolver(object):
|
|||
def get_dbg_file(self, soinfo):
|
||||
return soinfo.get("path", self._bin_path_guess)
|
||||
|
||||
|
||||
class s3_buildid_dbg_file_resolver(object):
|
||||
def __init__(self, cache_dir, s3_bucket):
|
||||
self._cache_dir = cache_dir
|
||||
@@ -134,7 +133,7 @@ class s3_buildid_dbg_file_resolver(object):
|
|||
self._get_from_s3(buildId)
|
||||
except:
|
||||
ex = sys.exc_info()[0]
|
||||
sys.stderr.write("Failed to find debug symbols for %s in s3: %s\n" %(buildId, ex))
|
||||
sys.stderr.write("Failed to find debug symbols for %s in s3: %s\n" % (buildId, ex))
|
||||
return None
|
||||
if not os.path.exists(buildIdPath):
|
||||
return None
|
||||
@@ -142,10 +141,11 @@ class s3_buildid_dbg_file_resolver(object):
|
|||
|
||||
def _get_from_s3(self, buildId):
|
||||
subprocess.check_call(
|
||||
['wget', 'https://s3.amazonaws.com/%s/%s.debug.gz' % (self._s3_bucket, buildId)],
|
||||
cwd=self._cache_dir)
|
||||
['wget', 'https://s3.amazonaws.com/%s/%s.debug.gz' %
|
||||
(self._s3_bucket, buildId)], cwd=self._cache_dir)
|
||||
subprocess.check_call(['gunzip', buildId + ".debug.gz"], cwd=self._cache_dir)
|
||||
|
||||
|
||||
def classic_output(frames, outfile, **kwargs):
|
||||
for frame in frames:
|
||||
symbinfo = frame["symbinfo"]
|
||||
|
@@ -155,6 +155,7 @@ def classic_output(frames, outfile, **kwargs):
|||
else:
|
||||
outfile.write(" %(path)s!!!\n" % symbinfo)
|
||||
|
||||
|
||||
def main(argv):
|
||||
parser = optparse.OptionParser()
|
||||
parser.add_option("--dsym-hint", action="append", dest="dsym_hint")
|
||||
@@ -173,7 +174,6 @@ def main(argv):
|
|||
sys.stderr.write("Invalid output-format argument: %s\n" % options.output_format)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
# Skip over everything before the first '{' since it is likely to be log line prefixes.
|
||||
# Additionally, using raw_decode() to ignore extra data after the closing '}' to allow maximal
|
||||
# sloppiness in copy-pasting input.
|
||||
@@ -182,12 +182,11 @@ def main(argv):
|
|||
trace_doc = json.JSONDecoder().raw_decode(trace_doc)[0]
|
||||
|
||||
resolver = resolver_constructor(*args[1:])
|
||||
frames = symbolize_frames(trace_doc,
|
||||
resolver,
|
||||
symbolizer_path=options.symbolizer_path,
|
||||
frames = symbolize_frames(trace_doc, resolver, symbolizer_path=options.symbolizer_path,
|
||||
dsym_hint=options.dsym_hint)
|
||||
output_fn(frames, sys.stdout, indent=2)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main(sys.argv)
|
||||
sys.exit(0)
|
||||
@@ -1,43 +1,52 @@
|
|||
"""Script to fix up our MSI files """
|
||||
|
||||
import argparse;
|
||||
import msilib
|
||||
import shutil;
|
||||
|
||||
parser = argparse.ArgumentParser(description='Trim MSI.')
|
||||
parser.add_argument('file', type=argparse.FileType('r'), help='file to trim')
|
||||
parser.add_argument('out', type=argparse.FileType('w'), help='file to output to')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
def exec_delete(query):
|
||||
view = db.OpenView(query)
|
||||
view.Execute(None)
|
||||
|
||||
cur_record = view.Fetch()
|
||||
view.Modify(msilib.MSIMODIFY_DELETE, cur_record)
|
||||
view.Close()
|
||||
|
||||
|
||||
def exec_update(query, column, value):
|
||||
view = db.OpenView(query)
|
||||
view.Execute(None)
|
||||
|
||||
cur_record = view.Fetch()
|
||||
cur_record.SetString(column, value)
|
||||
view.Modify(msilib.MSIMODIFY_REPLACE, cur_record)
|
||||
view.Close()
|
||||
|
||||
|
||||
print "Trimming MSI"
|
||||
|
||||
db = msilib.OpenDatabase(args.file.name, msilib.MSIDBOPEN_DIRECT)
|
||||
|
||||
exec_delete("select * from ControlEvent WHERE Dialog_ = 'LicenseAgreementDlg' AND Control_ = 'Next' AND Event = 'NewDialog' AND Argument = 'CustomizeDlg'")
|
||||
exec_delete("select * from ControlEvent WHERE Dialog_ = 'CustomizeDlg' AND Control_ = 'Back' AND Event = 'NewDialog' AND Argument = 'LicenseAgreementDlg'")
|
||||
exec_delete("select * from ControlEvent WHERE Dialog_ = 'CustomizeDlg' AND Control_ = 'Next' AND Event = 'NewDialog' AND Argument = 'VerifyReadyDlg'")
|
||||
exec_delete("select * from ControlEvent WHERE Dialog_ = 'VerifyReadyDlg' AND Control_ = 'Back' AND Event = 'NewDialog' AND Argument = 'CustomizeDlg'")
|
||||
|
||||
db.Commit()
|
||||
|
||||
shutil.copyfile(args.file.name, args.out.name);
|
||||
"""Script to fix up our MSI files """
|
||||
|
||||
import argparse
|
||||
import msilib
|
||||
import shutil
|
||||
|
||||
parser = argparse.ArgumentParser(description='Trim MSI.')
|
||||
parser.add_argument('file', type=argparse.FileType('r'), help='file to trim')
|
||||
parser.add_argument('out', type=argparse.FileType('w'), help='file to output to')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
|
||||
def exec_delete(query):
|
||||
view = db.OpenView(query)
|
||||
view.Execute(None)
|
||||
|
||||
cur_record = view.Fetch()
|
||||
view.Modify(msilib.MSIMODIFY_DELETE, cur_record)
|
||||
view.Close()
|
||||
|
||||
|
||||
def exec_update(query, column, value):
|
||||
view = db.OpenView(query)
|
||||
view.Execute(None)
|
||||
|
||||
cur_record = view.Fetch()
|
||||
cur_record.SetString(column, value)
|
||||
view.Modify(msilib.MSIMODIFY_REPLACE, cur_record)
|
||||
view.Close()
|
||||
|
||||
|
||||
print "Trimming MSI"
|
||||
|
||||
db = msilib.OpenDatabase(args.file.name, msilib.MSIDBOPEN_DIRECT)
|
||||
|
||||
exec_delete(
|
||||
"select * from ControlEvent WHERE Dialog_ = 'LicenseAgreementDlg' AND Control_ = 'Next' AND Event = 'NewDialog' AND Argument = 'CustomizeDlg'"
|
||||
)
|
||||
exec_delete(
|
||||
"select * from ControlEvent WHERE Dialog_ = 'CustomizeDlg' AND Control_ = 'Back' AND Event = 'NewDialog' AND Argument = 'LicenseAgreementDlg'"
|
||||
)
|
||||
exec_delete(
|
||||
"select * from ControlEvent WHERE Dialog_ = 'CustomizeDlg' AND Control_ = 'Next' AND Event = 'NewDialog' AND Argument = 'VerifyReadyDlg'"
|
||||
)
|
||||
exec_delete(
|
||||
"select * from ControlEvent WHERE Dialog_ = 'VerifyReadyDlg' AND Control_ = 'Back' AND Event = 'NewDialog' AND Argument = 'CustomizeDlg'"
|
||||
)
|
||||
|
||||
db.Commit()
|
||||
|
||||
shutil.copyfile(args.file.name, args.out.name)
|
||||
@@ -42,15 +42,15 @@ import time
|
|||
import urlparse
|
||||
|
||||
# The MongoDB names for the architectures we support.
|
||||
ARCH_CHOICES=["x86_64", "ppc64le", "s390x", "arm64"]
|
||||
ARCH_CHOICES = ["x86_64", "ppc64le", "s390x", "arm64"]
|
||||
|
||||
# Made up names for the flavors of distribution we package for.
|
||||
DISTROS=["suse", "debian","redhat","ubuntu","amazon"]
|
||||
DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon"]
|
||||
|
||||
|
||||
class EnterpriseSpec(packager.Spec):
|
||||
def suffix(self):
|
||||
return "-enterprise" if int(self.ver.split(".")[1])%2==0 else "-enterprise-unstable"
|
||||
return "-enterprise" if int(self.ver.split(".")[1]) % 2 == 0 else "-enterprise-unstable"
|
||||
|
||||
|
||||
class EnterpriseDistro(packager.Distro):
|
||||
@@ -92,16 +92,20 @@ class EnterpriseDistro(packager.Distro):
|
|||
repo_directory = ""
|
||||
|
||||
if spec.is_pre_release():
|
||||
repo_directory = "testing"
|
||||
repo_directory = "testing"
|
||||
else:
|
||||
repo_directory = spec.branch()
|
||||
repo_directory = spec.branch()
|
||||
|
||||
if re.search("^(debian|ubuntu)", self.n):
|
||||
return "repo/apt/%s/dists/%s/mongodb-enterprise/%s/%s/binary-%s/" % (self.n, self.repo_os_version(build_os), repo_directory, self.repo_component(), self.archname(arch))
|
||||
return "repo/apt/%s/dists/%s/mongodb-enterprise/%s/%s/binary-%s/" % (
|
||||
self.n, self.repo_os_version(build_os), repo_directory, self.repo_component(),
|
||||
self.archname(arch))
|
||||
elif re.search("(redhat|fedora|centos|amazon)", self.n):
|
||||
return "repo/yum/%s/%s/mongodb-enterprise/%s/%s/RPMS/" % (self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
|
||||
return "repo/yum/%s/%s/mongodb-enterprise/%s/%s/RPMS/" % (
|
||||
self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
|
||||
elif re.search("(suse)", self.n):
|
||||
return "repo/zypper/%s/%s/mongodb-enterprise/%s/%s/RPMS/" % (self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
|
||||
return "repo/zypper/%s/%s/mongodb-enterprise/%s/%s/RPMS/" % (
|
||||
self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
|
||||
else:
|
||||
raise Exception("BUG: unsupported platform?")
|
||||
|
||||
@@ -111,80 +115,83 @@ class EnterpriseDistro(packager.Distro):
|
|||
"""
|
||||
if arch == "ppc64le":
|
||||
if self.n == 'ubuntu':
|
||||
return [ "ubuntu1604" ]
|
||||
return ["ubuntu1604"]
|
||||
if self.n == 'redhat':
|
||||
return [ "rhel71" ]
|
||||
return ["rhel71"]
|
||||
else:
|
||||
return []
|
||||
if arch == "s390x":
|
||||
if self.n == 'redhat':
|
||||
return [ "rhel67", "rhel72" ]
|
||||
return ["rhel67", "rhel72"]
|
||||
if self.n == 'suse':
|
||||
return [ "suse11", "suse12" ]
|
||||
return ["suse11", "suse12"]
|
||||
if self.n == 'ubuntu':
|
||||
return [ "ubuntu1604" ]
|
||||
return ["ubuntu1604"]
|
||||
else:
|
||||
return []
|
||||
if arch == "arm64":
|
||||
if self.n == 'ubuntu':
|
||||
return [ "ubuntu1604" ]
|
||||
return ["ubuntu1604"]
|
||||
else:
|
||||
return []
|
||||
|
||||
if re.search("(redhat|fedora|centos)", self.n):
|
||||
return [ "rhel70", "rhel62", "rhel57" ]
|
||||
return ["rhel70", "rhel62", "rhel57"]
|
||||
else:
|
||||
return super(EnterpriseDistro, self).build_os(arch)
|
||||
|
||||
|
||||
def main(argv):
|
||||
|
||||
distros=[EnterpriseDistro(distro) for distro in DISTROS]
|
||||
distros = [EnterpriseDistro(distro) for distro in DISTROS]
|
||||
|
||||
args = packager.get_args(distros, ARCH_CHOICES)
|
||||
|
||||
spec = EnterpriseSpec(args.server_version, args.metadata_gitspec, args.release_number)
|
||||
|
||||
oldcwd=os.getcwd()
|
||||
srcdir=oldcwd+"/../"
|
||||
oldcwd = os.getcwd()
|
||||
srcdir = oldcwd + "/../"
|
||||
|
||||
# Where to do all of our work. Use a randomly-created directory if one
|
||||
# is not passed in.
|
||||
prefix = args.prefix
|
||||
if prefix is None:
|
||||
prefix=tempfile.mkdtemp()
|
||||
prefix = tempfile.mkdtemp()
|
||||
|
||||
print "Working in directory %s" % prefix
|
||||
|
||||
os.chdir(prefix)
|
||||
try:
|
||||
made_pkg = False
|
||||
# Build a package for each distro/spec/arch tuple, and
|
||||
# accumulate the repository-layout directories.
|
||||
for (distro, arch) in packager.crossproduct(distros, args.arches):
|
||||
made_pkg = False
|
||||
# Build a package for each distro/spec/arch tuple, and
|
||||
# accumulate the repository-layout directories.
|
||||
for (distro, arch) in packager.crossproduct(distros, args.arches):
|
||||
|
||||
for build_os in distro.build_os(arch):
|
||||
if build_os in args.distros or not args.distros:
|
||||
for build_os in distro.build_os(arch):
|
||||
if build_os in args.distros or not args.distros:
|
||||
|
||||
filename = tarfile(build_os, arch, spec)
|
||||
packager.ensure_dir(filename)
|
||||
shutil.copyfile(args.tarball, filename)
|
||||
filename = tarfile(build_os, arch, spec)
|
||||
packager.ensure_dir(filename)
|
||||
shutil.copyfile(args.tarball, filename)
|
||||
|
||||
repo = make_package(distro, build_os, arch, spec, srcdir)
|
||||
make_repo(repo, distro, build_os, spec)
|
||||
repo = make_package(distro, build_os, arch, spec, srcdir)
|
||||
make_repo(repo, distro, build_os, spec)
|
||||
|
||||
made_pkg = True
|
||||
made_pkg = True
|
||||
|
||||
if not made_pkg:
|
||||
raise Exception("No valid combination of distro and arch selected")
|
||||
if not made_pkg:
|
||||
raise Exception("No valid combination of distro and arch selected")
|
||||
|
||||
finally:
|
||||
os.chdir(oldcwd)
|
||||
|
||||
|
||||
def tarfile(build_os, arch, spec):
|
||||
"""Return the location where we store the downloaded tarball for
|
||||
this package"""
|
||||
return "dl/mongodb-linux-%s-enterprise-%s-%s.tar.gz" % (spec.version(), build_os, arch)
|
||||
|
||||
|
||||
def setupdir(distro, build_os, arch, spec):
|
||||
# The setupdir will be a directory containing all inputs to the
|
||||
# distro's packaging tools (e.g., package metadata files, init
|
||||
@@ -192,11 +199,13 @@ def setupdir(distro, build_os, arch, spec):
|
|||
# the following format string is unclear, an example setupdir
|
||||
# would be dst/x86_64/debian-sysvinit/wheezy/mongodb-org-unstable/
|
||||
# or dst/x86_64/redhat/rhel57/mongodb-org-unstable/
|
||||
return "dst/%s/%s/%s/%s%s-%s/" % (arch, distro.name(), build_os, distro.pkgbase(), spec.suffix(), spec.pversion(distro))
|
||||
return "dst/%s/%s/%s/%s%s-%s/" % (arch, distro.name(), build_os, distro.pkgbase(),
|
||||
spec.suffix(), spec.pversion(distro))
|
||||
|
||||
|
||||
def unpack_binaries_into(build_os, arch, spec, where):
|
||||
"""Unpack the tarfile for (build_os, arch, spec) into directory where."""
|
||||
rootdir=os.getcwd()
|
||||
rootdir = os.getcwd()
|
||||
packager.ensure_dir(where)
|
||||
# Note: POSIX tar doesn't require support for gtar's "-C" option,
|
||||
# and Python's tarfile module prior to Python 2.7 doesn't have the
|
||||
@@ -204,23 +213,24 @@ def unpack_binaries_into(build_os, arch, spec, where):
|
|||
# thing and chdir into where and run tar there.
|
||||
os.chdir(where)
|
||||
try:
|
||||
packager.sysassert(["tar", "xvzf", rootdir+"/"+tarfile(build_os, arch, spec)])
|
||||
release_dir = glob('mongodb-linux-*')[0]
|
||||
packager.sysassert(["tar", "xvzf", rootdir + "/" + tarfile(build_os, arch, spec)])
|
||||
release_dir = glob('mongodb-linux-*')[0]
|
||||
for releasefile in "bin", "snmp", "LICENSE.txt", "README", "THIRD-PARTY-NOTICES", "MPL-2":
|
||||
os.rename("%s/%s" % (release_dir, releasefile), releasefile)
|
||||
os.rmdir(release_dir)
|
||||
except Exception:
|
||||
exc=sys.exc_value
|
||||
exc = sys.exc_value
|
||||
os.chdir(rootdir)
|
||||
raise exc
|
||||
os.chdir(rootdir)
|
||||
|
||||
|
||||
def make_package(distro, build_os, arch, spec, srcdir):
|
||||
"""Construct the package for (arch, distro, spec), getting
|
||||
packaging files from srcdir and any user-specified suffix from
|
||||
suffixes"""
|
||||
|
||||
sdir=setupdir(distro, build_os, arch, spec)
|
||||
sdir = setupdir(distro, build_os, arch, spec)
|
||||
packager.ensure_dir(sdir)
|
||||
# Note that the RPM packages get their man pages from the debian
|
||||
# directory, so the debian directory is needed in all cases (and
|
||||
@@ -228,7 +238,11 @@ def make_package(distro, build_os, arch, spec, srcdir):
|
|||
for pkgdir in ["debian", "rpm"]:
|
||||
print "Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir)
|
||||
# FIXME: sh-dash-cee is bad. See if tarfile can do this.
|
||||
packager.sysassert(["sh", "-c", "(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" % (srcdir, spec.metadata_gitspec(), pkgdir, sdir)])
|
||||
packager.sysassert([
|
||||
"sh", "-c",
|
||||
"(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" %
|
||||
(srcdir, spec.metadata_gitspec(), pkgdir, sdir)
|
||||
])
|
||||
# Splat the binaries and snmp files under sdir. The "build" stages of the
|
||||
# packaging infrastructure will move the files to wherever they
|
||||
# need to go.
|
||||
@@ -236,9 +250,10 @@ def make_package(distro, build_os, arch, spec, srcdir):
|
|||
# Remove the mongoreplay binary due to libpcap dynamic
|
||||
# linkage.
|
||||
if os.path.exists(sdir + "bin/mongoreplay"):
|
||||
os.unlink(sdir + "bin/mongoreplay")
|
||||
os.unlink(sdir + "bin/mongoreplay")
|
||||
return distro.make_pkg(build_os, arch, spec, srcdir)
|
||||
|
||||
|
||||
def make_repo(repodir, distro, build_os, spec):
|
||||
if re.search("(debian|ubuntu)", repodir):
|
||||
make_deb_repo(repodir, distro, build_os, spec)
|
||||
@@ -247,26 +262,30 @@ def make_repo(repodir, distro, build_os, spec):
|
|||
else:
|
||||
raise Exception("BUG: unsupported platform?")
|
||||
|
||||
|
||||
def make_deb_repo(repo, distro, build_os, spec):
|
||||
# Note: the Debian repository Packages files must be generated
|
||||
# very carefully in order to be usable.
|
||||
oldpwd=os.getcwd()
|
||||
os.chdir(repo+"../../../../../../")
|
||||
oldpwd = os.getcwd()
|
||||
os.chdir(repo + "../../../../../../")
|
||||
try:
|
||||
dirs=set([os.path.dirname(deb)[2:] for deb in packager.backtick(["find", ".", "-name", "*.deb"]).split()])
|
||||
dirs = set([
|
||||
os.path.dirname(deb)[2:]
|
||||
for deb in packager.backtick(["find", ".", "-name", "*.deb"]).split()
|
||||
])
|
||||
for d in dirs:
|
||||
s=packager.backtick(["dpkg-scanpackages", d, "/dev/null"])
|
||||
with open(d+"/Packages", "w") as f:
|
||||
s = packager.backtick(["dpkg-scanpackages", d, "/dev/null"])
|
||||
with open(d + "/Packages", "w") as f:
|
||||
f.write(s)
|
||||
b=packager.backtick(["gzip", "-9c", d+"/Packages"])
|
||||
with open(d+"/Packages.gz", "wb") as f:
|
||||
b = packager.backtick(["gzip", "-9c", d + "/Packages"])
|
||||
with open(d + "/Packages.gz", "wb") as f:
|
||||
f.write(b)
|
||||
finally:
|
||||
os.chdir(oldpwd)
|
||||
# Notes: the Release{,.gpg} files must live in a special place,
|
||||
# and must be created after all the Packages.gz files have been
|
||||
# done.
|
||||
s="""Origin: mongodb
|
||||
s = """Origin: mongodb
|
||||
Label: mongodb
|
||||
Suite: %s
|
||||
Codename: %s/mongodb-enterprise
|
||||
@@ -274,13 +293,13 @@ Architectures: amd64 ppc64el s390x arm64
|
|||
Components: %s
|
||||
Description: MongoDB packages
|
||||
""" % (distro.repo_os_version(build_os), distro.repo_os_version(build_os), distro.repo_component())
|
||||
if os.path.exists(repo+"../../Release"):
|
||||
os.unlink(repo+"../../Release")
|
||||
if os.path.exists(repo+"../../Release.gpg"):
|
||||
os.unlink(repo+"../../Release.gpg")
|
||||
oldpwd=os.getcwd()
|
||||
os.chdir(repo+"../../")
|
||||
s2=packager.backtick(["apt-ftparchive", "release", "."])
|
||||
if os.path.exists(repo + "../../Release"):
|
||||
os.unlink(repo + "../../Release")
|
||||
if os.path.exists(repo + "../../Release.gpg"):
|
||||
os.unlink(repo + "../../Release.gpg")
|
||||
oldpwd = os.getcwd()
|
||||
os.chdir(repo + "../../")
|
||||
s2 = packager.backtick(["apt-ftparchive", "release", "."])
|
||||
try:
|
||||
with open("Release", 'w') as f:
|
||||
f.write(s)
|
||||
@@ -296,20 +315,20 @@ def move_repos_into_place(src, dst):
|
|||
# one. This feels like a lot of hooey for something so trivial.
|
||||
|
||||
# First, make a crispy fresh new directory to put the stuff in.
|
||||
i=0
|
||||
i = 0
|
||||
while True:
|
||||
date_suffix=time.strftime("%Y-%m-%d")
|
||||
dname=dst+".%s.%d" % (date_suffix, i)
|
||||
date_suffix = time.strftime("%Y-%m-%d")
|
||||
dname = dst + ".%s.%d" % (date_suffix, i)
|
||||
try:
|
||||
os.mkdir(dname)
|
||||
break
|
||||
except OSError:
|
||||
exc=sys.exc_value
|
||||
exc = sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
i=i+1
|
||||
i = i + 1
|
||||
|
||||
# Put the stuff in our new directory.
|
||||
for r in os.listdir(src):
|
||||
@@ -317,40 +336,41 @@ def move_repos_into_place(src, dst):
|
|||
|
||||
# Make a symlink to the new directory; the symlink will be renamed
|
||||
# to dst shortly.
|
||||
i=0
|
||||
i = 0
|
||||
while True:
|
||||
tmpnam=dst+".TMP.%d" % i
|
||||
tmpnam = dst + ".TMP.%d" % i
|
||||
try:
|
||||
os.symlink(dname, tmpnam)
|
||||
break
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc=sys.exc_value
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc = sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
i=i+1
|
||||
i = i + 1
|
||||
|
||||
# Make a symlink to the old directory; this symlink will be
|
||||
# renamed shortly, too.
|
||||
oldnam=None
|
||||
oldnam = None
|
||||
if os.path.exists(dst):
|
||||
i=0
|
||||
while True:
|
||||
oldnam=dst+".old.%d" % i
|
||||
try:
|
||||
os.symlink(os.readlink(dst), oldnam)
|
||||
break
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc=sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
i = 0
|
||||
while True:
|
||||
oldnam = dst + ".old.%d" % i
|
||||
try:
|
||||
os.symlink(os.readlink(dst), oldnam)
|
||||
break
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc = sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
|
||||
os.rename(tmpnam, dst)
|
||||
if oldnam:
|
||||
os.rename(oldnam, dst+".old")
|
||||
os.rename(oldnam, dst + ".old")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv)
|
||||
@@ -38,14 +38,14 @@ import tempfile
|
|||
import time
|
||||
|
||||
# The MongoDB names for the architectures we support.
|
||||
ARCH_CHOICES=["x86_64", "arm64"]
|
||||
ARCH_CHOICES = ["x86_64", "arm64"]
|
||||
|
||||
# Made up names for the flavors of distribution we package for.
|
||||
DISTROS=["suse", "debian","redhat","ubuntu", "amazon"]
|
||||
DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon"]
|
||||
|
||||
|
||||
class Spec(object):
|
||||
def __init__(self, ver, gitspec = None, rel = None):
|
||||
def __init__(self, ver, gitspec=None, rel=None):
|
||||
self.ver = ver
|
||||
self.gitspec = gitspec
|
||||
self.rel = rel
|
||||
@@ -54,7 +54,8 @@ class Spec(object):
|
|||
# Patch builds version numbers are in the form: 3.5.5-64-g03945fa-patch-58debcdb3ff1223c9d00005b
|
||||
#
|
||||
def is_nightly(self):
|
||||
return bool(re.search("-$", self.version())) or bool(re.search("\d-\d+-g[0-9a-f]+$", self.version()))
|
||||
return bool(re.search("-$", self.version())) or bool(
|
||||
re.search("\d-\d+-g[0-9a-f]+$", self.version()))
|
||||
|
||||
def is_patch(self):
|
||||
return bool(re.search("\d-\d+-g[0-9a-f]+-patch-[0-9a-f]+$", self.version()))
|
||||
@@ -77,10 +78,10 @@ class Spec(object):
|
|||
def metadata_gitspec(self):
|
||||
"""Git revision to use for spec+control+init+manpage files.
|
||||
The default is the release tag for the version being packaged."""
|
||||
if(self.gitspec):
|
||||
return self.gitspec
|
||||
if (self.gitspec):
|
||||
return self.gitspec
|
||||
else:
|
||||
return 'r' + self.version()
|
||||
return 'r' + self.version()
|
||||
|
||||
def version_better_than(self, version_string):
|
||||
# FIXME: this is wrong, but I'm in a hurry.
|
||||
@@ -88,33 +89,33 @@ class Spec(object):
|
|||
return self.ver > version_string
|
||||
|
||||
def suffix(self):
|
||||
return "-org" if int(self.ver.split(".")[1])%2==0 else "-org-unstable"
|
||||
return "-org" if int(self.ver.split(".")[1]) % 2 == 0 else "-org-unstable"
|
||||
|
||||
def prelease(self):
|
||||
# NOTE: This is only called for RPM packages, and only after
|
||||
# pversion() below has been called. If you want to change this format
|
||||
# and want DEB packages to match, make sure to update pversion()
|
||||
# below
|
||||
#
|
||||
# "N" is either passed in on the command line, or "1"
|
||||
if self.rel:
|
||||
corenum = self.rel
|
||||
else:
|
||||
corenum = 1
|
||||
# NOTE: This is only called for RPM packages, and only after
|
||||
# pversion() below has been called. If you want to change this format
|
||||
# and want DEB packages to match, make sure to update pversion()
|
||||
# below
|
||||
#
|
||||
# "N" is either passed in on the command line, or "1"
|
||||
if self.rel:
|
||||
corenum = self.rel
|
||||
else:
|
||||
corenum = 1
|
||||
|
||||
# Version suffix for RPM packages:
|
||||
# 1) RC's - "0.N.rcX"
|
||||
# 2) Nightly (snapshot) - "0.N.latest"
|
||||
# 3) Patch builds - "0.N.patch.<patch_id>"
|
||||
# 4) Standard release - "N"
|
||||
if self.is_rc():
|
||||
return "0.%s.%s" % (corenum, re.sub('.*-','',self.version()))
|
||||
elif self.is_nightly():
|
||||
return "0.%s.latest" % (corenum)
|
||||
elif self.is_patch():
|
||||
return "0.%s.patch.%s" % (corenum, self.patch_id())
|
||||
else:
|
||||
return str(corenum)
|
||||
# Version suffix for RPM packages:
|
||||
# 1) RC's - "0.N.rcX"
|
||||
# 2) Nightly (snapshot) - "0.N.latest"
|
||||
# 3) Patch builds - "0.N.patch.<patch_id>"
|
||||
# 4) Standard release - "N"
|
||||
if self.is_rc():
|
||||
return "0.%s.%s" % (corenum, re.sub('.*-', '', self.version()))
|
||||
elif self.is_nightly():
|
||||
return "0.%s.latest" % (corenum)
|
||||
elif self.is_patch():
|
||||
return "0.%s.patch.%s" % (corenum, self.patch_id())
|
||||
else:
|
||||
return str(corenum)
|
||||
|
||||
def pversion(self, distro):
|
||||
# Note: Debian packages have funny rules about dashes in
|
||||
@@ -149,9 +150,10 @@ class Spec(object):
|
|||
"""
|
||||
return ".".join(self.ver.split(".")[0:2])
|
||||
|
||||
|
||||
class Distro(object):
|
||||
def __init__(self, string):
|
||||
self.n=string
|
||||
self.n = string
|
||||
|
||||
def name(self):
|
||||
return self.n
|
||||
|
@@ -172,18 +174,18 @@ class Distro(object):
|||
elif arch == "arm64":
|
||||
return "arm64"
|
||||
elif arch.endswith("86"):
|
||||
return "i386"
|
||||
return "i386"
|
||||
else:
|
||||
return "amd64"
|
||||
return "amd64"
|
||||
elif re.search("^(suse|centos|redhat|fedora|amazon)", self.n):
|
||||
if arch == "ppc64le":
|
||||
return "ppc64le"
|
||||
elif arch == "s390x":
|
||||
return "s390x"
|
||||
elif arch.endswith("86"):
|
||||
return "i686"
|
||||
return "i686"
|
||||
else:
|
||||
return "x86_64"
|
||||
return "x86_64"
|
||||
else:
|
||||
raise Exception("BUG: unsupported platform?")
|
||||
|
||||
@@ -215,16 +217,23 @@ class Distro(object):
|
|||
repo_directory = ""
|
||||
|
||||
if spec.is_pre_release():
|
||||
repo_directory = "testing"
|
||||
repo_directory = "testing"
|
||||
else:
|
||||
repo_directory = spec.branch()
|
||||
repo_directory = spec.branch()
|
||||
|
||||
if re.search("^(debian|ubuntu)", self.n):
|
||||
return "repo/apt/%s/dists/%s/mongodb-org/%s/%s/binary-%s/" % (self.n, self.repo_os_version(build_os), repo_directory, self.repo_component(), self.archname(arch))
|
||||
return "repo/apt/%s/dists/%s/mongodb-org/%s/%s/binary-%s/" % (
|
||||
self.n, self.repo_os_version(build_os), repo_directory, self.repo_component(),
|
||||
self.archname(arch))
|
||||
elif re.search("(redhat|fedora|centos|amazon)", self.n):
|
||||
return "repo/yum/%s/%s/mongodb-org/%s/%s/RPMS/" % (self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
|
||||
return "repo/yum/%s/%s/mongodb-org/%s/%s/RPMS/" % (self.n,
|
||||
self.repo_os_version(build_os),
|
||||
repo_directory, self.archname(arch))
|
||||
elif re.search("(suse)", self.n):
|
||||
return "repo/zypper/%s/%s/mongodb-org/%s/%s/RPMS/" % (self.n, self.repo_os_version(build_os), repo_directory, self.archname(arch))
|
||||
return "repo/zypper/%s/%s/mongodb-org/%s/%s/RPMS/" % (self.n,
|
||||
self.repo_os_version(build_os),
|
||||
repo_directory,
|
||||
self.archname(arch))
|
||||
else:
|
||||
raise Exception("BUG: unsupported platform?")
|
||||
|
@@ -232,9 +241,9 @@ class Distro(object):
@ -232,9 +241,9 @@ class Distro(object):
|
|||
"""Return the name of the section/component/pool we are publishing into -
|
||||
e.g. "multiverse" for Ubuntu, "main" for debian."""
|
||||
if self.n == 'ubuntu':
|
||||
return "multiverse"
|
||||
return "multiverse"
|
||||
elif self.n == 'debian':
|
||||
return "main"
|
||||
return "main"
|
||||
else:
|
||||
raise Exception("unsupported distro: %s" % self.n)
|
||||
|
@@ -285,15 +294,19 @@ class Distro(object):
@ -285,15 +294,19 @@ class Distro(object):
|
|||
raise Exception("BUG: unsupported architecture (%s)" % arch)
|
||||
|
||||
if re.search("(suse)", self.n):
|
||||
return [ "suse11", "suse12" ]
|
||||
return ["suse11", "suse12"]
|
||||
elif re.search("(redhat|fedora|centos)", self.n):
|
||||
return [ "rhel70", "rhel71", "rhel72", "rhel62", "rhel55" ]
|
||||
return ["rhel70", "rhel71", "rhel72", "rhel62", "rhel55"]
|
||||
elif self.n == 'amazon':
|
||||
return [ "amazon" ]
|
||||
return ["amazon"]
|
||||
elif self.n == 'ubuntu':
|
||||
return [ "ubuntu1204", "ubuntu1404", "ubuntu1604", ]
|
||||
return [
|
||||
"ubuntu1204",
|
||||
"ubuntu1404",
|
||||
"ubuntu1604",
|
||||
]
|
||||
elif self.n == 'debian':
|
||||
return [ "debian71", "debian81", "debian92" ]
|
||||
return ["debian71", "debian81", "debian92"]
|
||||
else:
|
||||
raise Exception("BUG: unsupported platform?")
|
||||
|
@@ -302,70 +315,79 @@ class Distro(object):
@ -302,70 +315,79 @@ class Distro(object):
|
|||
"el6" for rhel 6.x, return anything else unchanged"""
|
||||
|
||||
if self.n == 'amazon':
|
||||
return 'amzn1'
|
||||
return 'amzn1'
|
||||
else:
|
||||
return re.sub(r'^rh(el\d).*$', r'\1', build_os)
|
||||
return re.sub(r'^rh(el\d).*$', r'\1', build_os)
|
||||
|
||||
|
||||
def get_args(distros, arch_choices):
|
||||
|
||||
distro_choices=[]
|
||||
distro_choices = []
|
||||
for distro in distros:
|
||||
for arch in arch_choices:
|
||||
distro_choices.extend(distro.build_os(arch))
|
||||
distro_choices.extend(distro.build_os(arch))
|
||||
|
||||
parser = argparse.ArgumentParser(description='Build MongoDB Packages')
|
||||
parser.add_argument("-s", "--server-version", help="Server version to build (e.g. 2.7.8-rc0)", required=True)
|
||||
parser.add_argument("-m", "--metadata-gitspec", help="Gitspec to use for package metadata files", required=False)
|
||||
parser.add_argument("-r", "--release-number", help="RPM release number base", type=int, required=False)
|
||||
parser.add_argument("-d", "--distros", help="Distros to build for", choices=distro_choices, required=False, default=[], action='append')
|
||||
parser.add_argument("-s", "--server-version", help="Server version to build (e.g. 2.7.8-rc0)",
|
||||
required=True)
|
||||
parser.add_argument("-m", "--metadata-gitspec",
|
||||
help="Gitspec to use for package metadata files", required=False)
|
||||
parser.add_argument("-r", "--release-number", help="RPM release number base", type=int,
|
||||
required=False)
|
||||
parser.add_argument("-d", "--distros", help="Distros to build for", choices=distro_choices,
|
||||
required=False, default=[], action='append')
|
||||
parser.add_argument("-p", "--prefix", help="Directory to build into", required=False)
|
||||
parser.add_argument("-a", "--arches", help="Architecture to build", choices=arch_choices, default=[], required=False, action='append')
|
||||
parser.add_argument("-t", "--tarball", help="Local tarball to package", required=True, type=lambda x: is_valid_file(parser, x))
|
||||
parser.add_argument("-a", "--arches", help="Architecture to build", choices=arch_choices,
|
||||
default=[], required=False, action='append')
|
||||
parser.add_argument("-t", "--tarball", help="Local tarball to package", required=True,
|
||||
type=lambda x: is_valid_file(parser, x))
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
if len(args.distros) * len(args.arches) > 1 and args.tarball:
|
||||
parser.error("Can only specify local tarball with one distro/arch combination")
|
||||
parser.error("Can only specify local tarball with one distro/arch combination")
|
||||
|
||||
return args
|
||||
|
||||
|
||||
def main(argv):
|
||||
|
||||
distros=[Distro(distro) for distro in DISTROS]
|
||||
distros = [Distro(distro) for distro in DISTROS]
|
||||
|
||||
args = get_args(distros, ARCH_CHOICES)
|
||||
|
||||
spec = Spec(args.server_version, args.metadata_gitspec, args.release_number)
|
||||
|
||||
oldcwd=os.getcwd()
|
||||
srcdir=oldcwd+"/../"
|
||||
oldcwd = os.getcwd()
|
||||
srcdir = oldcwd + "/../"
|
||||
|
||||
# Where to do all of our work. Use a randomly-created directory if one
|
||||
# is not passed in.
|
||||
prefix = args.prefix
|
||||
if prefix is None:
|
||||
prefix = tempfile.mkdtemp()
|
||||
prefix = tempfile.mkdtemp()
|
||||
print "Working in directory %s" % prefix
|
||||
|
||||
os.chdir(prefix)
|
||||
try:
|
||||
# Build a package for each distro/spec/arch tuple, and
|
||||
# accumulate the repository-layout directories.
|
||||
for (distro, arch) in crossproduct(distros, args.arches):
|
||||
# Build a package for each distro/spec/arch tuple, and
|
||||
# accumulate the repository-layout directories.
|
||||
for (distro, arch) in crossproduct(distros, args.arches):
|
||||
|
||||
for build_os in distro.build_os(arch):
|
||||
if build_os in args.distros or not args.distros:
|
||||
for build_os in distro.build_os(arch):
|
||||
if build_os in args.distros or not args.distros:
|
||||
|
||||
filename = tarfile(build_os, arch, spec)
|
||||
ensure_dir(filename)
|
||||
shutil.copyfile(args.tarball, filename)
|
||||
filename = tarfile(build_os, arch, spec)
|
||||
ensure_dir(filename)
|
||||
shutil.copyfile(args.tarball, filename)
|
||||
|
||||
repo = make_package(distro, build_os, arch, spec, srcdir)
|
||||
make_repo(repo, distro, build_os, spec)
|
||||
repo = make_package(distro, build_os, arch, spec, srcdir)
|
||||
make_repo(repo, distro, build_os, spec)
|
||||
|
||||
finally:
|
||||
os.chdir(oldcwd)
|
||||
|
||||
|
||||
def crossproduct(*seqs):
|
||||
"""A generator for iterating all the tuples consisting of elements
|
||||
of seqs."""
|
||||
@@ -378,16 +400,18 @@ def crossproduct(*seqs):
|
|||
else:
|
||||
for lst in crossproduct(*seqs[:-1]):
|
||||
for i in seqs[-1]:
|
||||
lst2=list(lst)
|
||||
lst2 = list(lst)
|
||||
lst2.append(i)
|
||||
yield lst2
|
||||
|
||||
|
||||
def sysassert(argv):
|
||||
"""Run argv and assert that it exited with status 0."""
|
||||
print "In %s, running %s" % (os.getcwd(), " ".join(argv))
|
||||
sys.stdout.flush()
|
||||
sys.stderr.flush()
|
||||
assert(subprocess.Popen(argv).wait()==0)
|
||||
assert (subprocess.Popen(argv).wait() == 0)
|
||||
|
||||
|
||||
def backtick(argv):
|
||||
"""Run argv and return its output string."""
|
||||
@@ -396,11 +420,13 @@ def backtick(argv):
|
|||
sys.stderr.flush()
|
||||
return subprocess.Popen(argv, stdout=subprocess.PIPE).communicate()[0]
|
||||
|
||||
|
||||
def tarfile(build_os, arch, spec):
|
||||
"""Return the location where we store the downloaded tarball for
|
||||
this package"""
|
||||
return "dl/mongodb-linux-%s-%s-%s.tar.gz" % (spec.version(), build_os, arch)
|
||||
|
||||
|
||||
def setupdir(distro, build_os, arch, spec):
|
||||
# The setupdir will be a directory containing all inputs to the
|
||||
# distro's packaging tools (e.g., package metadata files, init
|
||||
@@ -408,11 +434,13 @@ def setupdir(distro, build_os, arch, spec):
|
|||
# the following format string is unclear, an example setupdir
|
||||
# would be dst/x86_64/debian-sysvinit/wheezy/mongodb-org-unstable/
|
||||
# or dst/x86_64/redhat/rhel55/mongodb-org-unstable/
|
||||
return "dst/%s/%s/%s/%s%s-%s/" % (arch, distro.name(), build_os, distro.pkgbase(), spec.suffix(), spec.pversion(distro))
|
||||
return "dst/%s/%s/%s/%s%s-%s/" % (arch, distro.name(), build_os, distro.pkgbase(),
|
||||
spec.suffix(), spec.pversion(distro))
|
||||
|
||||
|
||||
def unpack_binaries_into(build_os, arch, spec, where):
|
||||
"""Unpack the tarfile for (build_os, arch, spec) into directory where."""
|
||||
rootdir=os.getcwd()
|
||||
rootdir = os.getcwd()
|
||||
ensure_dir(where)
|
||||
# Note: POSIX tar doesn't require support for gtar's "-C" option,
|
||||
# and Python's tarfile module prior to Python 2.7 doesn't have the
|
||||
@@ -420,24 +448,25 @@ def unpack_binaries_into(build_os, arch, spec, where):
|
|||
# thing and chdir into where and run tar there.
|
||||
os.chdir(where)
|
||||
try:
|
||||
sysassert(["tar", "xvzf", rootdir+"/"+tarfile(build_os, arch, spec)])
|
||||
sysassert(["tar", "xvzf", rootdir + "/" + tarfile(build_os, arch, spec)])
|
||||
release_dir = glob('mongodb-linux-*')[0]
|
||||
for releasefile in "bin", "GNU-AGPL-3.0", "README", "THIRD-PARTY-NOTICES", "MPL-2":
|
||||
print "moving file: %s/%s" % (release_dir, releasefile)
|
||||
os.rename("%s/%s" % (release_dir, releasefile), releasefile)
|
||||
os.rmdir(release_dir)
|
||||
except Exception:
|
||||
exc=sys.exc_value
|
||||
exc = sys.exc_value
|
||||
os.chdir(rootdir)
|
||||
raise exc
|
||||
os.chdir(rootdir)
|
||||
|
||||
|
||||
def make_package(distro, build_os, arch, spec, srcdir):
|
||||
"""Construct the package for (arch, distro, spec), getting
|
||||
packaging files from srcdir and any user-specified suffix from
|
||||
suffixes"""
|
||||
|
||||
sdir=setupdir(distro, build_os, arch, spec)
|
||||
sdir = setupdir(distro, build_os, arch, spec)
|
||||
ensure_dir(sdir)
|
||||
# Note that the RPM packages get their man pages from the debian
|
||||
# directory, so the debian directory is needed in all cases (and
|
||||
@@ -445,7 +474,11 @@ def make_package(distro, build_os, arch, spec, srcdir):
|
|||
for pkgdir in ["debian", "rpm"]:
|
||||
print "Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir)
|
||||
# FIXME: sh-dash-cee is bad. See if tarfile can do this.
|
||||
sysassert(["sh", "-c", "(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" % (srcdir, spec.metadata_gitspec(), pkgdir, sdir)])
|
||||
sysassert([
|
||||
"sh", "-c",
|
||||
"(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" %
|
||||
(srcdir, spec.metadata_gitspec(), pkgdir, sdir)
|
||||
])
|
||||
# Splat the binaries under sdir. The "build" stages of the
|
||||
# packaging infrastructure will move the files to wherever they
|
||||
# need to go.
|
||||
@@ -453,9 +486,10 @@ def make_package(distro, build_os, arch, spec, srcdir):
|
|||
# Remove the mongoreplay binary due to libpcap dynamic
|
||||
# linkage.
|
||||
if os.path.exists(sdir + "bin/mongoreplay"):
|
||||
os.unlink(sdir + "bin/mongoreplay")
|
||||
os.unlink(sdir + "bin/mongoreplay")
|
||||
return distro.make_pkg(build_os, arch, spec, srcdir)
|
||||
|
||||
|
||||
def make_repo(repodir, distro, build_os, spec):
|
||||
if re.search("(debian|ubuntu)", repodir):
|
||||
make_deb_repo(repodir, distro, build_os, spec)
|
||||
@@ -464,81 +498,92 @@ def make_repo(repodir, distro, build_os, spec):
|
|||
else:
|
||||
raise Exception("BUG: unsupported platform?")
|
||||
|
||||
|
||||
def make_deb(distro, build_os, arch, spec, srcdir):
|
||||
# I can't remember the details anymore, but the initscript/upstart
|
||||
# job files' names must match the package name in some way; and
|
||||
# see also the --name flag to dh_installinit in the generated
|
||||
# debian/rules file.
|
||||
suffix=spec.suffix()
|
||||
sdir=setupdir(distro, build_os, arch, spec)
|
||||
suffix = spec.suffix()
|
||||
sdir = setupdir(distro, build_os, arch, spec)
|
||||
if re.search("debian", distro.name()):
|
||||
os.unlink(sdir+"debian/mongod.upstart")
|
||||
os.unlink(sdir + "debian/mongod.upstart")
|
||||
if build_os == "debian71":
|
||||
os.link(sdir+"debian/init.d", sdir+"debian/%s%s-server.mongod.init" % (distro.pkgbase(), suffix))
|
||||
os.unlink(sdir+"debian/mongod.service")
|
||||
os.link(sdir + "debian/init.d",
|
||||
sdir + "debian/%s%s-server.mongod.init" % (distro.pkgbase(), suffix))
|
||||
os.unlink(sdir + "debian/mongod.service")
|
||||
else:
|
||||
os.link(sdir+"debian/mongod.service", sdir+"debian/%s%s-server.mongod.service" % (distro.pkgbase(), suffix))
|
||||
os.unlink(sdir+"debian/init.d")
|
||||
os.link(sdir + "debian/mongod.service",
|
||||
sdir + "debian/%s%s-server.mongod.service" % (distro.pkgbase(), suffix))
|
||||
os.unlink(sdir + "debian/init.d")
|
||||
elif re.search("ubuntu", distro.name()):
|
||||
os.unlink(sdir+"debian/init.d")
|
||||
os.unlink(sdir + "debian/init.d")
|
||||
if build_os in ("ubuntu1204", "ubuntu1404", "ubuntu1410"):
|
||||
os.link(sdir+"debian/mongod.upstart", sdir+"debian/%s%s-server.mongod.upstart" % (distro.pkgbase(), suffix))
|
||||
os.unlink(sdir+"debian/mongod.service")
|
||||
os.link(sdir + "debian/mongod.upstart",
|
||||
sdir + "debian/%s%s-server.mongod.upstart" % (distro.pkgbase(), suffix))
|
||||
os.unlink(sdir + "debian/mongod.service")
|
||||
else:
|
||||
os.link(sdir+"debian/mongod.service", sdir+"debian/%s%s-server.mongod.service" % (distro.pkgbase(), suffix))
|
||||
os.unlink(sdir+"debian/mongod.upstart")
|
||||
os.link(sdir + "debian/mongod.service",
|
||||
sdir + "debian/%s%s-server.mongod.service" % (distro.pkgbase(), suffix))
|
||||
os.unlink(sdir + "debian/mongod.upstart")
|
||||
else:
|
||||
raise Exception("unknown debianoid flavor: not debian or ubuntu?")
|
||||
# Rewrite the control and rules files
|
||||
write_debian_changelog(sdir+"debian/changelog", spec, srcdir)
|
||||
distro_arch=distro.archname(arch)
|
||||
sysassert(["cp", "-v", srcdir+"debian/%s%s.control" % (distro.pkgbase(), suffix), sdir+"debian/control"])
|
||||
sysassert(["cp", "-v", srcdir+"debian/%s%s.rules" % (distro.pkgbase(), suffix), sdir+"debian/rules"])
|
||||
|
||||
write_debian_changelog(sdir + "debian/changelog", spec, srcdir)
|
||||
distro_arch = distro.archname(arch)
|
||||
sysassert([
|
||||
"cp", "-v", srcdir + "debian/%s%s.control" % (distro.pkgbase(), suffix),
|
||||
sdir + "debian/control"
|
||||
])
|
||||
sysassert([
|
||||
"cp", "-v", srcdir + "debian/%s%s.rules" % (distro.pkgbase(), suffix), sdir + "debian/rules"
|
||||
])
|
||||
|
||||
# old non-server-package postinst will be hanging around for old versions
|
||||
#
|
||||
if os.path.exists(sdir+"debian/postinst"):
|
||||
os.unlink(sdir+"debian/postinst")
|
||||
if os.path.exists(sdir + "debian/postinst"):
|
||||
os.unlink(sdir + "debian/postinst")
|
||||
|
||||
# copy our postinst files
|
||||
#
|
||||
sysassert(["sh", "-c", "cp -v \"%sdebian/\"*.postinst \"%sdebian/\""%(srcdir, sdir)])
|
||||
sysassert(["sh", "-c", "cp -v \"%sdebian/\"*.postinst \"%sdebian/\"" % (srcdir, sdir)])
|
||||
|
||||
# Do the packaging.
|
||||
oldcwd=os.getcwd()
|
||||
oldcwd = os.getcwd()
|
||||
try:
|
||||
os.chdir(sdir)
|
||||
sysassert(["dpkg-buildpackage", "-uc", "-us", "-a" + distro_arch])
|
||||
finally:
|
||||
os.chdir(oldcwd)
|
||||
r=distro.repodir(arch, build_os, spec)
|
||||
r = distro.repodir(arch, build_os, spec)
|
||||
ensure_dir(r)
|
||||
# FIXME: see if shutil.copyfile or something can do this without
|
||||
# much pain.
|
||||
sysassert(["sh", "-c", "cp -v \"%s/../\"*.deb \"%s\""%(sdir, r)])
|
||||
sysassert(["sh", "-c", "cp -v \"%s/../\"*.deb \"%s\"" % (sdir, r)])
|
||||
return r
|
||||
|
||||
|
||||
def make_deb_repo(repo, distro, build_os, spec):
|
||||
# Note: the Debian repository Packages files must be generated
|
||||
# very carefully in order to be usable.
|
||||
oldpwd=os.getcwd()
|
||||
os.chdir(repo+"../../../../../../")
|
||||
oldpwd = os.getcwd()
|
||||
os.chdir(repo + "../../../../../../")
|
||||
try:
|
||||
dirs=set([os.path.dirname(deb)[2:] for deb in backtick(["find", ".", "-name", "*.deb"]).split()])
|
||||
dirs = set(
|
||||
[os.path.dirname(deb)[2:] for deb in backtick(["find", ".", "-name", "*.deb"]).split()])
|
||||
for d in dirs:
|
||||
s=backtick(["dpkg-scanpackages", d, "/dev/null"])
|
||||
with open(d+"/Packages", "w") as f:
|
||||
s = backtick(["dpkg-scanpackages", d, "/dev/null"])
|
||||
with open(d + "/Packages", "w") as f:
|
||||
f.write(s)
|
||||
b=backtick(["gzip", "-9c", d+"/Packages"])
|
||||
with open(d+"/Packages.gz", "wb") as f:
|
||||
b = backtick(["gzip", "-9c", d + "/Packages"])
|
||||
with open(d + "/Packages.gz", "wb") as f:
|
||||
f.write(b)
|
||||
finally:
|
||||
os.chdir(oldpwd)
|
||||
# Notes: the Release{,.gpg} files must live in a special place,
|
||||
# and must be created after all the Packages.gz files have been
|
||||
# done.
|
||||
s="""Origin: mongodb
|
||||
s = """Origin: mongodb
|
||||
Label: mongodb
|
||||
Suite: %s
|
||||
Codename: %s/mongodb-org
|
||||
|
|
@ -546,13 +591,13 @@ Architectures: amd64 arm64
|
|||
Components: %s
|
||||
Description: MongoDB packages
|
||||
""" % (distro.repo_os_version(build_os), distro.repo_os_version(build_os), distro.repo_component())
|
||||
if os.path.exists(repo+"../../Release"):
|
||||
os.unlink(repo+"../../Release")
|
||||
if os.path.exists(repo+"../../Release.gpg"):
|
||||
os.unlink(repo+"../../Release.gpg")
|
||||
oldpwd=os.getcwd()
|
||||
os.chdir(repo+"../../")
|
||||
s2=backtick(["apt-ftparchive", "release", "."])
|
||||
if os.path.exists(repo + "../../Release"):
|
||||
os.unlink(repo + "../../Release")
|
||||
if os.path.exists(repo + "../../Release.gpg"):
|
||||
os.unlink(repo + "../../Release.gpg")
|
||||
oldpwd = os.getcwd()
|
||||
os.chdir(repo + "../../")
|
||||
s2 = backtick(["apt-ftparchive", "release", "."])
|
||||
try:
|
||||
with open("Release", 'w') as f:
|
||||
f.write(s)
|
||||
|
|
@ -568,20 +613,20 @@ def move_repos_into_place(src, dst):
|
|||
# one. This feels like a lot of hooey for something so trivial.
|
||||
|
||||
# First, make a crispy fresh new directory to put the stuff in.
|
||||
i=0
|
||||
i = 0
|
||||
while True:
|
||||
date_suffix=time.strftime("%Y-%m-%d")
|
||||
dname=dst+".%s.%d" % (date_suffix, i)
|
||||
date_suffix = time.strftime("%Y-%m-%d")
|
||||
dname = dst + ".%s.%d" % (date_suffix, i)
|
||||
try:
|
||||
os.mkdir(dname)
|
||||
break
|
||||
except OSError:
|
||||
exc=sys.exc_value
|
||||
exc = sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
i=i+1
|
||||
i = i + 1
|
||||
|
||||
# Put the stuff in our new directory.
|
||||
for r in os.listdir(src):
|
||||
|
|
@ -589,65 +634,69 @@ def move_repos_into_place(src, dst):
|
|||
|
||||
# Make a symlink to the new directory; the symlink will be renamed
|
||||
# to dst shortly.
|
||||
i=0
|
||||
i = 0
|
||||
while True:
|
||||
tmpnam=dst+".TMP.%d" % i
|
||||
tmpnam = dst + ".TMP.%d" % i
|
||||
try:
|
||||
os.symlink(dname, tmpnam)
|
||||
break
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc=sys.exc_value
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc = sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
i=i+1
|
||||
i = i + 1
|
||||
|
||||
# Make a symlink to the old directory; this symlink will be
|
||||
# renamed shortly, too.
|
||||
oldnam=None
|
||||
oldnam = None
|
||||
if os.path.exists(dst):
|
||||
i=0
|
||||
while True:
|
||||
oldnam=dst+".old.%d" % i
|
||||
try:
|
||||
os.symlink(os.readlink(dst), oldnam)
|
||||
break
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc=sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
i = 0
|
||||
while True:
|
||||
oldnam = dst + ".old.%d" % i
|
||||
try:
|
||||
os.symlink(os.readlink(dst), oldnam)
|
||||
break
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc = sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
|
||||
os.rename(tmpnam, dst)
|
||||
if oldnam:
|
||||
os.rename(oldnam, dst+".old")
|
||||
os.rename(oldnam, dst + ".old")
|
||||
|
||||
|
||||
def write_debian_changelog(path, spec, srcdir):
|
||||
oldcwd=os.getcwd()
|
||||
oldcwd = os.getcwd()
|
||||
os.chdir(srcdir)
|
||||
preamble=""
|
||||
preamble = ""
|
||||
try:
|
||||
s=preamble+backtick(["sh", "-c", "git archive %s debian/changelog | tar xOf -" % spec.metadata_gitspec()])
|
||||
s = preamble + backtick(
|
||||
["sh", "-c",
|
||||
"git archive %s debian/changelog | tar xOf -" % spec.metadata_gitspec()])
|
||||
finally:
|
||||
os.chdir(oldcwd)
|
||||
lines=s.split("\n")
|
||||
lines = s.split("\n")
|
||||
# If the first line starts with "mongodb", it's not a revision
|
||||
# preamble, and so frob the version number.
|
||||
lines[0]=re.sub("^mongodb \\(.*\\)", "mongodb (%s)" % (spec.pversion(Distro("debian"))), lines[0])
|
||||
lines[0] = re.sub("^mongodb \\(.*\\)", "mongodb (%s)" % (spec.pversion(Distro("debian"))),
|
||||
lines[0])
|
||||
# Rewrite every changelog entry starting in mongodb<space>
|
||||
lines=[re.sub("^mongodb ", "mongodb%s " % (spec.suffix()), l) for l in lines]
|
||||
lines=[re.sub("^ --", " --", l) for l in lines]
|
||||
s="\n".join(lines)
|
||||
lines = [re.sub("^mongodb ", "mongodb%s " % (spec.suffix()), l) for l in lines]
|
||||
lines = [re.sub("^ --", " --", l) for l in lines]
|
||||
s = "\n".join(lines)
|
||||
with open(path, 'w') as f:
|
||||
f.write(s)
|
||||
|
||||
|
||||
def make_rpm(distro, build_os, arch, spec, srcdir):
|
||||
# Create the specfile.
|
||||
suffix=spec.suffix()
|
||||
sdir=setupdir(distro, build_os, arch, spec)
|
||||
suffix = spec.suffix()
|
||||
sdir = setupdir(distro, build_os, arch, spec)
|
||||
|
||||
specfile = srcdir + "rpm/mongodb%s.spec" % suffix
|
||||
init_spec = specfile.replace(".spec", "-init.spec")
|
||||
|
|
@ -662,8 +711,8 @@ def make_rpm(distro, build_os, arch, spec, srcdir):
|
|||
# distros.
|
||||
#
|
||||
if distro.name() == "suse" and distro.repo_os_version(build_os) in ("10", "11"):
|
||||
os.unlink(sdir+"rpm/init.d-mongod")
|
||||
os.link(sdir+"rpm/init.d-mongod.suse", sdir+"rpm/init.d-mongod")
|
||||
os.unlink(sdir + "rpm/init.d-mongod")
|
||||
os.link(sdir + "rpm/init.d-mongod.suse", sdir + "rpm/init.d-mongod")
|
||||
|
||||
os.unlink(specfile)
|
||||
os.link(init_spec, specfile)
|
||||
|
|
@ -674,10 +723,10 @@ def make_rpm(distro, build_os, arch, spec, srcdir):
|
|||
os.unlink(specfile)
|
||||
os.link(init_spec, specfile)
|
||||
|
||||
topdir=ensure_dir('%s/rpmbuild/%s/' % (os.getcwd(), build_os))
|
||||
topdir = ensure_dir('%s/rpmbuild/%s/' % (os.getcwd(), build_os))
|
||||
for subdir in ["BUILD", "RPMS", "SOURCES", "SPECS", "SRPMS"]:
|
||||
ensure_dir("%s/%s/" % (topdir, subdir))
|
||||
distro_arch=distro.archname(arch)
|
||||
distro_arch = distro.archname(arch)
|
||||
# RPM tools take these macro files that define variables in
|
||||
# RPMland. Unfortunately, there's no way to tell RPM tools to use
|
||||
# a given file *in addition* to the files that it would already
|
||||
|
|
@ -697,53 +746,68 @@ def make_rpm(distro, build_os, arch, spec, srcdir):
|
|||
# On RHEL systems, --rcfile will generally be used and
|
||||
# --macros will be used in Ubuntu.
|
||||
#
|
||||
macrofiles=[l for l in backtick(["rpm", "--showrc"]).split("\n") if l.startswith("macrofiles")]
|
||||
flags=[]
|
||||
macropath=os.getcwd()+"/macros"
|
||||
macrofiles = [
|
||||
l for l in backtick(["rpm", "--showrc"]).split("\n") if l.startswith("macrofiles")
|
||||
]
|
||||
flags = []
|
||||
macropath = os.getcwd() + "/macros"
|
||||
|
||||
write_rpm_macros_file(macropath, topdir, distro.release_dist(build_os))
|
||||
if len(macrofiles)>0:
|
||||
macrofiles=macrofiles[0]+":"+macropath
|
||||
rcfile=os.getcwd()+"/rpmrc"
|
||||
if len(macrofiles) > 0:
|
||||
macrofiles = macrofiles[0] + ":" + macropath
|
||||
rcfile = os.getcwd() + "/rpmrc"
|
||||
write_rpmrc_file(rcfile, macrofiles)
|
||||
flags=["--rcfile", rcfile]
|
||||
flags = ["--rcfile", rcfile]
|
||||
else:
|
||||
# This hard-coded hooey came from some box running RPM
|
||||
# 4.4.2.3. It may not work over time, but RPM isn't sanely
|
||||
# configurable.
|
||||
flags=["--macros", "/usr/lib/rpm/macros:/usr/lib/rpm/%s-linux/macros:/usr/lib/rpm/suse/macros:/etc/rpm/macros.*:/etc/rpm/macros:/etc/rpm/%s-linux/macros:~/.rpmmacros:%s" % (distro_arch, distro_arch, macropath)]
|
||||
flags = [
|
||||
"--macros",
|
||||
"/usr/lib/rpm/macros:/usr/lib/rpm/%s-linux/macros:/usr/lib/rpm/suse/macros:/etc/rpm/macros.*:/etc/rpm/macros:/etc/rpm/%s-linux/macros:~/.rpmmacros:%s"
|
||||
% (distro_arch, distro_arch, macropath)
|
||||
]
|
||||
# Put the specfile and the tar'd up binaries and stuff in
|
||||
# place.
|
||||
#
|
||||
# The version of rpm and rpm tools in RHEL 5.5 can't interpolate the
|
||||
# %{dynamic_version} macro, so do it manually
|
||||
with open(specfile, "r") as spec_source:
|
||||
with open(topdir+"SPECS/" + os.path.basename(specfile), "w") as spec_dest:
|
||||
for line in spec_source:
|
||||
line = line.replace('%{dynamic_version}', spec.pversion(distro))
|
||||
line = line.replace('%{dynamic_release}', spec.prelease())
|
||||
spec_dest.write(line)
|
||||
with open(topdir + "SPECS/" + os.path.basename(specfile), "w") as spec_dest:
|
||||
for line in spec_source:
|
||||
line = line.replace('%{dynamic_version}', spec.pversion(distro))
|
||||
line = line.replace('%{dynamic_release}', spec.prelease())
|
||||
spec_dest.write(line)
|
||||
|
||||
oldcwd=os.getcwd()
|
||||
os.chdir(sdir+"/../")
|
||||
oldcwd = os.getcwd()
|
||||
os.chdir(sdir + "/../")
|
||||
try:
|
||||
sysassert(["tar", "-cpzf", topdir+"SOURCES/mongodb%s-%s.tar.gz" % (suffix, spec.pversion(distro)), os.path.basename(os.path.dirname(sdir))])
|
||||
sysassert([
|
||||
"tar", "-cpzf",
|
||||
topdir + "SOURCES/mongodb%s-%s.tar.gz" % (suffix, spec.pversion(distro)),
|
||||
os.path.basename(os.path.dirname(sdir))
|
||||
])
|
||||
finally:
|
||||
os.chdir(oldcwd)
|
||||
# Do the build.
|
||||
|
||||
flags.extend(["-D", "dynamic_version " + spec.pversion(distro), "-D", "dynamic_release " + spec.prelease(), "-D", "_topdir " + topdir])
|
||||
sysassert(["rpmbuild", "-ba", "--target", distro_arch] + flags + ["%s/SPECS/mongodb%s.spec" % (topdir, suffix)])
|
||||
r=distro.repodir(arch, build_os, spec)
|
||||
flags.extend([
|
||||
"-D", "dynamic_version " + spec.pversion(distro), "-D",
|
||||
"dynamic_release " + spec.prelease(), "-D", "_topdir " + topdir
|
||||
])
|
||||
sysassert(["rpmbuild", "-ba", "--target", distro_arch] + flags +
|
||||
["%s/SPECS/mongodb%s.spec" % (topdir, suffix)])
|
||||
r = distro.repodir(arch, build_os, spec)
|
||||
ensure_dir(r)
|
||||
# FIXME: see if some combination of shutil.copy<hoohah> and glob
|
||||
# can do this without shelling out.
|
||||
sysassert(["sh", "-c", "cp -v \"%s/RPMS/%s/\"*.rpm \"%s\""%(topdir, distro_arch, r)])
|
||||
sysassert(["sh", "-c", "cp -v \"%s/RPMS/%s/\"*.rpm \"%s\"" % (topdir, distro_arch, r)])
|
||||
return r
|
||||
|
||||
|
||||
def make_rpm_repo(repo):
|
||||
oldpwd=os.getcwd()
|
||||
os.chdir(repo+"../")
|
||||
oldpwd = os.getcwd()
|
||||
os.chdir(repo + "../")
|
||||
try:
|
||||
sysassert(["createrepo", "."])
|
||||
finally:
|
||||
|
|
@ -754,26 +818,29 @@ def write_rpmrc_file(path, string):
|
|||
with open(path, 'w') as f:
|
||||
f.write(string)
|
||||
|
||||
|
||||
def write_rpm_macros_file(path, topdir, release_dist):
|
||||
with open(path, 'w') as f:
|
||||
f.write("%%_topdir %s\n" % topdir)
|
||||
f.write("%%dist .%s\n" % release_dist)
|
||||
f.write("%_use_internal_dependency_generator 0\n")
|
||||
|
||||
|
||||
def ensure_dir(filename):
|
||||
"""Make sure that the directory that's the dirname part of
|
||||
filename exists, and return filename."""
|
||||
dirpart = os.path.dirname(filename)
|
||||
try:
|
||||
os.makedirs(dirpart)
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc=sys.exc_value
|
||||
except OSError: # as exc: # Python >2.5
|
||||
exc = sys.exc_value
|
||||
if exc.errno == errno.EEXIST:
|
||||
pass
|
||||
else:
|
||||
raise exc
|
||||
return filename
|
||||
|
||||
|
||||
def is_valid_file(parser, filename):
|
||||
"""Check if file exists, and return the filename"""
|
||||
if not os.path.exists(filename):
|
||||
|
|
@ -781,5 +848,6 @@ def is_valid_file(parser, filename):
|
|||
else:
|
||||
return filename
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(sys.argv)
|
||||
|
|
|
|||
|
|
@ -15,7 +15,6 @@ import optparse
|
|||
import os
|
||||
import sys
|
||||
|
||||
|
||||
# Get relative imports to work when the package is not installed on the PYTHONPATH.
|
||||
if __name__ == "__main__" and __package__ is None:
|
||||
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
|
@ -31,12 +30,10 @@ def main():
|
|||
|
||||
usage = "usage: %prog [options] report.json"
|
||||
parser = optparse.OptionParser(usage=usage)
|
||||
parser.add_option("-o", "--output-file",
|
||||
dest="outfile",
|
||||
default="-",
|
||||
help="If '-', then the report file is written to stdout."
|
||||
" Any other value is treated as the output file name. By default,"
|
||||
" output is written to stdout.")
|
||||
parser.add_option("-o", "--output-file", dest="outfile", default="-",
|
||||
help=("If '-', then the report file is written to stdout."
|
||||
" Any other value is treated as the output file name. By default,"
|
||||
" output is written to stdout."))
|
||||
|
||||
(options, args) = parser.parse_args()
|
||||
|
||||
|
|
@ -64,5 +61,6 @@ def main():
|
|||
else:
|
||||
print(json.dumps(result_report))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
#!/usr/bin/env python2
|
||||
|
||||
""" This program stamps the shared scons directory with a timestamp so we can
|
||||
determine the last prune time and run the prune script on a schedule.
|
||||
It is meant to be invoked from the shell:
|
||||
|
|
@ -76,12 +75,14 @@ def check_last_prune_time(args):
|
|||
# A 0 return code signals our Evergreen task that we should run the prune script.
|
||||
# Otherwise, return 1 and skip pruning.
|
||||
if diff.total_seconds() > seconds_since_last_prune:
|
||||
print("It has been {0:.2f} seconds ({1:.2f} hours) since last prune."
|
||||
.format(diff.total_seconds(), diff.total_seconds()/60/60))
|
||||
print("It has been {0:.2f} seconds ({1:.2f} hours) since last prune.".format(
|
||||
diff.total_seconds(),
|
||||
diff.total_seconds() / 60 / 60))
|
||||
sys.exit(0)
|
||||
else:
|
||||
print("It has been {0:.2f} seconds ({1:.2f} hours) since last prune."
|
||||
.format(diff.total_seconds(), diff.total_seconds()/60/60))
|
||||
print("It has been {0:.2f} seconds ({1:.2f} hours) since last prune.".format(
|
||||
diff.total_seconds(),
|
||||
diff.total_seconds() / 60 / 60))
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -57,9 +57,9 @@ def get_py_linter(linter_filter):
|
|||
def is_interesting_file(file_name):
|
||||
# type: (str) -> bool
|
||||
""""Return true if this file should be checked."""
|
||||
return file_name.endswith(".py") and (file_name.startswith("buildscripts/idl") or
|
||||
file_name.startswith("buildscripts/linter") or
|
||||
file_name.startswith("buildscripts/pylinters.py"))
|
||||
return file_name.endswith(".py") and (file_name.startswith("buildscripts/idl")
|
||||
or file_name.startswith("buildscripts/linter")
|
||||
or file_name.startswith("buildscripts/pylinters.py"))
|
||||
|
||||
|
||||
def _get_build_dir():
|
||||
|
|
@ -161,14 +161,12 @@ def main():
|
|||
dest_prefix = "linter_"
|
||||
for linter1 in linters:
|
||||
msg = 'Path to linter %s' % (linter1.cmd_name)
|
||||
parser.add_argument(
|
||||
'--' + linter1.cmd_name, type=str, help=msg, dest=dest_prefix + linter1.cmd_name)
|
||||
parser.add_argument('--' + linter1.cmd_name, type=str, help=msg,
|
||||
dest=dest_prefix + linter1.cmd_name)
|
||||
|
||||
parser.add_argument(
|
||||
'--linters',
|
||||
type=str,
|
||||
help="Comma separated list of filters to use, defaults to 'all'",
|
||||
default="all")
|
||||
parser.add_argument('--linters', type=str,
|
||||
help="Comma separated list of filters to use, defaults to 'all'",
|
||||
default="all")
|
||||
|
||||
parser.add_argument('-v', "--verbose", action='store_true', help="Enable verbose logging")
|
||||
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
"""Remote access utilities, via ssh & scp."""
|
||||
|
||||
from __future__ import print_function
|
||||
|
|
@ -21,8 +20,7 @@ if os.name == "posix" and sys.version_info[0] == 2:
|
|||
import warnings
|
||||
warnings.warn(("Falling back to using the subprocess module because subprocess32 isn't"
|
||||
" available. When using the subprocess module, a child process may trigger"
|
||||
" an invalid free(). See SERVER-22219 for more details."),
|
||||
RuntimeWarning)
|
||||
" an invalid free(). See SERVER-22219 for more details."), RuntimeWarning)
|
||||
import subprocess
|
||||
else:
|
||||
import subprocess
|
||||
|
|
@ -52,26 +50,15 @@ def posix_path(path):
|
|||
path = path[1:-1]
|
||||
drive, new_path = os.path.splitdrive(path)
|
||||
if drive:
|
||||
new_path = posixpath.join(
|
||||
"/cygdrive",
|
||||
drive.split(":")[0],
|
||||
*re.split("/|\\\\", new_path))
|
||||
new_path = posixpath.join("/cygdrive", drive.split(":")[0], *re.split("/|\\\\", new_path))
|
||||
return "{quote}{path}{quote}".format(quote=path_quote, path=new_path)
|
||||
|
||||
|
||||
class RemoteOperations(object):
|
||||
"""Class to support remote operations."""
|
||||
|
||||
def __init__(self,
|
||||
user_host,
|
||||
ssh_connection_options=None,
|
||||
ssh_options=None,
|
||||
scp_options=None,
|
||||
retries=0,
|
||||
retry_sleep=0,
|
||||
debug=False,
|
||||
shell_binary="/bin/bash",
|
||||
use_shell=False):
|
||||
def __init__(self, user_host, ssh_connection_options=None, ssh_options=None, scp_options=None,
|
||||
retries=0, retry_sleep=0, debug=False, shell_binary="/bin/bash", use_shell=False):
|
||||
|
||||
self.user_host = user_host
|
||||
self.ssh_connection_options = ssh_connection_options if ssh_connection_options else ""
|
||||
|
|
@ -92,17 +79,15 @@ class RemoteOperations(object):
|
|||
if not self.use_shell:
|
||||
cmd = shlex.split(cmd)
|
||||
# Use a common pipe for stdout & stderr for logging.
|
||||
process = subprocess.Popen(cmd,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
||||
shell=self.use_shell)
|
||||
buff_stdout, _ = process.communicate()
|
||||
return process.poll(), buff_stdout
|
||||
|
||||
def _remote_access(self):
|
||||
""" This will check if a remote session is possible. """
|
||||
cmd = "ssh {} {} {} date".format(
|
||||
self.ssh_connection_options, self.ssh_options, self.user_host)
|
||||
cmd = "ssh {} {} {} date".format(self.ssh_connection_options, self.ssh_options,
|
||||
self.user_host)
|
||||
attempt_num = 0
|
||||
buff = ""
|
||||
while True:
|
||||
|
|
@ -159,13 +144,9 @@ class RemoteOperations(object):
|
|||
operation_param = "{}".format(operation_param.replace("'", r"\'"))
|
||||
operation_param = "{}".format(operation_param.replace("\"", r"\""))
|
||||
dollar = "$"
|
||||
cmd = "ssh {} {} {} {} -c \"{}'{}'\"".format(
|
||||
self.ssh_connection_options,
|
||||
self.ssh_options,
|
||||
self.user_host,
|
||||
self.shell_binary,
|
||||
dollar,
|
||||
operation_param)
|
||||
cmd = "ssh {} {} {} {} -c \"{}'{}'\"".format(self.ssh_connection_options,
|
||||
self.ssh_options, self.user_host,
|
||||
self.shell_binary, dollar, operation_param)
|
||||
cmds.append(cmd)
|
||||
|
||||
elif operation_type == "copy_to":
|
||||
|
|
@ -182,16 +163,15 @@ class RemoteOperations(object):
|
|||
elif operation_type == "copy_from":
|
||||
operation_dir = operation_dir if operation_dir else "."
|
||||
if not os.path.isdir(operation_dir):
|
||||
raise ValueError(
|
||||
"Local directory '{}' does not exist.".format(operation_dir))
|
||||
raise ValueError("Local directory '{}' does not exist.".format(operation_dir))
|
||||
|
||||
# We support multiple files being copied from the remote host
|
||||
# by invoking scp for each file specified.
|
||||
# Note - this is a method which scp does not support directly.
|
||||
for copy_file in operation_param:
|
||||
copy_file = posix_path(copy_file)
|
||||
cmd = "scp -r {} {} {}:".format(
|
||||
self.ssh_connection_options, self.scp_options, self.user_host)
|
||||
cmd = "scp -r {} {} {}:".format(self.ssh_connection_options, self.scp_options,
|
||||
self.user_host)
|
||||
# Quote (on Posix), and escape the file if there are spaces.
|
||||
# Note - we do not support other non-ASCII characters in a file name.
|
||||
quote = "\"" if not _IS_WINDOWS else ""
|
||||
|
|
@ -202,9 +182,8 @@ class RemoteOperations(object):
|
|||
cmds.append(cmd)
|
||||
|
||||
else:
|
||||
raise ValueError(
|
||||
"Invalid operation '{}' specified, choose from {}.".format(
|
||||
operation_type, _OPERATIONS))
|
||||
raise ValueError("Invalid operation '{}' specified, choose from {}.".format(
|
||||
operation_type, _OPERATIONS))
|
||||
|
||||
final_ret = 0
|
||||
buff = ""
|
||||
|
|
@ -217,24 +196,18 @@ class RemoteOperations(object):
|
|||
|
||||
def shell(self, operation_param, operation_dir=None):
|
||||
""" Helper for remote shell operations. """
|
||||
return self.operation(
|
||||
operation_type="shell",
|
||||
operation_param=operation_param,
|
||||
operation_dir=operation_dir)
|
||||
return self.operation(operation_type="shell", operation_param=operation_param,
|
||||
operation_dir=operation_dir)
|
||||
|
||||
def copy_to(self, operation_param, operation_dir=None):
|
||||
""" Helper for remote copy_to operations. """
|
||||
return self.operation(
|
||||
operation_type="copy_to",
|
||||
operation_param=operation_param,
|
||||
operation_dir=operation_dir)
|
||||
return self.operation(operation_type="copy_to", operation_param=operation_param,
|
||||
operation_dir=operation_dir)
|
||||
|
||||
def copy_from(self, operation_param, operation_dir=None):
|
||||
""" Helper for remote copy_from operations. """
|
||||
return self.operation(
|
||||
operation_type="copy_from",
|
||||
operation_param=operation_param,
|
||||
operation_dir=operation_dir)
|
||||
return self.operation(operation_type="copy_from", operation_param=operation_param,
|
||||
operation_dir=operation_dir)
|
||||
|
||||
|
||||
def main():
|
||||
|
|
@ -245,114 +218,77 @@ def main():
|
|||
shell_options = optparse.OptionGroup(parser, "Shell options")
|
||||
copy_options = optparse.OptionGroup(parser, "Copy options")
|
||||
|
||||
parser.add_option("--userHost",
|
||||
dest="user_host",
|
||||
default=None,
|
||||
help="User and remote host to execute commands on [REQUIRED]."
|
||||
" Examples, 'user@1.2.3.4' or 'user@myhost.com'.")
|
||||
parser.add_option("--userHost", dest="user_host", default=None,
|
||||
help=("User and remote host to execute commands on [REQUIRED]."
|
||||
" Examples, 'user@1.2.3.4' or 'user@myhost.com'."))
|
||||
|
||||
parser.add_option("--operation",
|
||||
dest="operation",
|
||||
default="shell",
|
||||
choices=_OPERATIONS,
|
||||
help="Remote operation to perform, choose one of '{}',"
|
||||
" defaults to '%default'.".format(", ".join(_OPERATIONS)))
|
||||
parser.add_option("--operation", dest="operation", default="shell", choices=_OPERATIONS,
|
||||
help=("Remote operation to perform, choose one of '{}',"
|
||||
" defaults to '%default'.".format(", ".join(_OPERATIONS))))
|
||||
|
||||
control_options.add_option("--sshConnectionOptions",
|
||||
dest="ssh_connection_options",
|
||||
default=None,
|
||||
action="append",
|
||||
help="SSH connection options which are common to ssh and scp."
|
||||
" More than one option can be specified either"
|
||||
" in one quoted string or by specifying"
|
||||
" this option more than once. Example options:"
|
||||
" '-i $HOME/.ssh/access.pem -o ConnectTimeout=10"
|
||||
" -o ConnectionAttempts=10'")
|
||||
control_options.add_option("--sshConnectionOptions", dest="ssh_connection_options",
|
||||
default=None, action="append",
|
||||
help=("SSH connection options which are common to ssh and scp."
|
||||
" More than one option can be specified either"
|
||||
" in one quoted string or by specifying"
|
||||
" this option more than once. Example options:"
|
||||
" '-i $HOME/.ssh/access.pem -o ConnectTimeout=10"
|
||||
" -o ConnectionAttempts=10'"))
|
||||
|
||||
control_options.add_option("--sshOptions",
|
||||
dest="ssh_options",
|
||||
default=None,
|
||||
action="append",
|
||||
help="SSH specific options."
|
||||
" More than one option can be specified either"
|
||||
" in one quoted string or by specifying"
|
||||
" this option more than once. Example options:"
|
||||
" '-t' or '-T'")
|
||||
control_options.add_option("--sshOptions", dest="ssh_options", default=None, action="append",
|
||||
help=("SSH specific options."
|
||||
" More than one option can be specified either"
|
||||
" in one quoted string or by specifying"
|
||||
" this option more than once. Example options:"
|
||||
" '-t' or '-T'"))
|
||||
|
||||
control_options.add_option("--scpOptions",
|
||||
dest="scp_options",
|
||||
default=None,
|
||||
action="append",
|
||||
help="SCP specific options."
|
||||
" More than one option can be specified either"
|
||||
" in one quoted string or by specifying"
|
||||
" this option more than once. Example options:"
|
||||
" '-l 5000'")
|
||||
control_options.add_option("--scpOptions", dest="scp_options", default=None, action="append",
|
||||
help=("SCP specific options."
|
||||
" More than one option can be specified either"
|
||||
" in one quoted string or by specifying"
|
||||
" this option more than once. Example options:"
|
||||
" '-l 5000'"))
|
||||
|
||||
control_options.add_option("--retries",
|
||||
dest="retries",
|
||||
type=int,
|
||||
default=0,
|
||||
help="Number of retries to attempt for operation,"
|
||||
" defaults to '%default'.")
|
||||
control_options.add_option("--retries", dest="retries", type=int, default=0,
|
||||
help=("Number of retries to attempt for operation,"
|
||||
" defaults to '%default'."))
|
||||
|
||||
control_options.add_option("--retrySleep",
|
||||
dest="retry_sleep",
|
||||
type=int,
|
||||
default=10,
|
||||
help="Number of seconds to wait between retries,"
|
||||
" defaults to '%default'.")
|
||||
control_options.add_option("--retrySleep", dest="retry_sleep", type=int, default=10,
|
||||
help=("Number of seconds to wait between retries,"
|
||||
" defaults to '%default'."))
|
||||
|
||||
control_options.add_option("--debug",
|
||||
dest="debug",
|
||||
action="store_true",
|
||||
default=False,
|
||||
control_options.add_option("--debug", dest="debug", action="store_true", default=False,
|
||||
help="Provides debug output.")
|
||||
|
||||
control_options.add_option("--verbose",
|
||||
dest="verbose",
|
||||
action="store_true",
|
||||
default=False,
|
||||
control_options.add_option("--verbose", dest="verbose", action="store_true", default=False,
|
||||
help="Print exit status and output at end.")
|
||||
|
||||
shell_options.add_option("--commands",
|
||||
dest="remote_commands",
|
||||
default=None,
|
||||
action="append",
|
||||
help="Commands to excute on the remote host. The"
|
||||
" commands must be separated by a ';' and can either"
|
||||
" be specifed in a quoted string or by specifying"
|
||||
" this option more than once. A ';' will be added"
|
||||
" between commands when this option is specifed"
|
||||
" more than once.")
|
||||
shell_options.add_option("--commands", dest="remote_commands", default=None, action="append",
|
||||
help=("Commands to excute on the remote host. The"
|
||||
" commands must be separated by a ';' and can either"
|
||||
" be specifed in a quoted string or by specifying"
|
||||
" this option more than once. A ';' will be added"
|
||||
" between commands when this option is specifed"
|
||||
" more than once."))
|
||||
|
||||
shell_options.add_option("--commandDir",
|
||||
dest="command_dir",
|
||||
default=None,
|
||||
help="Working directory on remote to execute commands"
|
||||
" form. Defaults to remote login directory.")
|
||||
shell_options.add_option("--commandDir", dest="command_dir", default=None,
|
||||
help=("Working directory on remote to execute commands"
|
||||
" form. Defaults to remote login directory."))
|
||||
|
||||
copy_options.add_option("--file",
|
||||
dest="files",
|
||||
default=None,
|
||||
action="append",
|
||||
help="The file to copy to/from remote host. To"
|
||||
" support spaces in the file, each file must be"
|
||||
" specified using this option more than once.")
|
||||
copy_options.add_option("--file", dest="files", default=None, action="append",
|
||||
help=("The file to copy to/from remote host. To"
|
||||
" support spaces in the file, each file must be"
|
||||
" specified using this option more than once."))
|
||||
|
||||
copy_options.add_option("--remoteDir",
|
||||
dest="remote_dir",
|
||||
default=None,
|
||||
help="Remote directory to copy to, only applies when"
|
||||
" operation is 'copy_to'. Defaults to the login"
|
||||
" directory on the remote host.")
|
||||
copy_options.add_option("--remoteDir", dest="remote_dir", default=None,
|
||||
help=("Remote directory to copy to, only applies when"
|
||||
" operation is 'copy_to'. Defaults to the login"
|
||||
" directory on the remote host."))
|
||||
|
||||
copy_options.add_option("--localDir",
|
||||
dest="local_dir",
|
||||
default=".",
|
||||
help="Local directory to copy to, only applies when"
|
||||
" operation is 'copy_from'. Defaults to the"
|
||||
" current directory, '%default'.")
|
||||
copy_options.add_option("--localDir", dest="local_dir", default=".",
|
||||
help=("Local directory to copy to, only applies when"
|
||||
" operation is 'copy_from'. Defaults to the"
|
||||
" current directory, '%default'."))
|
||||
|
||||
parser.add_option_group(control_options)
|
||||
parser.add_option_group(shell_options)
|
||||
|
|
@ -367,15 +303,14 @@ def main():
|
|||
if options.operation == "shell":
|
||||
if not getattr(options, "remote_commands", None):
|
||||
parser.print_help()
|
||||
parser.error("Missing required '{}' option '{}'".format(
|
||||
options.operation, "--commands"))
|
||||
parser.error("Missing required '{}' option '{}'".format(options.operation,
|
||||
"--commands"))
|
||||
operation_param = ";".join(options.remote_commands)
|
||||
operation_dir = options.command_dir
|
||||
else:
|
||||
if not getattr(options, "files", None):
|
||||
parser.print_help()
|
||||
parser.error("Missing required '{}' option '{}'".format(
|
||||
options.operation, "--file"))
|
||||
parser.error("Missing required '{}' option '{}'".format(options.operation, "--file"))
|
||||
operation_param = options.files
|
||||
if options.operation == "copy_to":
|
||||
operation_dir = options.remote_dir
|
||||
|
|
@ -398,13 +333,9 @@ def main():
|
|||
scp_options = " ".join(options.scp_options)
|
||||
|
||||
remote_op = RemoteOperations(
|
||||
user_host=options.user_host,
|
||||
ssh_connection_options=ssh_connection_options,
|
||||
ssh_options=ssh_options,
|
||||
scp_options=scp_options,
|
||||
retries=options.retries,
|
||||
retry_sleep=options.retry_sleep,
|
||||
debug=options.debug)
|
||||
user_host=options.user_host, ssh_connection_options=ssh_connection_options,
|
||||
ssh_options=ssh_options, scp_options=scp_options, retries=options.retries,
|
||||
retry_sleep=options.retry_sleep, debug=options.debug)
|
||||
ret_code, buffer = remote_op.operation(options.operation, operation_param, operation_dir)
|
||||
if options.verbose:
|
||||
print("Return code: {} for command {}".format(ret_code, sys.argv))
|
||||
|
|
|
|||
|
|
@ -6,7 +6,7 @@ pyjwt == 1.5.3
|
|||
pyyaml == 3.11
|
||||
unittest-xml-reporting == 2.1.0
|
||||
# Linters
|
||||
yapf == 0.16.0
|
||||
yapf == 0.21.0
|
||||
mypy == 0.501 ; python_version > "3"
|
||||
# typing in Python 2 for mypy
|
||||
typing == 3.6.1; python_version < "3"
|
||||
|
|
|
|||
|
|
@ -1,5 +1,4 @@
|
|||
#!/usr/bin/env python
|
||||
|
||||
"""
|
||||
Command line utility for executing MongoDB tests of all kinds.
|
||||
"""
|
||||
|
|
@ -70,8 +69,7 @@ def _execute_suite(suite):
|
|||
archive = resmokelib.utils.archival.Archival(
|
||||
archival_json_file=resmokelib.config.ARCHIVE_FILE,
|
||||
limit_size_mb=resmokelib.config.ARCHIVE_LIMIT_MB,
|
||||
limit_files=resmokelib.config.ARCHIVE_LIMIT_TESTS,
|
||||
logger=logger)
|
||||
limit_files=resmokelib.config.ARCHIVE_LIMIT_TESTS, logger=logger)
|
||||
|
||||
executor_config = suite.get_executor_config()
|
||||
executor = resmokelib.testing.executor.TestSuiteExecutor(
|
||||
|
|
@ -88,8 +86,8 @@ def _execute_suite(suite):
|
|||
suite.return_code = 74 # Exit code for IOError on POSIX systems.
|
||||
return True
|
||||
except:
|
||||
logger.exception("Encountered an error when running %ss of suite %s.",
|
||||
suite.test_kind, suite.get_display_name())
|
||||
logger.exception("Encountered an error when running %ss of suite %s.", suite.test_kind,
|
||||
suite.get_display_name())
|
||||
suite.return_code = 2
|
||||
return False
|
||||
finally:
|
||||
|
|
@ -169,8 +167,7 @@ class Main(object):
|
|||
"""
|
||||
|
||||
return resmokelib.suitesconfig.get_suites(
|
||||
suite_files=self.__values.suite_files.split(","),
|
||||
test_files=self.__args)
|
||||
suite_files=self.__values.suite_files.split(","), test_files=self.__args)
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
|
|
@ -209,8 +206,8 @@ class Main(object):
|
|||
suites_by_test = find_suites_by_test(suites)
|
||||
for test in sorted(suites_by_test):
|
||||
suite_names = suites_by_test[test]
|
||||
resmoke_logger.info("%s will be run by the following suite(s): %s",
|
||||
test, suite_names)
|
||||
resmoke_logger.info("%s will be run by the following suite(s): %s", test,
|
||||
suite_names)
|
||||
sys.exit(0)
|
||||
|
||||
try:
|
||||
|
|
@ -222,8 +219,8 @@ class Main(object):
|
|||
suite.record_suite_end()
|
||||
|
||||
resmoke_logger.info("=" * 80)
|
||||
resmoke_logger.info("Summary of %s suite: %s",
|
||||
suite.get_display_name(), _summarize_suite(suite))
|
||||
resmoke_logger.info("Summary of %s suite: %s", suite.get_display_name(),
|
||||
_summarize_suite(suite))
|
||||
|
||||
if interrupted or (suite.options.fail_fast and suite.return_code != 0):
|
||||
time_taken = time.time() - self.__start_time
|
||||
|
|
|
|||
|
|
@ -33,4 +33,5 @@ def _get_named_loggers():
|
|||
|
||||
return named_loggers
|
||||
|
||||
|
||||
NAMED_LOGGERS = _get_named_loggers()
|
||||
|
|
|
|||
|
|
@ -33,4 +33,5 @@ def _get_named_suites():
|
|||
|
||||
return named_suites
|
||||
|
||||
|
||||
NAMED_SUITES = _get_named_suites()
|
||||
|
|
|
|||
|
|
@ -1,3 +1,5 @@
|
|||
"""Empty."""
|
||||
|
||||
from __future__ import absolute_import
|
||||
|
||||
from . import config
|
||||
|
|
|
|||
|
|
@ -10,7 +10,6 @@ import itertools
|
|||
import os.path
|
||||
import time
|
||||
|
||||
|
||||
# Subdirectory under the dbpath prefix that contains directories with data files of mongod's started
|
||||
# by resmoke.py.
|
||||
FIXTURE_SUBDIR = "resmoke"
|
||||
|
|
@ -166,14 +165,15 @@ class SuiteOptions(_SuiteOptions):
|
|||
|
||||
description = None
|
||||
include_tags = None
|
||||
parent = dict(zip(SuiteOptions._fields, [
|
||||
description,
|
||||
FAIL_FAST,
|
||||
include_tags,
|
||||
JOBS,
|
||||
REPEAT,
|
||||
REPORT_FAILURE_STATUS,
|
||||
]))
|
||||
parent = dict(
|
||||
zip(SuiteOptions._fields, [
|
||||
description,
|
||||
FAIL_FAST,
|
||||
include_tags,
|
||||
JOBS,
|
||||
REPEAT,
|
||||
REPORT_FAILURE_STATUS,
|
||||
]))
|
||||
|
||||
options = self._asdict()
|
||||
for field in SuiteOptions._fields:
|
||||
|
|
@ -183,8 +183,8 @@ class SuiteOptions(_SuiteOptions):
|
|||
return SuiteOptions(**options)
|
||||
|
||||
|
||||
SuiteOptions.ALL_INHERITED = SuiteOptions(**dict(zip(SuiteOptions._fields,
|
||||
itertools.repeat(SuiteOptions.INHERIT))))
|
||||
SuiteOptions.ALL_INHERITED = SuiteOptions(**dict(
|
||||
zip(SuiteOptions._fields, itertools.repeat(SuiteOptions.INHERIT))))
|
||||
|
||||
##
|
||||
# Variables that are set by the user at the command line or with --options.
|
||||
|
|
@ -380,7 +380,5 @@ DEFAULT_INTEGRATION_TEST_LIST = "build/integration_tests.txt"
|
|||
|
||||
# External files or executables, used as suite selectors, that are created during the build and
|
||||
# therefore might not be available when creating a test membership map.
|
||||
EXTERNAL_SUITE_SELECTORS = (DEFAULT_BENCHMARK_TEST_LIST,
|
||||
DEFAULT_UNIT_TEST_LIST,
|
||||
DEFAULT_INTEGRATION_TEST_LIST,
|
||||
DEFAULT_DBTEST_EXECUTABLE)
|
||||
EXTERNAL_SUITE_SELECTORS = (DEFAULT_BENCHMARK_TEST_LIST, DEFAULT_UNIT_TEST_LIST,
|
||||
DEFAULT_INTEGRATION_TEST_LIST, DEFAULT_DBTEST_EXECUTABLE)
|
||||
|
|
|
|||
|
|
@ -49,7 +49,7 @@ class PortAllocator(object):
|
|||
"""
|
||||
|
||||
# A PortAllocator will not return any port greater than this number.
|
||||
MAX_PORT = 2 ** 16 - 1
|
||||
MAX_PORT = 2**16 - 1
|
||||
|
||||
# Each job gets a contiguous range of _PORTS_PER_JOB ports, with job 0 getting the first block
|
||||
# of ports, job 1 getting the second block, and so on.
|
||||
|
|
@ -83,8 +83,8 @@ class PortAllocator(object):
|
|||
|
||||
if next_port >= start_port + cls._PORTS_PER_FIXTURE:
|
||||
raise errors.PortAllocationError(
|
||||
"Fixture has requested more than the %d ports reserved per fixture"
|
||||
% cls._PORTS_PER_FIXTURE)
|
||||
"Fixture has requested more than the %d ports reserved per fixture" %
|
||||
cls._PORTS_PER_FIXTURE)
|
||||
|
||||
return next_port
|
||||
|
||||
|
|
|
|||
|
|
@ -29,8 +29,7 @@ if os.name == "posix" and sys.version_info[0] == 2:
|
|||
import warnings
|
||||
warnings.warn(("Falling back to using the subprocess module because subprocess32 isn't"
|
||||
" available. When using the subprocess module, a child process may trigger"
|
||||
" an invalid free(). See SERVER-22219 for more details."),
|
||||
RuntimeWarning)
|
||||
" an invalid free(). See SERVER-22219 for more details."), RuntimeWarning)
|
||||
import subprocess
|
||||
else:
|
||||
import subprocess
|
||||
|
|
@ -74,8 +73,7 @@ if sys.platform == "win32":
|
|||
win32job.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE
|
||||
|
||||
# Update the limits of the job object.
|
||||
win32job.SetInformationJobObject(job_object,
|
||||
win32job.JobObjectExtendedLimitInformation,
|
||||
win32job.SetInformationJobObject(job_object, win32job.JobObjectExtendedLimitInformation,
|
||||
job_info)
|
||||
|
||||
return job_object
|
||||
|
|
@ -138,13 +136,9 @@ class Process(object):
|
|||
close_fds = (sys.platform != "win32")
|
||||
|
||||
with _POPEN_LOCK:
|
||||
self._process = subprocess.Popen(self.args,
|
||||
bufsize=buffer_size,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
close_fds=close_fds,
|
||||
env=self.env,
|
||||
creationflags=creation_flags)
|
||||
self._process = subprocess.Popen(self.args, bufsize=buffer_size, stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE, close_fds=close_fds,
|
||||
env=self.env, creationflags=creation_flags)
|
||||
self.pid = self._process.pid
|
||||
|
||||
self._stdout_pipe = pipe.LoggerPipe(self.logger, logging.INFO, self._process.stdout)
|
||||
|
|
@ -173,16 +167,15 @@ class Process(object):
|
|||
mongo_signal_handle = None
|
||||
try:
|
||||
mongo_signal_handle = win32event.OpenEvent(
|
||||
win32event.EVENT_MODIFY_STATE, False, "Global\\Mongo_" +
|
||||
str(self._process.pid))
|
||||
win32event.EVENT_MODIFY_STATE, False,
|
||||
"Global\\Mongo_" + str(self._process.pid))
|
||||
|
||||
if not mongo_signal_handle:
|
||||
# The process has already died.
|
||||
return
|
||||
win32event.SetEvent(mongo_signal_handle)
|
||||
# Wait 60 seconds for the program to exit.
|
||||
status = win32event.WaitForSingleObject(
|
||||
self._process._handle, 60 * 1000)
|
||||
status = win32event.WaitForSingleObject(self._process._handle, 60 * 1000)
|
||||
if status == win32event.WAIT_OBJECT_0:
|
||||
return
|
||||
except win32process.error as err:
|
||||
|
|
|
|||
|
|
@ -12,7 +12,6 @@ import requests
|
|||
from . import handlers
|
||||
from .. import config as _config
|
||||
|
||||
|
||||
CREATE_BUILD_ENDPOINT = "/build"
|
||||
APPEND_GLOBAL_LOGS_ENDPOINT = "/build/%(build_id)s"
|
||||
CREATE_TEST_ENDPOINT = "/build/%(build_id)s/test"
|
||||
|
|
@ -94,10 +93,7 @@ class _BaseBuildloggerHandler(handlers.BufferedHandler):
|
|||
handler for the test logs.
|
||||
"""
|
||||
|
||||
def __init__(self,
|
||||
build_config,
|
||||
endpoint,
|
||||
capacity=_SEND_AFTER_LINES,
|
||||
def __init__(self, build_config, endpoint, capacity=_SEND_AFTER_LINES,
|
||||
interval_secs=_SEND_AFTER_SECS):
|
||||
"""
|
||||
Initializes the buildlogger handler with the build id and
|
||||
|
|
@ -213,8 +209,8 @@ class BuildloggerTestHandler(_BaseBuildloggerHandler):
|
|||
Buildlogger handler for the test logs.
|
||||
"""
|
||||
|
||||
def __init__(self, build_config, build_id, test_id,
|
||||
capacity=_SEND_AFTER_LINES, interval_secs=_SEND_AFTER_SECS):
|
||||
def __init__(self, build_config, build_id, test_id, capacity=_SEND_AFTER_LINES,
|
||||
interval_secs=_SEND_AFTER_SECS):
|
||||
"""Initializes the buildlogger handler with the credentials, build id, and test id."""
|
||||
endpoint = APPEND_TEST_LOGS_ENDPOINT % {
|
||||
"build_id": build_id,
|
||||
|
|
@ -249,8 +245,8 @@ class BuildloggerGlobalHandler(_BaseBuildloggerHandler):
|
|||
Buildlogger handler for the global logs.
|
||||
"""
|
||||
|
||||
def __init__(self, build_config, build_id,
|
||||
capacity=_SEND_AFTER_LINES, interval_secs=_SEND_AFTER_SECS):
|
||||
def __init__(self, build_config, build_id, capacity=_SEND_AFTER_LINES,
|
||||
interval_secs=_SEND_AFTER_SECS):
|
||||
"""Initializes the buildlogger handler with the credentials and build id."""
|
||||
endpoint = APPEND_GLOBAL_LOGS_ENDPOINT % {"build_id": build_id}
|
||||
_BaseBuildloggerHandler.__init__(self, build_config, endpoint, capacity, interval_secs)
|
||||
|
|
@ -289,10 +285,8 @@ class BuildloggerServer(object):
|
|||
builder = "%s_%s" % (self.config["builder"], suffix)
|
||||
build_num = int(self.config["build_num"])
|
||||
|
||||
handler = handlers.HTTPHandler(
|
||||
url_root=_config.BUILDLOGGER_URL,
|
||||
username=username,
|
||||
password=password)
|
||||
handler = handlers.HTTPHandler(url_root=_config.BUILDLOGGER_URL, username=username,
|
||||
password=password)
|
||||
|
||||
response = handler.post(CREATE_BUILD_ENDPOINT, data={
|
||||
"builder": builder,
|
||||
|
|
@ -307,18 +301,18 @@ class BuildloggerServer(object):
|
|||
"""
|
||||
Returns a new test id for sending test logs to.
|
||||
"""
|
||||
handler = handlers.HTTPHandler(
|
||||
url_root=_config.BUILDLOGGER_URL,
|
||||
username=self.config["username"],
|
||||
password=self.config["password"])
|
||||
handler = handlers.HTTPHandler(url_root=_config.BUILDLOGGER_URL,
|
||||
username=self.config["username"],
|
||||
password=self.config["password"])
|
||||
|
||||
endpoint = CREATE_TEST_ENDPOINT % {"build_id": build_id}
|
||||
response = handler.post(endpoint, data={
|
||||
"test_filename": test_filename,
|
||||
"command": test_command,
|
||||
"phase": self.config.get("build_phase", "unknown"),
|
||||
"task_id": _config.EVERGREEN_TASK_ID,
|
||||
})
|
||||
response = handler.post(
|
||||
endpoint, data={
|
||||
"test_filename": test_filename,
|
||||
"command": test_command,
|
||||
"phase": self.config.get("build_phase", "unknown"),
|
||||
"task_id": _config.EVERGREEN_TASK_ID,
|
||||
})
|
||||
|
||||
return response["id"]
|
||||
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@ import time
|
|||
|
||||
from ..utils import scheduler
|
||||
|
||||
|
||||
_FLUSH_THREAD_LOCK = threading.Lock()
|
||||
_FLUSH_THREAD = None
|
||||
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ class ISO8601Formatter(logging.Formatter):
|
|||
|
||||
# The offset is positive if the local timezone is behind (east of) UTC, and negative if it
|
||||
# is ahead (west) of UTC.
|
||||
utc_offset_prefix = "-" if utc_offset_secs > 0 else "+"
|
||||
utc_offset_prefix = "-" if utc_offset_secs > 0 else "+"
|
||||
utc_offset_secs = abs(utc_offset_secs)
|
||||
|
||||
utc_offset_mins = (utc_offset_secs / 60) % 60
|
||||
|
|
|
|||
|
|
@ -225,12 +225,8 @@ class HTTPHandler(object):
|
|||
# that defined InsecureRequestWarning.
|
||||
pass
|
||||
|
||||
response = requests.post(url,
|
||||
data=data,
|
||||
headers=headers,
|
||||
timeout=timeout_secs,
|
||||
auth=self.auth_handler,
|
||||
verify=should_validate_certificates)
|
||||
response = requests.post(url, data=data, headers=headers, timeout=timeout_secs,
|
||||
auth=self.auth_handler, verify=should_validate_certificates)
|
||||
|
||||
response.raise_for_status()
|
||||
|
||||
|
|
|
|||
|
|
@ -40,8 +40,8 @@ def configure_loggers(logging_config):
|
|||
fixture_logger = FixtureRootLogger(logging_config, build_logger_server)
|
||||
tests_logger = TestsRootLogger(logging_config, build_logger_server)
|
||||
global EXECUTOR_LOGGER
|
||||
EXECUTOR_LOGGER = ExecutorRootLogger(logging_config, build_logger_server,
|
||||
fixture_logger, tests_logger)
|
||||
EXECUTOR_LOGGER = ExecutorRootLogger(logging_config, build_logger_server, fixture_logger,
|
||||
tests_logger)
|
||||
|
||||
|
||||
class BaseLogger(logging.Logger):
|
||||
|
|
@ -50,6 +50,7 @@ class BaseLogger(logging.Logger):
|
|||
Custom loggers share access to the logging configuration and provide methods
|
||||
to create other loggers.
|
||||
"""
|
||||
|
||||
def __init__(self, name, logging_config=None, build_logger_server=None, parent=None):
|
||||
"""Initialize a BaseLogger.
|
||||
|
||||
|
|
@ -93,6 +94,7 @@ class BaseLogger(logging.Logger):
|
|||
|
||||
class RootLogger(BaseLogger):
|
||||
"""A custom class for top-level loggers (executor, fixture, tests)."""
|
||||
|
||||
def __init__(self, name, logging_config, build_logger_server):
|
||||
"""Initialize a RootLogger.
|
||||
|
||||
|
|
@ -115,8 +117,8 @@ class RootLogger(BaseLogger):
|
|||
def _add_handler(self, handler_info, formatter):
|
||||
handler_class = handler_info["class"]
|
||||
if handler_class == "logging.FileHandler":
|
||||
handler = logging.FileHandler(filename=handler_info["filename"],
|
||||
mode=handler_info.get("mode", "w"))
|
||||
handler = logging.FileHandler(filename=handler_info["filename"], mode=handler_info.get(
|
||||
"mode", "w"))
|
||||
elif handler_class == "logging.NullHandler":
|
||||
handler = logging.NullHandler()
|
||||
elif handler_class == "logging.StreamHandler":
|
||||
|
|
@ -131,6 +133,7 @@ class RootLogger(BaseLogger):
|
|||
|
||||
class ExecutorRootLogger(RootLogger):
|
||||
"""Class for the "executor" top-level logger."""
|
||||
|
||||
def __init__(self, logging_config, build_logger_server, fixture_root_logger, tests_root_logger):
|
||||
"""Initialize an ExecutorRootLogger."""
|
||||
RootLogger.__init__(self, EXECUTOR_LOGGER_NAME, logging_config, build_logger_server)
|
||||
|
|
@ -231,6 +234,7 @@ class TestLogger(BaseLogger):
|
|||
|
||||
class FixtureRootLogger(RootLogger):
|
||||
"""Class for the "fixture" top-level logger."""
|
||||
|
||||
def __init__(self, logging_config, build_logger_server):
|
||||
"""Initialize a FixtureRootLogger.
|
||||
|
||||
|
|
@ -289,12 +293,13 @@ class FixtureNodeLogger(BaseLogger):
|
|||
|
||||
def new_fixture_node_logger(self, node_name):
|
||||
"""Create a new child FixtureNodeLogger."""
|
||||
return FixtureNodeLogger(self.fixture_class, self.job_num,
|
||||
"%s:%s" % (self.node_name, node_name), self)
|
||||
return FixtureNodeLogger(self.fixture_class, self.job_num, "%s:%s" % (self.node_name,
|
||||
node_name), self)
|
||||
|
||||
|
||||
class TestsRootLogger(RootLogger):
|
||||
"""Class for the "tests" top-level logger."""
|
||||
|
||||
def __init__(self, logging_config, build_logger_server):
|
||||
"""Initialize a TestsRootLogger.
|
||||
|
||||
|
|
@ -330,6 +335,7 @@ class HookLogger(BaseLogger):
|
|||
|
||||
# Util methods
|
||||
|
||||
|
||||
def _fallback_buildlogger_handler(include_logger_name=True):
|
||||
"""
|
||||
Returns a handler that writes to stderr.
|
||||
|
|
|
|||
|
|
@ -138,27 +138,26 @@ def parse_command_line():
|
|||
help=("Enables or disables preallocation of journal files for all mongod"
|
||||
" processes. Defaults to %default."))
|
||||
|
||||
parser.add_option("--shellConnString", dest="shell_conn_string",
|
||||
metavar="CONN_STRING",
|
||||
parser.add_option("--shellConnString", dest="shell_conn_string", metavar="CONN_STRING",
|
||||
help="Overrides the default fixture and connect to an existing MongoDB"
|
||||
" cluster instead. This is useful for connecting to a MongoDB"
|
||||
" deployment started outside of resmoke.py including one running in a"
|
||||
" debugger.")
|
||||
" cluster instead. This is useful for connecting to a MongoDB"
|
||||
" deployment started outside of resmoke.py including one running in a"
|
||||
" debugger.")
|
||||
|
||||
parser.add_option("--shellPort", dest="shell_port", metavar="PORT",
|
||||
help="Convenience form of --shellConnString for connecting to an"
|
||||
" existing MongoDB cluster with the URL mongodb://localhost:[PORT]."
|
||||
" This is useful for connecting to a server running in a debugger.")
|
||||
" existing MongoDB cluster with the URL mongodb://localhost:[PORT]."
|
||||
" This is useful for connecting to a server running in a debugger.")
|
||||
|
||||
parser.add_option("--repeat", type="int", dest="repeat", metavar="N",
|
||||
help="Repeats the given suite(s) N times, or until one fails.")
|
||||
|
||||
parser.add_option("--reportFailureStatus", type="choice", action="store",
|
||||
dest="report_failure_status", choices=("fail", "silentfail"),
|
||||
metavar="STATUS",
|
||||
dest="report_failure_status", choices=("fail",
|
||||
"silentfail"), metavar="STATUS",
|
||||
help="Controls if the test failure status should be reported as failed"
|
||||
" or be silently ignored (STATUS=silentfail). Dynamic test failures will"
|
||||
" never be silently ignored. Defaults to STATUS=%default.")
|
||||
" or be silently ignored (STATUS=silentfail). Dynamic test failures will"
|
||||
" never be silently ignored. Defaults to STATUS=%default.")
|
||||
|
||||
parser.add_option("--reportFile", dest="report_file", metavar="REPORT",
|
||||
help="Writes a JSON file with test status and timing information.")
|
||||
|
|
@ -201,7 +200,7 @@ def parse_command_line():
|
|||
|
||||
parser.add_option("--storageEngineCacheSizeGB", dest="storage_engine_cache_size_gb",
|
||||
metavar="CONFIG", help="Sets the storage engine cache size configuration"
|
||||
" setting for all mongod's.")
|
||||
" setting for all mongod's.")
|
||||
|
||||
parser.add_option("--tagFile", dest="tag_file", metavar="OPTIONS",
|
||||
help="A YAML file that associates tests and tags.")
|
||||
|
|
@ -217,11 +216,10 @@ def parse_command_line():
|
|||
|
||||
parser.add_option("--executor", dest="executor_file",
|
||||
help="OBSOLETE: Superceded by --suites; specify --suites=SUITE path/to/test"
|
||||
" to run a particular test under a particular suite configuration.")
|
||||
" to run a particular test under a particular suite configuration.")
|
||||
|
||||
evergreen_options = optparse.OptionGroup(
|
||||
parser,
|
||||
title="Evergreen options",
|
||||
parser, title="Evergreen options",
|
||||
description=("Options used to propagate information about the Evergreen task running this"
|
||||
" script."))
|
||||
parser.add_option_group(evergreen_options)
|
||||
|
|
@ -247,8 +245,7 @@ def parse_command_line():
|
|||
" patch build."))
|
||||
|
||||
evergreen_options.add_option("--projectName", dest="project_name", metavar="PROJECT_NAME",
|
||||
help=("Sets the name of the Evergreen project running the tests."
|
||||
))
|
||||
help=("Sets the name of the Evergreen project running the tests."))
|
||||
|
||||
evergreen_options.add_option("--revisionOrderId", dest="revision_order_id",
|
||||
metavar="REVISION_ORDER_ID",
|
||||
|
|
@@ -267,11 +264,8 @@ def parse_command_line():
evergreen_options.add_option("--versionId", dest="version_id", metavar="VERSION_ID",
help="Sets the version ID of the task.")

benchmark_options = optparse.OptionGroup(
parser,
title="Benchmark test options",
description="Options for running Benchmark tests"
)
benchmark_options = optparse.OptionGroup(parser, title="Benchmark test options",
description="Options for running Benchmark tests")

parser.add_option_group(benchmark_options)

@@ -280,8 +274,7 @@ def parse_command_line():
help="Regex to filter benchmark tests to run.")

benchmark_options.add_option("--benchmarkListTests", dest="benchmark_list_tests",
action="store_true",
metavar="BENCHMARK_LIST_TESTS",
action="store_true", metavar="BENCHMARK_LIST_TESTS",
help="Lists all benchmark test configurations in each test file.")

benchmark_min_time_help = (
@@ -297,16 +290,10 @@ def parse_command_line():
"runs; use --benchmarkMinTimeSecs if you'd like to run a test for a longer or shorter "
"duration.")
benchmark_options.add_option("--benchmarkRepetitions", type="int", dest="benchmark_repetitions",
metavar="BENCHMARK_REPETITIONS",
help=benchmark_repetitions_help)
metavar="BENCHMARK_REPETITIONS", help=benchmark_repetitions_help)

parser.set_defaults(logger_file="console",
dry_run="off",
find_suites=False,
list_suites=False,
suite_files="with_server",
prealloc_journal="off",
shuffle="auto",
parser.set_defaults(logger_file="console", dry_run="off", find_suites=False, list_suites=False,
suite_files="with_server", prealloc_journal="off", shuffle="auto",
stagger_jobs="off")

options, args = parser.parse_args()
@@ -326,8 +313,8 @@ def validate_options(parser, options, args):
if options.executor_file:
parser.error("--executor is superseded by --suites; specify --suites={} {} to run the"
" test(s) under those suite configuration(s)"
.format(options.executor_file, " ".join(args)))
" test(s) under those suite configuration(s)".format(
options.executor_file, " ".join(args)))


def validate_benchmark_options():
@@ -347,9 +334,7 @@ def validate_benchmark_options():
raise optparse.OptionValueError(
"--jobs=%d cannot be used for benchmark tests. Parallel jobs affect CPU cache access "
"patterns and cause additional context switching, which lead to inaccurate benchmark "
"results. Please use --jobs=1"
% _config.JOBS
)
"results. Please use --jobs=1" % _config.JOBS)


def get_logging_config(values):
@ -21,7 +21,6 @@ from . import utils
|
|||
from .utils import globstar
|
||||
from .utils import jscomment
|
||||
|
||||
|
||||
########################
|
||||
# Test file explorer #
|
||||
########################
|
||||
|
|
@ -32,6 +31,7 @@ class TestFileExplorer(object):
|
|||
|
||||
The file related code has been confined to this class for testability.
|
||||
"""
|
||||
|
||||
def is_glob_pattern(self, path):
|
||||
"""Indicates if the provided path is a glob pattern.
|
||||
|
||||
|
|
@ -139,6 +139,7 @@ class _TestList(object):
|
|||
glob expansion of paths and check if they are existing files. If not, calling
|
||||
'include_files()' or 'exclude_files()' will raise an TypeError.
|
||||
"""
|
||||
|
||||
def __init__(self, test_file_explorer, roots, tests_are_files=True):
|
||||
"""Initializes the _TestList with a TestFileExplorer component and a list of root tests."""
|
||||
self._test_file_explorer = test_file_explorer
|
||||
|
|
@ -208,13 +209,13 @@ class _TestList(object):
|
|||
get_tags: a callable object that takes a test and returns the corresponding list of
|
||||
tags.
|
||||
"""
|
||||
self._filtered = {test for test in self._filtered
|
||||
if tag_expression(get_tags(test))}
|
||||
self._filtered = {test for test in self._filtered if tag_expression(get_tags(test))}
|
||||
|
||||
def include_any_pattern(self, patterns):
|
||||
"""
|
||||
Filters the test list to only include tests that match any of the given glob patterns.
|
||||
"""
|
||||
|
||||
def match(test):
|
||||
for pattern in patterns:
|
||||
if test == pattern or fnmatch.fnmatchcase(test, pattern):
|
||||
|
|
@ -244,6 +245,7 @@ class _TestList(object):
|
|||
# Tag matching expressions #
|
||||
##############################
|
||||
|
||||
|
||||
class _AllOfExpression(object):
|
||||
"""A tag matching expression that requires all child expressions to match."""
|
||||
|
||||
|
|
@ -266,6 +268,7 @@ class _AnyOfExpression(object):
|
|||
|
||||
class _NotExpression(object):
|
||||
"""A tag matching expression that matches if and only if the child expression does not match."""
|
||||
|
||||
def __init__(self, child):
|
||||
self.__child = child
|
||||
|
||||
|
|
@ -275,6 +278,7 @@ class _NotExpression(object):
|
|||
|
||||
class _MatchExpression(object):
|
||||
"""A tag matching expression that matches when a specific tag is present."""
|
||||
|
||||
def __init__(self, tag):
|
||||
self.__tag = tag
|
||||
|
||||
|
|
@@ -320,10 +324,10 @@ def _make_expression_list(configs):
class _SelectorConfig(object):
"""Base object to represent the configuration for test selection."""
def __init__(self, root=None, roots=None,
include_files=None, exclude_files=None,
include_tags=None, exclude_tags=None,
include_with_any_tags=None, exclude_with_any_tags=None):

def __init__(self, root=None, roots=None, include_files=None, exclude_files=None,
include_tags=None, exclude_tags=None, include_with_any_tags=None,
exclude_with_any_tags=None):
"""
Initializes the _SelectorConfig from the configuration elements.
@ -353,10 +357,8 @@ class _SelectorConfig(object):
|
|||
exclude_with_any_tags = self.__merge_lists(exclude_with_any_tags,
|
||||
config.EXCLUDE_WITH_ANY_TAGS)
|
||||
|
||||
self.tags_expression = self.__make_tags_expression(include_tags,
|
||||
exclude_tags,
|
||||
include_with_any_tags,
|
||||
exclude_with_any_tags)
|
||||
self.tags_expression = self.__make_tags_expression(
|
||||
include_tags, exclude_tags, include_with_any_tags, exclude_with_any_tags)
|
||||
|
||||
@staticmethod
|
||||
def __merge_lists(list_a, list_b):
|
||||
|
|
@ -371,20 +373,18 @@ class _SelectorConfig(object):
|
|||
return None
|
||||
|
||||
@staticmethod
|
||||
def __make_tags_expression(include_tags, exclude_tags,
|
||||
include_with_any_tags, exclude_with_any_tags):
|
||||
def __make_tags_expression(include_tags, exclude_tags, include_with_any_tags,
|
||||
exclude_with_any_tags):
|
||||
expressions = []
|
||||
if include_tags:
|
||||
expressions.append(make_expression(include_tags))
|
||||
elif exclude_tags:
|
||||
expressions.append(_NotExpression(make_expression(exclude_tags)))
|
||||
if include_with_any_tags:
|
||||
include_with_any_expr = make_expression(
|
||||
{"$anyOf": include_with_any_tags})
|
||||
include_with_any_expr = make_expression({"$anyOf": include_with_any_tags})
|
||||
expressions.append(include_with_any_expr)
|
||||
if exclude_with_any_tags:
|
||||
exclude_with_any_expr = make_expression(
|
||||
{"$not": {"$anyOf": exclude_with_any_tags}})
|
||||
exclude_with_any_expr = make_expression({"$not": {"$anyOf": exclude_with_any_tags}})
|
||||
expressions.append(exclude_with_any_expr)
|
||||
|
||||
if expressions:
|
||||
|
|
@ -395,6 +395,7 @@ class _SelectorConfig(object):
|
|||
|
||||
class _Selector(object):
|
||||
"""Selection algorithm to select tests matching a selector configuration."""
|
||||
|
||||
def __init__(self, test_file_explorer, tests_are_files=True):
|
||||
"""
|
||||
Initializes the _Selector.
|
||||
|
|
@ -440,12 +441,12 @@ class _Selector(object):
|
|||
|
||||
class _JSTestSelectorConfig(_SelectorConfig):
|
||||
"""_SelectorConfig subclass for js_test tests."""
|
||||
def __init__(self, roots=None,
|
||||
include_files=None, exclude_files=None,
|
||||
include_with_any_tags=None, exclude_with_any_tags=None,
|
||||
include_tags=None, exclude_tags=None):
|
||||
_SelectorConfig.__init__(self, roots=roots,
|
||||
include_files=include_files, exclude_files=exclude_files,
|
||||
|
||||
def __init__(self, roots=None, include_files=None, exclude_files=None,
|
||||
include_with_any_tags=None, exclude_with_any_tags=None, include_tags=None,
|
||||
exclude_tags=None):
|
||||
_SelectorConfig.__init__(self, roots=roots, include_files=include_files,
|
||||
exclude_files=exclude_files,
|
||||
include_with_any_tags=include_with_any_tags,
|
||||
exclude_with_any_tags=exclude_with_any_tags,
|
||||
include_tags=include_tags, exclude_tags=exclude_tags)
|
||||
|
|
@ -453,6 +454,7 @@ class _JSTestSelectorConfig(_SelectorConfig):
|
|||
|
||||
class _JSTestSelector(_Selector):
|
||||
"""_Selector subclass for js_test tests."""
|
||||
|
||||
def __init__(self, test_file_explorer):
|
||||
_Selector.__init__(self, test_file_explorer)
|
||||
self._tags = self._test_file_explorer.parse_tag_file("js_test")
|
||||
|
|
@ -466,20 +468,22 @@ class _JSTestSelector(_Selector):
|
|||
|
||||
class _CppTestSelectorConfig(_SelectorConfig):
|
||||
"""_SelectorConfig subclass for cpp_integration_test and cpp_unit_test tests."""
|
||||
def __init__(self, root=config.DEFAULT_INTEGRATION_TEST_LIST, roots=None,
|
||||
include_files=None, exclude_files=None):
|
||||
|
||||
def __init__(self, root=config.DEFAULT_INTEGRATION_TEST_LIST, roots=None, include_files=None,
|
||||
exclude_files=None):
|
||||
if roots:
|
||||
# The 'roots' argument is only present when tests are specified on the command line
|
||||
# and in that case they take precedence over the tests in the root file.
|
||||
_SelectorConfig.__init__(self, roots=roots,
|
||||
include_files=include_files, exclude_files=exclude_files)
|
||||
_SelectorConfig.__init__(self, roots=roots, include_files=include_files,
|
||||
exclude_files=exclude_files)
|
||||
else:
|
||||
_SelectorConfig.__init__(self, root=root,
|
||||
include_files=include_files, exclude_files=exclude_files)
|
||||
_SelectorConfig.__init__(self, root=root, include_files=include_files,
|
||||
exclude_files=exclude_files)
|
||||
|
||||
|
||||
class _CppTestSelector(_Selector):
|
||||
"""_Selector subclass for cpp_integration_test and cpp_unit_test tests."""
|
||||
|
||||
def __init__(self, test_file_explorer):
|
||||
_Selector.__init__(self, test_file_explorer)
|
||||
|
||||
|
|
@ -494,6 +498,7 @@ class _CppTestSelector(_Selector):
|
|||
|
||||
class _DbTestSelectorConfig(_SelectorConfig):
|
||||
"""_Selector config subclass for db_test tests."""
|
||||
|
||||
def __init__(self, binary=None, roots=None, include_suites=None):
|
||||
_SelectorConfig.__init__(self, roots=roots)
|
||||
self.include_suites = utils.default_if_none(include_suites, [])
|
||||
|
|
@ -510,6 +515,7 @@ class _DbTestSelectorConfig(_SelectorConfig):
|
|||
|
||||
class _DbTestSelector(_Selector):
|
||||
"""_Selector subclass for db_test tests."""
|
||||
|
||||
def __init__(self, test_file_explorer):
|
||||
_Selector.__init__(self, test_file_explorer, tests_are_files=False)
|
||||
|
||||
|
|
@ -542,19 +548,22 @@ class _DbTestSelector(_Selector):
|
|||
|
||||
class _JsonSchemaTestSelectorConfig(_SelectorConfig):
|
||||
"""_SelectorConfig subclass for json_schema_test tests."""
|
||||
|
||||
def __init__(self, roots, include_files=None, exclude_files=None):
|
||||
_SelectorConfig.__init__(self, roots=roots,
|
||||
include_files=include_files, exclude_files=exclude_files)
|
||||
_SelectorConfig.__init__(self, roots=roots, include_files=include_files,
|
||||
exclude_files=exclude_files)
|
||||
|
||||
|
||||
class _SleepTestCaseSelectorConfig(_SelectorConfig):
|
||||
"""_SelectorConfig subclass for sleep_test tests."""
|
||||
|
||||
def __init__(self, roots):
|
||||
_SelectorConfig.__init__(self, roots=roots)
|
||||
|
||||
|
||||
class _SleepTestCaseSelector(_Selector):
|
||||
"""_Selector subclass for sleep_test tests."""
|
||||
|
||||
def __init__(self, test_file_explorer):
|
||||
_Selector.__init__(self, test_file_explorer, tests_are_files=False)
|
||||
|
||||
|
|
@ -565,7 +574,6 @@ class _SleepTestCaseSelector(_Selector):
|
|||
|
||||
_DEFAULT_TEST_FILE_EXPLORER = TestFileExplorer()
|
||||
|
||||
|
||||
_SELECTOR_REGISTRY = {
|
||||
"cpp_integration_test": (_CppTestSelectorConfig, _CppTestSelector),
|
||||
"cpp_unit_test": (_CppTestSelectorConfig, _CppTestSelector),
|
||||
@ -66,7 +66,6 @@ def register(logger, suites, start_time):
|
|||
|
||||
testing.suite.Suite.log_summaries(logger, suites, time.time() - start_time)
|
||||
|
||||
|
||||
# On Windows spawn a thread to wait on an event object for signal to dump stacks. For Cygwin
|
||||
# platforms, we use a signal handler since it supports POSIX signals.
|
||||
if _is_windows:
|
||||
|
|
@ -77,10 +76,8 @@ def register(logger, suites, start_time):
|
|||
security_attributes = None
|
||||
manual_reset = False
|
||||
initial_state = False
|
||||
task_timeout_handle = win32event.CreateEvent(security_attributes,
|
||||
manual_reset,
|
||||
initial_state,
|
||||
event_name)
|
||||
task_timeout_handle = win32event.CreateEvent(security_attributes, manual_reset,
|
||||
initial_state, event_name)
|
||||
except win32event.error as err:
|
||||
logger.error("Exception from win32event.CreateEvent with error: %s" % err)
|
||||
return
|
||||
@ -71,9 +71,9 @@ def get_suites(suite_files, test_files):
|
|||
if test_files:
|
||||
# Do not change the execution order of the tests passed as args, unless a tag option is
|
||||
# specified. If an option is specified, then sort the tests for consistent execution order.
|
||||
_config.ORDER_TESTS_BY_NAME = any(tag_filter is not None for
|
||||
tag_filter in (_config.EXCLUDE_WITH_ANY_TAGS,
|
||||
_config.INCLUDE_WITH_ANY_TAGS))
|
||||
_config.ORDER_TESTS_BY_NAME = any(
|
||||
tag_filter is not None
|
||||
for tag_filter in (_config.EXCLUDE_WITH_ANY_TAGS, _config.INCLUDE_WITH_ANY_TAGS))
|
||||
# Build configuration for list of files to run.
|
||||
suite_roots = _make_suite_roots(test_files)
|
||||
|
||||
|
|
@ -109,6 +109,6 @@ def _get_yaml_config(kind, pathname):
|
|||
pathname = resmokeconfig.NAMED_SUITES[pathname] # Expand 'pathname' to full path.
|
||||
|
||||
if not utils.is_yaml_file(pathname) or not os.path.isfile(pathname):
|
||||
raise optparse.OptionValueError("Expected a %s YAML config, but got '%s'"
|
||||
% (kind, pathname))
|
||||
raise optparse.OptionValueError("Expected a %s YAML config, but got '%s'" % (kind,
|
||||
pathname))
|
||||
return utils.load_yaml_file(pathname)
|
||||
@@ -30,14 +30,8 @@ class TestSuiteExecutor(object):
_TIMEOUT = 24 * 60 * 60 # =1 day (a long time to have tests run)

def __init__(self,
exec_logger,
suite,
config=None,
fixture=None,
hooks=None,
archive_instance=None,
archive=None):
def __init__(self, exec_logger, suite, config=None, fixture=None, hooks=None,
archive_instance=None, archive=None):
"""
Initializes the TestSuiteExecutor with the test suite to run.
"""
@ -55,8 +49,8 @@ class TestSuiteExecutor(object):
|
|||
|
||||
self.archival = None
|
||||
if archive_instance:
|
||||
self.archival = archival.HookTestArchival(
|
||||
suite, self.hooks_config, archive_instance, archive)
|
||||
self.archival = archival.HookTestArchival(suite, self.hooks_config, archive_instance,
|
||||
archive)
|
||||
|
||||
self._suite = suite
|
||||
|
||||
|
|
@ -147,8 +141,7 @@ class TestSuiteExecutor(object):
|
|||
try:
|
||||
job.fixture.setup()
|
||||
except:
|
||||
self.logger.exception(
|
||||
"Encountered an error while setting up %s.", job.fixture)
|
||||
self.logger.exception("Encountered an error while setting up %s.", job.fixture)
|
||||
return False
|
||||
|
||||
# Once they have all been started, wait for them to become available.
|
||||
|
|
@ -156,8 +149,8 @@ class TestSuiteExecutor(object):
|
|||
try:
|
||||
job.fixture.await_ready()
|
||||
except:
|
||||
self.logger.exception(
|
||||
"Encountered an error while waiting for %s to be ready", job.fixture)
|
||||
self.logger.exception("Encountered an error while waiting for %s to be ready",
|
||||
job.fixture)
|
||||
return False
|
||||
return True
|
||||
|
||||
|
|
@ -177,8 +170,7 @@ class TestSuiteExecutor(object):
|
|||
try:
|
||||
# Run each Job instance in its own thread.
|
||||
for job in self._jobs:
|
||||
t = threading.Thread(target=job,
|
||||
args=(test_queue, interrupt_flag),
|
||||
t = threading.Thread(target=job, args=(test_queue, interrupt_flag),
|
||||
kwargs=dict(teardown_flag=teardown_flag))
|
||||
# Do not wait for tests to finish executing if interrupted by the user.
|
||||
t.daemon = True
|
||||
|
|
@ -258,10 +250,7 @@ class TestSuiteExecutor(object):
|
|||
hook_class = hook_config.pop("class")
|
||||
|
||||
hook_logger = self.logger.new_hook_logger(hook_class, fixture.logger)
|
||||
hook = _hooks.make_hook(hook_class,
|
||||
hook_logger,
|
||||
fixture,
|
||||
**hook_config)
|
||||
hook = _hooks.make_hook(hook_class, hook_logger, fixture, **hook_config)
|
||||
hooks.append(hook)
|
||||
|
||||
return hooks
|
||||
|
|
@ -278,12 +267,7 @@ class TestSuiteExecutor(object):
|
|||
|
||||
report = _report.TestReport(job_logger, self._suite.options)
|
||||
|
||||
return _job.Job(job_logger,
|
||||
fixture,
|
||||
hooks,
|
||||
report,
|
||||
self.archival,
|
||||
self._suite.options)
|
||||
return _job.Job(job_logger, fixture, hooks, report, self.archival, self._suite.options)
|
||||
|
||||
def _make_test_queue(self):
|
||||
"""
|
||||
|
|
@ -297,10 +281,8 @@ class TestSuiteExecutor(object):
|
|||
# Put all the test cases in a queue.
|
||||
queue = _queue.Queue()
|
||||
for test_name in self._suite.tests:
|
||||
test_case = testcases.make_test_case(self._suite.test_kind,
|
||||
test_queue_logger,
|
||||
test_name,
|
||||
**self.test_config)
|
||||
test_case = testcases.make_test_case(self._suite.test_kind, test_queue_logger,
|
||||
test_name, **self.test_config)
|
||||
queue.put(test_case)
|
||||
|
||||
# Add sentinel value for each job to indicate when there are no more items to process.
|
||||
@ -8,10 +8,8 @@ from .interface import NoOpFixture as _NoOpFixture
|
|||
from .interface import make_fixture
|
||||
from ...utils import autoloader as _autoloader
|
||||
|
||||
|
||||
NOOP_FIXTURE_CLASS = _NoOpFixture.REGISTERED_NAME
|
||||
|
||||
|
||||
# We dynamically load all modules in the fixtures/ package so that any Fixture classes declared
|
||||
# within them are automatically registered.
|
||||
_autoloader.load_all_modules(name=__name__, path=__path__)
|
||||
@ -16,7 +16,6 @@ from ... import logging
|
|||
from ... import utils
|
||||
from ...utils import registry
|
||||
|
||||
|
||||
_FIXTURES = {}
|
||||
|
||||
|
||||
|
|
@ -145,8 +144,7 @@ class Fixture(object):
|
|||
kwargs["connect"] = True
|
||||
|
||||
return pymongo.MongoClient(host=self.get_driver_connection_url(),
|
||||
read_preference=read_preference,
|
||||
**kwargs)
|
||||
read_preference=read_preference, **kwargs)
|
||||
|
||||
def __str__(self):
|
||||
return "%s (Job #%d)" % (self.__class__.__name__, self.job_num)
|
||||
@@ -25,21 +25,11 @@ class ReplicaSetFixture(interface.ReplFixture):
# Error response codes copied from mongo/base/error_codes.err.
_NODE_NOT_FOUND = 74

def __init__(self,
logger,
job_num,
mongod_executable=None,
mongod_options=None,
dbpath_prefix=None,
preserve_dbpath=False,
num_nodes=2,
start_initial_sync_node=False,
write_concern_majority_journal_default=None,
auth_options=None,
replset_config_options=None,
voting_secondaries=None,
all_nodes_electable=False,
use_replica_set_connection_string=None):
def __init__(self, logger, job_num, mongod_executable=None, mongod_options=None,
dbpath_prefix=None, preserve_dbpath=False, num_nodes=2,
start_initial_sync_node=False, write_concern_majority_journal_default=None,
auth_options=None, replset_config_options=None, voting_secondaries=None,
all_nodes_electable=False, use_replica_set_connection_string=None):

interface.ReplFixture.__init__(self, logger, job_num, dbpath_prefix=dbpath_prefix)
@ -117,11 +107,11 @@ class ReplicaSetFixture(interface.ReplFixture):
|
|||
member_info["votes"] = 0
|
||||
members.append(member_info)
|
||||
if self.initial_sync_node:
|
||||
members.append({"_id": self.initial_sync_node_idx,
|
||||
"host": self.initial_sync_node.get_internal_connection_string(),
|
||||
"priority": 0,
|
||||
"hidden": 1,
|
||||
"votes": 0})
|
||||
members.append({
|
||||
"_id": self.initial_sync_node_idx,
|
||||
"host": self.initial_sync_node.get_internal_connection_string(), "priority": 0,
|
||||
"hidden": 1, "votes": 0
|
||||
})
|
||||
|
||||
config = {"_id": self.replset_name}
|
||||
client = self.nodes[0].mongo_client()
|
||||
|
|
@ -137,13 +127,13 @@ class ReplicaSetFixture(interface.ReplFixture):
|
|||
return
|
||||
|
||||
if self.write_concern_majority_journal_default is not None:
|
||||
config["writeConcernMajorityJournalDefault"] = self.write_concern_majority_journal_default
|
||||
config[
|
||||
"writeConcernMajorityJournalDefault"] = self.write_concern_majority_journal_default
|
||||
else:
|
||||
server_status = client.admin.command({"serverStatus": 1})
|
||||
cmd_line_opts = client.admin.command({"getCmdLineOpts": 1})
|
||||
if not (server_status["storageEngine"]["persistent"] and
|
||||
cmd_line_opts["parsed"].get("storage", {}).get(
|
||||
"journal", {}).get("enabled", True)):
|
||||
if not (server_status["storageEngine"]["persistent"] and cmd_line_opts["parsed"].get(
|
||||
"storage", {}).get("journal", {}).get("enabled", True)):
|
||||
config["writeConcernMajorityJournalDefault"] = False
|
||||
|
||||
if self.replset_config_options.get("configsvr", False):
|
||||
|
|
@ -326,11 +316,9 @@ class ReplicaSetFixture(interface.ReplFixture):
|
|||
mongod_options["replSet"] = replset_name
|
||||
mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "node{}".format(index))
|
||||
|
||||
return standalone.MongoDFixture(mongod_logger,
|
||||
self.job_num,
|
||||
mongod_executable=self.mongod_executable,
|
||||
mongod_options=mongod_options,
|
||||
preserve_dbpath=self.preserve_dbpath)
|
||||
return standalone.MongoDFixture(
|
||||
mongod_logger, self.job_num, mongod_executable=self.mongod_executable,
|
||||
mongod_options=mongod_options, preserve_dbpath=self.preserve_dbpath)
|
||||
|
||||
def _get_logger_for_mongod(self, index):
|
||||
"""
|
||||
@@ -29,23 +29,11 @@ class ShardedClusterFixture(interface.Fixture):
_CONFIGSVR_REPLSET_NAME = "config-rs"
_SHARD_REPLSET_NAME_PREFIX = "shard-rs"

def __init__(self,
logger,
job_num,
mongos_executable=None,
mongos_options=None,
mongod_executable=None,
mongod_options=None,
dbpath_prefix=None,
preserve_dbpath=False,
num_shards=1,
num_rs_nodes_per_shard=None,
separate_configsvr=True,
enable_sharding=None,
enable_balancer=True,
auth_options=None,
configsvr_options=None,
shard_options=None):
def __init__(self, logger, job_num, mongos_executable=None, mongos_options=None,
mongod_executable=None, mongod_options=None, dbpath_prefix=None,
preserve_dbpath=False, num_shards=1, num_rs_nodes_per_shard=None,
separate_configsvr=True, enable_sharding=None, enable_balancer=True,
auth_options=None, configsvr_options=None, shard_options=None):
"""
Initializes ShardedClusterFixture with the different options to
the mongod and mongos processes.
@ -174,9 +162,9 @@ class ShardedClusterFixture(interface.Fixture):
|
|||
Returns true if the config server, all shards, and the mongos
|
||||
are all still operating, and false otherwise.
|
||||
"""
|
||||
return (self.configsvr is not None and self.configsvr.is_running() and
|
||||
all(shard.is_running() for shard in self.shards) and
|
||||
self.mongos is not None and self.mongos.is_running())
|
||||
return (self.configsvr is not None and self.configsvr.is_running()
|
||||
and all(shard.is_running() for shard in self.shards) and self.mongos is not None
|
||||
and self.mongos.is_running())
|
||||
|
||||
def get_internal_connection_string(self):
|
||||
if self.mongos is None:
|
||||
|
|
@ -212,15 +200,11 @@ class ShardedClusterFixture(interface.Fixture):
|
|||
mongod_options["replSet"] = ShardedClusterFixture._CONFIGSVR_REPLSET_NAME
|
||||
mongod_options["storageEngine"] = "wiredTiger"
|
||||
|
||||
return replicaset.ReplicaSetFixture(mongod_logger,
|
||||
self.job_num,
|
||||
mongod_executable=mongod_executable,
|
||||
mongod_options=mongod_options,
|
||||
preserve_dbpath=preserve_dbpath,
|
||||
num_nodes=num_nodes,
|
||||
auth_options=auth_options,
|
||||
replset_config_options=replset_config_options,
|
||||
**configsvr_options)
|
||||
return replicaset.ReplicaSetFixture(
|
||||
mongod_logger, self.job_num, mongod_executable=mongod_executable,
|
||||
mongod_options=mongod_options, preserve_dbpath=preserve_dbpath, num_nodes=num_nodes,
|
||||
auth_options=auth_options, replset_config_options=replset_config_options,
|
||||
**configsvr_options)
|
||||
|
||||
def _new_rs_shard(self, index, num_rs_nodes_per_shard):
|
||||
"""
|
||||
|
|
@ -245,15 +229,11 @@ class ShardedClusterFixture(interface.Fixture):
|
|||
mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard{}".format(index))
|
||||
mongod_options["replSet"] = ShardedClusterFixture._SHARD_REPLSET_NAME_PREFIX + str(index)
|
||||
|
||||
return replicaset.ReplicaSetFixture(mongod_logger,
|
||||
self.job_num,
|
||||
mongod_executable=mongod_executable,
|
||||
mongod_options=mongod_options,
|
||||
preserve_dbpath=preserve_dbpath,
|
||||
num_nodes=num_rs_nodes_per_shard,
|
||||
auth_options=auth_options,
|
||||
replset_config_options=replset_config_options,
|
||||
**shard_options)
|
||||
return replicaset.ReplicaSetFixture(
|
||||
mongod_logger, self.job_num, mongod_executable=mongod_executable,
|
||||
mongod_options=mongod_options, preserve_dbpath=preserve_dbpath,
|
||||
num_nodes=num_rs_nodes_per_shard, auth_options=auth_options,
|
||||
replset_config_options=replset_config_options, **shard_options)
|
||||
|
||||
def _new_standalone_shard(self, index):
|
||||
"""
|
||||
|
|
@ -273,12 +253,9 @@ class ShardedClusterFixture(interface.Fixture):
|
|||
mongod_options["shardsvr"] = ""
|
||||
mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard{}".format(index))
|
||||
|
||||
return standalone.MongoDFixture(mongod_logger,
|
||||
self.job_num,
|
||||
mongod_executable=mongod_executable,
|
||||
mongod_options=mongod_options,
|
||||
preserve_dbpath=preserve_dbpath,
|
||||
**shard_options)
|
||||
return standalone.MongoDFixture(
|
||||
mongod_logger, self.job_num, mongod_executable=mongod_executable,
|
||||
mongod_options=mongod_options, preserve_dbpath=preserve_dbpath, **shard_options)
|
||||
|
||||
def _new_mongos(self):
|
||||
"""
|
||||
|
|
@ -295,9 +272,7 @@ class ShardedClusterFixture(interface.Fixture):
|
|||
else:
|
||||
mongos_options["configdb"] = "localhost:{}".format(self.shards[0].port)
|
||||
|
||||
return _MongoSFixture(mongos_logger,
|
||||
self.job_num,
|
||||
mongos_executable=self.mongos_executable,
|
||||
return _MongoSFixture(mongos_logger, self.job_num, mongos_executable=self.mongos_executable,
|
||||
mongos_options=mongos_options)
|
||||
|
||||
def _add_shard(self, client, shard):
|
||||
|
|
@ -321,11 +296,7 @@ class _MongoSFixture(interface.Fixture):
|
|||
|
||||
REGISTERED_NAME = registry.LEAVE_UNREGISTERED
|
||||
|
||||
def __init__(self,
|
||||
logger,
|
||||
job_num,
|
||||
mongos_executable=None,
|
||||
mongos_options=None):
|
||||
def __init__(self, logger, job_num, mongos_executable=None, mongos_options=None):
|
||||
|
||||
interface.Fixture.__init__(self, logger, job_num)
|
||||
|
||||
|
|
@ -342,8 +313,7 @@ class _MongoSFixture(interface.Fixture):
|
|||
self.mongos_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
|
||||
self.port = self.mongos_options["port"]
|
||||
|
||||
mongos = core.programs.mongos_program(self.logger,
|
||||
executable=self.mongos_executable,
|
||||
mongos = core.programs.mongos_program(self.logger, executable=self.mongos_executable,
|
||||
**self.mongos_options)
|
||||
try:
|
||||
self.logger.info("Starting mongos on port %d...\n%s", self.port, mongos.as_command())
|
||||
|
|
@ -367,8 +337,8 @@ class _MongoSFixture(interface.Fixture):
|
|||
exit_code = self.mongos.poll()
|
||||
if exit_code is not None:
|
||||
raise errors.ServerFailure("Could not connect to mongos on port {}, process ended"
|
||||
" unexpectedly with code {}.".format(self.port,
|
||||
exit_code))
|
||||
" unexpectedly with code {}.".format(
|
||||
self.port, exit_code))
|
||||
|
||||
try:
|
||||
# Use a shorter connection timeout to more closely satisfy the requested deadline.
|
||||
@ -27,13 +27,8 @@ class MongoDFixture(interface.Fixture):
|
|||
|
||||
AWAIT_READY_TIMEOUT_SECS = 300
|
||||
|
||||
def __init__(self,
|
||||
logger,
|
||||
job_num,
|
||||
mongod_executable=None,
|
||||
mongod_options=None,
|
||||
dbpath_prefix=None,
|
||||
preserve_dbpath=False):
|
||||
def __init__(self, logger, job_num, mongod_executable=None, mongod_options=None,
|
||||
dbpath_prefix=None, preserve_dbpath=False):
|
||||
|
||||
interface.Fixture.__init__(self, logger, job_num, dbpath_prefix=dbpath_prefix)
|
||||
|
||||
|
|
@ -49,8 +44,7 @@ class MongoDFixture(interface.Fixture):
|
|||
# The dbpath in mongod_options takes precedence over other settings to make it easier for
|
||||
# users to specify a dbpath containing data to test against.
|
||||
if "dbpath" not in self.mongod_options:
|
||||
self.mongod_options["dbpath"] = os.path.join(
|
||||
self._dbpath_prefix, config.FIXTURE_SUBDIR)
|
||||
self.mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, config.FIXTURE_SUBDIR)
|
||||
self._dbpath = self.mongod_options["dbpath"]
|
||||
|
||||
self.mongod = None
|
||||
|
|
@ -70,8 +64,7 @@ class MongoDFixture(interface.Fixture):
|
|||
self.mongod_options["port"] = core.network.PortAllocator.next_fixture_port(self.job_num)
|
||||
self.port = self.mongod_options["port"]
|
||||
|
||||
mongod = core.programs.mongod_program(self.logger,
|
||||
executable=self.mongod_executable,
|
||||
mongod = core.programs.mongod_program(self.logger, executable=self.mongod_executable,
|
||||
**self.mongod_options)
|
||||
try:
|
||||
self.logger.info("Starting mongod on port %d...\n%s", self.port, mongod.as_command())
|
||||
@ -92,25 +92,19 @@ class HookTestArchival(object):
|
|||
# Normalize test path from a test or hook name.
|
||||
test_path = \
|
||||
test_name.replace("/", "_").replace("\\", "_").replace(".", "_").replace(":", "_")
|
||||
file_name = "mongo-data-{}-{}-{}-{}.tgz".format(
|
||||
config.EVERGREEN_TASK_ID,
|
||||
test_path,
|
||||
config.EVERGREEN_EXECUTION,
|
||||
self._tests_repeat[test_name])
|
||||
file_name = "mongo-data-{}-{}-{}-{}.tgz".format(config.EVERGREEN_TASK_ID, test_path,
|
||||
config.EVERGREEN_EXECUTION,
|
||||
self._tests_repeat[test_name])
|
||||
# Retrieve root directory for all dbPaths from fixture.
|
||||
input_files = test.fixture.get_dbpath_prefix()
|
||||
s3_bucket = config.ARCHIVE_BUCKET
|
||||
s3_path = "{}/{}/{}/datafiles/{}".format(
|
||||
config.EVERGREEN_PROJECT_NAME,
|
||||
config.EVERGREEN_VARIANT_NAME,
|
||||
config.EVERGREEN_REVISION,
|
||||
file_name)
|
||||
s3_path = "{}/{}/{}/datafiles/{}".format(config.EVERGREEN_PROJECT_NAME,
|
||||
config.EVERGREEN_VARIANT_NAME,
|
||||
config.EVERGREEN_REVISION, file_name)
|
||||
display_name = "Data files {} - Execution {} Repetition {}".format(
|
||||
test_name,
|
||||
config.EVERGREEN_EXECUTION,
|
||||
self._tests_repeat[test_name])
|
||||
test_name, config.EVERGREEN_EXECUTION, self._tests_repeat[test_name])
|
||||
logger.info("Archiving data files for test %s from %s", test_name, input_files)
|
||||
status, message = self.archive_instance.archive_files_to_s3(
|
||||
display_name, input_files, s3_bucket, s3_path)
|
||||
status, message = self.archive_instance.archive_files_to_s3(display_name, input_files,
|
||||
s3_bucket, s3_path)
|
||||
if status:
|
||||
logger.warning("Archive failed for %s: %s", test_name, message)
|
||||
@ -1,4 +1,5 @@
|
|||
"""
|
||||
"""Testing hooks package.
|
||||
|
||||
Package containing classes to customize the behavior of a test fixture
|
||||
by allowing special code to be executed before or after each test, and
|
||||
before or after each suite.
|
||||
|
|
@ -9,7 +10,6 @@ from __future__ import absolute_import
|
|||
from .interface import make_hook
|
||||
from ...utils import autoloader as _autoloader
|
||||
|
||||
|
||||
# We dynamically load all modules in the hooks/ package so that any Hook classes declared
|
||||
# within them are automatically registered.
|
||||
_autoloader.load_all_modules(name=__name__, path=__path__)
|
||||
@ -35,8 +35,8 @@ class CleanEveryN(interface.Hook):
|
|||
if self.tests_run < self.n:
|
||||
return
|
||||
|
||||
hook_test_case = CleanEveryNTestCase.create_after_test(
|
||||
self.logger.test_case_logger, test, self)
|
||||
hook_test_case = CleanEveryNTestCase.create_after_test(self.logger.test_case_logger, test,
|
||||
self)
|
||||
hook_test_case.configure(self.fixture)
|
||||
hook_test_case.run_dynamic_test(test_report)
|
||||
|
||||
@ -68,8 +68,7 @@ class CombineBenchmarkResults(interface.Hook):
|
|||
|
||||
for name, report in self.benchmark_reports.items():
|
||||
test_report = {
|
||||
"name": name,
|
||||
"results": report.generate_perf_plugin_dict(),
|
||||
"name": name, "results": report.generate_perf_plugin_dict(),
|
||||
"context": report.context._asdict()
|
||||
}
|
||||
|
||||
|
|
@ -124,11 +123,7 @@ class _BenchmarkThreadsReport(object):
|
|||
}
|
||||
"""
|
||||
CONTEXT_FIELDS = [
|
||||
"date",
|
||||
"cpu_scaling_enabled",
|
||||
"num_cpus",
|
||||
"mhz_per_cpu",
|
||||
"library_build_type"
|
||||
"date", "cpu_scaling_enabled", "num_cpus", "mhz_per_cpu", "library_build_type"
|
||||
]
|
||||
Context = collections.namedtuple("Context", CONTEXT_FIELDS)
|
||||
|
||||
|
|
@ -163,8 +158,8 @@ class _BenchmarkThreadsReport(object):
|
|||
|
||||
res = {}
|
||||
for thread_count, reports in self.thread_benchmark_map.items():
|
||||
if (thread_count.endswith("median") or thread_count.endswith("mean") or
|
||||
thread_count.endswith("stddev")):
|
||||
if (thread_count.endswith("median") or thread_count.endswith("mean")
|
||||
or thread_count.endswith("stddev")):
|
||||
# We don't use Benchmark's included statistics for now because they clutter up the
|
||||
# graph.
|
||||
continue
|
||||
|
|
|
|||
|
|
@ -14,12 +14,9 @@ class CheckReplDBHash(jsfile.JSHook):
|
|||
Checks that the dbhashes of all non-local databases and non-replicated system collections
|
||||
match on the primary and secondaries.
|
||||
"""
|
||||
|
||||
def __init__(self, hook_logger, fixture, shell_options=None):
|
||||
description = "Check dbhashes of all replica set or master/slave members"
|
||||
js_filename = os.path.join("jstests", "hooks", "run_check_repl_dbhash.js")
|
||||
jsfile.JSHook.__init__(self,
|
||||
hook_logger,
|
||||
fixture,
|
||||
js_filename,
|
||||
description,
|
||||
jsfile.JSHook.__init__(self, hook_logger, fixture, js_filename, description,
|
||||
shell_options=shell_options)
|
||||
|
|
|
|||
|
|
@@ -68,15 +68,12 @@ class BackgroundInitialSyncTestCase(jsfile.DynamicJSTestCase):
# If it's been 'n' tests so far, wait for the initial sync node to finish syncing.
if self._hook.tests_run >= self._hook.n:
self.logger.info(
"%d tests have been run against the fixture, waiting for initial sync"
" node to go into SECONDARY state",
self._hook.tests_run)
self.logger.info("%d tests have been run against the fixture, waiting for initial sync"
" node to go into SECONDARY state", self._hook.tests_run)
self._hook.tests_run = 0

cmd = bson.SON([("replSetTest", 1),
("waitForMemberState", 2),
("timeoutMillis", 20 * 60 * 1000)])
cmd = bson.SON([("replSetTest", 1), ("waitForMemberState", 2), ("timeoutMillis",
20 * 60 * 1000)])
sync_node_conn.admin.command(cmd)

# Check if the initial sync node is in SECONDARY state. If it's been 'n' tests, then it
@ -90,11 +87,9 @@ class BackgroundInitialSyncTestCase(jsfile.DynamicJSTestCase):
|
|||
self.logger.exception("{0} failed: {1}".format(self._hook.description, msg))
|
||||
raise errors.TestFailure(msg)
|
||||
|
||||
self.logger.info(
|
||||
"Initial sync node is in state %d, not state SECONDARY (2)."
|
||||
" Skipping BackgroundInitialSync hook for %s",
|
||||
state,
|
||||
self._base_test_name)
|
||||
self.logger.info("Initial sync node is in state %d, not state SECONDARY (2)."
|
||||
" Skipping BackgroundInitialSync hook for %s", state,
|
||||
self._base_test_name)
|
||||
|
||||
# If we have not restarted initial sync since the last time we ran the data
|
||||
# validation, restart initial sync with a 20% probability.
|
||||
|
|
@ -175,8 +170,8 @@ class IntermediateInitialSyncTestCase(jsfile.DynamicJSTestCase):
|
|||
JS_FILENAME = os.path.join("jstests", "hooks", "run_initial_sync_node_validation.js")
|
||||
|
||||
def __init__(self, logger, test_name, description, base_test_name, hook):
|
||||
jsfile.DynamicJSTestCase.__init__(self, logger, test_name, description,
|
||||
base_test_name, hook, self.JS_FILENAME)
|
||||
jsfile.DynamicJSTestCase.__init__(self, logger, test_name, description, base_test_name,
|
||||
hook, self.JS_FILENAME)
|
||||
|
||||
def run_test(self):
|
||||
sync_node = self.fixture.get_initial_sync_node()
|
||||
|
|
@ -190,9 +185,8 @@ class IntermediateInitialSyncTestCase(jsfile.DynamicJSTestCase):
|
|||
|
||||
# Do initial sync round.
|
||||
self.logger.info("Waiting for initial sync node to go into SECONDARY state")
|
||||
cmd = bson.SON([("replSetTest", 1),
|
||||
("waitForMemberState", 2),
|
||||
("timeoutMillis", 20 * 60 * 1000)])
|
||||
cmd = bson.SON([("replSetTest", 1), ("waitForMemberState", 2), ("timeoutMillis",
|
||||
20 * 60 * 1000)])
|
||||
sync_node_conn.admin.command(cmd)
|
||||
|
||||
# Run data validation and dbhash checking.
|
||||
@ -11,7 +11,6 @@ from ... import errors
|
|||
from ...logging import loggers
|
||||
from ...utils import registry
|
||||
|
||||
|
||||
_HOOKS = {}
|
||||
|
||||
|
||||
@ -5,7 +5,6 @@ JavaScript file.
|
|||
|
||||
from __future__ import absolute_import
|
||||
|
||||
|
||||
from . import interface
|
||||
from ..testcases import jstest
|
||||
from ...utils import registry
|
||||
|
|
@ -38,10 +37,11 @@ class JSHook(interface.Hook):
|
|||
|
||||
class DynamicJSTestCase(interface.DynamicTestCase):
|
||||
"""A dynamic TestCase that runs a JavaScript file."""
|
||||
def __init__(self, logger, test_name, description, base_test_name, hook,
|
||||
js_filename, shell_options=None):
|
||||
interface.DynamicTestCase.__init__(self, logger, test_name, description,
|
||||
base_test_name, hook)
|
||||
|
||||
def __init__(self, logger, test_name, description, base_test_name, hook, js_filename,
|
||||
shell_options=None):
|
||||
interface.DynamicTestCase.__init__(self, logger, test_name, description, base_test_name,
|
||||
hook)
|
||||
self._js_test = jstest.JSTestCase(logger, js_filename, shell_options=shell_options)
|
||||
|
||||
def override_logger(self, new_logger):
|
||||
@ -14,12 +14,9 @@ class CheckReplOplogs(jsfile.JSHook):
|
|||
"""
|
||||
Checks that local.oplog.rs matches on the primary and secondaries.
|
||||
"""
|
||||
|
||||
def __init__(self, hook_logger, fixture, shell_options=None):
|
||||
description = "Check oplogs of all replica set members"
|
||||
js_filename = os.path.join("jstests", "hooks", "run_check_repl_oplogs.js")
|
||||
jsfile.JSHook.__init__(self,
|
||||
hook_logger,
|
||||
fixture,
|
||||
js_filename,
|
||||
description,
|
||||
jsfile.JSHook.__init__(self, hook_logger, fixture, js_filename, description,
|
||||
shell_options=shell_options)
|
||||
@ -91,12 +91,11 @@ class PeriodicKillSecondaries(interface.Hook):
|
|||
# applying any oplog entries while the test is running.
|
||||
client = secondary.mongo_client()
|
||||
try:
|
||||
client.admin.command(bson.SON([
|
||||
("configureFailPoint", "rsSyncApplyStop"),
|
||||
("mode", "alwaysOn")]))
|
||||
client.admin.command(
|
||||
bson.SON([("configureFailPoint", "rsSyncApplyStop"), ("mode", "alwaysOn")]))
|
||||
except pymongo.errors.OperationFailure as err:
|
||||
self.logger.exception(
|
||||
"Unable to disable oplog application on the mongod on port %d", secondary.port)
|
||||
self.logger.exception("Unable to disable oplog application on the mongod on port %d",
|
||||
secondary.port)
|
||||
raise errors.ServerFailure(
|
||||
"Unable to disable oplog application on the mongod on port {}: {}".format(
|
||||
secondary.port, err.args[0]))
|
||||
|
|
@ -106,13 +105,11 @@ class PeriodicKillSecondaries(interface.Hook):
|
|||
# oplog entries.
|
||||
client = secondary.mongo_client()
|
||||
try:
|
||||
client.admin.command(bson.SON([
|
||||
("configureFailPoint", "rsSyncApplyStop"),
|
||||
("mode", "off")]))
|
||||
client.admin.command(
|
||||
bson.SON([("configureFailPoint", "rsSyncApplyStop"), ("mode", "off")]))
|
||||
except pymongo.errors.OperationFailure as err:
|
||||
self.logger.exception(
|
||||
"Unable to re-enable oplog application on the mongod on port %d",
|
||||
secondary.port)
|
||||
self.logger.exception("Unable to re-enable oplog application on the mongod on port %d",
|
||||
secondary.port)
|
||||
raise errors.ServerFailure(
|
||||
"Unable to re-enable oplog application on the mongod on port {}: {}".format(
|
||||
secondary.port, err.args[0]))
|
||||
|
|
@ -120,8 +117,8 @@ class PeriodicKillSecondaries(interface.Hook):
|
|||
|
||||
class PeriodicKillSecondariesTestCase(interface.DynamicTestCase):
|
||||
def __init__(self, logger, test_name, description, base_test_name, hook, test_report):
|
||||
interface.DynamicTestCase.__init__(self, logger, test_name, description,
|
||||
base_test_name, hook)
|
||||
interface.DynamicTestCase.__init__(self, logger, test_name, description, base_test_name,
|
||||
hook)
|
||||
self._test_report = test_report
|
||||
|
||||
def run_test(self):
|
||||
|
|
@ -243,10 +240,11 @@ class PeriodicKillSecondariesTestCase(interface.DynamicTestCase):
|
|||
client = secondary.mongo_client()
|
||||
minvalid_doc = client.local["replset.minvalid"].find_one()
|
||||
oplog_truncate_after_doc = client.local["replset.oplogTruncateAfterPoint"].find_one()
|
||||
self.logger.info("minValid: {}, oTAP: {}".format(minvalid_doc, oplog_truncate_after_doc))
|
||||
self.logger.info("minValid: {}, oTAP: {}".format(minvalid_doc,
|
||||
oplog_truncate_after_doc))
|
||||
|
||||
latest_oplog_doc = client.local["oplog.rs"].find_one(
|
||||
sort=[("$natural", pymongo.DESCENDING)])
|
||||
latest_oplog_doc = client.local["oplog.rs"].find_one(sort=[("$natural",
|
||||
pymongo.DESCENDING)])
|
||||
|
||||
null_ts = bson.Timestamp(0, 0)
|
||||
|
||||
|
|
@ -255,8 +253,8 @@ class PeriodicKillSecondariesTestCase(interface.DynamicTestCase):
|
|||
if latest_oplog_doc is not None:
|
||||
latest_oplog_entry_ts = latest_oplog_doc.get("ts")
|
||||
if latest_oplog_entry_ts is None:
|
||||
raise errors.ServerFailure("Latest oplog entry had no 'ts' field: {}".format(
|
||||
latest_oplog_doc))
|
||||
raise errors.ServerFailure(
|
||||
"Latest oplog entry had no 'ts' field: {}".format(latest_oplog_doc))
|
||||
|
||||
# The "oplogTruncateAfterPoint" document may not exist at startup. If so, we default
|
||||
# it to null.
|
||||
|
|
@ -310,9 +308,9 @@ class PeriodicKillSecondariesTestCase(interface.DynamicTestCase):
|
|||
raise errors.ServerFailure(
|
||||
"The condition minValid <= oplogTruncateAfterPoint ({} <= {}) doesn't"
|
||||
" hold: minValid document={}, oplogTruncateAfterPoint document={},"
|
||||
" latest oplog entry={}".format(
|
||||
minvalid_ts, oplog_truncate_after_ts, minvalid_doc,
|
||||
oplog_truncate_after_doc, latest_oplog_doc))
|
||||
" latest oplog entry={}".format(minvalid_ts, oplog_truncate_after_ts,
|
||||
minvalid_doc, oplog_truncate_after_doc,
|
||||
latest_oplog_doc))
|
||||
|
||||
# minvalid <= latest oplog entry
|
||||
# "minValid" is set to the end of a batch after the batch is written to the oplog.
|
||||
|
|
@ -321,8 +319,7 @@ class PeriodicKillSecondariesTestCase(interface.DynamicTestCase):
|
|||
raise errors.ServerFailure(
|
||||
"The condition minValid <= top of oplog ({} <= {}) doesn't"
|
||||
" hold: minValid document={}, latest oplog entry={}".format(
|
||||
minvalid_ts, latest_oplog_entry_ts, minvalid_doc,
|
||||
latest_oplog_doc))
|
||||
minvalid_ts, latest_oplog_entry_ts, minvalid_doc, latest_oplog_doc))
|
||||
|
||||
try:
|
||||
secondary.teardown()
|
||||
|
|
@ -346,15 +343,16 @@ class PeriodicKillSecondariesTestCase(interface.DynamicTestCase):
|
|||
def _await_secondary_state(self, secondary):
|
||||
client = secondary.mongo_client()
|
||||
try:
|
||||
client.admin.command(bson.SON([
|
||||
("replSetTest", 1),
|
||||
("waitForMemberState", 2), # 2 = SECONDARY
|
||||
("timeoutMillis", fixture.ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000)]))
|
||||
client.admin.command(
|
||||
bson.SON([
|
||||
("replSetTest", 1),
|
||||
("waitForMemberState", 2), # 2 = SECONDARY
|
||||
("timeoutMillis", fixture.ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000)
|
||||
]))
|
||||
except pymongo.errors.OperationFailure as err:
|
||||
self.logger.exception(
|
||||
"mongod on port %d failed to reach state SECONDARY after %d seconds",
|
||||
secondary.port,
|
||||
fixture.ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60)
|
||||
secondary.port, fixture.ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60)
|
||||
raise errors.ServerFailure(
|
||||
"mongod on port {} failed to reach state SECONDARY after {} seconds: {}".format(
|
||||
secondary.port, fixture.ReplFixture.AWAIT_REPL_TIMEOUT_MINS * 60, err.args[0]))
|
||||
@ -24,11 +24,8 @@ class ContinuousStepdown(interface.Hook):
|
|||
DESCRIPTION = ("Continuous stepdown (steps down the primary of replica sets at regular"
|
||||
" intervals)")
|
||||
|
||||
def __init__(self, hook_logger, fixture,
|
||||
config_stepdown=True,
|
||||
shard_stepdown=True,
|
||||
stepdown_duration_secs=10,
|
||||
stepdown_interval_ms=8000):
|
||||
def __init__(self, hook_logger, fixture, config_stepdown=True, shard_stepdown=True,
|
||||
stepdown_duration_secs=10, stepdown_interval_ms=8000):
|
||||
"""Initializes the ContinuousStepdown.
|
||||
|
||||
Args:
|
||||
|
|
@ -39,8 +36,7 @@ class ContinuousStepdown(interface.Hook):
|
|||
stepdown_duration_secs: the number of seconds to step down the primary.
|
||||
stepdown_interval_ms: the number of milliseconds between stepdowns.
|
||||
"""
|
||||
interface.Hook.__init__(self, hook_logger, fixture,
|
||||
ContinuousStepdown.DESCRIPTION)
|
||||
interface.Hook.__init__(self, hook_logger, fixture, ContinuousStepdown.DESCRIPTION)
|
||||
|
||||
self._fixture = fixture
|
||||
self._config_stepdown = config_stepdown
|
||||
|
|
@ -190,17 +186,18 @@ class _StepdownThread(threading.Thread):
|
|||
# We'll try again after self._stepdown_interval_secs seconds.
|
||||
return
|
||||
|
||||
self.logger.info("Stepping down the primary on port %d of replica set '%s'.",
|
||||
primary.port, rs_fixture.replset_name)
|
||||
self.logger.info("Stepping down the primary on port %d of replica set '%s'.", primary.port,
|
||||
rs_fixture.replset_name)
|
||||
|
||||
secondaries = rs_fixture.get_secondaries()
|
||||
|
||||
try:
|
||||
client = primary.mongo_client()
|
||||
client.admin.command(bson.SON([
|
||||
("replSetStepDown", self._stepdown_duration_secs),
|
||||
("force", True),
|
||||
]))
|
||||
client.admin.command(
|
||||
bson.SON([
|
||||
("replSetStepDown", self._stepdown_duration_secs),
|
||||
("force", True),
|
||||
]))
|
||||
except pymongo.errors.AutoReconnect:
|
||||
# AutoReconnect exceptions are expected as connections are closed during stepdown.
|
||||
pass
|
||||
@ -15,12 +15,9 @@ class ValidateCollections(jsfile.JSHook):
|
|||
Runs full validation on all collections in all databases on every stand-alone
|
||||
node, primary replica-set node, or primary shard node.
|
||||
"""
|
||||
|
||||
def __init__(self, hook_logger, fixture, shell_options=None):
|
||||
description = "Full collection validation"
|
||||
js_filename = os.path.join("jstests", "hooks", "run_validate_collections.js")
|
||||
jsfile.JSHook.__init__(self,
|
||||
hook_logger,
|
||||
fixture,
|
||||
js_filename,
|
||||
description,
|
||||
jsfile.JSHook.__init__(self, hook_logger, fixture, js_filename, description,
|
||||
shell_options=shell_options)
|
||||
@ -110,8 +110,8 @@ class Job(object):
|
|||
test.shortDescription())
|
||||
self.report.setFailure(test, return_code=2)
|
||||
# Always fail fast if the fixture fails.
|
||||
raise errors.StopExecution("%s not running after %s" %
|
||||
(self.fixture, test.shortDescription()))
|
||||
raise errors.StopExecution("%s not running after %s" % (self.fixture,
|
||||
test.shortDescription()))
|
||||
finally:
|
||||
success = self.report._find_test_info(test).status == "pass"
|
||||
if self.archival:
|
||||
@ -113,8 +113,8 @@ class TestReport(unittest.TestResult):
|
|||
self.num_dynamic += 1
|
||||
|
||||
# Set up the test-specific logger.
|
||||
test_logger = self.job_logger.new_test_logger(test.short_name(), test.basename(),
|
||||
command, test.logger)
|
||||
test_logger = self.job_logger.new_test_logger(test.short_name(), test.basename(), command,
|
||||
test.logger)
|
||||
test_info.url_endpoint = test_logger.url_endpoint
|
||||
|
||||
test.override_logger(test_logger)
|
||||
@@ -105,15 +105,19 @@ class Suite(object):
if self.options.include_tags is not None:
if "include_tags" in selector:
selector["include_tags"] = {"$allOf": [
selector["include_tags"],
self.options.include_tags,
]}
selector["include_tags"] = {
"$allOf": [
selector["include_tags"],
self.options.include_tags,
]
}
elif "exclude_tags" in selector:
selector["exclude_tags"] = {"$anyOf": [
selector["exclude_tags"],
{"$not": self.options.include_tags},
]}
selector["exclude_tags"] = {
"$anyOf": [
selector["exclude_tags"],
{"$not": self.options.include_tags},
]
}
else:
selector["include_tags"] = self.options.include_tags
@ -267,11 +271,8 @@ class Suite(object):
|
|||
for iteration in xrange(num_iterations):
|
||||
# Summarize each execution as a bulleted list of results.
|
||||
bulleter_sb = []
|
||||
summary = self._summarize_report(
|
||||
reports[iteration],
|
||||
start_times[iteration],
|
||||
end_times[iteration],
|
||||
bulleter_sb)
|
||||
summary = self._summarize_report(reports[iteration], start_times[iteration],
|
||||
end_times[iteration], bulleter_sb)
|
||||
combined_summary = _summary.combine(combined_summary, summary)
|
||||
|
||||
for (i, line) in enumerate(bulleter_sb):
|
||||
|
|
@ -288,10 +289,8 @@ class Suite(object):
|
|||
string builder 'sb'.
|
||||
"""
|
||||
|
||||
return self._summarize_report(self._reports[iteration],
|
||||
self._test_start_times[iteration],
|
||||
self._test_end_times[iteration],
|
||||
sb)
|
||||
return self._summarize_report(self._reports[iteration], self._test_start_times[iteration],
|
||||
self._test_end_times[iteration], sb)
|
||||
|
||||
def _summarize_report(self, report, start_time, end_time, sb):
|
||||
"""
|
||||
|
|
@ -335,8 +334,8 @@ class Suite(object):
|
|||
@staticmethod
|
||||
def log_summaries(logger, suites, time_taken):
|
||||
sb = []
|
||||
sb.append("Summary of all suites: %d suites ran in %0.2f seconds"
|
||||
% (len(suites), time_taken))
|
||||
sb.append("Summary of all suites: %d suites ran in %0.2f seconds" % (len(suites),
|
||||
time_taken))
|
||||
for suite in suites:
|
||||
suite_sb = []
|
||||
suite.summarize(suite_sb)
|
||||
@ -6,10 +6,9 @@ from __future__ import absolute_import
|
|||
|
||||
import collections
|
||||
|
||||
|
||||
|
||||
Summary = collections.namedtuple("Summary", ["num_run", "time_taken", "num_succeeded",
|
||||
"num_skipped", "num_failed", "num_errored"])
|
||||
Summary = collections.namedtuple(
|
||||
"Summary",
|
||||
["num_run", "time_taken", "num_succeeded", "num_skipped", "num_failed", "num_errored"])
|
||||
|
||||
|
||||
def combine(summary1, summary2):
|
||||
@ -7,7 +7,6 @@ from __future__ import absolute_import
|
|||
from .interface import make_test_case
|
||||
from ...utils import autoloader as _autoloader
|
||||
|
||||
|
||||
# We dynamically load all modules in the testcases/ package so that any TestCase classes declared
|
||||
# within them are automatically registered.
|
||||
_autoloader.load_all_modules(name=__name__, path=__path__)
|
||||
@ -18,10 +18,7 @@ class BenchmarkTestCase(interface.ProcessTestCase):
|
|||
|
||||
REGISTERED_NAME = "benchmark_test"
|
||||
|
||||
def __init__(self,
|
||||
logger,
|
||||
program_executable,
|
||||
program_options=None):
|
||||
def __init__(self, logger, program_executable, program_options=None):
|
||||
"""
|
||||
Initializes the BenchmarkTestCase with the executable to run.
|
||||
"""
|
||||
|
|
@ -49,9 +46,8 @@ class BenchmarkTestCase(interface.ProcessTestCase):
|
|||
|
||||
# 3. Override Benchmark options with options set through resmoke's command line.
|
||||
resmoke_bm_options = {
|
||||
"benchmark_filter": _config.BENCHMARK_FILTER,
|
||||
"benchmark_list_tests": _config.BENCHMARK_LIST_TESTS,
|
||||
"benchmark_min_time": _config.BENCHMARK_MIN_TIME,
|
||||
"benchmark_filter": _config.BENCHMARK_FILTER, "benchmark_list_tests":
|
||||
_config.BENCHMARK_LIST_TESTS, "benchmark_min_time": _config.BENCHMARK_MIN_TIME,
|
||||
"benchmark_out_format": _config.BENCHMARK_OUT_FORMAT,
|
||||
"benchmark_repetitions": _config.BENCHMARK_REPETITIONS
|
||||
}
|
||||
|
|
@ -69,6 +65,4 @@ class BenchmarkTestCase(interface.ProcessTestCase):
|
|||
return self.bm_executable + ".json"
|
||||
|
||||
def _make_process(self):
|
||||
return core.programs.generic_program(self.logger,
|
||||
[self.bm_executable],
|
||||
**self.bm_options)
|
||||
return core.programs.generic_program(self.logger, [self.bm_executable], **self.bm_options)
|
||||
@ -16,10 +16,7 @@ class CPPIntegrationTestCase(interface.ProcessTestCase):
|
|||
|
||||
REGISTERED_NAME = "cpp_integration_test"
|
||||
|
||||
def __init__(self,
|
||||
logger,
|
||||
program_executable,
|
||||
program_options=None):
|
||||
def __init__(self, logger, program_executable, program_options=None):
|
||||
"""
|
||||
Initializes the CPPIntegrationTestCase with the executable to run.
|
||||
"""
|
||||
|
|
@ -35,6 +32,5 @@ class CPPIntegrationTestCase(interface.ProcessTestCase):
|
|||
self.program_options["connectionString"] = self.fixture.get_internal_connection_string()
|
||||
|
||||
def _make_process(self):
|
||||
return core.programs.generic_program(self.logger,
|
||||
[self.program_executable],
|
||||
return core.programs.generic_program(self.logger, [self.program_executable],
|
||||
**self.program_options)
|
||||
@ -16,10 +16,7 @@ class CPPUnitTestCase(interface.ProcessTestCase):
|
|||
|
||||
REGISTERED_NAME = "cpp_unit_test"
|
||||
|
||||
def __init__(self,
|
||||
logger,
|
||||
program_executable,
|
||||
program_options=None):
|
||||
def __init__(self, logger, program_executable, program_options=None):
|
||||
"""
|
||||
Initializes the CPPUnitTestCase with the executable to run.
|
||||
"""
|
||||
|
|
@ -30,6 +27,4 @@ class CPPUnitTestCase(interface.ProcessTestCase):
|
|||
self.program_options = utils.default_if_none(program_options, {}).copy()
|
||||
|
||||
def _make_process(self):
|
||||
return core.process.Process(self.logger,
|
||||
[self.program_executable],
|
||||
**self.program_options)
|
||||
return core.process.Process(self.logger, [self.program_executable], **self.program_options)
|
||||
@ -21,11 +21,7 @@ class DBTestCase(interface.ProcessTestCase):
|
|||
|
||||
REGISTERED_NAME = "db_test"
|
||||
|
||||
def __init__(self,
|
||||
logger,
|
||||
dbtest_suite,
|
||||
dbtest_executable=None,
|
||||
dbtest_options=None):
|
||||
def __init__(self, logger, dbtest_suite, dbtest_executable=None, dbtest_options=None):
|
||||
"""
|
||||
Initializes the DBTestCase with the dbtest suite to run.
|
||||
"""
|
||||
|
|
@ -62,10 +58,8 @@ class DBTestCase(interface.ProcessTestCase):
|
|||
shutil.rmtree(self.dbtest_options["dbpath"], ignore_errors=True)
|
||||
|
||||
def _make_process(self):
|
||||
return core.programs.dbtest_program(self.logger,
|
||||
executable=self.dbtest_executable,
|
||||
suites=[self.dbtest_suite],
|
||||
**self.dbtest_options)
|
||||
return core.programs.dbtest_program(self.logger, executable=self.dbtest_executable,
|
||||
suites=[self.dbtest_suite], **self.dbtest_options)
|
||||
|
||||
@staticmethod
|
||||
def _get_dbpath_prefix():
|
||||
@@ -15,21 +15,13 @@ class FSMWorkloadTestCase(jsrunnerfile.JSRunnerFileTestCase):
REGISTERED_NAME = "fsm_workload_test"

def __init__(self,
logger,
fsm_workload,
shell_executable=None,
shell_options=None):
def __init__(self, logger, fsm_workload, shell_executable=None, shell_options=None):
"""Initializes the FSMWorkloadTestCase with the FSM workload file."""

jsrunnerfile.JSRunnerFileTestCase.__init__(
self,
logger,
"FSM workload",
fsm_workload,
self, logger, "FSM workload", fsm_workload,
test_runner_file="jstests/concurrency/fsm_libs/resmoke_runner.js",
shell_executable=shell_executable,
shell_options=shell_options)
shell_executable=shell_executable, shell_options=shell_options)

@property
|
||||
def fsm_workload(self):
|
||||
|
|
|
|||
|
|
@@ -12,7 +12,6 @@ import unittest
 from ... import logging
 from ...utils import registry

-
 _TEST_CASES = {}


@@ -139,8 +138,8 @@ class ProcessTestCase(TestCase):  # pylint: disable=abstract-method
         except self.failureException:
             raise
         except:
-            self.logger.exception("Encountered an error running %s %s",
-                                  self.test_kind, self.basename())
+            self.logger.exception("Encountered an error running %s %s", self.test_kind,
+                                  self.basename())
             raise

     def as_command(self):
@@ -15,21 +15,13 @@ class JSONSchemaTestCase(jsrunnerfile.JSRunnerFileTestCase):

     REGISTERED_NAME = "json_schema_test"

-    def __init__(self,
-                 logger,
-                 json_filename,
-                 shell_executable=None,
-                 shell_options=None):
+    def __init__(self, logger, json_filename, shell_executable=None, shell_options=None):
         """Initializes the JSONSchemaTestCase with the JSON test file."""

         jsrunnerfile.JSRunnerFileTestCase.__init__(
-            self,
-            logger,
-            "JSON Schema test",
-            json_filename,
+            self, logger, "JSON Schema test", json_filename,
             test_runner_file="jstests/libs/json_schema_test_runner.js",
-            shell_executable=shell_executable,
-            shell_options=shell_options)
+            shell_executable=shell_executable, shell_options=shell_options)

     @property
     def json_filename(self):
@@ -16,12 +16,7 @@ class JSRunnerFileTestCase(interface.ProcessTestCase):

     REGISTERED_NAME = registry.LEAVE_UNREGISTERED

-    def __init__(self,
-                 logger,
-                 test_kind,
-                 test_name,
-                 test_runner_file,
-                 shell_executable=None,
+    def __init__(self, logger, test_kind, test_name, test_runner_file, shell_executable=None,
                  shell_options=None):
         """Initializes the JSRunnerFileTestCase with the 'test_name' file."""

@@ -53,8 +48,6 @@ class JSRunnerFileTestCase(interface.ProcessTestCase):

     def _make_process(self):
         return core.programs.mongo_shell_program(
-            self.logger,
-            executable=self.shell_executable,
+            self.logger, executable=self.shell_executable,
             connection_string=self.fixture.get_driver_connection_url(),
-            filename=self.test_runner_file,
-            **self.shell_options)
+            filename=self.test_runner_file, **self.shell_options)
@@ -24,11 +24,7 @@ class _SingleJSTestCase(interface.ProcessTestCase):

     REGISTERED_NAME = registry.LEAVE_UNREGISTERED

-    def __init__(self,
-                 logger,
-                 js_filename,
-                 shell_executable=None,
-                 shell_options=None):
+    def __init__(self, logger, js_filename, shell_executable=None, shell_options=None):
         """
         Initializes the _SingleJSTestCase with the JS file to run.
         """
@@ -114,17 +110,13 @@ class _SingleJSTestCase(interface.ProcessTestCase):
         data_dir_prefix = utils.default_if_none(config.DBPATH_PREFIX,
                                                 global_vars.get("MongoRunner.dataDir"))
         data_dir_prefix = utils.default_if_none(data_dir_prefix, config.DEFAULT_DBPATH_PREFIX)
-        return os.path.join(data_dir_prefix,
-                            "job%d" % self.fixture.job_num,
+        return os.path.join(data_dir_prefix, "job%d" % self.fixture.job_num,
                             config.MONGO_RUNNER_SUBDIR)

     def _make_process(self):
         return core.programs.mongo_shell_program(
-            self.logger,
-            executable=self.shell_executable,
-            filename=self.js_filename,
-            connection_string=self.fixture.get_driver_connection_url(),
-            **self.shell_options)
+            self.logger, executable=self.shell_executable, filename=self.js_filename,
+            connection_string=self.fixture.get_driver_connection_url(), **self.shell_options)


 class JSTestCase(interface.ProcessTestCase):
@@ -151,11 +143,7 @@ class JSTestCase(interface.ProcessTestCase):

     DEFAULT_CLIENT_NUM = 1

-    def __init__(self,
-                 logger,
-                 js_filename,
-                 shell_executable=None,
-                 shell_options=None):
+    def __init__(self, logger, js_filename, shell_executable=None, shell_options=None):
         """
         Initializes the JSTestCase with the JS file to run.
         """
@@ -204,10 +192,8 @@ class JSTestCase(interface.ProcessTestCase):
         """

         shell_options = self._get_shell_options_for_thread(thread_id)
-        test_case = _SingleJSTestCase(logger,
-                                      self.test_case_template.js_filename,
-                                      self.test_case_template.shell_executable,
-                                      shell_options)
+        test_case = _SingleJSTestCase(logger, self.test_case_template.js_filename,
+                                      self.test_case_template.shell_executable, shell_options)

         test_case.configure(self.fixture)
         return test_case
@@ -253,9 +239,8 @@ class JSTestCase(interface.ProcessTestCase):
            if thread.exc_info is not None:
                if not isinstance(thread.exc_info[1], self.failureException):
                    self.logger.error(
-                        "Encountered an error inside thread %d running jstest %s.",
-                        thread_id, self.basename(),
-                        exc_info=thread.exc_info)
+                        "Encountered an error inside thread %d running jstest %s.", thread_id,
+                        self.basename(), exc_info=thread.exc_info)
                raise thread.exc_info

     def run_test(self):
@@ -17,9 +17,7 @@ class MongosTestCase(interface.ProcessTestCase):

     REGISTERED_NAME = "mongos_test"

-    def __init__(self,
-                 logger,
-                 mongos_options):
+    def __init__(self, logger, mongos_options):
         """
         Initializes the mongos test and saves the options.
         """
@@ -41,6 +39,5 @@ class MongosTestCase(interface.ProcessTestCase):
         self.options["test"] = ""

     def _make_process(self):
-        return core.programs.mongos_program(self.logger,
-                                            executable=self.mongos_executable,
+        return core.programs.mongos_program(self.logger, executable=self.mongos_executable,
                                             **self.options)
@@ -20,8 +20,8 @@ class SleepTestCase(interface.TestCase):

         sleep_duration_secs = int(sleep_duration_secs)

-        interface.TestCase.__init__(
-            self, logger, "Sleep", "{:d} seconds".format(sleep_duration_secs))
+        interface.TestCase.__init__(self, logger, "Sleep",
+                                    "{:d} seconds".format(sleep_duration_secs))

         self.__sleep_duration_secs = sleep_duration_secs

Some files were not shown because too many files have changed in this diff.
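The reflow shown in these hunks can also be reproduced outside the repository through yapf's Python API. The sketch below is illustrative only and is not part of this commit: the sample snippet and the "pep8" base style are placeholders standing in for whatever style configuration a project actually uses.

# Illustrative sketch, not from this commit: collapse a split argument list the way
# yapf does in the hunks above. Assumes yapf is installed (pip install yapf); the
# sample source and the "pep8" style name are placeholders, not this repository's settings.
from yapf.yapflib.yapf_api import FormatCode

SAMPLE = (
    "def __init__(self,\n"
    "             logger,\n"
    "             options=None):\n"
    "    pass\n"
)

# FormatCode returns the reformatted source plus a flag saying whether anything changed.
formatted, changed = FormatCode(SAMPLE, style_config="pep8")
print(formatted)  # expected: def __init__(self, logger, options=None): ... on one line
print(changed)    # expected: True

The command-line tool applies the same engine over whole files (for example, yapf -i on a file rewrites it in place); the API form is simply convenient for experimenting with a single snippet.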