mirror of https://github.com/mongodb/mongo
SERVER-101034 Use rules_lint shfmt formatter (#38448)
GitOrigin-RevId: e8ef1ba2000e12fa2cd5a115a9ceeab92332e938
Parent: 5ac113adb1
Commit: 7ccc14bf91

@@ -0,0 +1,9 @@
+root = true
+
+[*.sh]
+charset = utf-8
+end_of_line = lf
+insert_final_newline = true
+trim_trailing_whitespace = true
+indent_style = space
+indent_size = 4
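Note: recent shfmt (v3.4+) applies EditorConfig settings whenever no formatting flags are passed on the command line, so the [*.sh] block above is what now drives the formatter. A minimal sketch of checking a tree against it (the evergreen/ path is illustrative, taken from the wrapper script deleted further down):

# With no -i/-bn/-sr flags, shfmt falls back to .editorconfig and picks up
# indent_size = 4 from the [*.sh] section above; -d prints a diff and exits
# non-zero if any file would change.
shfmt -d evergreen/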
@@ -10,6 +10,7 @@ OWNERS.yml @10gen/server-root-ownership @svc-auto-approve-bot
 .bazel* @10gen/devprod-build @svc-auto-approve-bot
 .clang-format @10gen/server-programmability @svc-auto-approve-bot
 .clang-tidy.in @10gen/server-programmability @svc-auto-approve-bot
+/.editorconfig @10gen/devprod-build @svc-auto-approve-bot
 .git* @10gen/devprod-build @svc-auto-approve-bot
 .mypy.ini @10gen/devprod-build @10gen/devprod-correctness @svc-auto-approve-bot
 .prettierignore @10gen/devprod-correctness @svc-auto-approve-bot
@@ -18,6 +18,9 @@ filters:
   - ".clang-tidy.in":
       approvers:
         - 10gen/server-programmability
+  - "/.editorconfig":
+      approvers:
+        - 10gen/devprod-build
   - ".git*":
       approvers:
         - 10gen/devprod-build
@@ -6,8 +6,6 @@ py_binary(
     args = [
         "--prettier",
         "$(location //:prettier)",
-        "--shellscripts-linters",
-        "$(location //buildscripts:shellscripts_linters)",
         "--rules-lint-format",
         "$(location :rules_lint_format)",
         "--rules-lint-format-check",
@@ -17,7 +15,6 @@ py_binary(
        ":rules_lint_format",
        ":rules_lint_format.check",
        "//:prettier",
-       "//buildscripts:shellscripts_linters",
        "@shfmt",
    ],
    env = {
@@ -38,9 +35,8 @@ format_multirun(
     graphql = "//:prettier",
     html = "//:prettier",
     markdown = "//:prettier",
+    shell = "@shfmt//:shfmt",
     sql = "//:prettier",
     starlark = "@buildifier_prebuilt//:buildifier",
     visibility = ["//visibility:public"],
-    # TODO(SERVER-101034): Enable rules_lint shfmt after sh files are reformatted with .editorconfig
-    # shell = "@shfmt//:shfmt",
 )
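rules_lint's format_multirun generates both an in-place formatter and a read-only ".check" variant, which are exactly the two labels the py_binary above consumes. A sketch of the two entry points, run from the package that defines them (the exact package path isn't visible in this diff):

# Rewrite shell scripts (and the other wired languages) in place:
bazel run :rules_lint_format
# CI-style check: report differences and exit non-zero without touching files:
bazel run :rules_lint_format.check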
@@ -82,21 +82,6 @@ def run_rules_lint(
     return True


-def run_shellscripts_linters(shellscripts_linters: pathlib.Path, check: bool) -> bool:
-    try:
-        command = [str(shellscripts_linters)]
-        if not check:
-            print("Running shellscripts formatter")
-            command.append("fix")
-        else:
-            print("Running shellscripts linter")
-        repo_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
-        subprocess.run(command, check=True, env=os.environ, cwd=repo_path)
-    except subprocess.CalledProcessError:
-        return False
-    return True
-
-
 def run_prettier(
     prettier: pathlib.Path, check: bool, files_to_format: Union[List[str], str] = "all"
 ) -> bool:
@@ -169,12 +154,6 @@ def main() -> int:
     parser.add_argument(
         "--prettier", help="Set the path to prettier", required=True, type=pathlib.Path
     )
-    parser.add_argument(
-        "--shellscripts-linters",
-        help="Set the path to shellscripts_linters",
-        required=True,
-        type=pathlib.Path,
-    )
     parser.add_argument(
         "--rules-lint-format",
         help="Set the path to rules_lint's formatter",
@@ -200,7 +179,6 @@ def main() -> int:

     args = parser.parse_args()
     prettier_path: pathlib.Path = args.prettier.resolve()
-    shellscripts_linters_path: pathlib.Path = args.shellscripts_linters.resolve()

     os.chdir(default_dir)

@@ -235,7 +213,6 @@ def main() -> int:
        if run_rules_lint(
            args.rules_lint_format, args.rules_lint_format_check, args.check, files_to_format
        )
-       and run_shellscripts_linters(shellscripts_linters_path, args.check)
        and run_prettier(prettier_path, args.check, files_to_format)
        else 1
    )
@@ -5,8 +5,8 @@ GREEN='\033[0;32m'
 NO_COLOR='\033[0m'

 if [[ $1 == "ALL_PASSING" ]]; then
     echo -e "${GREEN}INFO:${NO_COLOR} No linter errors found!"
     exit 0
 fi

 echo -e "${RED}ERROR:${NO_COLOR} Linter run failed, see details above"
@@ -153,34 +153,34 @@ get_package_versions() {
     pkg_manager=$(get_package_manager "$image")

     case "$pkg_manager" in
     yum)
         docker run --rm "$image" bash -c "
         yum info ${packages[*]} 2>/dev/null |
         awk '/^Name/ {name=\$3} /^Version/ {version=\$3} /^Release/ {release=\$3}
         /^Release/ {print name \"-\" version \"-\" release}' |
         sort -u"
         ;;
     apt)
         docker run --rm "$image" bash -c "
         apt-get update >/dev/null 2>&1 &&
         apt-cache policy ${packages[*]} |
         awk '/^[^ ]/ {pkg=\$1} /Candidate:/ {print pkg \"=\" \$2}' |
         sort -u"
         ;;
     zypper)
         # TODO(SERVER-93423): Pin suse package versions. At the moment this
         # breaks the remote_execution_containers_generator.py script.
         printf '%s\n' "${packages[@]}" | sort -u
         # docker run --rm "$image" bash -c "
         # zypper --non-interactive refresh >/dev/null 2>&1 &&
         # zypper --non-interactive info ${packages[*]} |
         # awk '/^Name/ {name=\$3} /^Version/ {version=\$3} /^Version/ {print name \"=\" version}' |
         # sort -u"
         ;;
     *)
         echo "Unsupported package manager for image: $image" >&2
         return 1
         ;;
     esac
 }
@@ -207,25 +207,25 @@ generate_dockerfile() {
     install_lines=$(get_package_versions "$image" "${packages[@]}" | sed 's/^/ /' | sed 's/$/\ \\/')

     case "$pkg_manager" in
     yum)
         update_cmd="yum check-update || true"
         install_cmd="yum install -y"
         clean_cmd="&& yum clean all && rm -rf /var/cache/yum/*"
         ;;
     apt)
         update_cmd="apt-get update"
         install_cmd="DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends"
         clean_cmd="&& rm -rf /var/lib/apt/lists/*"
         ;;
     zypper)
         update_cmd="zypper refresh"
         install_cmd="zypper install -y --no-recommends"
         clean_cmd="&& zypper clean --all"
         ;;
     *)
         echo "Unsupported package manager for image: $image" >&2
         return 1
         ;;
     esac

     # Remove colons from package versions for Debian and Ubuntu
@@ -234,7 +234,7 @@ generate_dockerfile() {
     fi

     mkdir -p "$output_dir"
-    cat << EOF > "$output_dir/dockerfile"
+    cat <<EOF >"$output_dir/dockerfile"
 # DO NOT EDIT.
 #
 # This Dockerfile is generated by the 'repin_dockerfiles.sh' script. To repin
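This hunk shows shfmt's default redirect style: without the -sr (space-redirects) flag, no space is printed between a redirection operator and its target, and "cat << EOF" collapses to "cat <<EOF". The deleted wrapper script below passed -sr, which is why the old tree used the spaced form; the same normalization recurs in several hunks that follow. A self-contained before/after sketch:

# Before (spaced, as shfmt -sr would print):
cat << EOF > /tmp/demo.txt
hello
EOF
# After plain shfmt (identical semantics, canonical spacing):
cat <<EOF >/tmp/demo.txt
hello
EOF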
@@ -3,7 +3,7 @@
 # This script is used as a workspace status command
 # bazel test --workspace_status_command=bazel/resmoke/volatile_status.sh
 # to populate key-value pairs in bazel-out/volatile-status.txt.
 # This file and the key-values can be consumed by bazel rules, but bazel
 # pretends this file never changes when deciding what to rebuild.

 # Evergreen expansions used primarily for Resmoke telemetry
@@ -5,8 +5,8 @@ set -e
 RUNFILES_WORKING_DIRECTORY="$(pwd)"

 if [ -z $BUILD_WORKING_DIRECTORY ]; then
     echo "ERROR: BUILD_WORKING_DIRECTORY was not set, was this run from bazel?"
     exit 1
 fi

 cd $BUILD_WORKING_DIRECTORY
@@ -111,18 +111,6 @@ py_binary(
     ],
 )

-sh_binary(
-    name = "shellscripts_linters",
-    srcs = ["shellscripts-linters.sh"],
-    data = [
-        "@shfmt",
-    ],
-    env = {
-        "SHFMT_PATH": "$(rootpath @shfmt//:shfmt)",
-    },
-    visibility = ["//visibility:public"],
-)
-
 py_library(
     name = "mongo_toolchain",
     srcs = [
@@ -8,70 +8,70 @@ echo "+-------------------------------------------------------------------------
 echo

 if [[ -d "/opt/mongodbtoolchain/v4/bin" ]]; then
     export PATH="/opt/mongodbtoolchain/v4/bin:$PATH"
 fi

 if [[ -d "/opt/mongodbtoolchain/v5/bin" ]]; then
     export PATH="/opt/mongodbtoolchain/v5/bin:$PATH"
 fi

 rc_file=""
 if [[ -f "$HOME/.bashrc" ]]; then
     rc_file="$HOME/.bashrc"
 fi

 if [[ -f "$HOME/.zshrc" ]]; then
     rc_file="$HOME/.zshrc"
 fi

-if ! command -v db-contrib-tool &> /dev/null; then
-    if ! python3 -c "import sys; sys.exit(sys.version_info < (3, 7))" &> /dev/null; then
+if ! command -v db-contrib-tool &>/dev/null; then
+    if ! python3 -c "import sys; sys.exit(sys.version_info < (3, 7))" &>/dev/null; then
         actual_version=$(python3 -c 'import sys; print(sys.version)')
         echo "You must have python3.7+ installed. Detected version $actual_version."
         echo "To avoid unexpected issues, python3.7+ will not be automatically installed."
         echo "Please, do it yourself."
         echo
         echo "On macOS you can run:"
         echo
         echo "  brew install python3"
         echo
         exit 1
     fi

-    if command -v pipx &> /dev/null; then
+    if command -v pipx &>/dev/null; then
         echo "Found pipx: $(command -v pipx)."
         echo "Using it to install 'db-contrib-tool'."
         echo

-        pipx ensurepath &> /dev/null
+        pipx ensurepath &>/dev/null
         if [[ -f "$rc_file" ]]; then
             source "$rc_file"
         fi

         pipx install db-contrib-tool --python $(command -v python3) --force
         echo
     else
-        if ! python3 -m pipx --version &> /dev/null; then
+        if ! python3 -m pipx --version &>/dev/null; then
             echo "Couldn't find pipx. Installing it as python3 module:"
             echo "  $(command -v python3) -m pip install pipx"
             echo
             python3 -m pip install pipx
             echo
         else
             echo "Found pipx installed as python3 module:"
             echo "  $(command -v python3) -m pipx --version"
             echo "Using it to install 'db-contrib-tool'."
             echo
         fi

-        python3 -m pipx ensurepath &> /dev/null
+        python3 -m pipx ensurepath &>/dev/null
         if [[ -f "$rc_file" ]]; then
             source "$rc_file"
         fi

         python3 -m pipx install db-contrib-tool --force
         echo
     fi
 fi

 echo "Please, open a new shell or run:"
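The only textual changes in this hunk are the "&> /dev/null" redirects losing their inner space, the same redirect normalization noted above. "&>" itself is bash shorthand, so the following two lines are equivalent:

# shfmt normalizes only the spacing, not the redirect form:
command -v pipx &>/dev/null
command -v pipx >/dev/null 2>&1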
@@ -5,31 +5,31 @@ ARGS=("$@")

 # Ordered list of possible clangd locations
 CANDIDATES=(
     "$(command -v custom-clangd)"
     "$(find .compiledb/compiledb-*/external/mongo_toolchain_v5/v5/bin/clangd)"
     "$(find bazel-*/external/mongo_toolchain_v5/v5/bin/clangd)"
     "/opt/mongodbtoolchain/v5/bin/clangd"
 )

 # Find the first available clangd
 for CANDIDATE in "${CANDIDATES[@]}"; do
     if [[ -x "$CANDIDATE" ]]; then
         CLANGD="$CANDIDATE"
         echo "[INFO] Using clangd at: $CLANGD" >&2
         break
     fi
 done

 # Fail if no clangd was found
 if [[ -z "$CLANGD" ]]; then
     echo "[ERROR] clangd not found in any of the expected locations." >&2
     exit 1
 fi

 FINAL_ARGS=(
     "${ARGS[@]}"
     "--query-driver=./**/*{clang,gcc,g++}*" # allow any clang or gcc binary in the repo
     "--header-insertion=never"
 )

 # Log the full command (optional)
@@ -26,87 +26,75 @@ mkdir -p "$repodir/yum/redhat"

 # to support different $releasever values in yum repo configurations
 #
-if [ ! -e "$repodir/yum/redhat/7Server" ]
-then
+if [ ! -e "$repodir/yum/redhat/7Server" ]; then
     ln -s 7 "$repodir/yum/redhat/7Server"
 fi

-if [ ! -e "$repodir/yum/redhat/6Server" ]
-then
+if [ ! -e "$repodir/yum/redhat/6Server" ]; then
     ln -s 6 "$repodir/yum/redhat/6Server"
 fi

-if [ ! -e "$repodir/yum/redhat/5Server" ]
-then
+if [ ! -e "$repodir/yum/redhat/5Server" ]; then
     ln -s 5 "$repodir/yum/redhat/5Server"
 fi

 echo "Scanning and copying package files from $source_dir"
 echo ". = skipping existing file, @ = copying file"
-for package in $(find "$source_dir" -not \( -path "$repodir" -prune \) -and \( -name \*.rpm -o -name \*.deb -o -name Release \))
-do
+for package in $(find "$source_dir" -not \( -path "$repodir" -prune \) -and \( -name \*.rpm -o -name \*.deb -o -name Release \)); do
     new_package_location="$repodir$(echo "$package" | sed 's/\/var\/www-enterprise\/[^\/]*//;')"
     # skip if the directory structure looks weird
     #
-    if echo "$new_package_location" | grep -q /repo/
-    then
+    if echo "$new_package_location" | grep -q /repo/; then
         continue
     fi

     # skip if not enterprise package
     #
-    if ! echo "$new_package_location" | grep -q enterprise
-    then
+    if ! echo "$new_package_location" | grep -q enterprise; then
         continue
     fi
     # skip if it's already there
     #
-    if [ -e "$new_package_location" -a "$(basename "$package")" != "Release" ]
-    then
+    if [ -e "$new_package_location" -a "$(basename "$package")" != "Release" ]; then
         echo -n .
     else
         mkdir -p "$(dirname "$new_package_location")"
         echo -n @
         cp "$package" "$new_package_location"
     fi
 done
 echo

 # packages are in place, now create metadata
 #
-for debian_dir in "$repodir"/apt/ubuntu "$repodir"/apt/debian
-do
+for debian_dir in "$repodir"/apt/ubuntu "$repodir"/apt/debian; do
     cd "$debian_dir"
-    for section_dir in $(find dists -type d -name multiverse -o -name main)
-    do
+    for section_dir in $(find dists -type d -name multiverse -o -name main); do
-        for arch_dir in "$section_dir"/{binary-i386,binary-amd64}
-        do
+        for arch_dir in "$section_dir"/{binary-i386,binary-amd64}; do
             echo "Generating Packages file under $debian_dir/$arch_dir"
-            if [ ! -d $arch_dir ]
-            then
+            if [ ! -d $arch_dir ]; then
                 mkdir $arch_dir
             fi
-            dpkg-scanpackages --multiversion "$arch_dir" > "$arch_dir"/Packages
-            gzip -9c "$arch_dir"/Packages > "$arch_dir"/Packages.gz
+            dpkg-scanpackages --multiversion "$arch_dir" >"$arch_dir"/Packages
+            gzip -9c "$arch_dir"/Packages >"$arch_dir"/Packages.gz
         done
     done

-    for release_file in $(find "$debian_dir" -name Release)
-    do
+    for release_file in $(find "$debian_dir" -name Release); do
         release_dir=$(dirname "$release_file")
         echo "Generating Release file under $release_dir"
         cd $release_dir
         tempfile=$(mktemp /tmp/ReleaseXXXXXX)
         tempfile2=$(mktemp /tmp/ReleaseXXXXXX)
         mv Release $tempfile
-        head -7 $tempfile > $tempfile2
-        apt-ftparchive release . >> $tempfile2
+        head -7 $tempfile >$tempfile2
+        apt-ftparchive release . >>$tempfile2
         cp $tempfile2 Release
         chmod 644 Release
         rm Release.gpg
         echo "Signing Release file"
         gpg -r "$gpg_recip" --no-secmem-warning -abs --output Release.gpg Release
     done
 done

 # Create symlinks for stable and unstable branches
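Beyond re-indentation, shfmt's canonical layout folds "then" and "do" onto the line of the keyword that opens the block, which accounts for most of the -/+ pairs in this hunk and the mirrored community-repo hunk below. A minimal before/after sketch:

# Before:
if [ ! -e /tmp/marker ]
then
    touch /tmp/marker
fi
# After shfmt (same semantics, canonical keyword placement):
if [ ! -e /tmp/marker ]; then
    touch /tmp/marker
fi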
@@ -118,29 +106,24 @@ done
 # /var/www-enterprise/repo.consolidated/apt/ubuntu/dists/precise/mongodb-enterprise/unstable -> 2.5
 # /var/www-enterprise/repo.consolidated/apt/debian/dists/wheezy/mongodb-enterprise/unstable -> 2.5
 #
-for unstable_branch_dir in "$repodir"/yum/redhat/*/*/$unstable_branch "$repodir"/yum/amazon/*/*/$unstable_branch "$repodir"/apt/debian/dists/*/*/$unstable_branch "$repodir"/apt/ubuntu/dists/*/*/$unstable_branch "$repodir"/zypper/suse/*/*/$unstable_branch
-do
+for unstable_branch_dir in "$repodir"/yum/redhat/*/*/$unstable_branch "$repodir"/yum/amazon/*/*/$unstable_branch "$repodir"/apt/debian/dists/*/*/$unstable_branch "$repodir"/apt/ubuntu/dists/*/*/$unstable_branch "$repodir"/zypper/suse/*/*/$unstable_branch; do
     full_unstable_path=$(dirname "$unstable_branch_dir")/unstable
-    if [ -e "$unstable_branch_dir" -a ! -e "$full_unstable_path" ]
-    then
+    if [ -e "$unstable_branch_dir" -a ! -e "$full_unstable_path" ]; then
         echo "Linking unstable branch directory $unstable_branch_dir to $full_unstable_path"
         ln -s $unstable_branch $full_unstable_path
     fi
 done

-for stable_branch_dir in "$repodir"/yum/redhat/*/*/$stable_branch "$repodir"/yum/amazon/*/*/$stable_branch "$repodir"/apt/debian/dists/*/*/$stable_branch "$repodir"/apt/ubuntu/dists/*/*/$stable_branch "$repodir"/zypper/suse/*/*/$stable_branch
-do
+for stable_branch_dir in "$repodir"/yum/redhat/*/*/$stable_branch "$repodir"/yum/amazon/*/*/$stable_branch "$repodir"/apt/debian/dists/*/*/$stable_branch "$repodir"/apt/ubuntu/dists/*/*/$stable_branch "$repodir"/zypper/suse/*/*/$stable_branch; do
     full_stable_path=$(dirname "$stable_branch_dir")/stable
-    if [ -e "$stable_branch_dir" -a ! -e "$full_stable_path" ]
-    then
+    if [ -e "$stable_branch_dir" -a ! -e "$full_stable_path" ]; then
         echo "Linking stable branch directory $stable_branch_dir to $full_stable_path"
         ln -s $stable_branch $full_stable_path
     fi
 done

-for rpm_dir in $(find "$repodir"/yum/redhat "$repodir"/zypper/suse -type d -name x86_64 -o -name i386)
-do
+for rpm_dir in $(find "$repodir"/yum/redhat "$repodir"/zypper/suse -type d -name x86_64 -o -name i386); do
     echo "Generating redhat repo metadata under $rpm_dir"
     cd "$rpm_dir"
     createrepo .
 done
@@ -26,87 +26,75 @@ mkdir -p "$repodir/yum/redhat"

 # to support different $releasever values in yum repo configurations
 #
-if [ ! -e "$repodir/yum/redhat/6Server" ]
-then
+if [ ! -e "$repodir/yum/redhat/6Server" ]; then
     ln -s 6 "$repodir/yum/redhat/6Server"
 fi

-if [ ! -e "$repodir/yum/redhat/7Server" ]
-then
+if [ ! -e "$repodir/yum/redhat/7Server" ]; then
     ln -s 7 "$repodir/yum/redhat/7Server"
 fi

-if [ ! -e "$repodir/yum/redhat/5Server" ]
-then
+if [ ! -e "$repodir/yum/redhat/5Server" ]; then
     ln -s 5 "$repodir/yum/redhat/5Server"
 fi

 echo "Scanning and copying package files from $source_dir"
 echo ". = skipping existing file, @ = copying file"
-for package in $(find "$source_dir" -not \( -path "$repodir" -prune \) -and \( -name \*.rpm -o -name \*.deb -o -name Release \))
-do
+for package in $(find "$source_dir" -not \( -path "$repodir" -prune \) -and \( -name \*.rpm -o -name \*.deb -o -name Release \)); do
     new_package_location="$repodir$(echo "$package" | sed 's/\/var\/www-org\/[^\/]*//;')"
     # skip if the directory structure looks weird
     #
-    if echo "$new_package_location" | grep -q /repo/
-    then
+    if echo "$new_package_location" | grep -q /repo/; then
         continue
     fi

     # skip if not community package
     #
-    if ! echo "$new_package_location" | grep -q org
-    then
+    if ! echo "$new_package_location" | grep -q org; then
         continue
     fi
     # skip if it's already there
     #
-    if [ -e "$new_package_location" -a "$(basename "$package")" != "Release" ]
-    then
+    if [ -e "$new_package_location" -a "$(basename "$package")" != "Release" ]; then
         echo -n .
     else
         mkdir -p "$(dirname "$new_package_location")"
         echo -n @
         cp "$package" "$new_package_location"
     fi
 done
 echo

 # packages are in place, now create metadata
 #
-for debian_dir in "$repodir"/apt/ubuntu "$repodir"/apt/debian
-do
+for debian_dir in "$repodir"/apt/ubuntu "$repodir"/apt/debian; do
     cd "$debian_dir"
-    for section_dir in $(find dists -type d -name multiverse -o -name main)
-    do
+    for section_dir in $(find dists -type d -name multiverse -o -name main); do
-        for arch_dir in "$section_dir"/{binary-i386,binary-amd64}
-        do
+        for arch_dir in "$section_dir"/{binary-i386,binary-amd64}; do
             echo "Generating Packages file under $debian_dir/$arch_dir"
-            if [ ! -d $arch_dir ]
-            then
+            if [ ! -d $arch_dir ]; then
                 mkdir $arch_dir
             fi
-            dpkg-scanpackages --multiversion "$arch_dir" > "$arch_dir"/Packages
-            gzip -9c "$arch_dir"/Packages > "$arch_dir"/Packages.gz
+            dpkg-scanpackages --multiversion "$arch_dir" >"$arch_dir"/Packages
+            gzip -9c "$arch_dir"/Packages >"$arch_dir"/Packages.gz
         done
     done

-    for release_file in $(find "$debian_dir" -name Release)
-    do
+    for release_file in $(find "$debian_dir" -name Release); do
         release_dir=$(dirname "$release_file")
         echo "Generating Release file under $release_dir"
         cd $release_dir
         tempfile=$(mktemp /tmp/ReleaseXXXXXX)
         tempfile2=$(mktemp /tmp/ReleaseXXXXXX)
         mv Release $tempfile
-        head -7 $tempfile > $tempfile2
-        apt-ftparchive release . >> $tempfile2
+        head -7 $tempfile >$tempfile2
+        apt-ftparchive release . >>$tempfile2
         cp $tempfile2 Release
         chmod 644 Release
         rm Release.gpg
         echo "Signing Release file"
         gpg -r "$gpg_recip" --no-secmem-warning -abs --output Release.gpg Release
     done
 done

 # Create symlinks for stable and unstable branches
@@ -118,29 +106,24 @@ done
 # /var/www-org/repo.consolidated/apt/ubuntu/dists/precise/mongodb-org/unstable -> 2.5
 # /var/www-org/repo.consolidated/apt/debian/dists/wheezy/mongodb-org/unstable -> 2.5
 #
-for unstable_branch_dir in "$repodir"/yum/redhat/*/*/$unstable_branch "$repodir"/yum/amazon/*/*/$unstable_branch "$repodir"/apt/debian/dists/*/*/$unstable_branch "$repodir"/apt/ubuntu/dists/*/*/$unstable_branch "$repodir"/zypper/suse/*/*/$unstable_branch
-do
+for unstable_branch_dir in "$repodir"/yum/redhat/*/*/$unstable_branch "$repodir"/yum/amazon/*/*/$unstable_branch "$repodir"/apt/debian/dists/*/*/$unstable_branch "$repodir"/apt/ubuntu/dists/*/*/$unstable_branch "$repodir"/zypper/suse/*/*/$unstable_branch; do
     full_unstable_path=$(dirname "$unstable_branch_dir")/unstable
-    if [ -e "$unstable_branch_dir" -a ! -e "$full_unstable_path" ]
-    then
+    if [ -e "$unstable_branch_dir" -a ! -e "$full_unstable_path" ]; then
         echo "Linking unstable branch directory $unstable_branch_dir to $full_unstable_path"
         ln -s $unstable_branch $full_unstable_path
     fi
 done

-for stable_branch_dir in "$repodir"/yum/redhat/*/*/$stable_branch "$repodir"/yum/amazon/*/*/$stable_branch "$repodir"/apt/debian/dists/*/*/$stable_branch "$repodir"/apt/ubuntu/dists/*/*/$stable_branch "$repodir"/zypper/suse/*/*/$stable_branch
-do
+for stable_branch_dir in "$repodir"/yum/redhat/*/*/$stable_branch "$repodir"/yum/amazon/*/*/$stable_branch "$repodir"/apt/debian/dists/*/*/$stable_branch "$repodir"/apt/ubuntu/dists/*/*/$stable_branch "$repodir"/zypper/suse/*/*/$stable_branch; do
     full_stable_path=$(dirname "$stable_branch_dir")/stable
-    if [ -e "$stable_branch_dir" -a ! -e "$full_stable_path" ]
-    then
+    if [ -e "$stable_branch_dir" -a ! -e "$full_stable_path" ]; then
         echo "Linking stable branch directory $stable_branch_dir to $full_stable_path"
         ln -s $stable_branch $full_stable_path
     fi
 done

-for rpm_dir in $(find "$repodir"/yum/redhat "$repodir"/yum/amazon "$repodir"/zypper/suse -type d -name x86_64 -o -name i386)
-do
+for rpm_dir in $(find "$repodir"/yum/redhat "$repodir"/yum/amazon "$repodir"/zypper/suse -type d -name x86_64 -o -name i386); do
     echo "Generating redhat repo metadata under $rpm_dir"
     cd "$rpm_dir"
     createrepo .
 done
@@ -12,7 +12,7 @@ user_group=$USER:$(id -Gn $USER | cut -f1 -d ' ')

 # _usage_: Provides usage infomation
 function _usage_ {
-    cat << EOF
+    cat <<EOF
 usage: $0 options
 This script supports the following parameters for Windows & Linux platforms:
   -d <deviceNames>, REQUIRED, Space separated list of devices to mount /data on,
@@ -31,150 +31,144 @@ This script supports the following parameters for Windows & Linux platforms:
 EOF
 }


 # Parse command line options
-while getopts "d:l:o:r:t:u:?" option
-do
+while getopts "d:l:o:r:t:u:?" option; do
     case $option in
     d)
         data_device_names=$OPTARG
         ;;
     l)
         log_device_name=$OPTARG
         ;;
     o)
         mount_options=$OPTARG
         ;;
     r)
         data_raid_device_name=$OPTARG
         ;;
     t)
         fs_type=$OPTARG
         ;;
     u)
         user_group=$OPTARG
         ;;
-    \?|*)
+    \? | *)
         _usage_
         exit 0
         ;;
     esac
 done

 function mount_drive {
     local root_dir=$1
     local sub_dirs=$2
     local device_names=$3
     local raid_device_name=$4
     local mount_options=$5
     local fs_type=$6
     local user_group=$7

     # Determine how many devices were specified
     local num_devices=0
-    for device_name in $device_names
-    do
+    for device_name in $device_names; do
         local devices="$devices /dev/$device_name"
         let num_devices=num_devices+1
     done

     # $OS is defined in Cygwin
     if [ "Windows_NT" = "$OS" ]; then
         if [ $num_devices -ne 1 ]; then
             echo "Must specify only one drive"
             _usage_
             exit 1
         fi

         local drive_poll_retry=0
         local drive_poll_delay=0
         local drive_retry_max=40

         local drive=$device_names
         local system_drive=c

-        while true;
-        do
+        while true; do
             sleep $drive_poll_delay
             echo "Looking for drive '$drive' to mount $root_dir"
             if [ -d /cygdrive/$drive ]; then
                 echo "Found drive"
                 rm -rf /$root_dir
                 rm -rf /cygdrive/$system_drive/$root_dir
                 mkdir $drive:\\$root_dir
                 cmd.exe /c mklink /J $system_drive:\\$root_dir $drive:\\$root_dir
                 ln -s /cygdrive/$drive/$root_dir /$root_dir
                 setfacl -s user::rwx,group::rwx,other::rwx /cygdrive/$drive/$root_dir
-                for sub_dir in $sub_dirs
-                do
+                for sub_dir in $sub_dirs; do
                     mkdir -p /cygdrive/$drive/$root_dir/$sub_dir
                 done
                 chown -R $user_group /cygdrive/$system_drive/$root_dir
                 break
             fi
             let drive_poll_retry=drive_poll_retry+1
             if [ $drive_poll_retry -eq $drive_retry_max ]; then
                 echo "Timed out trying to mount $root_dir drive."
                 exit 1
             fi
             let drive_poll_delay=drive_poll_delay+5
         done

     elif [ $(uname | awk '{print tolower($0)}') = "linux" ]; then
         if [ $num_devices -eq 0 ]; then
             echo "Must specify atleast one device"
             _usage_
             exit 1
         elif [ $num_devices -gt 1 ]; then
             if [ -z "$raid_device_name" ]; then
                 echo "Missing RAID device name"
                 _usage_
                 exit 1
             fi
         fi

         # Unmount the current devices, if already mounted
         umount /mnt || true
         umount $devices || true

         # Determine if we have a RAID set
         if [ ! -z "$raid_device_name" ]; then
             echo "Creating RAID set on '$raid_device_name' for devices '$devices'"
             device_name=/dev/$raid_device_name
             /sbin/udevadm control --stop-exec-queue
             yes | /sbin/mdadm --create $device_name --level=0 -c256 --raid-devices=$num_devices $devices
             /sbin/udevadm control --start-exec-queue
-            /sbin/mdadm --detail --scan > /etc/mdadm.conf
+            /sbin/mdadm --detail --scan >/etc/mdadm.conf
             /sbin/blockdev --setra 32 $device_name
         else
             device_name="/dev/$device_names"
         fi

         # Mount the $root_dir drive(s)
         /sbin/mkfs.$fs_type $mount_options -f $device_name
         # We add an entry for the device to /etc/fstab so it is automatically mounted following a
         # machine reboot. The device is not guaranteed to be assigned the same name across restarts so
         # we use its UUID in order to identify it.
         #
         # We also specify type=$fs_type in the /etc/fstab entry because specifying type=auto on
         # Amazon Linux AMI 2018.03 leads to the drive not being mounted automatically following a
         # machine reboot.
         device_uuid=$(blkid -o value -s UUID "$device_name")
         echo "Adding entry to /etc/fstab for device '$device_name' with UUID '$device_uuid'"
         echo "UUID=$device_uuid /$root_dir $fs_type noatime 0 0" | tee -a /etc/fstab
         mkdir /$root_dir || true
         chmod 777 /$root_dir
         mount -t $fs_type "UUID=$device_uuid" /$root_dir
-        for sub_dir in $sub_dirs
-        do
+        for sub_dir in $sub_dirs; do
             mkdir -p /$root_dir/$sub_dir
             chmod 1777 /$root_dir/$sub_dir
         done
         chown -R $user_group /$root_dir
     else
         echo "Unsupported OS '$(uname)'"
         exit 0
     fi
 }

 mount_drive data "db tmp" "$data_device_names" "$data_raid_device_name" "$mount_options" "$fs_type" "$user_group"
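shfmt also pads the alternation bar in case patterns, which is the "\?|*)" to "\? | *)" change above; a sketch:

# Before:
case "$1" in
a|b) echo "a or b" ;;
esac
# After shfmt:
case "$1" in
a | b) echo "a or b" ;;
esac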
@@ -40,7 +40,7 @@ while getopts p:fin opt; do
     esac
 done

-run () {
+run() {
     echo "$@"
     if [[ "${dry_run}" == 1 ]]; then
         return
@@ -79,17 +79,17 @@ if [[ "${allow_no_venv}" != 1 ]]; then
 fi

 # check if poetry should be installed via pip
 need_poetry_install=0 # 0 = no, 1 = yes
 if ! "${py3}" -m pip show poetry &>/dev/null; then
     echo "Poetry not found in this interpreter, installing via pip." >&2
     need_poetry_install=1
 fi

 # we'll need to use pip this time around
-if (( need_poetry_install )); then
+if ((need_poetry_install)); then
     run "${py3}" -m pip install "${pip_opts[@]}" -r poetry_requirements.txt
 fi

 run env \
     PYTHON_KEYRING_BACKEND="keyring.backends.null.Keyring" \
     "${py3}" -m poetry sync --no-root
@@ -10,28 +10,28 @@ ZIP_FILE=$3
 LOCAL=$4

 if [ -z "$REMOTE_USER" ] || [ -z "$REMOTE_HOST" ] || [ -z "$ZIP_FILE" ]; then
     echo "Usage: $0 <remote_user> <remote_host> <zip_file>"
     exit 1
 fi

 if [ -z "$LOCAL" ]; then
     ssh ${REMOTE_USER}@${REMOTE_HOST} "mkdir -p ~/.engflow/creds"
     scp ${ZIP_FILE} ${REMOTE_USER}@${REMOTE_HOST}:~/.engflow/creds
     ssh ${REMOTE_USER}@${REMOTE_HOST} "cd ~/.engflow/creds; unzip -o engflow-mTLS.zip; rm engflow-mTLS.zip"

     ssh ${REMOTE_USER}@${REMOTE_HOST} "chown ${REMOTE_USER}:${REMOTE_USER} /home/${REMOTE_USER}/.engflow/creds/engflow.crt /home/${REMOTE_USER}/.engflow/creds/engflow.key"
     ssh ${REMOTE_USER}@${REMOTE_HOST} "chmod 600 /home/${REMOTE_USER}/.engflow/creds/engflow.crt /home/${REMOTE_USER}/.engflow/creds/engflow.key"

     ssh ${REMOTE_USER}@${REMOTE_HOST} "echo \"common --tls_client_certificate=/home/${REMOTE_USER}/.engflow/creds/engflow.crt\" >> ~/.bazelrc"
     ssh ${REMOTE_USER}@${REMOTE_HOST} "echo \"common --tls_client_key=/home/${REMOTE_USER}/.engflow/creds/engflow.key\" >> ~/.bazelrc"
 else
     mkdir -p $HOME/.engflow/creds
     unzip -o "$ZIP_FILE"
     rm "$ZIP_FILE"
     mv engflow.crt $HOME/.engflow/creds
     mv engflow.key $HOME/.engflow/creds
     chown $USER $HOME/.engflow/creds/engflow.crt $HOME/.engflow/creds/engflow.key
     chmod 600 $HOME/.engflow/creds/engflow.crt $HOME/.engflow/creds/engflow.key
-    echo "common --tls_client_certificate=$HOME/.engflow/creds/engflow.crt" >> $HOME/.bazelrc
-    echo "common --tls_client_key=$HOME/.engflow/creds/engflow.key" >> $HOME/.bazelrc
+    echo "common --tls_client_certificate=$HOME/.engflow/creds/engflow.crt" >>$HOME/.bazelrc
+    echo "common --tls_client_key=$HOME/.engflow/creds/engflow.key" >>$HOME/.bazelrc
 fi
@@ -1,41 +0,0 @@
-#!/bin/bash
-set +o errexit
-
-shfmt=shfmt
-if [ -n "$SHFMT_PATH" ]; then
-    shfmt=$(readlink $SHFMT_PATH)
-fi
-
-if [ -n "$BUILD_WORKSPACE_DIRECTORY" ]; then
-    cd $BUILD_WORKSPACE_DIRECTORY
-fi
-
-if ! command -v $shfmt &>/dev/null; then
-    echo "Could not find shfmt at $shfmt"
-    exit 1
-fi
-
-lint_dirs="evergreen"
-
-if [ "$1" = "fix" ]; then
-    $shfmt -w -i 2 -bn -sr "$lint_dirs"
-fi
-
-output_file="shfmt_output.txt"
-exit_code=0
-
-$shfmt -d -i 2 -bn -sr "$lint_dirs" >"$output_file"
-if [ -s "$output_file" ]; then
-    echo "ERROR: Found formatting errors in shell script files in directories: $lint_dirs"
-    echo ""
-    cat "$output_file"
-    echo ""
-    echo "To fix formatting errors run"
-    echo ""
-    echo "  ./buildscripts/shellscripts-linters.sh fix"
-    echo ""
-    exit_code=1
-fi
-rm -rf "$output_file"
-
-exit "$exit_code"
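The deleted wrapper pinned shfmt's style on the command line and only scanned evergreen/. Under rules_lint, a bare shfmt reads .editorconfig instead, so the old and (approximate) new check invocations compare as:

# Old wrapper: 2-space indent (-i 2), binary ops may start a line (-bn),
# spaced redirects (-sr), evergreen/ only:
shfmt -d -i 2 -bn -sr evergreen
# New setup, roughly what rules_lint runs: no style flags, so the [*.sh]
# block from .editorconfig (4-space indent, default spacing) applies:
shfmt -d .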
@@ -8,4 +8,3 @@ set -vx

 NAME=protobuf
 VERSION="mongo/v4.25.0"
-
@@ -7,8 +7,8 @@ set -euo pipefail
 IFS=$'\n\t'

 if [ "$#" -ne 0 ]; then
     echo "This script does not take any arguments"
     exit 1
 fi

 # Create a temporary directory to clone and configure librdkafka
@@ -42,11 +42,11 @@ mv config.h $PLATFORM_DIR/$platformName/include

 # Remove un-used files
 rm -rf CHANGELOG.md CODE_OF_CONDUCT.md CONFIGURATION.md CONTRIBUTING.md INTRODUCTION.md \
     README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
     CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
     Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
     src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
     src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log

 pushd src
 # Replace all instances of the string "LZ4" and "XXH" with "KLZ4" and "KXXH" in the C source code.
@@ -7,8 +7,8 @@ set -euo pipefail
 IFS=$'\n\t'

 if [ "$#" -ne 0 ]; then
     echo "This script does not take any arguments"
     exit 1
 fi

 # Create a temporary directory to clone and configure librdkafka
@@ -43,11 +43,11 @@ mv config.h $PLATFORM_DIR/$platformName/include

 # Remove un-used files
 rm -rf CHANGELOG.md CODE_OF_CONDUCT.md CONFIGURATION.md CONTRIBUTING.md INTRODUCTION.md \
     README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
     CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
     Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
     src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
     src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log

 pushd src
 # Replace all instances of the string "LZ4" and "XXH" with "KLZ4" and "KXXH" in the C source code.
@@ -7,8 +7,8 @@ set -euo pipefail
 IFS=$'\n\t'

 if [ "$#" -ne 0 ]; then
     echo "This script does not take any arguments"
     exit 1
 fi

 # Create a temporary directory to clone and configure librdkafka
@ -43,11 +43,11 @@ mv config.h $PLATFORM_DIR/$platformName/include
|
||||||
|
|
||||||
# Remove un-used files
|
# Remove un-used files
|
||||||
rm -rf CHANGELOG.md CODE_OF_CONDUCT.md CONFIGURATION.md CONTRIBUTING.md INTRODUCTION.md \
|
rm -rf CHANGELOG.md CODE_OF_CONDUCT.md CONFIGURATION.md CONTRIBUTING.md INTRODUCTION.md \
|
||||||
README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
|
README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
|
||||||
CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
|
CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
|
||||||
Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
|
Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
|
||||||
src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
|
src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
|
||||||
src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log
|
src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log
|
||||||
|
|
||||||
pushd src
|
pushd src
|
||||||
# Replace all instances of the string "LZ4" and "XXH" with "KLZ4" and "KXXH" in the C source code.
|
# Replace all instances of the string "LZ4" and "XXH" with "KLZ4" and "KXXH" in the C source code.
|
||||||
|
|
|
||||||
|
|
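The rename described in the comment above is the usual vendored-library symbol-prefixing step: the server links other copies of lz4/xxhash, so the bundled identifiers are renamed to avoid clashes. The script's literal command is not shown in these hunks; a typical way to perform such a rename looks like this sketch (hypothetical, not the script's exact invocation):

# Hypothetical sketch: prefix LZ4/XXH identifiers so the vendored copies
# cannot collide with other lz4/xxhash symbols linked into the server.
find . \( -name '*.c' -o -name '*.h' \) -print0 |
    xargs -0 sed -i 's/LZ4/KLZ4/g; s/XXH/KXXH/g'
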
@@ -5,14 +5,14 @@ cd "$BASEDIR/../"

 yamllint -c etc/yamllint_config.yml buildscripts etc jstests

-PATH="$PATH:$HOME" evergreen evaluate etc/evergreen.yml > etc/evaluated_evergreen.yml
-PATH="$PATH:$HOME" evergreen evaluate etc/evergreen_nightly.yml > etc/evaluated_evergreen_nightly.yml
+PATH="$PATH:$HOME" evergreen evaluate etc/evergreen.yml >etc/evaluated_evergreen.yml
+PATH="$PATH:$HOME" evergreen evaluate etc/evergreen_nightly.yml >etc/evaluated_evergreen_nightly.yml

 # Remove references to the DSI repo before evergreen evaluate.
 # The DSI module references break 'evaluate', the system_perf config should
 # parse without them, and we don't want changes to the DSI repository to
 # break checking that the rest of the imports etc. work.
-awk '/lint_yaml trim start/{drop=1} /lint_yaml trim end/{drop=0} !drop' etc/system_perf.yml > etc/trimmed_system_perf.yml
-PATH="$PATH:$HOME" evergreen evaluate etc/trimmed_system_perf.yml > etc/evaluated_system_perf.yml
+awk '/lint_yaml trim start/{drop=1} /lint_yaml trim end/{drop=0} !drop' etc/system_perf.yml >etc/trimmed_system_perf.yml
+PATH="$PATH:$HOME" evergreen evaluate etc/trimmed_system_perf.yml >etc/evaluated_system_perf.yml

 python -m evergreen_lint -c ./etc/evergreen_lint.yml lint

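shfmt's style removes the space between a redirect operator and its target, so `cmd > file` becomes `cmd >file`; the two spellings are identical to the shell. A minimal sketch of the equivalence (hypothetical paths):

# Both lines produce the same file; the space after ">" is purely stylistic.
echo "hello" > /tmp/demo_a.txt
echo "hello" >/tmp/demo_b.txt
diff /tmp/demo_a.txt /tmp/demo_b.txt && echo "identical"
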
@@ -1,5 +1,5 @@
 silent_grep() {
-    command grep -q > /dev/null 2>&1 "$@"
+    command grep -q "$@" >/dev/null 2>&1
 }

 idem_file_append() {
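A redirection may appear anywhere in a simple command, so the old `command grep -q > /dev/null 2>&1 "$@"` already behaved correctly; the rewrite moves the redirections after the arguments, the conventional order. A small sketch of that equivalence (hypothetical path):

# Redirections are processed wherever they appear on the line.
>/tmp/out.txt echo "written all the same" # redirection first
echo "written all the same" >/tmp/out.txt # conventional placement
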
@@ -19,10 +19,10 @@ idem_file_append() {
     local end_marker="# END $2"
     if ! silent_grep "^$start_marker" "$1"; then
         {
-            echo -e "\n$start_marker";
-            echo -e "$3";
-            echo -e "$end_marker";
-        } >> "$1"
+            echo -e "\n$start_marker"
+            echo -e "$3"
+            echo -e "$end_marker"
+        } >>"$1"
     fi
 }

@@ -30,7 +30,8 @@ setup_bash() {
     # Bash profile should source .bashrc
     echo "################################################################################"
     echo "Setting up bash..."
-    local block=$(cat <<BLOCK
+    local block=$(
+        cat <<BLOCK
 if [[ -f ~/.bashrc ]]; then
     source ~/.bashrc
 fi
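shfmt moves the `cat <<BLOCK` onto its own line inside the command substitution, while the heredoc body is left alone: an unquoted delimiter still expands variables but preserves the body's literal layout. A minimal sketch of the same construction (hypothetical variable name):

# Capture a multi-line template into a variable; the body keeps its layout.
block=$(
    cat <<BLOCK
if [[ -f ~/.bashrc ]]; then
    source ~/.bashrc
fi
BLOCK
)
printf '%s\n' "$block"
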
@@ -67,7 +68,7 @@ setup_poetry() {
     echo "################################################################################"
     echo "Installing 'poetry' command..."
     export PATH="$PATH:$HOME/.local/bin"
-    if command -v poetry &> /dev/null; then
+    if command -v poetry &>/dev/null; then
         echo "'poetry' command exists; skipping setup"
     else
         pipx install poetry --pip-args="-r $(pwd)/poetry_requirements.txt"
@@ -78,7 +79,7 @@ setup_poetry() {
 setup_pipx() {
     echo "################################################################################"
     echo "Installing 'pipx' command..."
-    if command -v pipx &> /dev/null; then
+    if command -v pipx &>/dev/null; then
         echo "'pipx' command exists; skipping setup"
     else
         export PATH="$PATH:$HOME/.local/bin"
@@ -112,7 +113,7 @@ setup_db_contrib_tool() {
     echo "################################################################################"
     echo "Installing 'db-contrib-tool' command..."
     export PATH="$PATH:$HOME/.local/bin"
-    if command -v db-contrib-tool &> /dev/null; then
+    if command -v db-contrib-tool &>/dev/null; then
         echo "'db-contrib-tool' command exists; skipping setup"
     else
         pipx install db-contrib-tool
@@ -125,7 +126,7 @@ setup_clang_config() {
     echo "Installing clang config..."

     bazel build compiledb

     echo "Finished installing clang config..."
 }

@@ -155,14 +156,14 @@ run_setup() {
     set +o nounset
     source ~/.bashrc
     set -o nounset

     setup_bash

     setup_clang_config
     setup_gdb
     setup_pipx
     setup_db_contrib_tool # This step requires `setup_pipx` to have been run.
     setup_poetry # This step requires `setup_pipx` to have been run.

     setup_mongo_venv # This step requires `setup_poetry` to have been run.

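`&>` is bash's shorthand for redirecting both stdout and stderr at once, and shfmt writes it without a following space like the other redirect operators. A minimal sketch, including the POSIX-portable spelling:

# bash shorthand: silence both stdout and stderr.
command -v poetry &>/dev/null && echo "poetry found"
# POSIX-portable equivalent of the same check:
command -v poetry >/dev/null 2>&1 && echo "poetry found"
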
@@ -296,11 +296,6 @@ sh_binary(
     srcs = ["lint_fuzzer_sanity_patch.sh"],
 )

-sh_binary(
-    name = "lint_shellscripts",
-    srcs = ["lint_shellscripts.sh"],
-)
-
 sh_binary(
     name = "lint_yaml",
     srcs = ["lint_yaml.sh"],

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 set -o errexit
@@ -8,19 +8,19 @@ antithesis_repo="us-central1-docker.pkg.dev/molten-verve-216720/mongodb-reposito

 # tag images as evergreen[-${antithesis_build_type}]-{latest,patch} or just ${antithesis_image_tag}
 if [ -n "${antithesis_image_tag:-}" ]; then
     echo "Using provided tag: '$antithesis_image_tag' for docker pushes"
     tag=$antithesis_image_tag
 else
     tag="evergreen"
     if [[ -n "${antithesis_build_type}" ]]; then
         tag="${tag}-${antithesis_build_type}"
     fi

     if [ "${is_patch}" = "true" ]; then
         tag="${tag}-patch"
     else
         tag="${tag}-latest-${branch_name}"
     fi
 fi

 # Clean up any leftover docker artifacts
@@ -33,9 +33,9 @@ sudo docker network prune --force
 sudo service docker stop
 sudo mkdir -p /data/mci/docker
 if ! sudo jq -e . /etc/docker/daemon.json; then
     echo "docker daemon.json did not exist or was invalid"
     echo "setting docker daemon.json to {}"
     sudo sh -c 'echo "{}" > /etc/docker/daemon.json'
 fi
 MODIFIED_JSON=$(sudo jq '."data-root" |= "/data/mci/docker"' /etc/docker/daemon.json)
 sudo echo "${MODIFIED_JSON}" | sudo tee /etc/docker/daemon.json
@@ -43,7 +43,7 @@ echo "docker daemon.json: set data-root to /data/mci/docker"
 sudo service docker start

 # Login
-echo "${antithesis_repo_key}" > mongodb.key.json
+echo "${antithesis_repo_key}" >mongodb.key.json
 cat mongodb.key.json | sudo docker login -u _json_key https://us-central1-docker.pkg.dev --password-stdin
 rm mongodb.key.json

@@ -64,15 +64,15 @@ timeout -v 1800 docker exec workload buildscripts/resmoke.py run --suite ${suite
 RET=$?
 set -o errexit

-docker-compose -f docker_compose/${suite}/docker-compose.yml logs > docker_logs.txt
+docker-compose -f docker_compose/${suite}/docker-compose.yml logs >docker_logs.txt
 docker-compose -f docker_compose/${suite}/docker-compose.yml down

 # Change the permissions of all of the files in the docker compose directory to the current user.
 # Some of the data files cannot be archived otherwise.
 sudo chown -R $USER docker_compose/${suite}/
 if [ $RET -ne 0 ]; then
     echo "Resmoke sanity check has failed"
     exit $RET
 fi

 # Push Image

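Note that the `>` inside `sudo sh -c 'echo "{}" > /etc/docker/daemon.json'` keeps its space: it sits in a single-quoted string, so the formatter treats it as data handed to the child shell, not as syntax. A small illustration (hypothetical path):

# Quoted, ">" is just text:
printf '%s\n' 'echo "{}" > /tmp/target.json' # prints the text, writes nothing
# The child shell then parses the same text as a real redirection:
sh -c 'echo "{}" > /tmp/target.json' # actually writes /tmp/target.json
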
@@ -1,7 +1,7 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"

 source "$DIR/bazel_utility_functions.sh"
 (
     cd $DIR/..
     exec $(bazel_get_binary_path) "$@"
 )

@@ -1,11 +1,11 @@
 bazel_rbe_supported() {

     OS="$(uname)"
     ARCH="$(uname -m)"

     if [ "$ARCH" == "aarch64" ] || [ "$ARCH" == "arm64" ] || [ "$ARCH" == "x86_64" ]; then
         return 0
     else
         return 1
     fi
 }

@@ -8,7 +8,7 @@
 # * ${args} - List of additional Bazel arguments (e.g.: "--config=clang-tidy")

 # Needed for evergreen scripts that use evergreen expansions and utility methods.
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -22,67 +22,67 @@ activate_venv
 # evergreen patch.
 build_patch_id="${build_patch_id:-${reuse_compile_from}}"
 if [ -n "${build_patch_id}" ]; then
     echo "build_patch_id detected, trying to skip task"

     # On windows we change the extension to zip
     if [ -z "${ext}" ]; then
         ext="tgz"
     fi

     extra_db_contrib_args=""

     # get the platform of the dist archive. This is needed if
     # db-contrib-tool cannot autodetect the platform of the ec2 instance.
     regex='MONGO_DISTMOD=([a-z0-9]*)'
     if [[ ${bazel_compile_flags} =~ ${regex} ]]; then
         extra_db_contrib_args="${extra_db_contrib_args} --platform=${BASH_REMATCH[1]}"
     fi

     download_dir="./tmp_db_contrib_tool_download_dir"
     rm -rf ${download_dir}

     if [ "${task_name}" = "archive_dist_test" ]; then
         file_name="dist-test-stripped.${ext}"
         invocation="db-contrib-tool setup-repro-env ${build_patch_id} \
             --variant=${compile_variant} --extractDownloads=False \
             --binariesName=${file_name} --installDir=${download_dir} ${extra_db_contrib_args}"
     fi

     if [ "${task_name}" = "archive_dist_test_debug" ]; then
         file_name="dist-test-debug.${ext}"
         invocation="db-contrib-tool setup-repro-env ${build_patch_id} \
             --variant=${compile_variant} --extractDownloads=False \
             --debugsymbolsName=${file_name} --installDir=${download_dir} \
             --skipBinaries --downloadSymbols ${extra_db_contrib_args}"
     fi

     if [ -n "${invocation}" ]; then
         setup_db_contrib_tool

         echo "db-contrib-tool invocation: ${invocation}"
         eval ${invocation}
         if [ $? -ne 0 ]; then
             echo "Could not retrieve files with db-contrib-tool"
             exit 1
         fi
         file_location=$(find "${download_dir}" -name "${file_name}")
         echo "Downloaded: ${file_location}"
         mkdir -p bazel-bin
         mv "${file_location}" "bazel-bin/${file_name}"
         echo "Moved ${file_name} to the correct location"
         echo "Skipping ${task_name} compile"
         exit 0
     fi

     echo "Could not skip ${task_name} compile, compiling as normal"
 fi

 # --build-mongot is a compile flag used by the evergreen build variants that run end-to-end search
 # suites, as it downloads the necessary mongot binary.
 if [ "${build_mongot}" = "true" ]; then
     setup_db_contrib_tool
     use_db_contrib_tool_mongot
     bazel_args="${bazel_args} --include_mongot=True"
 fi

 # This is hacky way to pass off build time from archive_dist_test to archive_dist_test_debug
@@ -91,14 +91,14 @@ fi
 # build-id for debugging as they will be different when -Wl,-S is passed in.
 # The relinked binaries should still be hash identical when stripped with strip
 if [ "${skip_debug_link}" = "true" ]; then
     export compile_variant="${compile_variant}"
     export version_id="${version_id}"
     if [ "${task_name}" = "archive_dist_test" ]; then
         task_compile_flags="${task_compile_flags} --simple_build_id=True --linkopt='-Wl,-S' --separate_debug=False"
     fi
     if [ "${task_name}" = "archive_dist_test_debug" ]; then
         task_compile_flags="${task_compile_flags} --simple_build_id=True"
     fi
 fi

 set -o pipefail
@@ -110,7 +110,7 @@ source ./evergreen/bazel_utility_functions.sh
 source ./evergreen/bazel_RBE_supported.sh

 if [[ "${evergreen_remote_exec}" != "on" ]]; then
     LOCAL_ARG="$LOCAL_ARG --jobs=auto"
 fi

 BAZEL_BINARY=$(bazel_get_binary_path)
@@ -119,19 +119,19 @@ BAZEL_BINARY=$(bazel_get_binary_path)
 # for retries.
 TIMEOUT_CMD=""
 if [ -n "${build_timeout_seconds}" ]; then
     TIMEOUT_CMD="timeout ${build_timeout_seconds}"
 elif [[ "${evergreen_remote_exec}" == "on" ]]; then
     # Timeout remote execution runs in 60 minutes as a workaround for
     # scheduling timeout bugs
     TIMEOUT_CMD="timeout 3600"
 fi

 if is_ppc64le; then
     LOCAL_ARG="$LOCAL_ARG --jobs=48"
 fi

 if is_s390x; then
     LOCAL_ARG="$LOCAL_ARG --jobs=16"
 fi

 # If we are doing a patch build or we are building a non-push
@@ -139,23 +139,23 @@ fi
 # flag. Otherwise, this is potentially a build that "leaves
 # the building", so we do want that flag.
 if [ "${is_patch}" = "true" ] || [ -z "${push_bucket}" ] || [ "${compiling_for_test}" = "true" ]; then
     echo "This is a non-release build."
 else
     LOCAL_ARG="$LOCAL_ARG --config=public-release"
 fi

 for i in {1..3}; do
     eval ${TIMEOUT_CMD} $BAZEL_BINARY build --verbose_failures $LOCAL_ARG ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} \
-        --define=MONGO_VERSION=${version} ${patch_compile_flags} ${targets} 2>&1 | tee bazel_stdout.log \
-        && RET=0 && break || RET=$? && sleep 60
+        --define=MONGO_VERSION=${version} ${patch_compile_flags} ${targets} 2>&1 | tee bazel_stdout.log &&
+        RET=0 && break || RET=$? && sleep 60
     if [ $RET -eq 124 ]; then
         echo "Bazel timed out after ${build_timeout_seconds} seconds, retrying..."
     else
         echo "Errors were found during the bazel run, here are the errors:" 1>&2
         grep "ERROR:" bazel_stdout.log 1>&2
         echo "Bazel failed to execute, retrying..."
     fi
     $BAZEL_BINARY shutdown
 done

 exit $RET

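The retry loop chains on exit status: when the pipeline succeeds, `RET=0 && break` leaves the loop; otherwise `RET=$?` records the failure code and `sleep 60` backs off before the next attempt. shfmt only moved the `&&` to the end of the previous line, where a trailing logical operator continues the command without needing a backslash. A stripped-down sketch of the same pattern (hypothetical command):

# Retry a flaky step up to three times, preserving its exit code.
for i in {1..3}; do
    ./flaky_build_step.sh &&
        RET=0 && break || RET=$? && sleep 60
done
exit $RET
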
@@ -8,7 +8,7 @@
 # * ${args} - Extra command line args to pass to "bazel coverage"

 # Needed for evergreen scripts that use evergreen expansions and utility methods.
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -24,7 +24,7 @@ eval echo "Execution environment: Args: ${args} Target: ${target}"
 BAZEL_BINARY=bazel

 # Print command being run to file that can be uploaded
-echo "python buildscripts/install_bazel.py" > bazel-invocation.txt
+echo "python buildscripts/install_bazel.py" >bazel-invocation.txt

-echo " bazel coverage ${args} ${target}" >> bazel-invocation.txt
+echo " bazel coverage ${args} ${target}" >>bazel-invocation.txt
 $BAZEL_BINARY coverage ${args} ${target}

|
||||||
# * ${redact_args} - If set, redact the args in the report
|
# * ${redact_args} - If set, redact the args in the report
|
||||||
|
|
||||||
# Needed for evergreen scripts that use evergreen expansions and utility methods.
|
# Needed for evergreen scripts that use evergreen expansions and utility methods.
|
||||||
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
|
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
|
||||||
. "$DIR/prelude.sh"
|
. "$DIR/prelude.sh"
|
||||||
|
|
||||||
cd src
|
cd src
|
||||||
|
|
@ -25,45 +25,45 @@ source ./evergreen/bazel_utility_functions.sh
|
||||||
source ./evergreen/bazel_RBE_supported.sh
|
source ./evergreen/bazel_RBE_supported.sh
|
||||||
|
|
||||||
if bazel_rbe_supported; then
|
if bazel_rbe_supported; then
|
||||||
LOCAL_ARG=""
|
LOCAL_ARG=""
|
||||||
else
|
else
|
||||||
LOCAL_ARG="--config=local"
|
LOCAL_ARG="--config=local"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ "${evergreen_remote_exec}" != "on" ]]; then
|
if [[ "${evergreen_remote_exec}" != "on" ]]; then
|
||||||
LOCAL_ARG="--config=local"
|
LOCAL_ARG="--config=local"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
BAZEL_BINARY=$(bazel_get_binary_path)
|
BAZEL_BINARY=$(bazel_get_binary_path)
|
||||||
|
|
||||||
# AL2 stores certs in a nonstandard location
|
# AL2 stores certs in a nonstandard location
|
||||||
if [[ -f /etc/os-release ]]; then
|
if [[ -f /etc/os-release ]]; then
|
||||||
DISTRO=$(awk -F '[="]*' '/^PRETTY_NAME/ { print $2 }' < /etc/os-release)
|
DISTRO=$(awk -F '[="]*' '/^PRETTY_NAME/ { print $2 }' </etc/os-release)
|
||||||
if [[ $DISTRO == "Amazon Linux 2" ]]; then
|
if [[ $DISTRO == "Amazon Linux 2" ]]; then
|
||||||
export SSL_CERT_DIR=/etc/pki/tls/certs
|
export SSL_CERT_DIR=/etc/pki/tls/certs
|
||||||
export SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt
|
export SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt
|
||||||
elif [[ $DISTRO == "Red Hat Enterprise Linux"* ]]; then
|
elif [[ $DISTRO == "Red Hat Enterprise Linux"* ]]; then
|
||||||
export SSL_CERT_DIR=/etc/pki/ca-trust/extracted/pem
|
export SSL_CERT_DIR=/etc/pki/ca-trust/extracted/pem
|
||||||
export SSL_CERT_FILE=/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
|
export SSL_CERT_FILE=/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ -n "$redact_args" ]]; then
|
if [[ -n "$redact_args" ]]; then
|
||||||
INVOCATION_WITH_REDACTION="${target}"
|
INVOCATION_WITH_REDACTION="${target}"
|
||||||
else
|
else
|
||||||
INVOCATION_WITH_REDACTION="${target} ${args}"
|
INVOCATION_WITH_REDACTION="${target} ${args}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Print command being run to file that can be uploaded
|
# Print command being run to file that can be uploaded
|
||||||
echo "python buildscripts/install_bazel.py" > bazel-invocation.txt
|
echo "python buildscripts/install_bazel.py" >bazel-invocation.txt
|
||||||
echo "bazel run --verbose_failures ${bazel_compile_flags} ${task_compile_flags} ${LOCAL_ARG} ${INVOCATION_WITH_REDACTION}" >> bazel-invocation.txt
|
echo "bazel run --verbose_failures ${bazel_compile_flags} ${task_compile_flags} ${LOCAL_ARG} ${INVOCATION_WITH_REDACTION}" >>bazel-invocation.txt
|
||||||
|
|
||||||
# Run bazel command, retrying up to five times
|
# Run bazel command, retrying up to five times
|
||||||
MAX_ATTEMPTS=5
|
MAX_ATTEMPTS=5
|
||||||
for ((i = 1; i <= $MAX_ATTEMPTS; i++)); do
|
for ((i = 1; i <= $MAX_ATTEMPTS; i++)); do
|
||||||
eval $env $BAZEL_BINARY run --verbose_failures $LOCAL_ARG ${target} ${args} >> bazel_output.log 2>&1 && RET=0 && break || RET=$? && sleep 10
|
eval $env $BAZEL_BINARY run --verbose_failures $LOCAL_ARG ${target} ${args} >>bazel_output.log 2>&1 && RET=0 && break || RET=$? && sleep 10
|
||||||
if [ $i -lt $MAX_ATTEMPTS ]; then echo "Bazel failed to execute, retrying ($(($i + 1)) of $MAX_ATTEMPTS attempts)... " >> bazel_output.log 2>&1; fi
|
if [ $i -lt $MAX_ATTEMPTS ]; then echo "Bazel failed to execute, retrying ($(($i + 1)) of $MAX_ATTEMPTS attempts)... " >>bazel_output.log 2>&1; fi
|
||||||
$BAZEL_BINARY shutdown
|
$BAZEL_BINARY shutdown
|
||||||
done
|
done
|
||||||
|
|
||||||
$python ./buildscripts/simple_report.py --test-name "bazel run ${INVOCATION_WITH_REDACTION}" --log-file bazel_output.log --exit-code $RET
|
$python ./buildscripts/simple_report.py --test-name "bazel run ${INVOCATION_WITH_REDACTION}" --log-file bazel_output.log --exit-code $RET
|
||||||
|
|
|
||||||
|
|
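The `awk -F '[="]*'` field separator splits each line on runs of `=` and `"` characters, so an os-release line such as `PRETTY_NAME="Amazon Linux 2"` yields the distribution name as the second field. A minimal sketch:

# Extract the human-readable distro name from os-release.
awk -F '[="]*' '/^PRETTY_NAME/ { print $2 }' </etc/os-release
# On Amazon Linux 2 this prints: Amazon Linux 2
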
@@ -5,7 +5,7 @@
 # * ${targets} - Test targets
 # * ${bazel_args} - Extra command line args to pass to "bazel test"

-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -25,7 +25,7 @@ source ./evergreen/bazel_RBE_supported.sh

 LOCAL_ARG=""
 if [[ "${evergreen_remote_exec}" != "on" ]]; then
     LOCAL_ARG="$LOCAL_ARG --jobs=auto"
 fi

 BAZEL_BINARY=$(bazel_get_binary_path)
@@ -34,15 +34,15 @@ BAZEL_BINARY=$(bazel_get_binary_path)
 # for retries.
 TIMEOUT_CMD=""
 if [ -n "${build_timeout_seconds}" ]; then
     TIMEOUT_CMD="timeout ${build_timeout_seconds}"
 fi

 if is_ppc64le; then
     LOCAL_ARG="$LOCAL_ARG --jobs=48"
 fi

 if is_s390x; then
     LOCAL_ARG="$LOCAL_ARG --jobs=16"
 fi

 # If we are doing a patch build or we are building a non-push
@@ -50,55 +50,55 @@ fi
 # flag. Otherwise, this is potentially a build that "leaves
 # the building", so we do want that flag.
 if [ "${is_patch}" = "true" ] || [ -z "${push_bucket}" ] || [ "${compiling_for_test}" = "true" ]; then
     echo "This is a non-release build."
 else
     LOCAL_ARG="$LOCAL_ARG --config=public-release"
 fi

 if [ -n "${test_timeout_sec}" ]; then
     # s390x and ppc64le often run slower than other architectures
     if is_s390x_or_ppc64le; then
         test_timeout_sec=$(($test_timeout_sec * 4))
     fi
     bazel_args="${bazel_args} --test_timeout=${test_timeout_sec}"
 fi

 ALL_FLAGS="--verbose_failures ${LOCAL_ARG} ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} --define=MONGO_VERSION=${version} ${patch_compile_flags}"
-echo ${ALL_FLAGS} > .bazel_build_flags
+echo ${ALL_FLAGS} >.bazel_build_flags

 set +o errexit

 # Retry the build since it's deterministic and may fail due to transient issues.
 for i in {1..3}; do
     eval ${TIMEOUT_CMD} ${BAZEL_BINARY} build ${ALL_FLAGS} ${targets} && RET=0 && break || RET=$? && sleep 1
     if [ $RET -eq 124 ]; then
         echo "Bazel build timed out after ${build_timeout_seconds} seconds, retrying..."
     else
         echo "Bazel build failed, retrying..."
     fi
     $BAZEL_BINARY shutdown
 done

 for i in {1..3}; do
-    eval ${TIMEOUT_CMD} ${BAZEL_BINARY} test ${ALL_FLAGS} ${targets} 2>&1 | tee bazel_stdout.log \
-        && RET=0 && break || RET=$? && sleep 1
+    eval ${TIMEOUT_CMD} ${BAZEL_BINARY} test ${ALL_FLAGS} ${targets} 2>&1 | tee bazel_stdout.log &&
+        RET=0 && break || RET=$? && sleep 1
     if [ $RET -eq 124 ]; then
         echo "Bazel timed out after ${build_timeout_seconds} seconds, retrying..."
     else
         echo "Errors were found during the bazel test, failing the execution"
         break
     fi
     $BAZEL_BINARY shutdown
 done

 set -o errexit

 if [[ $RET != 0 ]]; then
     # The --config flag needs to stay consistent between invocations to avoid evicting the previous results.
     # Strip out anything that isn't a --config flag that could interfere with the run command.
     CONFIG_FLAGS=$(echo "${ALL_FLAGS}" | tr ' ' '\n' | grep -- '--config' | tr '\n' ' ')

     eval ${BAZEL_BINARY} run ${CONFIG_FLAGS} //buildscripts:gather_failed_unittests
 fi

 exit $RET

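Extracting the `--config` flags works by turning spaces into newlines, filtering, and joining back; the bare `--` tells grep to stop option parsing so `--config` is treated as a pattern rather than a grep flag. A minimal sketch (hypothetical flag string):

# Keep only --config=... tokens from a longer flag string.
ALL_FLAGS="--verbose_failures --config=local --jobs=16 --config=dbg"
CONFIG_FLAGS=$(echo "${ALL_FLAGS}" | tr ' ' '\n' | grep -- '--config' | tr '\n' ' ')
echo "$CONFIG_FLAGS" # --config=local --config=dbg
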
@@ -1,39 +1,39 @@
 is_ppc64le() {
     ARCH="$(uname -m)"

     if [[ "$ARCH" == "ppc64le" || "$ARCH" == "ppc64" || "$ARCH" == "ppc" ]]; then
         return 0
     else
         return 1
     fi
 }

 is_s390x() {
     ARCH="$(uname -m)"

     if [[ "$ARCH" == "s390x" || "$ARCH" == "s390" ]]; then
         return 0
     else
         return 1
     fi
 }

 is_s390x_or_ppc64le() {
     if is_ppc64le || is_s390x; then
         return 0
     else
         return 1
     fi
 }

 bazel_get_binary_path() {
     if is_s390x_or_ppc64le; then
         echo "bazel/bazelisk.py"
     elif grep -q "ID=debian" /etc/os-release; then
         echo "bazel/bazelisk.py"
     elif grep -q 'ID="sles"' /etc/os-release; then
         echo "bazel/bazelisk.py"
     else
         echo "bazel"
     fi
 }

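These helpers return an exit status rather than printing, so they compose directly in shell conditionals, while `bazel_get_binary_path` echoes its result for command substitution. A short usage sketch, assuming the functions above are sourced:

# Predicates drive conditionals; the path helper feeds a substitution.
if is_s390x_or_ppc64le; then
    echo "slow architecture detected; lowering parallelism"
fi
BAZEL_BINARY=$(bazel_get_binary_path)
$BAZEL_BINARY version
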
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -11,21 +11,21 @@ $python buildscripts/idl/check_stable_api_commands_have_idl_definitions.py -v --
 $python buildscripts/idl/checkout_idl_files_from_past_releases.py -v idls

 function run_idl_check_compatibility {
     dir=$1
     output=$(
         python buildscripts/idl/idl_check_compatibility.py -v \
             --old-include "$dir/src" \
             --old-include "$dir/src/mongo/db/modules/enterprise/src" \
             --new-include src \
             --new-include src/mongo/db/modules/enterprise/src \
             "$dir/src" src
     )
     exit_code=$?
     echo "Performing idl check compatibility with release: $dir:"
     echo "$output"
     if [ $exit_code -ne 0 ]; then
         exit 255
     fi
 }
 export -f run_idl_check_compatibility
 find idls -maxdepth 1 -mindepth 1 -type d | xargs -n 1 -P 0 -I % bash -c 'run_idl_check_compatibility "$@"' _ %

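`export -f` makes the bash function visible to the `bash -c` children that xargs spawns, `-P 0` runs as many of them in parallel as possible, and the `_` placeholder fills `$0` so each directory lands in `$1`. A minimal sketch (hypothetical function and path):

# Run a bash function once per subdirectory, in parallel.
check_dir() {
    echo "checking $1"
}
export -f check_dir
find ./releases -maxdepth 1 -mindepth 1 -type d |
    xargs -n 1 -P 0 -I % bash -c 'check_dir "$@"' _ %
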
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

@@ -1,5 +1,5 @@
 set -o verbose
 cd src
 if [ -f resmoke_error_code ]; then
     exit $(cat resmoke_error_code)
 fi

@@ -1,5 +1,5 @@
 set -o verbose
 cd src
 if [ -f run_tests_infrastructure_failure ]; then
     exit $(cat run_tests_infrastructure_failure)
 fi

@@ -2,57 +2,57 @@ failed_setup=false

 source ~/.bashrc

-if command -v pipx &> /dev/null; then
+if command -v pipx &>/dev/null; then
     echo "'pipx' command exists"
 else
     echo "pipx command not found - failed setup"
     failed_setup=true
 fi

-if command -v poetry &> /dev/null; then
+if command -v poetry &>/dev/null; then
     echo "'poetry' command exists"
 else
     echo "poetry command not found - failed setup"
     failed_setup=true
 fi

-if command -v db-contrib-tool &> /dev/null; then
+if command -v db-contrib-tool &>/dev/null; then
     echo "'db-contrib-tool' command exists"
 else
     echo "db-contrib-tool command not found - failed setup"
     failed_setup=true
 fi

 if test -d "./python3-venv"; then
     echo "Venv directory exists, checking activation"
     . python3-venv/bin/activate
-    ./buildscripts/resmoke.py run --help &> /dev/null
+    ./buildscripts/resmoke.py run --help &>/dev/null
     if [ $? -eq 0 ]; then
         echo "Virtual workstation set up correctly"
     else
         echo "Virtual workstation failed activation"
         failed_setup=true
     fi
     deactivate
 else
     echo "mongo virtual environment not created correctly - failed setup"
     failed_setup=true
 fi

 if test -d "../Boost-Pretty-Printer"; then
     echo "Pretty printers set up correctly"
 else
     echo "Pretty printers failed setup"
     failed_setup=true
 fi

 if test -f "./compile_commands.json"; then
     echo "Clang configuration set up correctly"
 else
     echo "Clang configuration failed setup"
     failed_setup=true
 fi

 if $failed_setup; then
     exit 1
 fi

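`failed_setup` holds the literal string `true` or `false`, so `if $failed_setup; then` expands the variable and runs the builtin of that name, whose exit status decides the branch. A minimal sketch (hypothetical check command):

# Accumulate failures across independent checks, then exit once.
failed_setup=false
some_check || failed_setup=true # hypothetical check command
if $failed_setup; then
    exit 1
fi
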
@@ -1,13 +1,13 @@
 set -o verbose

 rm -rf \
     /data/db/* \
     mongo-diskstats* \
     mongo-*.tgz \
     ~/.aws \
     ~/.boto \
     venv \
     /data/install \
     /data/multiversion

 exit 0

@@ -1,12 +1,12 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

 activate_venv
 if [ "Windows_NT" = "$OS" ]; then
     vcvars="$(vswhere -latest -property installationPath | tr '\\' '/' | dos2unix.exe)/VC/Auxiliary/Build/"
     cd "$vcvars" && cmd /K "vcvarsall.bat amd64 && cd ${workdir}\src"
 fi
 python -m pip install ninja
 ninja install-core

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -20,19 +20,19 @@ version=${project#mongodb-mongo-}
 version=${version#v}

 if [ ! -z "${multiversion_platform_50_or_later}" ]; then
     platform="${multiversion_platform_50_or_later}"
 fi

 # This is primarily for tests for infrastructure which don't always need the latest
 # binaries.
 db-contrib-tool setup-repro-env \
     --installDir /data/install \
     --linkDir /data/multiversion \
     --edition $edition \
     --platform $platform \
     --architecture $architecture \
     --evgVersionsFile multiversion-downloads.json \
     $version

 dist_test_dir=$(find /data/install -type d -iname "dist-test")
 mv "$dist_test_dir" "$(pwd)"

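`${project#mongodb-mongo-}` and `${version#v}` use `#` parameter expansion to strip a leading pattern, reducing an Evergreen project name to a bare version. A minimal sketch (hypothetical project name):

# Strip known prefixes with ${var#pattern} (shortest-match removal).
project="mongodb-mongo-v7.0"
version=${project#mongodb-mongo-} # -> v7.0
version=${version#v}              # -> 7.0
echo "$version"
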
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -10,5 +10,5 @@ OUTPUT_FILE="build/benchmarks.txt"

 # Concatenate all text files in the directory into the output file
 for file in build/*_bm.txt; do
-    cat "$file" >> "$OUTPUT_FILE"
+    cat "$file" >>"$OUTPUT_FILE"
 done

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -8,7 +8,7 @@ attempts=0
 max_attempts=4

 while ! aws ecr get-login-password --region us-east-1 | podman login --password-stdin --username ${release_tools_container_registry_username_ecr} ${release_tools_container_registry_ecr}; do
     [ "$attempts" -ge "$max_attempts" ] && exit 1
     ((attempts++))
     sleep 10
 done

@@ -10,7 +10,7 @@ python buildscripts/install_bazel.py
 bazel_bin="$HOME/.local/bin/bazelisk"
 # number of parallel jobs to use for build.
 # Even with scale=0 (the default), bc command adds decimal digits in case of multiplication. Division by 1 gives us a whole number with scale=0
-bazel_jobs=$(bc <<< "$(grep -c '^processor' /proc/cpuinfo) * .85 / 1")
+bazel_jobs=$(bc <<<"$(grep -c '^processor' /proc/cpuinfo) * .85 / 1")
 build_config="--config=local --jobs=$bazel_jobs --compiler_type=gcc --opt=off --dbg=False --allocator=system"
 bazel_query='mnemonic("CppCompile|LinkCompile", filter(//src/mongo, deps(//:install-core)) except //src/mongo/db/modules/enterprise/src/streams/third_party/...)'
 bazel_cache="--output_user_root=$workdir/bazel_cache"
@@ -21,14 +21,14 @@ bazelBuildCommand="$bazel_bin $bazel_cache build $build_config //src/mongo/db/mo
 echo "Bazel Build Command: $bazelBuildCommand"
 covIdir="$workdir/covIdir"
 if [ -d "$covIdir" ]; then
     echo "covIdir already exists, meaning idir extracted after download from S3"
 else
     mkdir $workdir/covIdir
 fi
 $workdir/coverity/bin/cov-build --dir "$covIdir" --verbose 0 -j $bazel_jobs --return-emit-failures --parse-error-threshold=99 --bazel $bazelBuildCommand
 ret=$?
 if [ $ret -ne 0 ]; then
     echo "cov-build faild with exit code $ret"
 else
     echo "cov-build was successful"
 fi

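The comment above is the key to the `/ 1`: bc's default scale of 0 makes division truncate, so multiplying the CPU count by .85 and then dividing by 1 floors the result to a whole job count. A minimal sketch:

# Compute 85% of the CPU count, truncated to an integer.
cpus=$(grep -c '^processor' /proc/cpuinfo)
jobs=$(bc <<<"$cpus * .85 / 1")
echo "using $jobs of $cpus cpus"
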
@@ -3,7 +3,7 @@
 # This script verifies that specific symbols, and specific symbols only are
 # exported in mongo_crypt_v1.so

-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -12,8 +12,8 @@ set -o errexit
 set -o verbose

 if [ "$(uname)" != "Linux" ]; then
     echo "Skipping test, this is for linux only"
     exit 0
 fi

 EXTRACT_DIR="bazel-bin/install"
@@ -25,8 +25,8 @@ GDB_PATH="/opt/mongodbtoolchain/v5/bin/gdb"
 find $EXTRACT_DIR

 if [ ! -f "$SOPATH" ]; then
     echo "Error: can not find library at: $SOPATH"
     exit 1
 fi

 #
@@ -51,10 +51,10 @@ mongo_crypt_v1_status_get_explanation@@MONGO_CRYPT_1.0'
 actual="$(readelf -W --dyn-syms "$SOPATH" | awk '$5 == "GLOBAL" && $7 != "UND" && $7 != "ABS" {print $8}' | sort)"

 if [ "$actual" != "$expect" ]; then
     echo "Error: symbols are not as expected in: $SOPATH"
     echo "Diff:"
     diff <(echo "$actual") <(echo "$expect")
     exit 1
 fi

 echo "Mongo Crypt Shared Library exported symbols test succeeded!"
@@ -64,8 +64,8 @@ echo "Mongo Crypt Shared Library exported symbols test succeeded!"
 # and the verify it can be debugged with gdb
 #
 if [ ! -f "$UNITTEST_PATH" ]; then
     echo "Skipping Mongo Crypt Shared Library unit test. Test not found at $UNITTEST_PATH"
     exit 0
 fi

 echo "Running Mongo Crypt Shared Library unit test"
@@ -73,8 +73,8 @@ $UNITTEST_PATH
 echo "Mongo Crypt Shared Library unit test succeeded!"

 if [ ! -f "$GDB_PATH" ]; then
     echo "Skipping Mongo Crypt Shared Library debuggability test. No gdb found at $GDB_PATH"
     exit 0
 fi

 echo "Running Mongo Crypt Shared Library debuggability test"

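The symbol gate reads the dynamic symbol table, keeps defined GLOBAL entries (column 7 filters out undefined and absolute ones), and diffs the sorted list against an allowlist via process substitution. A minimal sketch against an arbitrary shared object (hypothetical path and allowlist):

# Compare a library's exported dynamic symbols against an expected list.
SOPATH=/usr/lib/libexample.so # hypothetical library path
expect='example_init
example_shutdown'
actual="$(readelf -W --dyn-syms "$SOPATH" |
    awk '$5 == "GLOBAL" && $7 != "UND" && $7 != "ABS" {print $8}' | sort)"
diff <(echo "$actual") <(echo "$expect") || echo "symbol set differs"
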
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

@@ -1,8 +1,8 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

 set -o errexit
 activate_venv
-$python -c 'import socket; num_nodes = 5; print("\n".join(["%s:%d" % (socket.gethostname(), port) for port in range(20000, 20000 + num_nodes)]))' > nodes.txt
+$python -c 'import socket; num_nodes = 5; print("\n".join(["%s:%d" % (socket.gethostname(), port) for port in range(20000, 20000 + num_nodes)]))' >nodes.txt

@@ -6,9 +6,9 @@ echo BUILD_SCM_REVISION $(git rev-parse --verify HEAD)

 git diff-index --quiet HEAD --
 if [[ $? == 0 ]]; then
     status="clean"
 else
     status="modified"
 fi
 echo BUILD_SCM_STATUS $status
 echo BUILD_SCM_REMOTE git@github.com:10gen/mongo.git

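`git diff-index --quiet HEAD --` exits 0 only when the working tree matches HEAD, which is how this workspace-status script decides between `clean` and `modified`. A minimal sketch:

# Report whether the working tree has uncommitted changes.
if git diff-index --quiet HEAD --; then
    echo "BUILD_SCM_STATUS clean"
else
    echo "BUILD_SCM_STATUS modified"
fi
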
@@ -1,10 +1,10 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

 set -o errexit
-cat << EOF > aws_e2e_setup.json
+cat <<EOF >aws_e2e_setup.json
 {
     "iam_auth_ecs_account" : "${iam_auth_ecs_account}",
     "iam_auth_ecs_secret_access_key" : "${iam_auth_ecs_secret_access_key}",

||||||
|
|
@ -1,10 +1,10 @@
|
||||||
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
|
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
|
||||||
. "$DIR/prelude.sh"
|
. "$DIR/prelude.sh"
|
||||||
|
|
||||||
cd src
|
cd src
|
||||||
|
|
||||||
set -o errexit
|
set -o errexit
|
||||||
cat << EOF > $HOME/azure_e2e_config.json
|
cat <<EOF >$HOME/azure_e2e_config.json
|
||||||
{
|
{
|
||||||
"tD548GwE1@outlook.com" : "${oidc_azure_test_user_account_one_secret}",
|
"tD548GwE1@outlook.com" : "${oidc_azure_test_user_account_one_secret}",
|
||||||
"tD548GwE2@outlook.com" : "${oidc_azure_test_user_account_two_secret}",
|
"tD548GwE2@outlook.com" : "${oidc_azure_test_user_account_two_secret}",
|
||||||
|
|
@ -24,12 +24,12 @@ cat << EOF > $HOME/azure_e2e_config.json
|
||||||
"oidc_azure_managed_identity_api_version": "${oidc_azure_managed_identity_api_version}"
|
"oidc_azure_managed_identity_api_version": "${oidc_azure_managed_identity_api_version}"
|
||||||
}
|
}
|
||||||
EOF
|
EOF
|
||||||
cat << EOF > $HOME/oidc_azure_container_key
|
cat <<EOF >$HOME/oidc_azure_container_key
|
||||||
${oidc_azure_container_key}
|
${oidc_azure_container_key}
|
||||||
EOF
|
EOF
|
||||||
|
|
||||||
# EVG project variables do not preserve line breaks so we store them as base64 and decode here
|
# EVG project variables do not preserve line breaks so we store them as base64 and decode here
|
||||||
sed s/[[:space:]]//g $HOME/oidc_azure_container_key | base64 --decode > $HOME/azure_remote_key
|
sed s/[[:space:]]//g $HOME/oidc_azure_container_key | base64 --decode >$HOME/azure_remote_key
|
||||||
|
|
||||||
# Clean up temp file
|
# Clean up temp file
|
||||||
rm -f $HOME/oidc_azure_container_key
|
rm -f $HOME/oidc_azure_container_key
|
||||||
|
|
|
||||||
|
|
@@ -1,11 +1,11 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 set -o errexit

 # Only run this script for the external_auth_oidc_azure task.
 if [ "${task_name}" != "external_auth_oidc_azure" ]; then
 exit 0
 fi

 echo "Cleaning up Azure OIDC test artifacts"

@@ -14,20 +14,20 @@ cd src

 # Clean up the SSH keyfile, if it exists
 if [ -f "${HOME}/oidc_azure_container_key" ]; then
 rm -f $HOME/oidc_azure_container_key
 echo "Cleaned up container key"
 fi

 python src/mongo/db/modules/enterprise/jstests/external_auth_oidc_azure/lib/toggle_ingress.py disable --config_file=$HOME/azure_e2e_config.json --lock_file=/tmp/azure_oidc.lock

 # Clean up the config file, if it exists
 if [ -f "${HOME}/azure_e2e_config.json" ]; then
 rm -f $HOME/azure_e2e_config.json
 echo "Cleaned up azure_e2e_config.json"
 fi

 # Clean up the lock file, if it exists
 if [ -f "/tmp/azure_oidc.lock" ]; then
 rm -f /tmp/azure_oidc.lock
 echo "Cleaned up /tmp/azure_oidc.lock"
 fi

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

@@ -7,7 +7,7 @@ set -o errexit

 # Create the config file, which will contain the GCE project/zone information along with
 # the expected audience that will appear on the VM's ID token.
-cat << EOF > $HOME/gce_vm_config.json
+cat <<EOF >$HOME/gce_vm_config.json
 {
 "audience" : "${oidc_gcp_vm_id_token_audience}",
 "projectID" : "${oidc_gcp_project_id}",

@@ -19,7 +19,7 @@ EOF
 # Create the SSH key file. Note that the SSH key has been base64 encoded and stored into an EVG
 # environment variable, so it is first trimmed of any whitespace via sed and base64 decoded before
 # being output to the file.
-echo ${oidc_gcp_ssh_key} | sed "s/[[:space:]]//g" | base64 --decode > $HOME/gcp_ssh_key
+echo ${oidc_gcp_ssh_key} | sed "s/[[:space:]]//g" | base64 --decode >$HOME/gcp_ssh_key

 # Reduce SSH keyfile privileges so that it is secure enough for OpenSSH.
 chmod 600 $HOME/gcp_ssh_key

@@ -34,7 +34,7 @@ ls -al $HOME/gcp_ssh_key
 # The contents of this file are expected to exist in base64 encoded format in
 # $oidc_gcp_service_account_key, so the same steps are taken as above before dumping it into a
 # newly-created JSON file.
-echo ${oidc_gcp_service_account_key} | sed "s/[[:space:]]//g" | base64 --decode > ${GOOGLE_APPLICATION_CREDENTIALS}
+echo ${oidc_gcp_service_account_key} | sed "s/[[:space:]]//g" | base64 --decode >${GOOGLE_APPLICATION_CREDENTIALS}
 chmod 600 ${GOOGLE_APPLICATION_CREDENTIALS}
 ls -al ${GOOGLE_APPLICATION_CREDENTIALS}

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

@@ -7,27 +7,27 @@ set -o errexit

 # Only run this script for the external_auth_oidc_gcp task.
 if [ "${task_name}" != "external_auth_oidc_gcp" ]; then
 exit 0
 fi

 echo "Cleaning up OIDC GCP test artifacts"

 # Delete the GCP VM specified in gce_vm_info.json if GOOGLE_APPLICATION_CREDENTIALS is set, points
 # to a file, and the GCE config and VM info files exist.
-if [ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ] \
-&& [ -f "${GOOGLE_APPLICATION_CREDENTIALS}" ] \
-&& [ -f "${HOME}/gce_vm_config.json" ] \
-&& [ -f "${HOME}/gce_vm_info.json" ]; then
+if [ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ] &&
+[ -f "${GOOGLE_APPLICATION_CREDENTIALS}" ] &&
+[ -f "${HOME}/gce_vm_config.json" ] &&
+[ -f "${HOME}/gce_vm_info.json" ]; then
 # Install google-cloud-compute so that the script can run.
 $python -m pip install google-cloud-compute
 $python src/mongo/db/modules/enterprise/jstests/external_auth_oidc_gcp/lib/gce_vm_manager.py delete --config_file $HOME/gce_vm_config.json --service_account_key_file ${GOOGLE_APPLICATION_CREDENTIALS} --output_file $HOME/gce_vm_info.json
 fi

 # Clean up the SSH and service account keys if they exist.
 if [ -f "${HOME}/gcp_ssh_key" ]; then
 rm -f $HOME/gcp_ssh_key
 fi

 if [ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ] && [ -f "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
 rm -f ${GOOGLE_APPLICATION_CREDENTIALS}
 fi

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

@@ -6,7 +6,7 @@ cd src
 set -o errexit

 # Should output contents to new file in home directory.
-cat << EOF > $HOME/oidc_e2e_setup.json
+cat <<EOF >$HOME/oidc_e2e_setup.json
 {
 "testserversecurityone@ping-test.com" : "${oidc_ping_test_user_account_one_secret}",
 "testserversecuritytwo@ping-test.com" : "${oidc_ping_test_user_account_two_secret}",

@@ -1,15 +1,15 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 # Only run this script for the external_auth_oidc_azure task.
 if [ "${task_name}" != "external_auth_oidc" ]; then
 exit 0
 fi

 echo "Cleaning up OIDC Okta test artifacts"

 #Clean up the config file, if it exists
 if [ -f "${HOME}/oidc_e2e_setup.json" ]; then
 rm -f $HOME/oidc_e2e_setup.json
 echo "Cleaned up oidc_e2e_setup.json"
 fi

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

@@ -6,88 +6,88 @@ cd src
 set -eou pipefail

 # Only run on unit test tasks so we don't target mongod binaries from cores.
-if [ "${task_name}" != "run_dbtest" ] \
-&& [[ ${task_name} != integration_tests* ]] \
-&& [[ "${task_name}" != unit_test_group*_no_sandbox ]]; then
+if [ "${task_name}" != "run_dbtest" ] &&
+[[ ${task_name} != integration_tests* ]] &&
+[[ "${task_name}" != unit_test_group*_no_sandbox ]]; then
 echo "Not gathering failed unittests binaries as this is not a unittest task: ${task_name}"
 exit 0
 fi

 unittest_bin_dir=dist-unittests/bin
 mkdir -p $unittest_bin_dir || true

 # Find all core files
-core_files=$(/usr/bin/find -H . \( -name "dump_*.core" -o -name "*.mdmp" \) 2> /dev/null)
+core_files=$(/usr/bin/find -H . \( -name "dump_*.core" -o -name "*.mdmp" \) 2>/dev/null)
 while read -r core_file; do
 # A core file name does not always have the executable name that generated it.
 # See http://stackoverflow.com/questions/34801353/core-dump-filename-gets-thread-name-instead-of-executable-name-with-core-pattern
 # On platforms with GDB, we get the binary name from core file
 gdb=/opt/mongodbtoolchain/v5/bin/gdb
 if [ -f $gdb ]; then
-binary_file=$($gdb -batch --quiet -ex "core $core_file" 2> /dev/null | grep "Core was generated" | cut -f2 -d "\`" | cut -f1 -d "'" | cut -f1 -d " ")
+binary_file=$($gdb -batch --quiet -ex "core $core_file" 2>/dev/null | grep "Core was generated" | cut -f2 -d "\`" | cut -f1 -d "'" | cut -f1 -d " ")
 binary_file_locations=$binary_file
 else
 echo "Checking core file '$core_file'"
 # Find the base file name from the core file name, note it may be truncated.
 # Remove leading 'dump_' and trailing '.<pid>.core' or '.<pid or time>.mdmp'
 binary_file=$(echo "$core_file" | sed "s/.*\///;s/dump_//;s/\..*\.core//;s/\..*\.mdmp//")
 # Locate the binary file. Since the base file name might be truncated, the find
 # may return more than 1 file.
 if [ "$binary_file" != "" ]; then
-binary_file_locations=$(/usr/bin/find -H . -executable -name "$binary_file*${exe}" 2> /dev/null)
+binary_file_locations=$(/usr/bin/find -H . -executable -name "$binary_file*${exe}" 2>/dev/null)
 fi
 fi

 if [ -z "$binary_file_locations" ]; then
 echo "Cannot locate the unittest binary file ($binary_file) that generated the core file $core_file"
 else
 echo "Files to save: $binary_file_locations"
 fi

 for binary_file_location in $binary_file_locations; do
 new_binary_file=$unittest_bin_dir/$(echo "$binary_file_location" | sed "s/.*\///")
 if [ -f "$binary_file_location" ] && [ ! -f "$new_binary_file" ]; then
 echo "Direct Copy $binary_file_location to $new_binary_file"
 cp "$binary_file_location" "$new_binary_file"
 fi

 # On Windows if a .pdb symbol file exists, include it in the archive.
 if [[ "$binary_file_location" == *".exe" ]]; then
 pdb_file=$(echo "$binary_file_location" | sed "s/\.exe/.pdb/")
 if [ -f "$pdb_file" ]; then
 new_pdb_file=$unittest_bin_dir/$(echo "$pdb_file" | sed "s/.*\///")
 echo "PDB Copy $pdb_file to $new_pdb_file"
 cp "$pdb_file" "$new_pdb_file"
 fi
 fi

 # On binutils platforms, if a .debug symbol file exists, include it
 # in the archive
 debug_file=$binary_file_location.debug
 if [ -f "$debug_file" ]; then
 echo "debug Copy $debug_file to $unittest_bin_dir"
 cp "$debug_file" "$unittest_bin_dir"
 fi

 # Include any dwp symbol files to go with the .debug files
 dwp_file=$binary_file_location.dwp
 if [ -f "$dwp_file" ]; then
 echo "dwp Copy $dwp_file to $unittest_bin_dir"
 cp "$dwp_file" "$unittest_bin_dir"
 fi

 # On macOS, these are called .dSYM and they are directories
 dsym_dir=$binary_file_location.dSYM
 if [ -d "$dsym_dir" ]; then
 echo "dsym Copy $dsym_dir to $unittest_bin_dir"
 cp -r "$dsym_dir" "$unittest_bin_dir"
 fi

 done
-done <<< "${core_files}"
+done <<<"${core_files}"

 # Copy debug symbols for dynamic builds
 lib_dir=bazel-bin/install/lib
 if [ -d "$lib_dir" ] && [ -n "$core_files" ]; then
 cp -r "$lib_dir" dist-unittests
 fi

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

@@ -18,7 +18,7 @@ mv all_feature_flags.txt patch_all_feature_flags.txt
 # get the list of feature flags from the base commit
 git --no-pager diff "$(git merge-base origin/${branch_name} HEAD)" --output="$diff_file_name" --binary
 if [ -s "$diff_file_name" ]; then
 git apply -R "$diff_file_name"
 fi

 $python buildscripts/idl/gen_all_feature_flag_list.py turned-on-by-default

@@ -8,17 +8,17 @@ HAS_FULL_DISK=false
 # 34% /dev/nvme1n1
 FILESYSTEMS=$(df -H | grep -vE '^Filesystem|tmpfs|cdrom' | awk '{ print $5 " " $1 }')
 while read -r output; do
 usep=$(echo "$output" | awk '{ print $1}' | cut -d'%' -f1)
 partition=$(echo "$output" | awk '{ print $2 }')
 if [ $usep -ge $FULL_DISK_THRESHOLD ]; then
 echo "Running out of space \"$partition ($usep%)\" on $(hostname) as on $(date)"
 HAS_FULL_DISK=true
 fi
-done <<< "$FILESYSTEMS"
+done <<<"$FILESYSTEMS"

 if $HAS_FULL_DISK; then
 # print all files that are above one megabyte sorted
-du -cha / 2> /dev/null | grep -E "^[0-9]+(\.[0-9]+)?[G|M|T]" | sort -h
+du -cha / 2>/dev/null | grep -E "^[0-9]+(\.[0-9]+)?[G|M|T]" | sort -h
 else
 echo "No full partitions found, skipping"
 fi

@@ -5,21 +5,21 @@ set -o verbose

 tag=""
 if [ -n "$bv_future_git_tag" ]; then
 tag="$bv_future_git_tag"
 fi
 if [ -n "$future_git_tag" ]; then
 tag="$future_git_tag"
 fi

 echo "TAG: $tag"

 if [ -n "$tag" ]; then
 if [ "Windows_NT" = "$OS" ]; then
 # On Windows, we don't seem to have a local git identity, so we populate the config with this
 # dummy email and name. Without a configured email/name, the 'git tag' command will fail.
 git config user.email "no-reply@evergreen.@mongodb.com"
 git config user.name "Evergreen Agent"
 fi

 git tag -a "$tag" -m "$tag"
 fi

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 set +o errexit

@@ -6,8 +6,8 @@ set +o errexit
 cd src

 if [ -z "${BOLT:-}" ]; then
 echo "Not applying BOLT" >&2
 exit 0
 fi

 tar -xvf bazel-bin/dist-test-stripped.tgz

@@ -1,9 +1,9 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

-cat > mci.buildlogger << END_OF_CREDS
+cat >mci.buildlogger <<END_OF_CREDS
 slavename='${slave}'
 passwd='${passwd}'
 builder='${build_variant}_${project}'

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 set +o errexit

@@ -6,8 +6,8 @@ set +o errexit
 cd src

 if [ -z "${PGO_PROFILE_URL:-}" ]; then
 echo "No pgo profile url specified" >&2
 exit 0
 fi

 wget $PGO_PROFILE_URL

@@ -1,10 +1,10 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

 # Create the Evergreen API credentials
-cat > .evergreen.yml << END_OF_CREDS
+cat >.evergreen.yml <<END_OF_CREDS
 api_server_host: https://evergreen.mongodb.com/api
 api_key: "${evergreen_api_key}"
 user: "${evergreen_api_user}"

@@ -1,12 +1,12 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 if [ -z "${files}" ]; then
 exit 0
 fi
 for file in ${files}; do
 if [ -f "$file" ]; then
 echo "Removing file $file"
 rm -f $file
 fi
 done

@@ -1,9 +1,9 @@
 cd src

 if [ -d /data/thrift ]; then
 rm -rf /data/thrift
 fi

 if [ -d /data/charybdefs ]; then
 rm -rf /data/charybdefs
 fi

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

@@ -8,12 +8,12 @@ set -o errexit

 # For patch builds gather the modified patch files.
 if [ "${is_patch}" = "true" ]; then
 # Get list of patched files
-git diff HEAD --name-only >> patch_files.txt
+git diff HEAD --name-only >>patch_files.txt
 if [ -d src/mongo/db/modules/enterprise ]; then
 pushd src/mongo/db/modules/enterprise
 # Update the patch_files.txt in the mongo repo.
-git diff HEAD --name-only >> ~1/patch_files.txt
+git diff HEAD --name-only >>~1/patch_files.txt
 popd
 fi
 fi

@@ -1,14 +1,14 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

 set -o errexit

-cat << EOF > notary_env.sh
+cat <<EOF >notary_env.sh
 export NOTARY_TOKEN=${signing_auth_token_70}
 export BARQUE_USERNAME=${barque_user}
 export BARQUE_API_KEY=${barque_api_key}
 EOF

-echo "${signing_auth_token_70}" > signing_auth_token
+echo "${signing_auth_token_70}" >signing_auth_token

@@ -1,46 +1,46 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 proc_list="(java|lein|mongo|python|_test$|_test\.exe$)"
 if [ "Windows_NT" = "$OS" ]; then
 get_pids() {
-proc_pids=$(tasklist /fo:csv \
-| awk -F'","' '{x=$1; gsub("\"","",x); print $2, x}' \
-| grep -iE $1 \
-| cut -f1 -d ' ')
+proc_pids=$(tasklist /fo:csv |
+awk -F'","' '{x=$1; gsub("\"","",x); print $2, x}' |
+grep -iE $1 |
+cut -f1 -d ' ')
 }
 get_process_info() {
 proc_name=""
-proc_info=$(wmic process where "ProcessId=\"$1\"" get "Name,ProcessId,ThreadCount" /format:csv 2> /dev/null | grep $1)
+proc_info=$(wmic process where "ProcessId=\"$1\"" get "Name,ProcessId,ThreadCount" /format:csv 2>/dev/null | grep $1)
 if [ ! -z $proc_info ]; then
 proc_name=$(echo $proc_info | cut -f2 -d ',')
 proc_threads=$(echo $proc_info | cut -f4 -d ',')
 fi
 }
 else
 get_pids() { proc_pids=$(pgrep $1); }
 get_process_info() {
 proc_name=$(ps -p $1 -o comm=)
 # /proc is available on Linux platforms
 if [ -f /proc/$1/status ]; then
 set_sudo
 proc_threads=$($sudo grep Threads /proc/$1/status | sed "s/\s//g" | cut -f2 -d ":")
 else
 proc_threads=$(ps -AM $1 | grep -vc PID)
 fi
 }
 fi
 while [ 1 ]; do
 get_pids $proc_list
 if [ ! -z "$proc_pids" ]; then
 printf "Running process/thread counter\n"
 printf "PROCESS\tPID\tTHREADS\n"
 fi
 for pid in $proc_pids; do
 get_process_info $pid
 if [ ! -z "$proc_name" ]; then
 printf "$proc_name\t$pid\t$proc_threads\n"
 fi
 done
 sleep 60
 done

@@ -1,16 +1,16 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 # Since the macros 'private_key_remote' and 'private_key_file' are not always defined
 # we default to /dev/null to avoid syntax errors of an empty expansion.
 if [ -n "$private_key_remote_bash_var" ]; then
 private_key_remote="$private_key_remote_bash_var"
 fi
 if [ ! -z "${private_key_remote}" ] && [ ! -z "${private_key_file}" ]; then
 mkdir -p ~/.ssh
 private_key_file=$(eval echo "$private_key_file")
-echo -n "${private_key_remote}" > ${private_key_file}
+echo -n "${private_key_remote}" >${private_key_file}
 chmod 0600 ${private_key_file}
 fi

 # Ensure a clean aws configuration state

@@ -24,23 +24,23 @@ aws_profile="${aws_profile_remote}"
 # The profile in the config file is specified as [profile <profile>], except
 # for [default], see http://boto3.readthedocs.io/en/latest/guide/configuration.html
 if [ $aws_profile = "default" ]; then
 aws_profile_config="[default]"
 else
 aws_profile_config="[profile $aws_profile]"
 fi
-cat << EOF >> ~/.aws/config
+cat <<EOF >>~/.aws/config
 $aws_profile_config
 region = us-east-1
 EOF

 # The profile in the credentials file is specified as [<profile>].
-cat << EOF >> ~/.aws/credentials
+cat <<EOF >>~/.aws/credentials
 [$aws_profile]
 aws_access_key_id = ${aws_key_remote}
 aws_secret_access_key = ${aws_secret_remote}
 EOF

-cat << EOF > ~/.boto
+cat <<EOF >~/.boto
 [Boto]
 https_validate_certificates = False
 EOF

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

@@ -8,9 +8,9 @@ set -o errexit

 activate_venv
 $python buildscripts/evergreen_resmoke_job_count.py \
 --taskName ${task_name} \
 --buildVariant ${build_variant} \
 --distro ${distro_id} \
 --jobFactor ${resmoke_jobs_factor} \
 --jobsMax ${resmoke_jobs_max} \
 --outFile resmoke_jobs_expansion.yml

@@ -3,20 +3,20 @@ set -o verbose

 # On Windows we can use typeperf.exe to dump performance counters.
 if [ "Windows_NT" = "$OS" ]; then
-typeperf -qx PhysicalDisk | grep Disk | grep -v _Total > disk_counters.txt
+typeperf -qx PhysicalDisk | grep Disk | grep -v _Total >disk_counters.txt
 typeperf -cf disk_counters.txt -si 5 -o mongo-diskstats
 # Linux: iostat -t option for timestamp.
-elif iostat -tdmx > /dev/null 2>&1; then
-iostat -tdmx 5 > mongo-diskstats
+elif iostat -tdmx >/dev/null 2>&1; then
+iostat -tdmx 5 >mongo-diskstats
 # OSX: Simulate the iostat timestamp.
-elif iostat -d > /dev/null 2>&1; then
-iostat -d -w 5 | while IFS= read -r line; do printf '%s %s\n' "$(date +'%m/%d/%Y %H:%M:%S')" "$line" >> mongo-diskstats; done
+elif iostat -d >/dev/null 2>&1; then
+iostat -d -w 5 | while IFS= read -r line; do printf '%s %s\n' "$(date +'%m/%d/%Y %H:%M:%S')" "$line" >>mongo-diskstats; done
 # Check if vmstat -t is available.
-elif vmstat -td > /dev/null 2>&1; then
-vmstat -td 5 > mongo-diskstats
+elif vmstat -td >/dev/null 2>&1; then
+vmstat -td 5 >mongo-diskstats
 # Check if vmstat -T d is available.
-elif vmstat -T d > /dev/null 2>&1; then
-vmstat -T d 5 > mongo-diskstats
+elif vmstat -T d >/dev/null 2>&1; then
+vmstat -T d 5 >mongo-diskstats
 else
 printf "Cannot collect mongo-diskstats on this platform\n"
 fi

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

@@ -9,46 +9,46 @@ set -o errexit
 # Set the suite name to be the task name by default; unless overridden with the `suite`/`suite_config` expansion.
 suite_name=${task_name}
 if [[ -n ${suite_config} ]]; then
 suite_name=${suite_config}
 elif [[ -n ${suite} ]]; then
 suite_name=${suite}
 fi

 timeout_factor=""
 if [[ -n "${exec_timeout_factor}" ]]; then
 timeout_factor="--exec-timeout-factor ${exec_timeout_factor}"
 fi

 build_variant_for_timeout=${build_variant}
 if [[ -n "${burn_in_bypass}" ]]; then
 # burn_in_tags may generate new build variants, if we are running on one of those build variants
 # we should use the build variant it is based on for determining the timeout. This is stored in
 # the `burn_in_bypass` expansion.
 build_variant_for_timeout=${burn_in_bypass}
 fi

 if [[ -n "${alias}" ]]; then
 evg_alias=${alias}
 else
 evg_alias="evg-alias-absent"
 fi

 resmoke_test_flags=""
 if [[ -n "${test_flags}" ]]; then
 resmoke_test_flags="--test-flags='${test_flags}'"
 fi

 activate_venv
 PATH=$PATH:$HOME:/ eval $python buildscripts/evergreen_task_timeout.py \
 $timeout_factor \
 $resmoke_test_flags \
 --install-dir "${install_dir}" \
 --task-name ${task_name} \
 --suite-name ${suite_name} \
 --project ${project} \
 --build-variant $build_variant_for_timeout \
 --evg-alias $evg_alias \
 --timeout ${timeout_secs} \
 --exec-timeout ${exec_timeout_secs} \
 --evg-project-config ${evergreen_config_file_path} \
 --out-file task_timeout_expansions.yml

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 set -o errexit

@@ -1,11 +1,11 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 if [ "$(uname)" != "Linux" ] && [ "$(uname)" != "Darwin" ]; then
 echo "===== Skipping ulimit dump, OS is: $(uname)."
 else
 echo "===== Collecting soft limits:"
 ulimit -Sa
 echo "===== Collecting hard limits:"
 ulimit -Ha
 fi

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude_python.sh"

 set -o errexit

@@ -11,36 +11,36 @@ popd

 ARCH=$(uname -m)
 if [[ "$ARCH" == "arm64" || "$ARCH" == "aarch64" ]]; then
 ARCH="arm64"
 elif [[ "$ARCH" == "ppc64le" || "$ARCH" == "ppc64" || "$ARCH" == "ppc" || "$ARCH" == "ppcle" ]]; then
 ARCH="ppc64le"
 elif [[ "$ARCH" == "s390x" || "$ARCH" == "s390" ]]; then
 ARCH="s390x"
 else
 ARCH="x86_64"
 fi

 # TODO SERVER-105520
 # try using downloaded venv once more reliability has been built into venv upload/download
 if [[ "$ARCH" == "ppc64le" ]]; then
 rm -rf $venv_dir
 source "$DIR/venv_setup.sh"
 else
 # Update virtual env directory in activate script
 if [ "Windows_NT" = "$OS" ]; then
 sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"$venv_dir\":" "$venv_dir/Scripts/activate"
 else
 sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"$venv_dir\":" "$venv_dir/bin/activate"
 fi

 # Add back python symlinks on linux platforms
 if [ "Windows_NT" = "$OS" ]; then
 exit 0
 fi

 cd "$venv_dir/bin"

 rm python python3
 ln -s "$python_loc" python3
 ln -s python3 python
 fi

@@ -1,7 +1,7 @@
 # exit immediately if virtualenv is not found
 set -o errexit

-evergreen_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)/.."
+evergreen_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/.."
 . "$evergreen_dir/prelude_workdir.sh"
 . "$evergreen_dir/prelude_python.sh"

@@ -10,7 +10,7 @@ echo "python_loc set to $python_loc"

 venv_dir="${workdir}/venv"
 if [ -d "$venv_dir" ]; then
 exit 0
 fi

 # We create a venv for poetry

@@ -19,9 +19,9 @@ fi
 # See issue SERVER-80781
 POETRY_VENV="${workdir}/poetry_venv"
 if [ "Windows_NT" = "$OS" ]; then
 POETRY_VENV_PYTHON="$POETRY_VENV/Scripts/python.exe"
 else
 POETRY_VENV_PYTHON="$POETRY_VENV/bin/python3"
 fi
 "$python_loc" -m venv "$POETRY_VENV"

@@ -36,20 +36,20 @@ export POETRY_CACHE_DIR="$poetry_dir/cache"
 export PIP_CACHE_DIR="$poetry_dir/pip_cache"
 pushd src
 for i in {1..5}; do
 $POETRY_VENV_PYTHON -m pip install -r poetry_requirements.txt && RET=0 && break || RET=$? && sleep 1
 echo "Python failed to install poetry, retrying..."
 done
 popd

 if [ $RET -ne 0 ]; then
 echo "Pip install error for poetry"
 exit $RET
 fi

 "$python_loc" -m venv "$venv_dir"

 # Adding README file for using this venv locally
-cat << EOF >> venv_readme.txt
+cat <<EOF >>venv_readme.txt
 This is an archive of the Python venv generated by this Evergreen build.
 You can use it locally to avoid needing to manually set up the Python environment.

@@ -60,11 +60,11 @@ echo "Updating virtual env directory in activate script"
 pushd venv; venv_dir=\$(pwd); popd
 EOF
 if [ "Windows_NT" = "$OS" ]; then
-cat << EOF >> venv_readme.txt
+cat <<EOF >>venv_readme.txt
 sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"\$venv_dir\":" "\$venv_dir/Scripts/activate"
 EOF
 else
-cat << EOF >> venv_readme.txt
+cat <<EOF >>venv_readme.txt
 sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"\$venv_dir\":" "\$venv_dir/bin/activate"

 echo "Adding back python symlinks"

@@ -85,7 +85,7 @@ fi # End of README file
 # cygwin bash does not like. dos2unix it
 # (See https://bugs.python.org/issue32451)
 if [ "Windows_NT" = "$OS" ]; then
 dos2unix "${workdir}/venv/Scripts/activate"
 fi

 export VIRTUAL_ENV_DISABLE_PROMPT=yes

@@ -101,13 +101,13 @@ echo "Upgrading pip to 21.0.1"
 # We have seen weird network errors that can sometimes mess up the pip install
 # By retrying we would like to only see errors that happen consistently
 for i in {1..5}; do
 python -m pip --disable-pip-version-check install "pip==21.0.1" "wheel==0.37.0" && RET=0 && break || RET=$? && sleep 1
 echo "Python failed to install pip and wheel, retrying..."
 done

 if [ $RET -ne 0 ]; then
 echo "Pip install error for wheel and pip version"
 exit $RET
 fi

 cd src

@@ -117,20 +117,20 @@ cd src
 # By retrying we would like to only see errors that happen consistently
 count=0
 for i in {1..5}; do
 yes | $POETRY_VENV_PYTHON -m poetry cache clear . --all
 rm -rf $poetry_dir/*
 $POETRY_VENV_PYTHON -m poetry install --no-root --sync && RET=0 && break || RET=$? && sleep 1

 echo "Python failed install required deps with poetry, retrying..."
 sleep $((count * count * 20))
 count=$((count + 1))
 done

 if [ $RET -ne 0 ]; then
 echo "Poetry install error for full venv"
 exit $RET
 fi

 cd ..

-python -m pip freeze > pip-requirements.txt
+python -m pip freeze >pip-requirements.txt

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

@@ -10,13 +10,13 @@ MONGO_VERSION=$(git describe --abbrev=7)

 # If the project is sys-perf (or related), add the string -sys-perf to the version
 if [[ "${project}" == sys-perf* ]]; then
 MONGO_VERSION="$MONGO_VERSION-sys-perf"
 fi

 # If this is a patch build, we add the patch version id to the version string so we know
 # this build was a patch, and which evergreen task it came from
 if [ "${is_patch}" = "true" ]; then
 MONGO_VERSION="$MONGO_VERSION-patch-${version_id}"
 fi
 echo "MONGO_VERSION = ${MONGO_VERSION}"

@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/../prelude.sh"

 cd src

@ -6,10 +6,10 @@ cd src
|
||||||
set -o errexit
|
set -o errexit
|
||||||
set -o verbose
|
set -o verbose
|
||||||
if [ "${use_wt_develop}" = "true" ]; then
|
if [ "${use_wt_develop}" = "true" ]; then
|
||||||
echo "Using the wtdevelop module instead..."
|
echo "Using the wtdevelop module instead..."
|
||||||
cd src/third_party
|
cd src/third_party
|
||||||
for wtdir in dist examples ext lang src test tools; do
|
for wtdir in dist examples ext lang src test tools; do
|
||||||
rm -rf wiredtiger/$wtdir
|
rm -rf wiredtiger/$wtdir
|
||||||
mv wtdevelop/$wtdir wiredtiger/
|
mv wtdevelop/$wtdir wiredtiger/
|
||||||
done
|
done
|
||||||
fi
|
fi
|
||||||
|
|
|
||||||
|
|
@@ -1,10 +1,10 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

-echo "GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_80}" >> "signing-envfile"
+echo "GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_80}" >>"signing-envfile"
-echo "GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_80}" >> "signing-envfile"
+echo "GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_80}" >>"signing-envfile"

 set -o errexit
 set -o verbose

@@ -20,14 +20,14 @@ shasum -a 256 $crypt_file_name | tee $crypt_file_name.sha256
 md5sum $crypt_file_name | tee $crypt_file_name.md5

 # signing crypt linux artifact with gpg
-cat << EOF >> gpg_signing_commands.sh
+cat <<EOF >>gpg_signing_commands.sh
 gpgloader # loading gpg keys.
 gpg --yes -v --armor -o $crypt_file_name.sig --detach-sign $crypt_file_name
 EOF

 podman run \
     --env-file=signing-envfile \
     --rm \
     -v $(pwd):$(pwd) -w $(pwd) \
     ${garasign_gpg_image_ecr} \
     /bin/bash -c "$(cat ./gpg_signing_commands.sh)"
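The heredoc change above is the same spacing rule applied to `<<` and to the `>>` that follows it. shfmt normalizes the redirect line itself but passes heredoc bodies through byte-for-byte, which is why the gpgloader and gpg lines show no change. A runnable sketch (commands.sh is a placeholder name):

    # before shfmt:
    #   cat << EOF >> commands.sh
    # after shfmt (the body through EOF is never reformatted):
    cat <<EOF >>commands.sh
    gpgloader # body lines pass through byte-for-byte
    EOF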
@@ -1,17 +1,17 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src

-echo "GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_80}" >> "signing-envfile"
+echo "GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_80}" >>"signing-envfile"
-echo "GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_80}" >> "signing-envfile"
+echo "GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_80}" >>"signing-envfile"

 set -o errexit
 set -o verbose

 long_ext=${ext}
 if [ "$long_ext" == "tgz" ]; then
     long_ext="tar.gz"
 fi

 mv mongo-binaries.tgz mongodb-${push_name}-${push_arch}-${suffix}.${ext}
@@ -22,13 +22,13 @@ mv distsrc.${ext} mongodb-src-${src_suffix}.${long_ext} || true

 # generating checksums
 function gen_checksums() {
     if [ -e $1 ]; then
         shasum -a 1 $1 | tee $1.sha1
         shasum -a 256 $1 | tee $1.sha256
         md5sum $1 | tee $1.md5
     else
         echo "$1 does not exist. Skipping checksum generation"
     fi
 }

 gen_checksums mongodb-$push_name-$push_arch-$suffix.$ext
@@ -37,7 +37,7 @@ gen_checksums mongodb-src-$src_suffix.$long_ext
 gen_checksums mongodb-cryptd-$push_name-$push_arch-$suffix.$ext

 # signing linux artifacts with gpg
-cat << 'EOF' > gpg_signing_commands.sh
+cat <<'EOF' >gpg_signing_commands.sh
 gpgloader # loading gpg keys.
 function sign(){
 if [ -e $1 ]
@@ -50,7 +50,7 @@ function sign(){

 EOF

-cat << EOF >> gpg_signing_commands.sh
+cat <<EOF >>gpg_signing_commands.sh
 sign mongodb-$push_name-$push_arch-$suffix.$ext
 sign mongodb-$push_name-$push_arch-debugsymbols-$suffix.$ext
 sign mongodb-src-$src_suffix.$long_ext
@@ -58,8 +58,8 @@ sign mongodb-cryptd-$push_name-$push_arch-$suffix.$ext
 EOF

 podman run \
     --env-file=signing-envfile \
     --rm \
     -v $(pwd):$(pwd) -w $(pwd) \
     ${garasign_gpg_image_ecr} \
     /bin/bash -c "$(cat ./gpg_signing_commands.sh)"
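This script deliberately mixes the two heredoc forms: a quoted delimiter (`<<'EOF'`) writes the sign() definition with `$1` intact for later evaluation inside the container, while the unquoted form (`<<EOF`) expands `$push_name` and friends at generation time. A reduced sketch of the distinction (file and variable names are illustrative):

    cat <<'EOF' >cmds.sh    # quoted delimiter: $1 survives literally
    sign() { gpg --armor --detach-sign "$1"; }
    EOF
    cat <<EOF >>cmds.sh     # unquoted delimiter: variables expand now
    sign "mongodb-${push_name}.tgz"
    EOF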
@@ -1,14 +1,14 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 if [ "${push_name}" != "windows" ]; then
     exit 0
 fi

 cd src

-echo "GRS_CONFIG_USER1_USERNAME=${garasign_jsign_username}" >> "signing-envfile"
+echo "GRS_CONFIG_USER1_USERNAME=${garasign_jsign_username}" >>"signing-envfile"
-echo "GRS_CONFIG_USER1_PASSWORD=${garasign_jsign_password}" >> "signing-envfile"
+echo "GRS_CONFIG_USER1_PASSWORD=${garasign_jsign_password}" >>"signing-envfile"

 set -o errexit
 set -o verbose

@@ -17,8 +17,8 @@ msi_filename=mongodb-${push_name}-${push_arch}-${suffix}.msi
 cp bazel-bin/src/mongo/installer/msi/mongodb-win32-x86_64-windows-${version}.msi $msi_filename

 if [ "${is_patch}" != "true" ]; then
     # signing windows artifacts with jsign
-    cat << 'EOF' > jsign_signing_commands.sh
+    cat <<'EOF' >jsign_signing_commands.sh
 function sign(){
 if [ -e $1 ]
 then
@@ -28,25 +28,25 @@ function sign(){
 fi
 }
 EOF
-    cat << EOF >> jsign_signing_commands.sh
+    cat <<EOF >>jsign_signing_commands.sh
 sign $msi_filename
 EOF

     podman run \
         --env-file=signing-envfile \
         --rm \
         -v $(pwd):$(pwd) -w $(pwd) \
         ${garasign_jsign_image_ecr} \
         /bin/bash -c "$(cat ./jsign_signing_commands.sh)"
 else
     echo "Not signing windows msi due to it being a patch build"
 fi

 # generating checksums
 if [ -e $msi_filename ]; then
     shasum -a 1 $msi_filename | tee $msi_filename.sha1
     shasum -a 256 $msi_filename | tee $msi_filename.sha256
     md5sum $msi_filename | tee $msi_filename.md5
 else
     echo "$msi_filename does not exist. Skipping checksum generation"
 fi
@@ -1,10 +1,10 @@
 cd src
 # Find all core files and move to src
-core_files=$(/usr/bin/find -H .. \( -name "*.core" -o -name "*.mdmp" \) 2> /dev/null)
+core_files=$(/usr/bin/find -H .. \( -name "*.core" -o -name "*.mdmp" \) 2>/dev/null)
 for core_file in $core_files; do
     base_name=$(echo $core_file | sed "s/.*\///")
     # Move file if it does not already exist
     if [ ! -f $base_name ]; then
         mv $core_file .
     fi
 done
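The loop above relies on word-splitting the unquoted `$core_files`, which breaks on paths containing spaces; workable here because core dump names are machine-generated. A more defensive variant of the same logic, an illustration only and not what the script does, streams the results and quotes every expansion:

    /usr/bin/find -H .. \( -name "*.core" -o -name "*.mdmp" \) 2>/dev/null |
        while IFS= read -r core_file; do
            base_name="$(basename "$core_file")"   # replaces the echo|sed pipeline
            # Move file if it does not already exist
            [ -f "$base_name" ] || mv "$core_file" .
        done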
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -8,79 +8,79 @@ set -o verbose
 activate_venv

 if [ -z "${build_patch_id}" ] || [ -z "${reuse_compile_from}" ] || [ "${is_patch:-false}" = "false" ]; then
     # Create target folder
     mkdir -p mongodb/

     # Generate feature flag list
     $python buildscripts/idl/gen_all_feature_flag_list.py turned-off-by-default
     mkdir -p mongodb/feature_flags
     cp ./all_feature_flags.txt mongodb/feature_flags

     # Generate server params list
     $python buildscripts/idl/gen_all_server_params_list.py
     mkdir -p mongodb/server_params
     cp ./all_server_params.txt mongodb/server_params

     # Download mongo tools
     arch=$(uname -m)
     if [ -f /etc/os-release ]; then
         . /etc/os-release
         if [ "$ID" == "amzn" ]; then
             case $arch in
             "x86_64" | "aarch64")
                 case $VERSION_ID in
                 "2" | "2023")
                     binary_url="https://fastdl.mongodb.org/tools/db/mongodb-database-tools-amazon${VERSION_ID}-${arch}-100.9.4.tgz"
                     ;;
                 *)
                     echo "Unsupported Amazon Linux version: $VERSION_ID"
                     exit 1
                     ;;
                 esac
                 ;;
             *)
                 echo "Unsupported architecture: $arch"
                 exit 1
                 ;;
             esac
         else
             echo "Unsupported Linux distribution: $ID"
             exit 1
         fi
     else
         echo "Unable to determine Linux distribution"
         exit 1
     fi

     wget "$binary_url" -O mongo-tools.tar.gz
     tar -xzvf mongo-tools.tar.gz -C mongodb/ --strip-components=1 "mong*/bin"

     # generate atlas info
     uarch=$(uname -p)
     os=$(uname -r)
     json="{ \"version\": \"${version}\", \"gitVersion\": \"${revision}\", \"uarch\": \"$uarch\", \"os\": \"$os\" }"
-    echo $json | jq '.' > mongodb/atlas_info.json
+    echo $json | jq '.' >mongodb/atlas_info.json

     # Add custom run_validate_collections.js wrapper
     mv jstests/hooks/run_validate_collections.js jstests/hooks/run_validate_collections.actual.js
-    cat << EOF > jstests/hooks/run_validate_collections.js
+    cat <<EOF >jstests/hooks/run_validate_collections.js
 print("NOTE: run_validate_collections.js will skip the oplog!");
 TestData = { skipValidationNamespaces: ['local.oplog.rs'] };
 await import("jstests/hooks/run_validate_collections.actual.js");
 EOF

     # Copy the js tests
     mkdir -p mongodb/jstests/hooks
     cp -a jstests/* mongodb/jstests

     # Copy the build scripts
     mkdir -p mongodb/buildscripts
     cp -a buildscripts/* mongodb/buildscripts

     # Create the final archive
     tar czf supplementary-data.tgz mongodb
 else
     # Evergreen does not handle nested escaped expansions well
     version_to_reuse_from=$(if [ -n "${build_patch_id}" ]; then echo "${build_patch_id}"; else echo "${reuse_compile_from}"; fi)
     curl -o supplementary-data.tgz https://s3.amazonaws.com/mciuploads/"${project}"/"${compile_variant}"/"${version_to_reuse_from}"/dsi/supplementary-data.tgz
 fi
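The nested case above is the usual os-release probe: source /etc/os-release to pick up ID and VERSION_ID, then dispatch on distribution and architecture, failing loudly on anything unexpected. Reduced to its skeleton (the echo targets are placeholders):

    if [ -f /etc/os-release ]; then
        . /etc/os-release    # defines ID, VERSION_ID, etc.
        case "$ID-$(uname -m)" in
        amzn-x86_64 | amzn-aarch64) echo "supported" ;;
        *)
            echo "unsupported: $ID on $(uname -m)" >&2
            exit 1
            ;;
        esac
    fi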
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -7,5 +7,5 @@ set -o errexit

 activate_venv
 $python buildscripts/evergreen_activate_gen_tasks.py \
     --expansion-file ../expansions.yml \
     --verbose
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -8,19 +8,19 @@ set -o verbose

 build_patch_id="${build_patch_id:-${reuse_compile_from}}"
 if [ -n "${build_patch_id}" ]; then
     exit 0
 fi

 is_san_variant_arg=""
 if [[ -n "${san_options}" ]]; then
     is_san_variant_arg="--is-san-variant"
 fi

 activate_venv

 $python buildscripts/debugsymb_mapper.py \
     --version "${version_id}" \
     --client-id "${symbolizer_client_id}" \
     --client-secret "${symbolizer_client_secret}" \
     --variant "${build_variant}" \
     $is_san_variant_arg
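`$is_san_variant_arg` is left unquoted on purpose: empty, it expands to zero arguments; set, to exactly one flag. A bash array expresses the same intent without relying on word-splitting; a sketch of that alternative, not what the script does:

    extra_args=()
    if [[ -n "${san_options}" ]]; then
        extra_args+=(--is-san-variant)
    fi
    # "${extra_args[@]}" expands to nothing at all when the array is empty
    $python buildscripts/debugsymb_mapper.py --version "${version_id}" "${extra_args[@]}"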
@@ -1,10 +1,10 @@
 set -o errexit

 curl --fail-with-body \
     --header "Api-User: ${EVERGREEN_API_USER}" \
     --header "Api-Key: ${EVERGREEN_API_KEY}" \
     -L https://evergreen.mongodb.com/rest/v2/tasks/${PROMOTE_TASK_ID} \
     --output ./task_data.json

 echo ".................."
 echo "task data"
@@ -20,7 +20,7 @@ promote_revision=$(cat task_data.json | jq -r ".revision")

 artifact_address="https://internal-downloads.mongodb.com/server-custom-builds/${promote_project_id}/${promote_version_id}/${promote_build_variant}/mongo-${promote_build_id}.tgz"

-cat << EOT > ./promote-expansions.yml
+cat <<EOT >./promote-expansions.yml
 promote_project_id: "$promote_project_id"
 promote_version_id: "$promote_version_id"
 promote_build_id: "$promote_build_id"
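The hunk header shows the extraction pattern this script follows: `promote_revision` is pulled from the downloaded task JSON with jq, and the elided lines presumably fill the remaining promote_* variables the same way before the heredoc templates them into YAML. The `cat file | jq` form is equivalent to passing the file directly:

    promote_revision=$(jq -r ".revision" task_data.json)   # same result, one fewer process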
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -9,48 +9,48 @@ set -o verbose
 # Use the Evergreen temp directory to avoid filling up the disk.
 mkdir -p $TMPDIR
 if [[ "$OSTYPE" == "cygwin" ]] || [[ "$OSTYPE" == "win32" ]]; then
     mkdir -p Z:/bazel_tmp
     touch Z:/bazel_tmp/mci_path
     # TODO(SERVER-94605): remove when Windows temp directory is cleared between task runs
     if [[ "$PWD" != "$(cat Z:/bazel_tmp/mci_path)" ]]; then
         echo "Clearing bazel output root from previous task mci '$(cat Z:/bazel_tmp/mci_path)'"
         rm -rf Z:/bazel_tmp/* || true
-        echo $PWD > Z:/bazel_tmp/mci_path
+        echo $PWD >Z:/bazel_tmp/mci_path
     fi

     # Z:/ path is necessary to avoid running into MSVC's file length limit,
     # see https://jira.mongodb.org/browse/DEVPROD-11126
     abs_path=$(cygpath -w "$TMPDIR" | tr '\\' '/')
-    echo "startup --output_user_root=Z:/bazel_tmp" > .bazelrc.evergreen
+    echo "startup --output_user_root=Z:/bazel_tmp" >.bazelrc.evergreen
-    echo "common --action_env=TMP=Z:/bazel_tmp" >> .bazelrc.evergreen
+    echo "common --action_env=TMP=Z:/bazel_tmp" >>.bazelrc.evergreen
-    echo "common --action_env=TEMP=Z:/bazel_tmp" >> .bazelrc.evergreen
+    echo "common --action_env=TEMP=Z:/bazel_tmp" >>.bazelrc.evergreen
-    echo "BAZELISK_HOME=${abs_path}/bazelisk_home" >> .bazeliskrc
+    echo "BAZELISK_HOME=${abs_path}/bazelisk_home" >>.bazeliskrc
     # echo "common --define GIT_COMMIT_HASH=$(git rev-parse HEAD)" >> .bazelrc.git
-    echo "common --define GIT_COMMIT_HASH=nogitversion" >> .bazelrc.git
+    echo "common --define GIT_COMMIT_HASH=nogitversion" >>.bazelrc.git
 else
-    echo "startup --output_user_root=${TMPDIR}/bazel-output-root" > .bazelrc.evergreen
+    echo "startup --output_user_root=${TMPDIR}/bazel-output-root" >.bazelrc.evergreen
-    echo "BAZELISK_HOME=${TMPDIR}/bazelisk_home" >> .bazeliskrc
+    echo "BAZELISK_HOME=${TMPDIR}/bazelisk_home" >>.bazeliskrc
-    echo "common --define GIT_COMMIT_HASH=$(git rev-parse HEAD)" >> .bazelrc.git
+    echo "common --define GIT_COMMIT_HASH=$(git rev-parse HEAD)" >>.bazelrc.git
 fi

 if [[ "${evergreen_remote_exec}" != "on" ]]; then
     # Temporarily disable remote exec and only use remote cache
-    echo "common --remote_executor=" >> .bazelrc.evergreen
+    echo "common --remote_executor=" >>.bazelrc.evergreen
-    echo "common --modify_execution_info=.*=+no-remote-exec" >> .bazelrc.evergreen
+    echo "common --modify_execution_info=.*=+no-remote-exec" >>.bazelrc.evergreen
-    echo "common --jobs=auto" >> .bazelrc.evergreen
+    echo "common --jobs=auto" >>.bazelrc.evergreen
 fi

 uri="https://spruce.mongodb.com/task/${task_id:?}?execution=${execution:?}"

-echo "common --tls_client_certificate=./engflow.cert" >> .bazelrc.evergreen
+echo "common --tls_client_certificate=./engflow.cert" >>.bazelrc.evergreen
-echo "common --tls_client_key=./engflow.key" >> .bazelrc.evergreen
+echo "common --tls_client_key=./engflow.key" >>.bazelrc.evergreen
-echo "common --bes_keywords=engflow:CiCdPipelineName=${build_variant:?}" >> .bazelrc.evergreen
+echo "common --bes_keywords=engflow:CiCdPipelineName=${build_variant:?}" >>.bazelrc.evergreen
-echo "common --bes_keywords=engflow:CiCdJobName=${task_name:?}" >> .bazelrc.evergreen
+echo "common --bes_keywords=engflow:CiCdJobName=${task_name:?}" >>.bazelrc.evergreen
-echo "common --bes_keywords=engflow:CiCdUri=${uri:?}" >> .bazelrc.evergreen
+echo "common --bes_keywords=engflow:CiCdUri=${uri:?}" >>.bazelrc.evergreen
-echo "common --bes_keywords=evg:project=${project:?}" >> .bazelrc.evergreen
+echo "common --bes_keywords=evg:project=${project:?}" >>.bazelrc.evergreen
-echo "common --remote_upload_local_results=True" >> .bazelrc.evergreen
+echo "common --remote_upload_local_results=True" >>.bazelrc.evergreen
-echo "common --test_output=summary" >> .bazelrc.evergreen
+echo "common --test_output=summary" >>.bazelrc.evergreen

 # Disable remote execution in evergreen only since it runs on every PR, but we still
 # want it to be fast on workstations
-echo "coverage --config=no-remote-exec" >> .bazelrc.evergreen
+echo "coverage --config=no-remote-exec" >>.bazelrc.evergreen
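Note that shfmt only rewrites shell syntax, not string contents, so the commented-out echo keeps its `>> ` spacing. The block appends one bazel flag per echo, which keeps each line individually conditional; where several flags are unconditional, a single heredoc would do the same job in one write. A sketch of that alternative, not a change this commit makes:

    cat <<EOF >>.bazelrc.evergreen
    common --tls_client_certificate=./engflow.cert
    common --tls_client_key=./engflow.key
    common --bes_keywords=engflow:CiCdPipelineName=${build_variant:?}
    EOF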
@@ -9,19 +9,19 @@ virtualenv -p python3.12 .venv
 source .venv/bin/activate
 pip install -r sast_reporting/requirements.txt
 if [ -z "${TRIGGERED_BY_GIT_TAG}" ]; then
     echo "Evergreen version was NOT triggered by a git tag"
     echo "Setting Google Drive folder ID for non-release"
     google_drive_folder_id="${SAST_REPORT_TEST_GOOGLE_DRIVE_FOLDER_ID}"
 else
     echo "Evergreen version was triggered by git tag '${TRIGGERED_BY_GIT_TAG}'"
     echo "Setting Google Drive folder ID for release"
     google_drive_folder_id="${SAST_REPORT_RELEASES_GOOGLE_DRIVE_FOLDER_ID}"
 fi
 python3 -m sast_reporting.src.mongodb_server \
     --version ${MONGODB_VERSION} \
     --branch ${MONGODB_RELEASE_BRANCH} \
     --commit-date $commit_datetime \
     --output-path ${MODULE_PATH}/sast_report_${MONGODB_VERSION}.xlsx \
     --upload-file-name "[${MONGODB_VERSION}] MongoDB Server Enterprise SAST Report" \
     --google-drive-folder-id $google_drive_folder_id \
     --env-file ${WORK_DIR}/sast_report_generation_credentials.env
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -9,11 +9,11 @@ set -o verbose
 setup_mongo_task_generator
 activate_venv
 RUST_BACKTRACE=full PATH=$PATH:$HOME:/ ./mongo-task-generator \
     --expansion-file ../expansions.yml \
     --evg-auth-file ./.evergreen.yml \
     --evg-project-file ${evergreen_config_file_path} \
     --generate-sub-tasks-config etc/generate_subtasks_config.yml \
     --s3-test-stats-bucket mongo-test-stats \
     --include-fully-disabled-feature-tests \
     --bazel-suite-configs resmoke_suite_configs.yml \
     $@
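Both task-generator wrappers (this one and the burn-in variant below) forward their arguments with a bare `$@`, which re-splits any argument containing whitespace; the quoted form preserves each argument exactly, which is usually what a pass-through wrapper wants. A sketch of the distinction, with a hypothetical helper:

    print_args() { for a in "$@"; do echo "[$a]"; done; }
    set -- "one arg" two
    print_args $@      # three args: [one] [arg] [two]
    print_args "$@"    # two args:   [one arg] [two]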
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -15,12 +15,12 @@ echo "Base patch revision: $base_revision"
 $python buildscripts/burn_in_tests.py generate-test-membership-map-file-for-ci

 RUST_BACKTRACE=full PATH=$PATH:$HOME:/ ./mongo-task-generator \
     --expansion-file ../expansions.yml \
     --evg-auth-file ./.evergreen.yml \
     --evg-project-file ${evergreen_config_file_path} \
     --generate-sub-tasks-config etc/generate_subtasks_config.yml \
     --s3-test-stats-bucket mongo-test-stats \
     --include-fully-disabled-feature-tests \
     --burn-in \
     --burn-in-tests-command "python buildscripts/burn_in_tests.py run --origin-rev=$base_revision" \
     $@
@@ -4,7 +4,7 @@
 # Usage:
 # bash get_all_resmoke_suite_configs.sh

-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -20,5 +20,5 @@ BAZEL_BINARY=$(bazel_get_binary_path)
 # str(target.label).replace('@@','') -> the target name, like //buildscripts/resmokeconfig:core_config
 # f.path for f in target.files.to_list() -> the path to the config file, like bazel-out/k8-fastbuild/bin/buildscripts/resmokeconfig/core.yml
 ${BAZEL_BINARY} cquery ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} \
     --define=MONGO_VERSION=${version} ${patch_compile_flags} "kind(resmoke_config, //...)" \
-    --output=starlark --starlark:expr "': '.join([str(target.label).replace('@@','')] + [f.path for f in target.files.to_list()])" > resmoke_suite_configs.yml
+    --output=starlark --starlark:expr "': '.join([str(target.label).replace('@@','')] + [f.path for f in target.files.to_list()])" >resmoke_suite_configs.yml
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -6,7 +6,7 @@
 # Required environment variables:
 # * ${suite} - Resmoke bazel target, like //buildscripts/resmokeconfig:core

-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -18,4 +18,4 @@ source ./evergreen/bazel_utility_functions.sh
 BAZEL_BINARY=$(bazel_get_binary_path)

 echo "suite_config: $(${BAZEL_BINARY} cquery ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} \
-    --define=MONGO_VERSION=${version} ${patch_compile_flags} ${suite}_config --output files)" > suite_config_expansion.yml
+    --define=MONGO_VERSION=${version} ${patch_compile_flags} ${suite}_config --output files)" >suite_config_expansion.yml
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -8,9 +8,9 @@ set -o verbose
 # Set what processes to look for. For most tasks, we rely on resmoke to figure out its subprocesses
 # and run the hang analyzer on those. For non-resmoke tasks, we enumerate the process list here.
 if [[ ${task_name} == *"jepsen"* ]]; then
     hang_analyzer_option="-o file -o stdout -p dbtest,java,mongo,mongod,mongos,python,_test"
 else
     hang_analyzer_option="-o file -o stdout -m exact -p python"
 fi

 activate_venv
@@ -19,5 +19,5 @@ $python buildscripts/resmoke.py hang-analyzer $hang_analyzer_option

 # Call hang analyzer for tasks that are running remote mongo processes
 if [ -n "${private_ip_address}" ]; then
     $python buildscripts/resmoke.py powercycle remote-hang-analyzer
 fi
@@ -1,4 +1,4 @@
-DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 . "$DIR/prelude.sh"

 cd src
@@ -1,8 +1,8 @@
 set -euo pipefail

 if [ -d jepsen ]; then
     echo "Cleanup docker containers"
     # docker ps -q fails when no containers are running
     sudo docker container kill $(docker ps -q) || true
     sudo docker system prune -f
 fi
@@ -1,4 +1,4 @@
 set -euo pipefail

 cd jepsen/docker
-./bin/up -n 9 -d 2>&1 > docker.log
+./bin/up -n 9 -d 2>&1 >docker.log
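One behavioral note on that last line: redirections apply left to right, so `2>&1 >docker.log` duplicates stderr onto the inherited stdout (the console) before stdout is pointed at the file, meaning stderr does not land in docker.log. shfmt only tightened the spacing; if capturing both streams were the intent, the operators would be ordered the other way:

    ./bin/up -n 9 -d 2>&1 >docker.log    # stderr -> console, stdout -> docker.log
    ./bin/up -n 9 -d >docker.log 2>&1    # both streams -> docker.log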