SERVER-101034 Use rules_lint shfmt formatter (#38448)

GitOrigin-RevId: e8ef1ba2000e12fa2cd5a115a9ceeab92332e938
Authored by Zack Winter on 2025-07-15 18:19:08 -07:00; committed by MongoDB Bot
parent 5ac113adb1
commit 7ccc14bf91
174 changed files with 2259 additions and 2387 deletions

.editorconfig Normal file

@@ -0,0 +1,9 @@
root = true
[*.sh]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
indent_style = space
indent_size = 4
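
shfmt honors EditorConfig settings when it is run without explicit formatting flags, so the block above is what drives the repo-wide reformat in this commit. A minimal sketch of the effect on a hypothetical snippet (not from this change):

    # before: 2-space indent, "then" on its own line
    if [ -z "$name" ]
    then
      echo "empty"
    fi

    # after shfmt, with indent_size = 4 picked up from .editorconfig
    if [ -z "$name" ]; then
        echo "empty"
    fi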

.github/CODEOWNERS vendored

@@ -10,6 +10,7 @@ OWNERS.yml @10gen/server-root-ownership @svc-auto-approve-bot
.bazel* @10gen/devprod-build @svc-auto-approve-bot
.clang-format @10gen/server-programmability @svc-auto-approve-bot
.clang-tidy.in @10gen/server-programmability @svc-auto-approve-bot
/.editorconfig @10gen/devprod-build @svc-auto-approve-bot
.git* @10gen/devprod-build @svc-auto-approve-bot
.mypy.ini @10gen/devprod-build @10gen/devprod-correctness @svc-auto-approve-bot
.prettierignore @10gen/devprod-correctness @svc-auto-approve-bot


@@ -18,6 +18,9 @@ filters:
- ".clang-tidy.in":
approvers:
- 10gen/server-programmability
- "/.editorconfig":
approvers:
- 10gen/devprod-build
- ".git*":
approvers:
- 10gen/devprod-build


@@ -6,8 +6,6 @@ py_binary(
args = [
"--prettier",
"$(location //:prettier)",
"--shellscripts-linters",
"$(location //buildscripts:shellscripts_linters)",
"--rules-lint-format",
"$(location :rules_lint_format)",
"--rules-lint-format-check",
@@ -17,7 +15,6 @@ py_binary(
":rules_lint_format",
":rules_lint_format.check",
"//:prettier",
"//buildscripts:shellscripts_linters",
"@shfmt",
],
env = {
@@ -38,9 +35,8 @@ format_multirun(
graphql = "//:prettier",
html = "//:prettier",
markdown = "//:prettier",
shell = "@shfmt//:shfmt",
sql = "//:prettier",
starlark = "@buildifier_prebuilt//:buildifier",
visibility = ["//visibility:public"],
# TODO(SERVER-101034): Enable rules_lint shfmt after sh files are reformatted with .editorconfig
# shell = "@shfmt//:shfmt",
)
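
With format_multirun wiring shell into the shared formatter, shfmt runs through bazel run like the other languages. Assuming the target names shown above (the package path is not visible in this diff), usage would look roughly like:

    # rewrite files in place (hypothetical package path)
    bazel run //tools/format:rules_lint_format

    # check only; exits non-zero if any file needs reformatting
    bazel run //tools/format:rules_lint_format.check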


@@ -82,21 +82,6 @@ def run_rules_lint(
return True
def run_shellscripts_linters(shellscripts_linters: pathlib.Path, check: bool) -> bool:
try:
command = [str(shellscripts_linters)]
if not check:
print("Running shellscripts formatter")
command.append("fix")
else:
print("Running shellscripts linter")
repo_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
subprocess.run(command, check=True, env=os.environ, cwd=repo_path)
except subprocess.CalledProcessError:
return False
return True
def run_prettier(
prettier: pathlib.Path, check: bool, files_to_format: Union[List[str], str] = "all"
) -> bool:
@@ -169,12 +154,6 @@ def main() -> int:
parser.add_argument(
"--prettier", help="Set the path to prettier", required=True, type=pathlib.Path
)
parser.add_argument(
"--shellscripts-linters",
help="Set the path to shellscripts_linters",
required=True,
type=pathlib.Path,
)
parser.add_argument(
"--rules-lint-format",
help="Set the path to rules_lint's formatter",
@@ -200,7 +179,6 @@
args = parser.parse_args()
prettier_path: pathlib.Path = args.prettier.resolve()
shellscripts_linters_path: pathlib.Path = args.shellscripts_linters.resolve()
os.chdir(default_dir)
@@ -235,7 +213,6 @@
if run_rules_lint(
args.rules_lint_format, args.rules_lint_format_check, args.check, files_to_format
)
and run_shellscripts_linters(shellscripts_linters_path, args.check)
and run_prettier(prettier_path, args.check, files_to_format)
else 1
)


@@ -5,8 +5,8 @@ GREEN='\033[0;32m'
NO_COLOR='\033[0m'
if [[ $1 == "ALL_PASSING" ]]; then
echo -e "${GREEN}INFO:${NO_COLOR} No linter errors found!"
exit 0
echo -e "${GREEN}INFO:${NO_COLOR} No linter errors found!"
exit 0
fi
echo -e "${RED}ERROR:${NO_COLOR} Linter run failed, see details above"


@@ -153,34 +153,34 @@ get_package_versions() {
pkg_manager=$(get_package_manager "$image")
case "$pkg_manager" in
yum)
docker run --rm "$image" bash -c "
yum)
docker run --rm "$image" bash -c "
yum info ${packages[*]} 2>/dev/null |
awk '/^Name/ {name=\$3} /^Version/ {version=\$3} /^Release/ {release=\$3}
/^Release/ {print name \"-\" version \"-\" release}' |
sort -u"
;;
apt)
docker run --rm "$image" bash -c "
;;
apt)
docker run --rm "$image" bash -c "
apt-get update >/dev/null 2>&1 &&
apt-cache policy ${packages[*]} |
awk '/^[^ ]/ {pkg=\$1} /Candidate:/ {print pkg \"=\" \$2}' |
sort -u"
;;
zypper)
# TODO(SERVER-93423): Pin suse package versions. At the moment this
# breaks the remote_execution_containers_generator.py script.
printf '%s\n' "${packages[@]}" | sort -u
# docker run --rm "$image" bash -c "
# zypper --non-interactive refresh >/dev/null 2>&1 &&
# zypper --non-interactive info ${packages[*]} |
# awk '/^Name/ {name=\$3} /^Version/ {version=\$3} /^Version/ {print name \"=\" version}' |
# sort -u"
;;
*)
echo "Unsupported package manager for image: $image" >&2
return 1
;;
;;
zypper)
# TODO(SERVER-93423): Pin suse package versions. At the moment this
# breaks the remote_execution_containers_generator.py script.
printf '%s\n' "${packages[@]}" | sort -u
# docker run --rm "$image" bash -c "
# zypper --non-interactive refresh >/dev/null 2>&1 &&
# zypper --non-interactive info ${packages[*]} |
# awk '/^Name/ {name=\$3} /^Version/ {version=\$3} /^Version/ {print name \"=\" version}' |
# sort -u"
;;
*)
echo "Unsupported package manager for image: $image" >&2
return 1
;;
esac
}
@@ -207,25 +207,25 @@ generate_dockerfile() {
install_lines=$(get_package_versions "$image" "${packages[@]}" | sed 's/^/ /' | sed 's/$/\ \\/')
case "$pkg_manager" in
yum)
update_cmd="yum check-update || true"
install_cmd="yum install -y"
clean_cmd="&& yum clean all && rm -rf /var/cache/yum/*"
;;
apt)
update_cmd="apt-get update"
install_cmd="DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends"
clean_cmd="&& rm -rf /var/lib/apt/lists/*"
;;
zypper)
update_cmd="zypper refresh"
install_cmd="zypper install -y --no-recommends"
clean_cmd="&& zypper clean --all"
;;
*)
echo "Unsupported package manager for image: $image" >&2
return 1
;;
yum)
update_cmd="yum check-update || true"
install_cmd="yum install -y"
clean_cmd="&& yum clean all && rm -rf /var/cache/yum/*"
;;
apt)
update_cmd="apt-get update"
install_cmd="DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends"
clean_cmd="&& rm -rf /var/lib/apt/lists/*"
;;
zypper)
update_cmd="zypper refresh"
install_cmd="zypper install -y --no-recommends"
clean_cmd="&& zypper clean --all"
;;
*)
echo "Unsupported package manager for image: $image" >&2
return 1
;;
esac
# Remove colons from package versions for Debian and Ubuntu
@@ -234,7 +234,7 @@ generate_dockerfile() {
fi
mkdir -p "$output_dir"
cat << EOF > "$output_dir/dockerfile"
cat <<EOF >"$output_dir/dockerfile"
# DO NOT EDIT.
#
# This Dockerfile is generated by the 'repin_dockerfiles.sh' script. To repin
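
Most of the churn in this file is shfmt's default redirect style: with space_redirects left unset, the space between a redirection operator and its target is dropped. A sketch of the rule:

    cat << EOF > "$out"    # before
    cat <<EOF >"$out"      # after shfmt (default style)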


@@ -5,8 +5,8 @@ set -e
RUNFILES_WORKING_DIRECTORY="$(pwd)"
if [ -z $BUILD_WORKING_DIRECTORY ]; then
echo "ERROR: BUILD_WORKING_DIRECTORY was not set, was this run from bazel?"
exit 1
echo "ERROR: BUILD_WORKING_DIRECTORY was not set, was this run from bazel?"
exit 1
fi
cd $BUILD_WORKING_DIRECTORY


@@ -111,18 +111,6 @@ py_binary(
],
)
sh_binary(
name = "shellscripts_linters",
srcs = ["shellscripts-linters.sh"],
data = [
"@shfmt",
],
env = {
"SHFMT_PATH": "$(rootpath @shfmt//:shfmt)",
},
visibility = ["//visibility:public"],
)
py_library(
name = "mongo_toolchain",
srcs = [


@@ -8,70 +8,70 @@ echo "+-------------------------------------------------------------------------
echo
if [[ -d "/opt/mongodbtoolchain/v4/bin" ]]; then
export PATH="/opt/mongodbtoolchain/v4/bin:$PATH"
export PATH="/opt/mongodbtoolchain/v4/bin:$PATH"
fi
if [[ -d "/opt/mongodbtoolchain/v5/bin" ]]; then
export PATH="/opt/mongodbtoolchain/v5/bin:$PATH"
export PATH="/opt/mongodbtoolchain/v5/bin:$PATH"
fi
rc_file=""
if [[ -f "$HOME/.bashrc" ]]; then
rc_file="$HOME/.bashrc"
rc_file="$HOME/.bashrc"
fi
if [[ -f "$HOME/.zshrc" ]]; then
rc_file="$HOME/.zshrc"
rc_file="$HOME/.zshrc"
fi
if ! command -v db-contrib-tool &> /dev/null; then
if ! python3 -c "import sys; sys.exit(sys.version_info < (3, 7))" &> /dev/null; then
actual_version=$(python3 -c 'import sys; print(sys.version)')
echo "You must have python3.7+ installed. Detected version $actual_version."
echo "To avoid unexpected issues, python3.7+ will not be automatically installed."
echo "Please, do it yourself."
echo
echo "On macOS you can run:"
echo
echo " brew install python3"
echo
exit 1
fi
if command -v pipx &> /dev/null; then
echo "Found pipx: $(command -v pipx)."
echo "Using it to install 'db-contrib-tool'."
echo
pipx ensurepath &> /dev/null
if [[ -f "$rc_file" ]]; then
source "$rc_file"
if ! command -v db-contrib-tool &>/dev/null; then
if ! python3 -c "import sys; sys.exit(sys.version_info < (3, 7))" &>/dev/null; then
actual_version=$(python3 -c 'import sys; print(sys.version)')
echo "You must have python3.7+ installed. Detected version $actual_version."
echo "To avoid unexpected issues, python3.7+ will not be automatically installed."
echo "Please, do it yourself."
echo
echo "On macOS you can run:"
echo
echo " brew install python3"
echo
exit 1
fi
pipx install db-contrib-tool --python $(command -v python3) --force
echo
else
if ! python3 -m pipx --version &> /dev/null; then
echo "Couldn't find pipx. Installing it as python3 module:"
echo " $(command -v python3) -m pip install pipx"
echo
python3 -m pip install pipx
echo
if command -v pipx &>/dev/null; then
echo "Found pipx: $(command -v pipx)."
echo "Using it to install 'db-contrib-tool'."
echo
pipx ensurepath &>/dev/null
if [[ -f "$rc_file" ]]; then
source "$rc_file"
fi
pipx install db-contrib-tool --python $(command -v python3) --force
echo
else
echo "Found pipx installed as python3 module:"
echo " $(command -v python3) -m pipx --version"
echo "Using it to install 'db-contrib-tool'."
echo
fi
if ! python3 -m pipx --version &>/dev/null; then
echo "Couldn't find pipx. Installing it as python3 module:"
echo " $(command -v python3) -m pip install pipx"
echo
python3 -m pip install pipx
echo
else
echo "Found pipx installed as python3 module:"
echo " $(command -v python3) -m pipx --version"
echo "Using it to install 'db-contrib-tool'."
echo
fi
python3 -m pipx ensurepath &> /dev/null
if [[ -f "$rc_file" ]]; then
source "$rc_file"
fi
python3 -m pipx ensurepath &>/dev/null
if [[ -f "$rc_file" ]]; then
source "$rc_file"
fi
python3 -m pipx install db-contrib-tool --force
echo
fi
python3 -m pipx install db-contrib-tool --force
echo
fi
fi
echo "Please, open a new shell or run:"


@@ -5,31 +5,31 @@ ARGS=("$@")
# Ordered list of possible clangd locations
CANDIDATES=(
"$(command -v custom-clangd)"
"$(find .compiledb/compiledb-*/external/mongo_toolchain_v5/v5/bin/clangd)"
"$(find bazel-*/external/mongo_toolchain_v5/v5/bin/clangd)"
"/opt/mongodbtoolchain/v5/bin/clangd"
"$(command -v custom-clangd)"
"$(find .compiledb/compiledb-*/external/mongo_toolchain_v5/v5/bin/clangd)"
"$(find bazel-*/external/mongo_toolchain_v5/v5/bin/clangd)"
"/opt/mongodbtoolchain/v5/bin/clangd"
)
# Find the first available clangd
for CANDIDATE in "${CANDIDATES[@]}"; do
if [[ -x "$CANDIDATE" ]]; then
CLANGD="$CANDIDATE"
echo "[INFO] Using clangd at: $CLANGD" >&2
break
fi
if [[ -x "$CANDIDATE" ]]; then
CLANGD="$CANDIDATE"
echo "[INFO] Using clangd at: $CLANGD" >&2
break
fi
done
# Fail if no clangd was found
if [[ -z "$CLANGD" ]]; then
echo "[ERROR] clangd not found in any of the expected locations." >&2
exit 1
echo "[ERROR] clangd not found in any of the expected locations." >&2
exit 1
fi
FINAL_ARGS=(
"${ARGS[@]}"
"--query-driver=./**/*{clang,gcc,g++}*" # allow any clang or gcc binary in the repo
"--header-insertion=never"
"${ARGS[@]}"
"--query-driver=./**/*{clang,gcc,g++}*" # allow any clang or gcc binary in the repo
"--header-insertion=never"
)
# Log the full command (optional)


@@ -26,87 +26,75 @@ mkdir -p "$repodir/yum/redhat"
# to support different $releasever values in yum repo configurations
#
if [ ! -e "$repodir/yum/redhat/7Server" ]
then
ln -s 7 "$repodir/yum/redhat/7Server"
if [ ! -e "$repodir/yum/redhat/7Server" ]; then
ln -s 7 "$repodir/yum/redhat/7Server"
fi
if [ ! -e "$repodir/yum/redhat/6Server" ]
then
ln -s 6 "$repodir/yum/redhat/6Server"
if [ ! -e "$repodir/yum/redhat/6Server" ]; then
ln -s 6 "$repodir/yum/redhat/6Server"
fi
if [ ! -e "$repodir/yum/redhat/5Server" ]
then
ln -s 5 "$repodir/yum/redhat/5Server"
if [ ! -e "$repodir/yum/redhat/5Server" ]; then
ln -s 5 "$repodir/yum/redhat/5Server"
fi
echo "Scanning and copying package files from $source_dir"
echo ". = skipping existing file, @ = copying file"
for package in $(find "$source_dir" -not \( -path "$repodir" -prune \) -and \( -name \*.rpm -o -name \*.deb -o -name Release \))
do
new_package_location="$repodir$(echo "$package" | sed 's/\/var\/www-enterprise\/[^\/]*//;')"
# skip if the directory structure looks weird
#
if echo "$new_package_location" | grep -q /repo/
then
continue
fi
for package in $(find "$source_dir" -not \( -path "$repodir" -prune \) -and \( -name \*.rpm -o -name \*.deb -o -name Release \)); do
new_package_location="$repodir$(echo "$package" | sed 's/\/var\/www-enterprise\/[^\/]*//;')"
# skip if the directory structure looks weird
#
if echo "$new_package_location" | grep -q /repo/; then
continue
fi
# skip if not enterprise package
#
if ! echo "$new_package_location" | grep -q enterprise
then
continue
fi
# skip if it's already there
#
if [ -e "$new_package_location" -a "$(basename "$package")" != "Release" ]
then
echo -n .
else
mkdir -p "$(dirname "$new_package_location")"
echo -n @
cp "$package" "$new_package_location"
fi
# skip if not enterprise package
#
if ! echo "$new_package_location" | grep -q enterprise; then
continue
fi
# skip if it's already there
#
if [ -e "$new_package_location" -a "$(basename "$package")" != "Release" ]; then
echo -n .
else
mkdir -p "$(dirname "$new_package_location")"
echo -n @
cp "$package" "$new_package_location"
fi
done
echo
# packages are in place, now create metadata
#
for debian_dir in "$repodir"/apt/ubuntu "$repodir"/apt/debian
do
cd "$debian_dir"
for section_dir in $(find dists -type d -name multiverse -o -name main)
do
for arch_dir in "$section_dir"/{binary-i386,binary-amd64}
do
echo "Generating Packages file under $debian_dir/$arch_dir"
if [ ! -d $arch_dir ]
then
mkdir $arch_dir
fi
dpkg-scanpackages --multiversion "$arch_dir" > "$arch_dir"/Packages
gzip -9c "$arch_dir"/Packages > "$arch_dir"/Packages.gz
for debian_dir in "$repodir"/apt/ubuntu "$repodir"/apt/debian; do
cd "$debian_dir"
for section_dir in $(find dists -type d -name multiverse -o -name main); do
for arch_dir in "$section_dir"/{binary-i386,binary-amd64}; do
echo "Generating Packages file under $debian_dir/$arch_dir"
if [ ! -d $arch_dir ]; then
mkdir $arch_dir
fi
dpkg-scanpackages --multiversion "$arch_dir" >"$arch_dir"/Packages
gzip -9c "$arch_dir"/Packages >"$arch_dir"/Packages.gz
done
done
done
for release_file in $(find "$debian_dir" -name Release)
do
release_dir=$(dirname "$release_file")
echo "Generating Release file under $release_dir"
cd $release_dir
tempfile=$(mktemp /tmp/ReleaseXXXXXX)
tempfile2=$(mktemp /tmp/ReleaseXXXXXX)
mv Release $tempfile
head -7 $tempfile > $tempfile2
apt-ftparchive release . >> $tempfile2
cp $tempfile2 Release
chmod 644 Release
rm Release.gpg
echo "Signing Release file"
gpg -r "$gpg_recip" --no-secmem-warning -abs --output Release.gpg Release
done
for release_file in $(find "$debian_dir" -name Release); do
release_dir=$(dirname "$release_file")
echo "Generating Release file under $release_dir"
cd $release_dir
tempfile=$(mktemp /tmp/ReleaseXXXXXX)
tempfile2=$(mktemp /tmp/ReleaseXXXXXX)
mv Release $tempfile
head -7 $tempfile >$tempfile2
apt-ftparchive release . >>$tempfile2
cp $tempfile2 Release
chmod 644 Release
rm Release.gpg
echo "Signing Release file"
gpg -r "$gpg_recip" --no-secmem-warning -abs --output Release.gpg Release
done
done
# Create symlinks for stable and unstable branches
@@ -118,29 +106,24 @@ done
# /var/www-enterprise/repo.consolidated/apt/ubuntu/dists/precise/mongodb-enterprise/unstable -> 2.5
# /var/www-enterprise/repo.consolidated/apt/debian/dists/wheezy/mongodb-enterprise/unstable -> 2.5
#
for unstable_branch_dir in "$repodir"/yum/redhat/*/*/$unstable_branch "$repodir"/yum/amazon/*/*/$unstable_branch "$repodir"/apt/debian/dists/*/*/$unstable_branch "$repodir"/apt/ubuntu/dists/*/*/$unstable_branch "$repodir"/zypper/suse/*/*/$unstable_branch
do
full_unstable_path=$(dirname "$unstable_branch_dir")/unstable
if [ -e "$unstable_branch_dir" -a ! -e "$full_unstable_path" ]
then
echo "Linking unstable branch directory $unstable_branch_dir to $full_unstable_path"
ln -s $unstable_branch $full_unstable_path
fi
for unstable_branch_dir in "$repodir"/yum/redhat/*/*/$unstable_branch "$repodir"/yum/amazon/*/*/$unstable_branch "$repodir"/apt/debian/dists/*/*/$unstable_branch "$repodir"/apt/ubuntu/dists/*/*/$unstable_branch "$repodir"/zypper/suse/*/*/$unstable_branch; do
full_unstable_path=$(dirname "$unstable_branch_dir")/unstable
if [ -e "$unstable_branch_dir" -a ! -e "$full_unstable_path" ]; then
echo "Linking unstable branch directory $unstable_branch_dir to $full_unstable_path"
ln -s $unstable_branch $full_unstable_path
fi
done
for stable_branch_dir in "$repodir"/yum/redhat/*/*/$stable_branch "$repodir"/yum/amazon/*/*/$stable_branch "$repodir"/apt/debian/dists/*/*/$stable_branch "$repodir"/apt/ubuntu/dists/*/*/$stable_branch "$repodir"/zypper/suse/*/*/$stable_branch
do
full_stable_path=$(dirname "$stable_branch_dir")/stable
if [ -e "$stable_branch_dir" -a ! -e "$full_stable_path" ]
then
echo "Linking stable branch directory $stable_branch_dir to $full_stable_path"
ln -s $stable_branch $full_stable_path
fi
for stable_branch_dir in "$repodir"/yum/redhat/*/*/$stable_branch "$repodir"/yum/amazon/*/*/$stable_branch "$repodir"/apt/debian/dists/*/*/$stable_branch "$repodir"/apt/ubuntu/dists/*/*/$stable_branch "$repodir"/zypper/suse/*/*/$stable_branch; do
full_stable_path=$(dirname "$stable_branch_dir")/stable
if [ -e "$stable_branch_dir" -a ! -e "$full_stable_path" ]; then
echo "Linking stable branch directory $stable_branch_dir to $full_stable_path"
ln -s $stable_branch $full_stable_path
fi
done
for rpm_dir in $(find "$repodir"/yum/redhat "$repodir"/zypper/suse -type d -name x86_64 -o -name i386)
do
echo "Generating redhat repo metadata under $rpm_dir"
cd "$rpm_dir"
createrepo .
for rpm_dir in $(find "$repodir"/yum/redhat "$repodir"/zypper/suse -type d -name x86_64 -o -name i386); do
echo "Generating redhat repo metadata under $rpm_dir"
cd "$rpm_dir"
createrepo .
done
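
The bulk of this rewrite is shfmt folding "then" and "do" onto the line of the condition or loop header. Sketch on a hypothetical loop:

    # before
    for pkg in "$src"/*.rpm
    do
        cp "$pkg" "$dest"
    done

    # after shfmt
    for pkg in "$src"/*.rpm; do
        cp "$pkg" "$dest"
    done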


@@ -26,87 +26,75 @@ mkdir -p "$repodir/yum/redhat"
# to support different $releasever values in yum repo configurations
#
if [ ! -e "$repodir/yum/redhat/6Server" ]
then
ln -s 6 "$repodir/yum/redhat/6Server"
if [ ! -e "$repodir/yum/redhat/6Server" ]; then
ln -s 6 "$repodir/yum/redhat/6Server"
fi
if [ ! -e "$repodir/yum/redhat/7Server" ]
then
ln -s 7 "$repodir/yum/redhat/7Server"
if [ ! -e "$repodir/yum/redhat/7Server" ]; then
ln -s 7 "$repodir/yum/redhat/7Server"
fi
if [ ! -e "$repodir/yum/redhat/5Server" ]
then
ln -s 5 "$repodir/yum/redhat/5Server"
if [ ! -e "$repodir/yum/redhat/5Server" ]; then
ln -s 5 "$repodir/yum/redhat/5Server"
fi
echo "Scanning and copying package files from $source_dir"
echo ". = skipping existing file, @ = copying file"
for package in $(find "$source_dir" -not \( -path "$repodir" -prune \) -and \( -name \*.rpm -o -name \*.deb -o -name Release \))
do
new_package_location="$repodir$(echo "$package" | sed 's/\/var\/www-org\/[^\/]*//;')"
# skip if the directory structure looks weird
#
if echo "$new_package_location" | grep -q /repo/
then
continue
fi
for package in $(find "$source_dir" -not \( -path "$repodir" -prune \) -and \( -name \*.rpm -o -name \*.deb -o -name Release \)); do
new_package_location="$repodir$(echo "$package" | sed 's/\/var\/www-org\/[^\/]*//;')"
# skip if the directory structure looks weird
#
if echo "$new_package_location" | grep -q /repo/; then
continue
fi
# skip if not community package
#
if ! echo "$new_package_location" | grep -q org
then
continue
fi
# skip if it's already there
#
if [ -e "$new_package_location" -a "$(basename "$package")" != "Release" ]
then
echo -n .
else
mkdir -p "$(dirname "$new_package_location")"
echo -n @
cp "$package" "$new_package_location"
fi
# skip if not community package
#
if ! echo "$new_package_location" | grep -q org; then
continue
fi
# skip if it's already there
#
if [ -e "$new_package_location" -a "$(basename "$package")" != "Release" ]; then
echo -n .
else
mkdir -p "$(dirname "$new_package_location")"
echo -n @
cp "$package" "$new_package_location"
fi
done
echo
# packages are in place, now create metadata
#
for debian_dir in "$repodir"/apt/ubuntu "$repodir"/apt/debian
do
cd "$debian_dir"
for section_dir in $(find dists -type d -name multiverse -o -name main)
do
for arch_dir in "$section_dir"/{binary-i386,binary-amd64}
do
echo "Generating Packages file under $debian_dir/$arch_dir"
if [ ! -d $arch_dir ]
then
mkdir $arch_dir
fi
dpkg-scanpackages --multiversion "$arch_dir" > "$arch_dir"/Packages
gzip -9c "$arch_dir"/Packages > "$arch_dir"/Packages.gz
for debian_dir in "$repodir"/apt/ubuntu "$repodir"/apt/debian; do
cd "$debian_dir"
for section_dir in $(find dists -type d -name multiverse -o -name main); do
for arch_dir in "$section_dir"/{binary-i386,binary-amd64}; do
echo "Generating Packages file under $debian_dir/$arch_dir"
if [ ! -d $arch_dir ]; then
mkdir $arch_dir
fi
dpkg-scanpackages --multiversion "$arch_dir" >"$arch_dir"/Packages
gzip -9c "$arch_dir"/Packages >"$arch_dir"/Packages.gz
done
done
done
for release_file in $(find "$debian_dir" -name Release)
do
release_dir=$(dirname "$release_file")
echo "Generating Release file under $release_dir"
cd $release_dir
tempfile=$(mktemp /tmp/ReleaseXXXXXX)
tempfile2=$(mktemp /tmp/ReleaseXXXXXX)
mv Release $tempfile
head -7 $tempfile > $tempfile2
apt-ftparchive release . >> $tempfile2
cp $tempfile2 Release
chmod 644 Release
rm Release.gpg
echo "Signing Release file"
gpg -r "$gpg_recip" --no-secmem-warning -abs --output Release.gpg Release
done
for release_file in $(find "$debian_dir" -name Release); do
release_dir=$(dirname "$release_file")
echo "Generating Release file under $release_dir"
cd $release_dir
tempfile=$(mktemp /tmp/ReleaseXXXXXX)
tempfile2=$(mktemp /tmp/ReleaseXXXXXX)
mv Release $tempfile
head -7 $tempfile >$tempfile2
apt-ftparchive release . >>$tempfile2
cp $tempfile2 Release
chmod 644 Release
rm Release.gpg
echo "Signing Release file"
gpg -r "$gpg_recip" --no-secmem-warning -abs --output Release.gpg Release
done
done
# Create symlinks for stable and unstable branches
@@ -118,29 +106,24 @@ done
# /var/www-org/repo.consolidated/apt/ubuntu/dists/precise/mongodb-org/unstable -> 2.5
# /var/www-org/repo.consolidated/apt/debian/dists/wheezy/mongodb-org/unstable -> 2.5
#
for unstable_branch_dir in "$repodir"/yum/redhat/*/*/$unstable_branch "$repodir"/yum/amazon/*/*/$unstable_branch "$repodir"/apt/debian/dists/*/*/$unstable_branch "$repodir"/apt/ubuntu/dists/*/*/$unstable_branch "$repodir"/zypper/suse/*/*/$unstable_branch
do
full_unstable_path=$(dirname "$unstable_branch_dir")/unstable
if [ -e "$unstable_branch_dir" -a ! -e "$full_unstable_path" ]
then
echo "Linking unstable branch directory $unstable_branch_dir to $full_unstable_path"
ln -s $unstable_branch $full_unstable_path
fi
for unstable_branch_dir in "$repodir"/yum/redhat/*/*/$unstable_branch "$repodir"/yum/amazon/*/*/$unstable_branch "$repodir"/apt/debian/dists/*/*/$unstable_branch "$repodir"/apt/ubuntu/dists/*/*/$unstable_branch "$repodir"/zypper/suse/*/*/$unstable_branch; do
full_unstable_path=$(dirname "$unstable_branch_dir")/unstable
if [ -e "$unstable_branch_dir" -a ! -e "$full_unstable_path" ]; then
echo "Linking unstable branch directory $unstable_branch_dir to $full_unstable_path"
ln -s $unstable_branch $full_unstable_path
fi
done
for stable_branch_dir in "$repodir"/yum/redhat/*/*/$stable_branch "$repodir"/yum/amazon/*/*/$stable_branch "$repodir"/apt/debian/dists/*/*/$stable_branch "$repodir"/apt/ubuntu/dists/*/*/$stable_branch "$repodir"/zypper/suse/*/*/$stable_branch
do
full_stable_path=$(dirname "$stable_branch_dir")/stable
if [ -e "$stable_branch_dir" -a ! -e "$full_stable_path" ]
then
echo "Linking stable branch directory $stable_branch_dir to $full_stable_path"
ln -s $stable_branch $full_stable_path
fi
for stable_branch_dir in "$repodir"/yum/redhat/*/*/$stable_branch "$repodir"/yum/amazon/*/*/$stable_branch "$repodir"/apt/debian/dists/*/*/$stable_branch "$repodir"/apt/ubuntu/dists/*/*/$stable_branch "$repodir"/zypper/suse/*/*/$stable_branch; do
full_stable_path=$(dirname "$stable_branch_dir")/stable
if [ -e "$stable_branch_dir" -a ! -e "$full_stable_path" ]; then
echo "Linking stable branch directory $stable_branch_dir to $full_stable_path"
ln -s $stable_branch $full_stable_path
fi
done
for rpm_dir in $(find "$repodir"/yum/redhat "$repodir"/yum/amazon "$repodir"/zypper/suse -type d -name x86_64 -o -name i386)
do
echo "Generating redhat repo metadata under $rpm_dir"
cd "$rpm_dir"
createrepo .
for rpm_dir in $(find "$repodir"/yum/redhat "$repodir"/yum/amazon "$repodir"/zypper/suse -type d -name x86_64 -o -name i386); do
echo "Generating redhat repo metadata under $rpm_dir"
cd "$rpm_dir"
createrepo .
done


@@ -12,7 +12,7 @@ user_group=$USER:$(id -Gn $USER | cut -f1 -d ' ')
# _usage_: Provides usage information
function _usage_ {
cat << EOF
cat <<EOF
usage: $0 options
This script supports the following parameters for Windows & Linux platforms:
-d <deviceNames>, REQUIRED, Space separated list of devices to mount /data on,
@@ -31,150 +31,144 @@ This script supports the following parameters for Windows & Linux platforms:
EOF
}
# Parse command line options
while getopts "d:l:o:r:t:u:?" option
do
case $option in
while getopts "d:l:o:r:t:u:?" option; do
case $option in
d)
data_device_names=$OPTARG
;;
data_device_names=$OPTARG
;;
l)
log_device_name=$OPTARG
;;
log_device_name=$OPTARG
;;
o)
mount_options=$OPTARG
;;
mount_options=$OPTARG
;;
r)
data_raid_device_name=$OPTARG
data_raid_device_name=$OPTARG
;;
t)
fs_type=$OPTARG
;;
fs_type=$OPTARG
;;
u)
user_group=$OPTARG
;;
\?|*)
_usage_
exit 0
;;
esac
user_group=$OPTARG
;;
\? | *)
_usage_
exit 0
;;
esac
done
function mount_drive {
local root_dir=$1
local sub_dirs=$2
local device_names=$3
local raid_device_name=$4
local mount_options=$5
local fs_type=$6
local user_group=$7
local root_dir=$1
local sub_dirs=$2
local device_names=$3
local raid_device_name=$4
local mount_options=$5
local fs_type=$6
local user_group=$7
# Determine how many devices were specified
local num_devices=0
for device_name in $device_names
do
local devices="$devices /dev/$device_name"
let num_devices=num_devices+1
done
# Determine how many devices were specified
local num_devices=0
for device_name in $device_names; do
local devices="$devices /dev/$device_name"
let num_devices=num_devices+1
done
# $OS is defined in Cygwin
if [ "Windows_NT" = "$OS" ]; then
if [ $num_devices -ne 1 ]; then
echo "Must specify only one drive"
_usage_
exit 1
fi
# $OS is defined in Cygwin
if [ "Windows_NT" = "$OS" ]; then
if [ $num_devices -ne 1 ]; then
echo "Must specify only one drive"
_usage_
exit 1
fi
local drive_poll_retry=0
local drive_poll_delay=0
local drive_retry_max=40
local drive_poll_retry=0
local drive_poll_delay=0
local drive_retry_max=40
local drive=$device_names
local system_drive=c
local drive=$device_names
local system_drive=c
while true;
do
sleep $drive_poll_delay
echo "Looking for drive '$drive' to mount $root_dir"
if [ -d /cygdrive/$drive ]; then
echo "Found drive"
rm -rf /$root_dir
rm -rf /cygdrive/$system_drive/$root_dir
mkdir $drive:\\$root_dir
cmd.exe /c mklink /J $system_drive:\\$root_dir $drive:\\$root_dir
ln -s /cygdrive/$drive/$root_dir /$root_dir
setfacl -s user::rwx,group::rwx,other::rwx /cygdrive/$drive/$root_dir
for sub_dir in $sub_dirs
do
mkdir -p /cygdrive/$drive/$root_dir/$sub_dir
while true; do
sleep $drive_poll_delay
echo "Looking for drive '$drive' to mount $root_dir"
if [ -d /cygdrive/$drive ]; then
echo "Found drive"
rm -rf /$root_dir
rm -rf /cygdrive/$system_drive/$root_dir
mkdir $drive:\\$root_dir
cmd.exe /c mklink /J $system_drive:\\$root_dir $drive:\\$root_dir
ln -s /cygdrive/$drive/$root_dir /$root_dir
setfacl -s user::rwx,group::rwx,other::rwx /cygdrive/$drive/$root_dir
for sub_dir in $sub_dirs; do
mkdir -p /cygdrive/$drive/$root_dir/$sub_dir
done
chown -R $user_group /cygdrive/$system_drive/$root_dir
break
fi
let drive_poll_retry=drive_poll_retry+1
if [ $drive_poll_retry -eq $drive_retry_max ]; then
echo "Timed out trying to mount $root_dir drive."
exit 1
fi
let drive_poll_delay=drive_poll_delay+5
done
chown -R $user_group /cygdrive/$system_drive/$root_dir
break
fi
let drive_poll_retry=drive_poll_retry+1
if [ $drive_poll_retry -eq $drive_retry_max ]; then
echo "Timed out trying to mount $root_dir drive."
exit 1
fi
let drive_poll_delay=drive_poll_delay+5
done
elif [ $(uname | awk '{print tolower($0)}') = "linux" ]; then
if [ $num_devices -eq 0 ]; then
echo "Must specify atleast one device"
_usage_
exit 1
elif [ $num_devices -gt 1 ]; then
if [ -z "$raid_device_name" ]; then
echo "Missing RAID device name"
_usage_
exit 1
fi
fi
elif [ $(uname | awk '{print tolower($0)}') = "linux" ]; then
if [ $num_devices -eq 0 ]; then
echo "Must specify atleast one device"
_usage_
exit 1
elif [ $num_devices -gt 1 ]; then
if [ -z "$raid_device_name" ]; then
echo "Missing RAID device name"
_usage_
exit 1
fi
fi
# Unmount the current devices, if already mounted
umount /mnt || true
umount $devices || true
# Unmount the current devices, if already mounted
umount /mnt || true
umount $devices || true
# Determine if we have a RAID set
if [ ! -z "$raid_device_name" ]; then
echo "Creating RAID set on '$raid_device_name' for devices '$devices'"
device_name=/dev/$raid_device_name
/sbin/udevadm control --stop-exec-queue
yes | /sbin/mdadm --create $device_name --level=0 -c256 --raid-devices=$num_devices $devices
/sbin/udevadm control --start-exec-queue
/sbin/mdadm --detail --scan > /etc/mdadm.conf
/sbin/blockdev --setra 32 $device_name
# Determine if we have a RAID set
if [ ! -z "$raid_device_name" ]; then
echo "Creating RAID set on '$raid_device_name' for devices '$devices'"
device_name=/dev/$raid_device_name
/sbin/udevadm control --stop-exec-queue
yes | /sbin/mdadm --create $device_name --level=0 -c256 --raid-devices=$num_devices $devices
/sbin/udevadm control --start-exec-queue
/sbin/mdadm --detail --scan >/etc/mdadm.conf
/sbin/blockdev --setra 32 $device_name
else
device_name="/dev/$device_names"
fi
# Mount the $root_dir drive(s)
/sbin/mkfs.$fs_type $mount_options -f $device_name
# We add an entry for the device to /etc/fstab so it is automatically mounted following a
# machine reboot. The device is not guaranteed to be assigned the same name across restarts so
# we use its UUID in order to identify it.
#
# We also specify type=$fs_type in the /etc/fstab entry because specifying type=auto on
# Amazon Linux AMI 2018.03 leads to the drive not being mounted automatically following a
# machine reboot.
device_uuid=$(blkid -o value -s UUID "$device_name")
echo "Adding entry to /etc/fstab for device '$device_name' with UUID '$device_uuid'"
echo "UUID=$device_uuid /$root_dir $fs_type noatime 0 0" | tee -a /etc/fstab
mkdir /$root_dir || true
chmod 777 /$root_dir
mount -t $fs_type "UUID=$device_uuid" /$root_dir
for sub_dir in $sub_dirs; do
mkdir -p /$root_dir/$sub_dir
chmod 1777 /$root_dir/$sub_dir
done
chown -R $user_group /$root_dir
else
device_name="/dev/$device_names"
echo "Unsupported OS '$(uname)'"
exit 0
fi
# Mount the $root_dir drive(s)
/sbin/mkfs.$fs_type $mount_options -f $device_name
# We add an entry for the device to /etc/fstab so it is automatically mounted following a
# machine reboot. The device is not guaranteed to be assigned the same name across restarts so
# we use its UUID in order to identify it.
#
# We also specify type=$fs_type in the /etc/fstab entry because specifying type=auto on
# Amazon Linux AMI 2018.03 leads to the drive not being mounted automatically following a
# machine reboot.
device_uuid=$(blkid -o value -s UUID "$device_name")
echo "Adding entry to /etc/fstab for device '$device_name' with UUID '$device_uuid'"
echo "UUID=$device_uuid /$root_dir $fs_type noatime 0 0" | tee -a /etc/fstab
mkdir /$root_dir || true
chmod 777 /$root_dir
mount -t $fs_type "UUID=$device_uuid" /$root_dir
for sub_dir in $sub_dirs
do
mkdir -p /$root_dir/$sub_dir
chmod 1777 /$root_dir/$sub_dir
done
chown -R $user_group /$root_dir
else
echo "Unsupported OS '$(uname)'"
exit 0
fi
}
mount_drive data "db tmp" "$data_device_names" "$data_raid_device_name" "$mount_options" "$fs_type" "$user_group"
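
Two more shfmt normalizations appear in the getopts block above: alternation in case patterns gains spaces around "|", and branch bodies are re-indented. Sketch:

    \?|*)      # before
    \? | *)    # after shfmt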


@@ -40,7 +40,7 @@ while getopts p:fin opt; do
esac
done
run () {
run() {
echo "$@"
if [[ "${dry_run}" == 1 ]]; then
return
@@ -79,17 +79,17 @@ if [[ "${allow_no_venv}" != 1 ]]; then
fi
# check if poetry should be installed via pip
need_poetry_install=0 # 0 = no, 1 = yes
need_poetry_install=0 # 0 = no, 1 = yes
if ! "${py3}" -m pip show poetry &>/dev/null; then
echo "Poetry not found in this interpreter, installing via pip." >&2
need_poetry_install=1
fi
# we'll need to use pip this time around
if (( need_poetry_install )); then
if ((need_poetry_install)); then
run "${py3}" -m pip install "${pip_opts[@]}" -r poetry_requirements.txt
fi
run env \
PYTHON_KEYRING_BACKEND="keyring.backends.null.Keyring" \
"${py3}" -m poetry sync --no-root
PYTHON_KEYRING_BACKEND="keyring.backends.null.Keyring" \
"${py3}" -m poetry sync --no-root


@@ -10,28 +10,28 @@ ZIP_FILE=$3
LOCAL=$4
if [ -z "$REMOTE_USER" ] || [ -z "$REMOTE_HOST" ] || [ -z "$ZIP_FILE" ]; then
echo "Usage: $0 <remote_user> <remote_host> <zip_file>"
exit 1
echo "Usage: $0 <remote_user> <remote_host> <zip_file>"
exit 1
fi
if [ -z "$LOCAL" ]; then
ssh ${REMOTE_USER}@${REMOTE_HOST} "mkdir -p ~/.engflow/creds"
scp ${ZIP_FILE} ${REMOTE_USER}@${REMOTE_HOST}:~/.engflow/creds
ssh ${REMOTE_USER}@${REMOTE_HOST} "cd ~/.engflow/creds; unzip -o engflow-mTLS.zip; rm engflow-mTLS.zip"
ssh ${REMOTE_USER}@${REMOTE_HOST} "mkdir -p ~/.engflow/creds"
scp ${ZIP_FILE} ${REMOTE_USER}@${REMOTE_HOST}:~/.engflow/creds
ssh ${REMOTE_USER}@${REMOTE_HOST} "cd ~/.engflow/creds; unzip -o engflow-mTLS.zip; rm engflow-mTLS.zip"
ssh ${REMOTE_USER}@${REMOTE_HOST} "chown ${REMOTE_USER}:${REMOTE_USER} /home/${REMOTE_USER}/.engflow/creds/engflow.crt /home/${REMOTE_USER}/.engflow/creds/engflow.key"
ssh ${REMOTE_USER}@${REMOTE_HOST} "chmod 600 /home/${REMOTE_USER}/.engflow/creds/engflow.crt /home/${REMOTE_USER}/.engflow/creds/engflow.key"
ssh ${REMOTE_USER}@${REMOTE_HOST} "chown ${REMOTE_USER}:${REMOTE_USER} /home/${REMOTE_USER}/.engflow/creds/engflow.crt /home/${REMOTE_USER}/.engflow/creds/engflow.key"
ssh ${REMOTE_USER}@${REMOTE_HOST} "chmod 600 /home/${REMOTE_USER}/.engflow/creds/engflow.crt /home/${REMOTE_USER}/.engflow/creds/engflow.key"
ssh ${REMOTE_USER}@${REMOTE_HOST} "echo \"common --tls_client_certificate=/home/${REMOTE_USER}/.engflow/creds/engflow.crt\" >> ~/.bazelrc"
ssh ${REMOTE_USER}@${REMOTE_HOST} "echo \"common --tls_client_key=/home/${REMOTE_USER}/.engflow/creds/engflow.key\" >> ~/.bazelrc"
ssh ${REMOTE_USER}@${REMOTE_HOST} "echo \"common --tls_client_certificate=/home/${REMOTE_USER}/.engflow/creds/engflow.crt\" >> ~/.bazelrc"
ssh ${REMOTE_USER}@${REMOTE_HOST} "echo \"common --tls_client_key=/home/${REMOTE_USER}/.engflow/creds/engflow.key\" >> ~/.bazelrc"
else
mkdir -p $HOME/.engflow/creds
unzip -o "$ZIP_FILE"
rm "$ZIP_FILE"
mv engflow.crt $HOME/.engflow/creds
mv engflow.key $HOME/.engflow/creds
chown $USER $HOME/.engflow/creds/engflow.crt $HOME/.engflow/creds/engflow.key
chmod 600 $HOME/.engflow/creds/engflow.crt $HOME/.engflow/creds/engflow.key
echo "common --tls_client_certificate=$HOME/.engflow/creds/engflow.crt" >> $HOME/.bazelrc
echo "common --tls_client_key=$HOME/.engflow/creds/engflow.key" >> $HOME/.bazelrc
mkdir -p $HOME/.engflow/creds
unzip -o "$ZIP_FILE"
rm "$ZIP_FILE"
mv engflow.crt $HOME/.engflow/creds
mv engflow.key $HOME/.engflow/creds
chown $USER $HOME/.engflow/creds/engflow.crt $HOME/.engflow/creds/engflow.key
chmod 600 $HOME/.engflow/creds/engflow.crt $HOME/.engflow/creds/engflow.key
echo "common --tls_client_certificate=$HOME/.engflow/creds/engflow.crt" >>$HOME/.bazelrc
echo "common --tls_client_key=$HOME/.engflow/creds/engflow.key" >>$HOME/.bazelrc
fi


@@ -1,41 +0,0 @@
#!/bin/bash
set +o errexit
shfmt=shfmt
if [ -n "$SHFMT_PATH" ]; then
shfmt=$(readlink $SHFMT_PATH)
fi
if [ -n "$BUILD_WORKSPACE_DIRECTORY" ]; then
cd $BUILD_WORKSPACE_DIRECTORY
fi
if ! command -v $shfmt &>/dev/null; then
echo "Could not find shfmt at $shfmt"
exit 1
fi
lint_dirs="evergreen"
if [ "$1" = "fix" ]; then
$shfmt -w -i 2 -bn -sr "$lint_dirs"
fi
output_file="shfmt_output.txt"
exit_code=0
$shfmt -d -i 2 -bn -sr "$lint_dirs" >"$output_file"
if [ -s "$output_file" ]; then
echo "ERROR: Found formatting errors in shell script files in directories: $lint_dirs"
echo ""
cat "$output_file"
echo ""
echo "To fix formatting errors run"
echo ""
echo " ./buildscripts/shellscripts-linters.sh fix"
echo ""
exit_code=1
fi
rm -rf "$output_file"
exit "$exit_code"


@@ -8,4 +8,3 @@ set -vx
NAME=protobuf
VERSION="mongo/v4.25.0"


@@ -7,8 +7,8 @@ set -euo pipefail
IFS=$'\n\t'
if [ "$#" -ne 0 ]; then
echo "This script does not take any arguments"
exit 1
echo "This script does not take any arguments"
exit 1
fi
# Create a temporary directory to clone and configure librdkafka
@@ -42,11 +42,11 @@ mv config.h $PLATFORM_DIR/$platformName/include
# Remove un-used files
rm -rf CHANGELOG.md CODE_OF_CONDUCT.md CONFIGURATION.md CONTRIBUTING.md INTRODUCTION.md \
README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log
README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log
pushd src
# Replace all instances of the string "LZ4" and "XXH" with "KLZ4" and "KXXH" in the C source code.


@@ -7,8 +7,8 @@ set -euo pipefail
IFS=$'\n\t'
if [ "$#" -ne 0 ]; then
echo "This script does not take any arguments"
exit 1
echo "This script does not take any arguments"
exit 1
fi
# Create a temporary directory to clone and configure librdkafka
@@ -43,11 +43,11 @@ mv config.h $PLATFORM_DIR/$platformName/include
# Remove un-used files
rm -rf CHANGELOG.md CODE_OF_CONDUCT.md CONFIGURATION.md CONTRIBUTING.md INTRODUCTION.md \
README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log
README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log
pushd src
# Replace all instances of the string "LZ4" and "XXH" with "KLZ4" and "KXXH" in the C source code.


@@ -7,8 +7,8 @@ set -euo pipefail
IFS=$'\n\t'
if [ "$#" -ne 0 ]; then
echo "This script does not take any arguments"
exit 1
echo "This script does not take any arguments"
exit 1
fi
# Create a temporary directory to clone and configure librdkafka
@@ -43,11 +43,11 @@ mv config.h $PLATFORM_DIR/$platformName/include
# Remove un-used files
rm -rf CHANGELOG.md CODE_OF_CONDUCT.md CONFIGURATION.md CONTRIBUTING.md INTRODUCTION.md \
README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log
README.md README.win32 STATISTICS.md config.log.old dev-conf.sh examples/ \
CMakeLists.txt lds-gen.py mklove/ packaging/ service.yml tests/ vcpkg.json win32/ \
Makefile Makefile.config config.cache configure.self configure debian mainpage.doxy Doxyfile \
src/CMakeLists.txt src/Makefile src/generate_proto.sh src/librdkafka_cgrp_synch.png src/statistics_schema.json \
src-cpp/CMakeLists.txt src-cpp/Makefile src-cpp/README.md config.log
pushd src
# Replace all instances of the string "LZ4" and "XXH" with "KLZ4" and "KXXH" in the C source code.


@@ -5,14 +5,14 @@ cd "$BASEDIR/../"
yamllint -c etc/yamllint_config.yml buildscripts etc jstests
PATH="$PATH:$HOME" evergreen evaluate etc/evergreen.yml > etc/evaluated_evergreen.yml
PATH="$PATH:$HOME" evergreen evaluate etc/evergreen_nightly.yml > etc/evaluated_evergreen_nightly.yml
PATH="$PATH:$HOME" evergreen evaluate etc/evergreen.yml >etc/evaluated_evergreen.yml
PATH="$PATH:$HOME" evergreen evaluate etc/evergreen_nightly.yml >etc/evaluated_evergreen_nightly.yml
# Remove references to the DSI repo before evergreen evaluate.
# The DSI module references break 'evaluate', the system_perf config should
# parse without them, and we don't want changes to the DSI repository to
# break checking that the rest of the imports etc. work.
awk '/lint_yaml trim start/{drop=1} /lint_yaml trim end/{drop=0} !drop' etc/system_perf.yml > etc/trimmed_system_perf.yml
PATH="$PATH:$HOME" evergreen evaluate etc/trimmed_system_perf.yml > etc/evaluated_system_perf.yml
awk '/lint_yaml trim start/{drop=1} /lint_yaml trim end/{drop=0} !drop' etc/system_perf.yml >etc/trimmed_system_perf.yml
PATH="$PATH:$HOME" evergreen evaluate etc/trimmed_system_perf.yml >etc/evaluated_system_perf.yml
python -m evergreen_lint -c ./etc/evergreen_lint.yml lint


@@ -1,5 +1,5 @@
silent_grep() {
command grep -q > /dev/null 2>&1 "$@"
command grep -q "$@" >/dev/null 2>&1
}
idem_file_append() {
@@ -19,10 +19,10 @@ idem_file_append() {
local end_marker="# END $2"
if ! silent_grep "^$start_marker" "$1"; then
{
echo -e "\n$start_marker";
echo -e "$3";
echo -e "$end_marker";
} >> "$1"
echo -e "\n$start_marker"
echo -e "$3"
echo -e "$end_marker"
} >>"$1"
fi
}
@@ -30,7 +30,8 @@ setup_bash() {
# Bash profile should source .bashrc
echo "################################################################################"
echo "Setting up bash..."
local block=$(cat <<BLOCK
local block=$(
cat <<BLOCK
if [[ -f ~/.bashrc ]]; then
source ~/.bashrc
fi
@@ -67,7 +68,7 @@ setup_poetry() {
echo "################################################################################"
echo "Installing 'poetry' command..."
export PATH="$PATH:$HOME/.local/bin"
if command -v poetry &> /dev/null; then
if command -v poetry &>/dev/null; then
echo "'poetry' command exists; skipping setup"
else
pipx install poetry --pip-args="-r $(pwd)/poetry_requirements.txt"
@@ -78,7 +79,7 @@ setup_poetry() {
setup_pipx() {
echo "################################################################################"
echo "Installing 'pipx' command..."
if command -v pipx &> /dev/null; then
if command -v pipx &>/dev/null; then
echo "'pipx' command exists; skipping setup"
else
export PATH="$PATH:$HOME/.local/bin"
@@ -112,7 +113,7 @@ setup_db_contrib_tool() {
echo "################################################################################"
echo "Installing 'db-contrib-tool' command..."
export PATH="$PATH:$HOME/.local/bin"
if command -v db-contrib-tool &> /dev/null; then
if command -v db-contrib-tool &>/dev/null; then
echo "'db-contrib-tool' command exists; skipping setup"
else
pipx install db-contrib-tool
@@ -161,8 +162,8 @@ run_setup() {
setup_clang_config
setup_gdb
setup_pipx
setup_db_contrib_tool # This step requires `setup_pipx` to have been run.
setup_poetry # This step requires `setup_pipx` to have been run.
setup_db_contrib_tool # This step requires `setup_pipx` to have been run.
setup_poetry # This step requires `setup_pipx` to have been run.
setup_mongo_venv # This step requires `setup_poetry` to have been run.


@@ -296,11 +296,6 @@ sh_binary(
srcs = ["lint_fuzzer_sanity_patch.sh"],
)
sh_binary(
name = "lint_shellscripts",
srcs = ["lint_shellscripts.sh"],
)
sh_binary(
name = "lint_yaml",
srcs = ["lint_yaml.sh"],


@@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
set -o errexit
@@ -8,19 +8,19 @@ antithesis_repo="us-central1-docker.pkg.dev/molten-verve-216720/mongodb-reposito
# tag images as evergreen[-${antithesis_build_type}]-{latest,patch} or just ${antithesis_image_tag}
if [ -n "${antithesis_image_tag:-}" ]; then
echo "Using provided tag: '$antithesis_image_tag' for docker pushes"
tag=$antithesis_image_tag
echo "Using provided tag: '$antithesis_image_tag' for docker pushes"
tag=$antithesis_image_tag
else
tag="evergreen"
if [[ -n "${antithesis_build_type}" ]]; then
tag="${tag}-${antithesis_build_type}"
fi
tag="evergreen"
if [[ -n "${antithesis_build_type}" ]]; then
tag="${tag}-${antithesis_build_type}"
fi
if [ "${is_patch}" = "true" ]; then
tag="${tag}-patch"
else
tag="${tag}-latest-${branch_name}"
fi
if [ "${is_patch}" = "true" ]; then
tag="${tag}-patch"
else
tag="${tag}-latest-${branch_name}"
fi
fi
# Clean up any leftover docker artifacts
@@ -33,9 +33,9 @@ sudo docker network prune --force
sudo service docker stop
sudo mkdir -p /data/mci/docker
if ! sudo jq -e . /etc/docker/daemon.json; then
echo "docker daemon.json did not exist or was invalid"
echo "setting docker daemon.json to {}"
sudo sh -c 'echo "{}" > /etc/docker/daemon.json'
echo "docker daemon.json did not exist or was invalid"
echo "setting docker daemon.json to {}"
sudo sh -c 'echo "{}" > /etc/docker/daemon.json'
fi
MODIFIED_JSON=$(sudo jq '."data-root" |= "/data/mci/docker"' /etc/docker/daemon.json)
sudo echo "${MODIFIED_JSON}" | sudo tee /etc/docker/daemon.json
@@ -43,7 +43,7 @@ echo "docker daemon.json: set data-root to /data/mci/docker"
sudo service docker start
# Login
echo "${antithesis_repo_key}" > mongodb.key.json
echo "${antithesis_repo_key}" >mongodb.key.json
cat mongodb.key.json | sudo docker login -u _json_key https://us-central1-docker.pkg.dev --password-stdin
rm mongodb.key.json
@@ -64,15 +64,15 @@ timeout -v 1800 docker exec workload buildscripts/resmoke.py run --suite ${suite
RET=$?
set -o errexit
docker-compose -f docker_compose/${suite}/docker-compose.yml logs > docker_logs.txt
docker-compose -f docker_compose/${suite}/docker-compose.yml logs >docker_logs.txt
docker-compose -f docker_compose/${suite}/docker-compose.yml down
# Change the permissions of all of the files in the docker compose directory to the current user.
# Some of the data files cannot be archived otherwise.
sudo chown -R $USER docker_compose/${suite}/
if [ $RET -ne 0 ]; then
echo "Resmoke sanity check has failed"
exit $RET
echo "Resmoke sanity check has failed"
exit $RET
fi
# Push Image


@@ -1,7 +1,7 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
source "$DIR/bazel_utility_functions.sh"
(
cd $DIR/..
exec $(bazel_get_binary_path) "$@"
cd $DIR/..
exec $(bazel_get_binary_path) "$@"
)


@@ -1,11 +1,11 @@
bazel_rbe_supported() {
OS="$(uname)"
ARCH="$(uname -m)"
OS="$(uname)"
ARCH="$(uname -m)"
if [ "$ARCH" == "aarch64" ] || [ "$ARCH" == "arm64" ] || [ "$ARCH" == "x86_64" ]; then
return 0
else
return 1
fi
if [ "$ARCH" == "aarch64" ] || [ "$ARCH" == "arm64" ] || [ "$ARCH" == "x86_64" ]; then
return 0
else
return 1
fi
}


@@ -8,7 +8,7 @@
# * ${args} - List of additional Bazel arguments (e.g.: "--config=clang-tidy")
# Needed for evergreen scripts that use evergreen expansions and utility methods.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@@ -22,67 +22,67 @@ activate_venv
# evergreen patch.
build_patch_id="${build_patch_id:-${reuse_compile_from}}"
if [ -n "${build_patch_id}" ]; then
echo "build_patch_id detected, trying to skip task"
echo "build_patch_id detected, trying to skip task"
# On windows we change the extension to zip
if [ -z "${ext}" ]; then
ext="tgz"
fi
# On windows we change the extension to zip
if [ -z "${ext}" ]; then
ext="tgz"
fi
extra_db_contrib_args=""
extra_db_contrib_args=""
# get the platform of the dist archive. This is needed if
# db-contrib-tool cannot autodetect the platform of the ec2 instance.
regex='MONGO_DISTMOD=([a-z0-9]*)'
if [[ ${bazel_compile_flags} =~ ${regex} ]]; then
extra_db_contrib_args="${extra_db_contrib_args} --platform=${BASH_REMATCH[1]}"
fi
# get the platform of the dist archive. This is needed if
# db-contrib-tool cannot autodetect the platform of the ec2 instance.
regex='MONGO_DISTMOD=([a-z0-9]*)'
if [[ ${bazel_compile_flags} =~ ${regex} ]]; then
extra_db_contrib_args="${extra_db_contrib_args} --platform=${BASH_REMATCH[1]}"
fi
download_dir="./tmp_db_contrib_tool_download_dir"
rm -rf ${download_dir}
download_dir="./tmp_db_contrib_tool_download_dir"
rm -rf ${download_dir}
if [ "${task_name}" = "archive_dist_test" ]; then
file_name="dist-test-stripped.${ext}"
invocation="db-contrib-tool setup-repro-env ${build_patch_id} \
if [ "${task_name}" = "archive_dist_test" ]; then
file_name="dist-test-stripped.${ext}"
invocation="db-contrib-tool setup-repro-env ${build_patch_id} \
--variant=${compile_variant} --extractDownloads=False \
--binariesName=${file_name} --installDir=${download_dir} ${extra_db_contrib_args}"
fi
fi
if [ "${task_name}" = "archive_dist_test_debug" ]; then
file_name="dist-test-debug.${ext}"
invocation="db-contrib-tool setup-repro-env ${build_patch_id} \
if [ "${task_name}" = "archive_dist_test_debug" ]; then
file_name="dist-test-debug.${ext}"
invocation="db-contrib-tool setup-repro-env ${build_patch_id} \
--variant=${compile_variant} --extractDownloads=False \
--debugsymbolsName=${file_name} --installDir=${download_dir} \
--skipBinaries --downloadSymbols ${extra_db_contrib_args}"
fi
if [ -n "${invocation}" ]; then
setup_db_contrib_tool
echo "db-contrib-tool invocation: ${invocation}"
eval ${invocation}
if [ $? -ne 0 ]; then
echo "Could not retrieve files with db-contrib-tool"
exit 1
fi
file_location=$(find "${download_dir}" -name "${file_name}")
echo "Downloaded: ${file_location}"
mkdir -p bazel-bin
mv "${file_location}" "bazel-bin/${file_name}"
echo "Moved ${file_name} to the correct location"
echo "Skipping ${task_name} compile"
exit 0
fi
echo "Could not skip ${task_name} compile, compiling as normal"
if [ -n "${invocation}" ]; then
setup_db_contrib_tool
echo "db-contrib-tool invocation: ${invocation}"
eval ${invocation}
if [ $? -ne 0 ]; then
echo "Could not retrieve files with db-contrib-tool"
exit 1
fi
file_location=$(find "${download_dir}" -name "${file_name}")
echo "Downloaded: ${file_location}"
mkdir -p bazel-bin
mv "${file_location}" "bazel-bin/${file_name}"
echo "Moved ${file_name} to the correct location"
echo "Skipping ${task_name} compile"
exit 0
fi
echo "Could not skip ${task_name} compile, compiling as normal"
fi
# --build-mongot is a compile flag used by the evergreen build variants that run end-to-end search
# suites, as it downloads the necessary mongot binary.
if [ "${build_mongot}" = "true" ]; then
setup_db_contrib_tool
use_db_contrib_tool_mongot
bazel_args="${bazel_args} --include_mongot=True"
setup_db_contrib_tool
use_db_contrib_tool_mongot
bazel_args="${bazel_args} --include_mongot=True"
fi
# This is hacky way to pass off build time from archive_dist_test to archive_dist_test_debug
@@ -91,14 +91,14 @@ fi
# build-id for debugging as they will be different when -Wl,-S is passed in.
# The relinked binaries should still be hash identical when stripped with strip
if [ "${skip_debug_link}" = "true" ]; then
export compile_variant="${compile_variant}"
export version_id="${version_id}"
if [ "${task_name}" = "archive_dist_test" ]; then
task_compile_flags="${task_compile_flags} --simple_build_id=True --linkopt='-Wl,-S' --separate_debug=False"
fi
if [ "${task_name}" = "archive_dist_test_debug" ]; then
task_compile_flags="${task_compile_flags} --simple_build_id=True"
fi
export compile_variant="${compile_variant}"
export version_id="${version_id}"
if [ "${task_name}" = "archive_dist_test" ]; then
task_compile_flags="${task_compile_flags} --simple_build_id=True --linkopt='-Wl,-S' --separate_debug=False"
fi
if [ "${task_name}" = "archive_dist_test_debug" ]; then
task_compile_flags="${task_compile_flags} --simple_build_id=True"
fi
fi
set -o pipefail
@@ -110,7 +110,7 @@ source ./evergreen/bazel_utility_functions.sh
source ./evergreen/bazel_RBE_supported.sh
if [[ "${evergreen_remote_exec}" != "on" ]]; then
LOCAL_ARG="$LOCAL_ARG --jobs=auto"
LOCAL_ARG="$LOCAL_ARG --jobs=auto"
fi
BAZEL_BINARY=$(bazel_get_binary_path)
@@ -119,19 +119,19 @@ BAZEL_BINARY=$(bazel_get_binary_path)
# for retries.
TIMEOUT_CMD=""
if [ -n "${build_timeout_seconds}" ]; then
TIMEOUT_CMD="timeout ${build_timeout_seconds}"
TIMEOUT_CMD="timeout ${build_timeout_seconds}"
elif [[ "${evergreen_remote_exec}" == "on" ]]; then
# Timeout remote execution runs in 60 minutes as a workaround for
# scheduling timeout bugs
TIMEOUT_CMD="timeout 3600"
# Timeout remote execution runs in 60 minutes as a workaround for
# scheduling timeout bugs
TIMEOUT_CMD="timeout 3600"
fi
if is_ppc64le; then
LOCAL_ARG="$LOCAL_ARG --jobs=48"
LOCAL_ARG="$LOCAL_ARG --jobs=48"
fi
if is_s390x; then
LOCAL_ARG="$LOCAL_ARG --jobs=16"
LOCAL_ARG="$LOCAL_ARG --jobs=16"
fi
# If we are doing a patch build or we are building a non-push
@@ -139,23 +139,23 @@ fi
# flag. Otherwise, this is potentially a build that "leaves
# the building", so we do want that flag.
if [ "${is_patch}" = "true" ] || [ -z "${push_bucket}" ] || [ "${compiling_for_test}" = "true" ]; then
echo "This is a non-release build."
echo "This is a non-release build."
else
LOCAL_ARG="$LOCAL_ARG --config=public-release"
LOCAL_ARG="$LOCAL_ARG --config=public-release"
fi
for i in {1..3}; do
eval ${TIMEOUT_CMD} $BAZEL_BINARY build --verbose_failures $LOCAL_ARG ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} \
--define=MONGO_VERSION=${version} ${patch_compile_flags} ${targets} 2>&1 | tee bazel_stdout.log \
&& RET=0 && break || RET=$? && sleep 60
if [ $RET -eq 124 ]; then
echo "Bazel timed out after ${build_timeout_seconds} seconds, retrying..."
else
echo "Errors were found during the bazel run, here are the errors:" 1>&2
grep "ERROR:" bazel_stdout.log 1>&2
echo "Bazel failed to execute, retrying..."
fi
$BAZEL_BINARY shutdown
eval ${TIMEOUT_CMD} $BAZEL_BINARY build --verbose_failures $LOCAL_ARG ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} \
--define=MONGO_VERSION=${version} ${patch_compile_flags} ${targets} 2>&1 | tee bazel_stdout.log &&
RET=0 && break || RET=$? && sleep 60
if [ $RET -eq 124 ]; then
echo "Bazel timed out after ${build_timeout_seconds} seconds, retrying..."
else
echo "Errors were found during the bazel run, here are the errors:" 1>&2
grep "ERROR:" bazel_stdout.log 1>&2
echo "Bazel failed to execute, retrying..."
fi
$BAZEL_BINARY shutdown
done
exit $RET
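
The retry loop above also shows what dropping "-bn" (binary_next_line) means in practice: shfmt now leaves "&&" at the end of the first line rather than starting the continuation with it. Sketch:

    # before, -bn style
    long_command \
        && RET=0 && break || RET=$? && sleep 60

    # after, default style
    long_command &&
        RET=0 && break || RET=$? && sleep 60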


@@ -8,7 +8,7 @@
# * ${args} - Extra command line args to pass to "bazel coverage"
# Needed for evergreen scripts that use evergreen expansions and utility methods.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@@ -24,7 +24,7 @@ eval echo "Execution environment: Args: ${args} Target: ${target}"
BAZEL_BINARY=bazel
# Print command being run to file that can be uploaded
echo "python buildscripts/install_bazel.py" > bazel-invocation.txt
echo "python buildscripts/install_bazel.py" >bazel-invocation.txt
echo " bazel coverage ${args} ${target}" >> bazel-invocation.txt
echo " bazel coverage ${args} ${target}" >>bazel-invocation.txt
$BAZEL_BINARY coverage ${args} ${target}

View File

@ -8,7 +8,7 @@
# * ${redact_args} - If set, redact the args in the report
# Needed for evergreen scripts that use evergreen expansions and utility methods.
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -25,45 +25,45 @@ source ./evergreen/bazel_utility_functions.sh
source ./evergreen/bazel_RBE_supported.sh
if bazel_rbe_supported; then
LOCAL_ARG=""
LOCAL_ARG=""
else
LOCAL_ARG="--config=local"
fi
if [[ "${evergreen_remote_exec}" != "on" ]]; then
LOCAL_ARG="--config=local"
fi
BAZEL_BINARY=$(bazel_get_binary_path)
# AL2 stores certs in a nonstandard location
if [[ -f /etc/os-release ]]; then
DISTRO=$(awk -F '[="]*' '/^PRETTY_NAME/ { print $2 }' </etc/os-release)
if [[ $DISTRO == "Amazon Linux 2" ]]; then
export SSL_CERT_DIR=/etc/pki/tls/certs
export SSL_CERT_FILE=/etc/pki/tls/certs/ca-bundle.crt
elif [[ $DISTRO == "Red Hat Enterprise Linux"* ]]; then
export SSL_CERT_DIR=/etc/pki/ca-trust/extracted/pem
export SSL_CERT_FILE=/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
fi
fi
if [[ -n "$redact_args" ]]; then
INVOCATION_WITH_REDACTION="${target}"
else
INVOCATION_WITH_REDACTION="${target} ${args}"
fi
# Print command being run to file that can be uploaded
echo "python buildscripts/install_bazel.py" > bazel-invocation.txt
echo "bazel run --verbose_failures ${bazel_compile_flags} ${task_compile_flags} ${LOCAL_ARG} ${INVOCATION_WITH_REDACTION}" >> bazel-invocation.txt
echo "python buildscripts/install_bazel.py" >bazel-invocation.txt
echo "bazel run --verbose_failures ${bazel_compile_flags} ${task_compile_flags} ${LOCAL_ARG} ${INVOCATION_WITH_REDACTION}" >>bazel-invocation.txt
# Run bazel command, retrying up to five times
MAX_ATTEMPTS=5
for ((i = 1; i <= $MAX_ATTEMPTS; i++)); do
eval $env $BAZEL_BINARY run --verbose_failures $LOCAL_ARG ${target} ${args} >>bazel_output.log 2>&1 && RET=0 && break || RET=$? && sleep 10
if [ $i -lt $MAX_ATTEMPTS ]; then echo "Bazel failed to execute, retrying ($(($i + 1)) of $MAX_ATTEMPTS attempts)... " >>bazel_output.log 2>&1; fi
$BAZEL_BINARY shutdown
done
$python ./buildscripts/simple_report.py --test-name "bazel run ${INVOCATION_WITH_REDACTION}" --log-file bazel_output.log --exit-code $RET

View File

@ -5,7 +5,7 @@
# * ${targets} - Test targets
# * ${bazel_args} - Extra command line args to pass to "bazel test"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -25,7 +25,7 @@ source ./evergreen/bazel_RBE_supported.sh
LOCAL_ARG=""
if [[ "${evergreen_remote_exec}" != "on" ]]; then
LOCAL_ARG="$LOCAL_ARG --jobs=auto"
fi
BAZEL_BINARY=$(bazel_get_binary_path)
@ -34,15 +34,15 @@ BAZEL_BINARY=$(bazel_get_binary_path)
# for retries.
TIMEOUT_CMD=""
if [ -n "${build_timeout_seconds}" ]; then
TIMEOUT_CMD="timeout ${build_timeout_seconds}"
fi
if is_ppc64le; then
LOCAL_ARG="$LOCAL_ARG --jobs=48"
fi
if is_s390x; then
LOCAL_ARG="$LOCAL_ARG --jobs=16"
fi
# If we are doing a patch build or we are building a non-push
@ -50,55 +50,55 @@ fi
# flag. Otherwise, this is potentially a build that "leaves
# the building", so we do want that flag.
if [ "${is_patch}" = "true" ] || [ -z "${push_bucket}" ] || [ "${compiling_for_test}" = "true" ]; then
echo "This is a non-release build."
echo "This is a non-release build."
else
LOCAL_ARG="$LOCAL_ARG --config=public-release"
fi
if [ -n "${test_timeout_sec}" ]; then
# s390x and ppc64le often run slower than other architectures
if is_s390x_or_ppc64le; then
test_timeout_sec=$(($test_timeout_sec * 4))
fi
bazel_args="${bazel_args} --test_timeout=${test_timeout_sec}"
fi
ALL_FLAGS="--verbose_failures ${LOCAL_ARG} ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} --define=MONGO_VERSION=${version} ${patch_compile_flags}"
echo ${ALL_FLAGS} >.bazel_build_flags
set +o errexit
# Retry the build since it's deterministic and may fail due to transient issues.
for i in {1..3}; do
eval ${TIMEOUT_CMD} ${BAZEL_BINARY} build ${ALL_FLAGS} ${targets} && RET=0 && break || RET=$? && sleep 1
if [ $RET -eq 124 ]; then
echo "Bazel build timed out after ${build_timeout_seconds} seconds, retrying..."
else
echo "Bazel build failed, retrying..."
fi
$BAZEL_BINARY shutdown
done
for i in {1..3}; do
eval ${TIMEOUT_CMD} ${BAZEL_BINARY} test ${ALL_FLAGS} ${targets} 2>&1 | tee bazel_stdout.log &&
RET=0 && break || RET=$? && sleep 1
if [ $RET -eq 124 ]; then
echo "Bazel timed out after ${build_timeout_seconds} seconds, retrying..."
else
echo "Errors were found during the bazel test, failing the execution"
break
fi
$BAZEL_BINARY shutdown
done
set -o errexit
if [[ $RET != 0 ]]; then
# The --config flag needs to stay consistent between invocations to avoid evicting the previous results.
# Strip out anything that isn't a --config flag that could interfere with the run command.
CONFIG_FLAGS=$(echo "${ALL_FLAGS}" | tr ' ' '\n' | grep -- '--config' | tr '\n' ' ')
eval ${BAZEL_BINARY} run ${CONFIG_FLAGS} //buildscripts:gather_failed_unittests
fi
exit $RET

View File

@ -1,39 +1,39 @@
is_ppc64le() {
ARCH="$(uname -m)"
ARCH="$(uname -m)"
if [[ "$ARCH" == "ppc64le" || "$ARCH" == "ppc64" || "$ARCH" == "ppc" ]]; then
return 0
else
return 1
fi
if [[ "$ARCH" == "ppc64le" || "$ARCH" == "ppc64" || "$ARCH" == "ppc" ]]; then
return 0
else
return 1
fi
}
is_s390x() {
ARCH="$(uname -m)"
ARCH="$(uname -m)"
if [[ "$ARCH" == "s390x" || "$ARCH" == "s390" ]]; then
return 0
else
return 1
fi
if [[ "$ARCH" == "s390x" || "$ARCH" == "s390" ]]; then
return 0
else
return 1
fi
}
is_s390x_or_ppc64le() {
if is_ppc64le || is_s390x; then
return 0
else
return 1
fi
}
bazel_get_binary_path() {
if is_s390x_or_ppc64le; then
echo "bazel/bazelisk.py"
elif grep -q "ID=debian" /etc/os-release; then
echo "bazel/bazelisk.py"
elif grep -q 'ID="sles"' /etc/os-release; then
echo "bazel/bazelisk.py"
else
echo "bazel"
fi
}
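Taken together, these helpers let callers pick a job count and a bazel launcher per architecture; a hypothetical caller (not part of this commit) might look like:

source ./evergreen/bazel_utility_functions.sh
LOCAL_ARG="--jobs=auto"
if is_ppc64le; then LOCAL_ARG="--jobs=48"; fi
if is_s390x; then LOCAL_ARG="--jobs=16"; fi
BAZEL_BINARY=$(bazel_get_binary_path)
$BAZEL_BINARY build $LOCAL_ARG //some:target # //some:target is a placeholder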

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -11,21 +11,21 @@ $python buildscripts/idl/check_stable_api_commands_have_idl_definitions.py -v --
$python buildscripts/idl/checkout_idl_files_from_past_releases.py -v idls
function run_idl_check_compatibility {
dir=$1
output=$(
python buildscripts/idl/idl_check_compatibility.py -v \
--old-include "$dir/src" \
--old-include "$dir/src/mongo/db/modules/enterprise/src" \
--new-include src \
--new-include src/mongo/db/modules/enterprise/src \
"$dir/src" src
)
exit_code=$?
echo "Performing idl check compatibility with release: $dir:"
echo "$output"
if [ $exit_code -ne 0 ]; then
exit 255
fi
}
export -f run_idl_check_compatibility
find idls -maxdepth 1 -mindepth 1 -type d | xargs -n 1 -P 0 -I % bash -c 'run_idl_check_compatibility "$@"' _ %
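`export -f` is what makes the function callable from the `bash -c` children that `xargs` spawns; `-P 0` asks GNU xargs to run as many invocations in parallel as possible, one release directory each. A reduced sketch of the same fan-out (dummy function and inputs):

check_one() { echo "would check compatibility against $1"; }
export -f check_one
printf '%s\n' idls/r6.0 idls/r7.0 idls/r8.0 |
    xargs -n 1 -P 0 -I % bash -c 'check_one "$@"' _ %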

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src

View File

@ -1,5 +1,5 @@
set -o verbose
cd src
if [ -f resmoke_error_code ]; then
exit $(cat resmoke_error_code)
fi

View File

@ -1,5 +1,5 @@
set -o verbose
cd src
if [ -f run_tests_infrastructure_failure ]; then
exit $(cat run_tests_infrastructure_failure)
fi

View File

@ -2,57 +2,57 @@ failed_setup=false
source ~/.bashrc
if command -v pipx &>/dev/null; then
echo "'pipx' command exists"
else
echo "pipx command not found - failed setup"
failed_setup=true
echo "pipx command not found - failed setup"
failed_setup=true
fi
if command -v poetry &>/dev/null; then
echo "'poetry' command exists"
else
echo "poetry command not found - failed setup"
failed_setup=true
echo "poetry command not found - failed setup"
failed_setup=true
fi
if command -v db-contrib-tool &>/dev/null; then
echo "'db-contrib-tool' command exists"
else
echo "db-contrib-tool command not found - failed setup"
failed_setup=true
echo "db-contrib-tool command not found - failed setup"
failed_setup=true
fi
if test -d "./python3-venv"; then
echo "Venv directory exists, checking activation"
. python3-venv/bin/activate
./buildscripts/resmoke.py run --help &> /dev/null
if [ $? -eq 0 ]; then
echo "Virtual workstation set up correctly"
else
echo "Virtual workstation failed activation"
failed_setup=true
fi
deactivate
echo "Venv directory exists, checking activation"
. python3-venv/bin/activate
./buildscripts/resmoke.py run --help &>/dev/null
if [ $? -eq 0 ]; then
echo "Virtual workstation set up correctly"
else
echo "Virtual workstation failed activation"
failed_setup=true
fi
deactivate
else
echo "mongo virtual environment not created correctly - failed setup"
failed_setup=true
echo "mongo virtual environment not created correctly - failed setup"
failed_setup=true
fi
if test -d "../Boost-Pretty-Printer"; then
echo "Pretty printers set up correctly"
echo "Pretty printers set up correctly"
else
echo "Pretty printers failed setup"
failed_setup=true
echo "Pretty printers failed setup"
failed_setup=true
fi
if test -f "./compile_commands.json"; then
echo "Clang configuration set up correctly"
echo "Clang configuration set up correctly"
else
echo "Clang configuration failed setup"
failed_setup=true
echo "Clang configuration failed setup"
failed_setup=true
fi
if $failed_setup; then
exit 1
fi

View File

@ -1,13 +1,13 @@
set -o verbose
rm -rf \
/data/db/* \
mongo-diskstats* \
mongo-*.tgz \
~/.aws \
~/.boto \
venv \
/data/install \
/data/multiversion
exit 0

View File

@ -1,12 +1,12 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
activate_venv
if [ "Windows_NT" = "$OS" ]; then
vcvars="$(vswhere -latest -property installationPath | tr '\\' '/' | dos2unix.exe)/VC/Auxiliary/Build/"
cd "$vcvars" && cmd /K "vcvarsall.bat amd64 && cd ${workdir}\src"
fi
python -m pip install ninja
ninja install-core

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -20,19 +20,19 @@ version=${project#mongodb-mongo-}
version=${version#v}
if [ ! -z "${multiversion_platform_50_or_later}" ]; then
platform="${multiversion_platform_50_or_later}"
fi
# This is primarily for tests for infrastructure which don't always need the latest
# binaries.
db-contrib-tool setup-repro-env \
--installDir /data/install \
--linkDir /data/multiversion \
--edition $edition \
--platform $platform \
--architecture $architecture \
--evgVersionsFile multiversion-downloads.json \
$version
dist_test_dir=$(find /data/install -type d -iname "dist-test")
mv "$dist_test_dir" "$(pwd)"

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -10,5 +10,5 @@ OUTPUT_FILE="build/benchmarks.txt"
# Concatenate all text files in the directory into the output file
for file in build/*_bm.txt; do
cat "$file" >> "$OUTPUT_FILE"
cat "$file" >>"$OUTPUT_FILE"
done

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -8,7 +8,7 @@ attempts=0
max_attempts=4
while ! aws ecr get-login-password --region us-east-1 | podman login --password-stdin --username ${release_tools_container_registry_username_ecr} ${release_tools_container_registry_ecr}; do
[ "$attempts" -ge "$max_attempts" ] && exit 1
((attempts++))
sleep 10
[ "$attempts" -ge "$max_attempts" ] && exit 1
((attempts++))
sleep 10
done

View File

@ -10,7 +10,7 @@ python buildscripts/install_bazel.py
bazel_bin="$HOME/.local/bin/bazelisk"
# number of parallel jobs to use for build.
# Even with scale=0 (the default), bc command adds decimal digits in case of multiplication. Division by 1 gives us a whole number with scale=0
bazel_jobs=$(bc <<<"$(grep -c '^processor' /proc/cpuinfo) * .85 / 1")
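The comment above is easiest to verify with concrete numbers: bc's default `scale=0` still lets multiplication inherit the operands' decimal places, while division truncates to an integer. On a hypothetical 32-CPU host:

echo "32 * .85" | bc     # prints 27.20 - multiplication keeps the decimals
echo "32 * .85 / 1" | bc # prints 27 - division applies scale=0 and truncates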
build_config="--config=local --jobs=$bazel_jobs --compiler_type=gcc --opt=off --dbg=False --allocator=system"
bazel_query='mnemonic("CppCompile|LinkCompile", filter(//src/mongo, deps(//:install-core)) except //src/mongo/db/modules/enterprise/src/streams/third_party/...)'
bazel_cache="--output_user_root=$workdir/bazel_cache"
@ -21,14 +21,14 @@ bazelBuildCommand="$bazel_bin $bazel_cache build $build_config //src/mongo/db/mo
echo "Bazel Build Command: $bazelBuildCommand"
covIdir="$workdir/covIdir"
if [ -d "$covIdir" ]; then
echo "covIdir already exists, meaning idir extracted after download from S3"
echo "covIdir already exists, meaning idir extracted after download from S3"
else
mkdir $workdir/covIdir
mkdir $workdir/covIdir
fi
$workdir/coverity/bin/cov-build --dir "$covIdir" --verbose 0 -j $bazel_jobs --return-emit-failures --parse-error-threshold=99 --bazel $bazelBuildCommand
ret=$?
if [ $ret -ne 0 ]; then
echo "cov-build faild with exit code $ret"
echo "cov-build faild with exit code $ret"
else
echo "cov-build was successful"
echo "cov-build was successful"
fi

View File

@ -3,7 +3,7 @@
# This script verifies that specific symbols, and specific symbols only are
# exported in mongo_crypt_v1.so
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -12,8 +12,8 @@ set -o errexit
set -o verbose
if [ "$(uname)" != "Linux" ]; then
echo "Skipping test, this is for linux only"
exit 0
echo "Skipping test, this is for linux only"
exit 0
fi
EXTRACT_DIR="bazel-bin/install"
@ -25,8 +25,8 @@ GDB_PATH="/opt/mongodbtoolchain/v5/bin/gdb"
find $EXTRACT_DIR
if [ ! -f "$SOPATH" ]; then
echo "Error: can not find library at: $SOPATH"
exit 1
echo "Error: can not find library at: $SOPATH"
exit 1
fi
#
@ -51,10 +51,10 @@ mongo_crypt_v1_status_get_explanation@@MONGO_CRYPT_1.0'
actual="$(readelf -W --dyn-syms "$SOPATH" | awk '$5 == "GLOBAL" && $7 != "UND" && $7 != "ABS" {print $8}' | sort)"
if [ "$actual" != "$expect" ]; then
echo "Error: symbols are not as expected in: $SOPATH"
echo "Diff:"
diff <(echo "$actual") <(echo "$expect")
exit 1
echo "Error: symbols are not as expected in: $SOPATH"
echo "Diff:"
diff <(echo "$actual") <(echo "$expect")
exit 1
fi
echo "Mongo Crypt Shared Library exported symbols test succeeded!"
@ -64,8 +64,8 @@ echo "Mongo Crypt Shared Library exported symbols test succeeded!"
# and then verify it can be debugged with gdb
#
if [ ! -f "$UNITTEST_PATH" ]; then
echo "Skipping Mongo Crypt Shared Library unit test. Test not found at $UNITTEST_PATH"
exit 0
echo "Skipping Mongo Crypt Shared Library unit test. Test not found at $UNITTEST_PATH"
exit 0
fi
echo "Running Mongo Crypt Shared Library unit test"
@ -73,8 +73,8 @@ $UNITTEST_PATH
echo "Mongo Crypt Shared Library unit test succeeded!"
if [ ! -f "$GDB_PATH" ]; then
echo "Skipping Mongo Crypt Shared Library debuggability test. No gdb found at $GDB_PATH"
exit 0
echo "Skipping Mongo Crypt Shared Library debuggability test. No gdb found at $GDB_PATH"
exit 0
fi
echo "Running Mongo Crypt Shared Library debuggability test"

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src

View File

@ -1,8 +1,8 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
set -o errexit
activate_venv
$python -c 'import socket; num_nodes = 5; print("\n".join(["%s:%d" % (socket.gethostname(), port) for port in range(20000, 20000 + num_nodes)]))' >nodes.txt

View File

@ -6,9 +6,9 @@ echo BUILD_SCM_REVISION $(git rev-parse --verify HEAD)
git diff-index --quiet HEAD --
if [[ $? == 0 ]]; then
status="clean"
status="clean"
else
status="modified"
status="modified"
fi
echo BUILD_SCM_STATUS $status
echo BUILD_SCM_REMOTE git@github.com:10gen/mongo.git

View File

@ -1,10 +1,10 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
set -o errexit
cat <<EOF >aws_e2e_setup.json
{
"iam_auth_ecs_account" : "${iam_auth_ecs_account}",
"iam_auth_ecs_secret_access_key" : "${iam_auth_ecs_secret_access_key}",

View File

@ -1,10 +1,10 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
set -o errexit
cat <<EOF >$HOME/azure_e2e_config.json
{
"tD548GwE1@outlook.com" : "${oidc_azure_test_user_account_one_secret}",
"tD548GwE2@outlook.com" : "${oidc_azure_test_user_account_two_secret}",
@ -24,12 +24,12 @@ cat << EOF > $HOME/azure_e2e_config.json
"oidc_azure_managed_identity_api_version": "${oidc_azure_managed_identity_api_version}"
}
EOF
cat <<EOF >$HOME/oidc_azure_container_key
${oidc_azure_container_key}
EOF
# EVG project variables do not preserve line breaks so we store them as base64 and decode here
sed s/[[:space:]]//g $HOME/oidc_azure_container_key | base64 --decode >$HOME/azure_remote_key
# Clean up temp file
rm -f $HOME/oidc_azure_container_key
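The encode half of this round trip happens once, when the Evergreen project variable is populated; a self-contained sketch with throwaway key material shows the decode above is its exact inverse:

printf 'dummy-key-material\n' >/tmp/fake_key # stand-in for the real key
base64 </tmp/fake_key >/tmp/encoded          # this string goes into the EVG variable
sed "s/[[:space:]]//g" /tmp/encoded | base64 --decode >/tmp/decoded
cmp /tmp/fake_key /tmp/decoded && echo "round trip OK"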

View File

@ -1,11 +1,11 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
set -o errexit
# Only run this script for the external_auth_oidc_azure task.
if [ "${task_name}" != "external_auth_oidc_azure" ]; then
exit 0
fi
echo "Cleaning up Azure OIDC test artifacts"
@ -14,20 +14,20 @@ cd src
# Clean up the SSH keyfile, if it exists
if [ -f "${HOME}/oidc_azure_container_key" ]; then
rm -f $HOME/oidc_azure_container_key
echo "Cleaned up container key"
fi
python src/mongo/db/modules/enterprise/jstests/external_auth_oidc_azure/lib/toggle_ingress.py disable --config_file=$HOME/azure_e2e_config.json --lock_file=/tmp/azure_oidc.lock
# Clean up the config file, if it exists
if [ -f "${HOME}/azure_e2e_config.json" ]; then
rm -f $HOME/azure_e2e_config.json
echo "Cleaned up azure_e2e_config.json"
fi
# Clean up the lock file, if it exists
if [ -f "/tmp/azure_oidc.lock" ]; then
rm -f /tmp/azure_oidc.lock
echo "Cleaned up /tmp/azure_oidc.lock"
fi

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -7,7 +7,7 @@ set -o errexit
# Create the config file, which will contain the GCE project/zone information along with
# the expected audience that will appear on the VM's ID token.
cat <<EOF >$HOME/gce_vm_config.json
{
"audience" : "${oidc_gcp_vm_id_token_audience}",
"projectID" : "${oidc_gcp_project_id}",
@ -19,7 +19,7 @@ EOF
# Create the SSH key file. Note that the SSH key has been base64 encoded and stored into an EVG
# environment variable, so it is first trimmed of any whitespace via sed and base64 decoded before
# being output to the file.
echo ${oidc_gcp_ssh_key} | sed "s/[[:space:]]//g" | base64 --decode >$HOME/gcp_ssh_key
# Reduce SSH keyfile privileges so that it is secure enough for OpenSSH.
chmod 600 $HOME/gcp_ssh_key
@ -34,7 +34,7 @@ ls -al $HOME/gcp_ssh_key
# The contents of this file are expected to exist in base64 encoded format in
# $oidc_gcp_service_account_key, so the same steps are taken as above before dumping it into a
# newly-created JSON file.
echo ${oidc_gcp_service_account_key} | sed "s/[[:space:]]//g" | base64 --decode >${GOOGLE_APPLICATION_CREDENTIALS}
chmod 600 ${GOOGLE_APPLICATION_CREDENTIALS}
ls -al ${GOOGLE_APPLICATION_CREDENTIALS}

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -7,27 +7,27 @@ set -o errexit
# Only run this script for the external_auth_oidc_gcp task.
if [ "${task_name}" != "external_auth_oidc_gcp" ]; then
exit 0
fi
echo "Cleaning up OIDC GCP test artifacts"
# Delete the GCP VM specified in gce_vm_info.json if GOOGLE_APPLICATION_CREDENTIALS is set, points
# to a file, and the GCE config and VM info files exist.
if [ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ] \
&& [ -f "${GOOGLE_APPLICATION_CREDENTIALS}" ] \
&& [ -f "${HOME}/gce_vm_config.json" ] \
&& [ -f "${HOME}/gce_vm_info.json" ]; then
# Install google-cloud-compute so that the script can run.
$python -m pip install google-cloud-compute
$python src/mongo/db/modules/enterprise/jstests/external_auth_oidc_gcp/lib/gce_vm_manager.py delete --config_file $HOME/gce_vm_config.json --service_account_key_file ${GOOGLE_APPLICATION_CREDENTIALS} --output_file $HOME/gce_vm_info.json
if [ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ] &&
[ -f "${GOOGLE_APPLICATION_CREDENTIALS}" ] &&
[ -f "${HOME}/gce_vm_config.json" ] &&
[ -f "${HOME}/gce_vm_info.json" ]; then
# Install google-cloud-compute so that the script can run.
$python -m pip install google-cloud-compute
$python src/mongo/db/modules/enterprise/jstests/external_auth_oidc_gcp/lib/gce_vm_manager.py delete --config_file $HOME/gce_vm_config.json --service_account_key_file ${GOOGLE_APPLICATION_CREDENTIALS} --output_file $HOME/gce_vm_info.json
fi
# Clean up the SSH and service account keys if they exist.
if [ -f "${HOME}/gcp_ssh_key" ]; then
rm -f $HOME/gcp_ssh_key
fi
if [ ! -z "${GOOGLE_APPLICATION_CREDENTIALS}" ] && [ -f "${GOOGLE_APPLICATION_CREDENTIALS}" ]; then
rm -f ${GOOGLE_APPLICATION_CREDENTIALS}
fi

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -6,7 +6,7 @@ cd src
set -o errexit
# Should output contents to new file in home directory.
cat <<EOF >$HOME/oidc_e2e_setup.json
{
"testserversecurityone@ping-test.com" : "${oidc_ping_test_user_account_one_secret}",
"testserversecuritytwo@ping-test.com" : "${oidc_ping_test_user_account_two_secret}",

View File

@ -1,15 +1,15 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
# Only run this script for the external_auth_oidc_azure task.
if [ "${task_name}" != "external_auth_oidc" ]; then
exit 0
fi
echo "Cleaning up OIDC Okta test artifacts"
# Clean up the config file, if it exists
if [ -f "${HOME}/oidc_e2e_setup.json" ]; then
rm -f $HOME/oidc_e2e_setup.json
echo "Cleaned up oidc_e2e_setup.json"
fi

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -6,88 +6,88 @@ cd src
set -eou pipefail
# Only run on unit test tasks so we don't target mongod binaries from cores.
if [ "${task_name}" != "run_dbtest" ] \
&& [[ ${task_name} != integration_tests* ]] \
&& [[ "${task_name}" != unit_test_group*_no_sandbox ]]; then
echo "Not gathering failed unittests binaries as this is not a unittest task: ${task_name}"
exit 0
if [ "${task_name}" != "run_dbtest" ] &&
[[ ${task_name} != integration_tests* ]] &&
[[ "${task_name}" != unit_test_group*_no_sandbox ]]; then
echo "Not gathering failed unittests binaries as this is not a unittest task: ${task_name}"
exit 0
fi
unittest_bin_dir=dist-unittests/bin
mkdir -p $unittest_bin_dir || true
# Find all core files
core_files=$(/usr/bin/find -H . \( -name "dump_*.core" -o -name "*.mdmp" \) 2>/dev/null)
while read -r core_file; do
# A core file name does not always have the executable name that generated it.
# See http://stackoverflow.com/questions/34801353/core-dump-filename-gets-thread-name-instead-of-executable-name-with-core-pattern
# On platforms with GDB, we get the binary name from core file
gdb=/opt/mongodbtoolchain/v5/bin/gdb
if [ -f $gdb ]; then
binary_file=$($gdb -batch --quiet -ex "core $core_file" 2>/dev/null | grep "Core was generated" | cut -f2 -d "\`" | cut -f1 -d "'" | cut -f1 -d " ")
binary_file_locations=$binary_file
else
echo "Checking core file '$core_file'"
# Find the base file name from the core file name, note it may be truncated.
# Remove leading 'dump_' and trailing '.<pid>.core' or '.<pid or time>.mdmp'
binary_file=$(echo "$core_file" | sed "s/.*\///;s/dump_//;s/\..*\.core//;s/\..*\.mdmp//")
# Locate the binary file. Since the base file name might be truncated, the find
# may return more than 1 file.
if [ "$binary_file" != "" ]; then
binary_file_locations=$(/usr/bin/find -H . -executable -name "$binary_file*${exe}" 2>/dev/null)
fi
fi
if [ -z "$binary_file_locations" ]; then
echo "Cannot locate the unittest binary file ($binary_file) that generated the core file $core_file"
else
echo "Files to save: $binary_file_locations"
fi
for binary_file_location in $binary_file_locations; do
new_binary_file=$unittest_bin_dir/$(echo "$binary_file_location" | sed "s/.*\///")
if [ -f "$binary_file_location" ] && [ ! -f "$new_binary_file" ]; then
echo "Direct Copy $binary_file_location to $new_binary_file"
cp "$binary_file_location" "$new_binary_file"
fi
# On Windows if a .pdb symbol file exists, include it in the archive.
if [[ "$binary_file_location" == *".exe" ]]; then
pdb_file=$(echo "$binary_file_location" | sed "s/\.exe/.pdb/")
if [ -f "$pdb_file" ]; then
new_pdb_file=$unittest_bin_dir/$(echo "$pdb_file" | sed "s/.*\///")
echo "PDB Copy $pdb_file to $new_pdb_file"
cp "$pdb_file" "$new_pdb_file"
fi
fi
# On binutils platforms, if a .debug symbol file exists, include it
# in the archive
debug_file=$binary_file_location.debug
if [ -f "$debug_file" ]; then
echo "debug Copy $debug_file to $unittest_bin_dir"
cp "$debug_file" "$unittest_bin_dir"
fi
# Include any dwp symbol files to go with the .debug files
dwp_file=$binary_file_location.dwp
if [ -f "$dwp_file" ]; then
echo "dwp Copy $dwp_file to $unittest_bin_dir"
cp "$dwp_file" "$unittest_bin_dir"
fi
# On macOS, these are called .dSYM and they are directories
dsym_dir=$binary_file_location.dSYM
if [ -d "$dsym_dir" ]; then
echo "dsym Copy $dsym_dir to $unittest_bin_dir"
cp -r "$dsym_dir" "$unittest_bin_dir"
fi
done
done <<<"${core_files}"
# Copy debug symbols for dynamic builds
lib_dir=bazel-bin/install/lib
if [ -d "$lib_dir" ] && [ -n "$core_files" ]; then
cp -r "$lib_dir" dist-unittests
cp -r "$lib_dir" dist-unittests
fi
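The GDB branch above relies on `gdb -batch -ex "core <file>"` printing a line of the form "Core was generated by `/path/to/binary --args'."; the `cut` chain peels that down to the bare executable path. A hedged standalone sketch (illustrative paths, assuming a core file is present):

core_file=dump_example.1234.core # hypothetical core dump
gdb=/opt/mongodbtoolchain/v5/bin/gdb
# Extracts "/usr/bin/myprog" from: Core was generated by `/usr/bin/myprog --opt'.
$gdb -batch --quiet -ex "core $core_file" 2>/dev/null |
    grep "Core was generated" | cut -f2 -d "\`" | cut -f1 -d "'" | cut -f1 -d " "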

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -18,7 +18,7 @@ mv all_feature_flags.txt patch_all_feature_flags.txt
# get the list of feature flags from the base commit
git --no-pager diff "$(git merge-base origin/${branch_name} HEAD)" --output="$diff_file_name" --binary
if [ -s "$diff_file_name" ]; then
git apply -R "$diff_file_name"
fi
$python buildscripts/idl/gen_all_feature_flag_list.py turned-on-by-default

View File

@ -8,17 +8,17 @@ HAS_FULL_DISK=false
# 34% /dev/nvme1n1
FILESYSTEMS=$(df -H | grep -vE '^Filesystem|tmpfs|cdrom' | awk '{ print $5 " " $1 }')
while read -r output; do
usep=$(echo "$output" | awk '{ print $1}' | cut -d'%' -f1)
partition=$(echo "$output" | awk '{ print $2 }')
if [ $usep -ge $FULL_DISK_THRESHOLD ]; then
echo "Running out of space \"$partition ($usep%)\" on $(hostname) as of $(date)"
HAS_FULL_DISK=true
fi
done <<<"$FILESYSTEMS"
if $HAS_FULL_DISK; then
# print all files that are above one megabyte sorted
du -cha / 2>/dev/null | grep -E "^[0-9]+(\.[0-9]+)?[G|M|T]" | sort -h
else
echo "No full partitions found, skipping"
fi
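The `df -H` pipeline above flattens each mount to a `<use%> <device>` pair before the loop re-splits it with awk; the same parse can be sketched more directly by letting `read` split the two fields (a simplification, not the script's exact form):

df -H | grep -vE '^Filesystem|tmpfs|cdrom' | awk '{ print $5 " " $1 }' |
    while read -r usep partition; do
        echo "device ${partition} is at ${usep} of capacity"
    done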

View File

@ -5,21 +5,21 @@ set -o verbose
tag=""
if [ -n "$bv_future_git_tag" ]; then
tag="$bv_future_git_tag"
tag="$bv_future_git_tag"
fi
if [ -n "$future_git_tag" ]; then
tag="$future_git_tag"
tag="$future_git_tag"
fi
echo "TAG: $tag"
if [ -n "$tag" ]; then
if [ "Windows_NT" = "$OS" ]; then
# On Windows, we don't seem to have a local git identity, so we populate the config with this
# dummy email and name. Without a configured email/name, the 'git tag' command will fail.
git config user.email "no-reply@evergreen.@mongodb.com"
git config user.name "Evergreen Agent"
fi
if [ "Windows_NT" = "$OS" ]; then
# On Windows, we don't seem to have a local git identity, so we populate the config with this
# dummy email and name. Without a configured email/name, the 'git tag' command will fail.
git config user.email "no-reply@evergreen.@mongodb.com"
git config user.name "Evergreen Agent"
fi
git tag -a "$tag" -m "$tag"
git tag -a "$tag" -m "$tag"
fi

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
set +o errexit
@ -6,8 +6,8 @@ set +o errexit
cd src
if [ -z "${BOLT:-}" ]; then
echo "Not applying BOLT" >&2
exit 0
echo "Not applying BOLT" >&2
exit 0
fi
tar -xvf bazel-bin/dist-test-stripped.tgz

View File

@ -1,9 +1,9 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
cat >mci.buildlogger <<END_OF_CREDS
slavename='${slave}'
passwd='${passwd}'
builder='${build_variant}_${project}'

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
set +o errexit
@ -6,8 +6,8 @@ set +o errexit
cd src
if [ -z "${PGO_PROFILE_URL:-}" ]; then
echo "No pgo profile url specified" >&2
exit 0
echo "No pgo profile url specified" >&2
exit 0
fi
wget $PGO_PROFILE_URL

View File

@ -1,10 +1,10 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
# Create the Evergreen API credentials
cat >.evergreen.yml <<END_OF_CREDS
api_server_host: https://evergreen.mongodb.com/api
api_key: "${evergreen_api_key}"
user: "${evergreen_api_user}"

View File

@ -1,12 +1,12 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
if [ -z "${files}" ]; then
exit 0
fi
for file in ${files}; do
if [ -f "$file" ]; then
echo "Removing file $file"
rm -f $file
fi
if [ -f "$file" ]; then
echo "Removing file $file"
rm -f $file
fi
done

View File

@ -1,9 +1,9 @@
cd src
if [ -d /data/thrift ]; then
rm -rf /data/thrift
fi
if [ -d /data/charybdefs ]; then
rm -rf /data/charybdefs
fi

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
@ -8,12 +8,12 @@ set -o errexit
# For patch builds gather the modified patch files.
if [ "${is_patch}" = "true" ]; then
# Get list of patched files
git diff HEAD --name-only >>patch_files.txt
if [ -d src/mongo/db/modules/enterprise ]; then
pushd src/mongo/db/modules/enterprise
# Update the patch_files.txt in the mongo repo.
git diff HEAD --name-only >>~1/patch_files.txt
popd
fi
fi

View File

@ -1,14 +1,14 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
set -o errexit
cat <<EOF >notary_env.sh
export NOTARY_TOKEN=${signing_auth_token_70}
export BARQUE_USERNAME=${barque_user}
export BARQUE_API_KEY=${barque_api_key}
EOF
echo "${signing_auth_token_70}" > signing_auth_token
echo "${signing_auth_token_70}" >signing_auth_token

View File

@ -1,46 +1,46 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
proc_list="(java|lein|mongo|python|_test$|_test\.exe$)"
if [ "Windows_NT" = "$OS" ]; then
get_pids() {
proc_pids=$(tasklist /fo:csv |
awk -F'","' '{x=$1; gsub("\"","",x); print $2, x}' |
grep -iE $1 |
cut -f1 -d ' ')
}
get_process_info() {
proc_name=""
proc_info=$(wmic process where "ProcessId=\"$1\"" get "Name,ProcessId,ThreadCount" /format:csv 2>/dev/null | grep $1)
if [ ! -z $proc_info ]; then
proc_name=$(echo $proc_info | cut -f2 -d ',')
proc_threads=$(echo $proc_info | cut -f4 -d ',')
fi
}
else
get_pids() { proc_pids=$(pgrep $1); }
get_process_info() {
proc_name=$(ps -p $1 -o comm=)
# /proc is available on Linux platforms
if [ -f /proc/$1/status ]; then
set_sudo
proc_threads=$($sudo grep Threads /proc/$1/status | sed "s/\s//g" | cut -f2 -d ":")
else
proc_threads=$(ps -AM $1 | grep -vc PID)
fi
}
fi
while [ 1 ]; do
get_pids $proc_list
if [ ! -z "$proc_pids" ]; then
printf "Running process/thread counter\n"
printf "PROCESS\tPID\tTHREADS\n"
fi
for pid in $proc_pids; do
get_process_info $pid
if [ ! -z "$proc_name" ]; then
printf "$proc_name\t$pid\t$proc_threads\n"
fi
done
sleep 60
done
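On the POSIX side of that conditional, the monitor is essentially `pgrep` plus `/proc` bookkeeping; a trimmed sketch that samples once instead of looping forever (Linux-only, no sudo handling):

proc_list="(java|lein|mongo|python|_test$)"
for pid in $(pgrep "$proc_list"); do
    name=$(ps -p "$pid" -o comm=)
    threads=$(grep Threads "/proc/$pid/status" 2>/dev/null | tr -dc '0-9')
    printf '%s\t%s\t%s\n' "$name" "$pid" "$threads"
done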

View File

@ -1,16 +1,16 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
# Since the macros 'private_key_remote' and 'private_key_file' are not always defined
# we default to /dev/null to avoid syntax errors from an empty expansion.
if [ -n "$private_key_remote_bash_var" ]; then
private_key_remote="$private_key_remote_bash_var"
fi
if [ ! -z "${private_key_remote}" ] && [ ! -z "${private_key_file}" ]; then
mkdir -p ~/.ssh
private_key_file=$(eval echo "$private_key_file")
echo -n "${private_key_remote}" >${private_key_file}
chmod 0600 ${private_key_file}
fi
# Ensure a clean aws configuration state
@ -24,23 +24,23 @@ aws_profile="${aws_profile_remote}"
# The profile in the config file is specified as [profile <profile>], except
# for [default], see http://boto3.readthedocs.io/en/latest/guide/configuration.html
if [ $aws_profile = "default" ]; then
aws_profile_config="[default]"
aws_profile_config="[default]"
else
aws_profile_config="[profile $aws_profile]"
aws_profile_config="[profile $aws_profile]"
fi
cat <<EOF >>~/.aws/config
$aws_profile_config
region = us-east-1
EOF
# The profile in the credentials file is specified as [<profile>].
cat <<EOF >>~/.aws/credentials
[$aws_profile]
aws_access_key_id = ${aws_key_remote}
aws_secret_access_key = ${aws_secret_remote}
EOF
cat <<EOF >~/.boto
[Boto]
https_validate_certificates = False
EOF
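One shfmt-normalized detail worth noting across these heredocs: an unquoted delimiter (`<<EOF`) expands `${...}` inside the body, which is how the credential values above get substituted, while a quoted delimiter (`<<'EOF'`, used later for the gpg helper script) keeps the body literal. A two-heredoc illustration:

name=world
cat <<EOF
expanded: hello $name
EOF
cat <<'EOF'
literal: hello $name
EOF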

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
@ -8,9 +8,9 @@ set -o errexit
activate_venv
$python buildscripts/evergreen_resmoke_job_count.py \
--taskName ${task_name} \
--buildVariant ${build_variant} \
--distro ${distro_id} \
--jobFactor ${resmoke_jobs_factor} \
--jobsMax ${resmoke_jobs_max} \
--outFile resmoke_jobs_expansion.yml

View File

@ -3,20 +3,20 @@ set -o verbose
# On Windows we can use typeperf.exe to dump performance counters.
if [ "Windows_NT" = "$OS" ]; then
typeperf -qx PhysicalDisk | grep Disk | grep -v _Total >disk_counters.txt
typeperf -cf disk_counters.txt -si 5 -o mongo-diskstats
# Linux: iostat -t option for timestamp.
elif iostat -tdmx >/dev/null 2>&1; then
iostat -tdmx 5 >mongo-diskstats
# OSX: Simulate the iostat timestamp.
elif iostat -d >/dev/null 2>&1; then
iostat -d -w 5 | while IFS= read -r line; do printf '%s %s\n' "$(date +'%m/%d/%Y %H:%M:%S')" "$line" >>mongo-diskstats; done
# Check if vmstat -t is available.
elif vmstat -td >/dev/null 2>&1; then
vmstat -td 5 >mongo-diskstats
# Check if vmstat -T d is available.
elif vmstat -T d >/dev/null 2>&1; then
vmstat -T d 5 >mongo-diskstats
else
printf "Cannot collect mongo-diskstats on this platform\n"
fi

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
@ -9,46 +9,46 @@ set -o errexit
# Set the suite name to be the task name by default; unless overridden with the `suite`/`suite_config` expansion.
suite_name=${task_name}
if [[ -n ${suite_config} ]]; then
suite_name=${suite_config}
elif [[ -n ${suite} ]]; then
suite_name=${suite}
fi
timeout_factor=""
if [[ -n "${exec_timeout_factor}" ]]; then
timeout_factor="--exec-timeout-factor ${exec_timeout_factor}"
fi
build_variant_for_timeout=${build_variant}
if [[ -n "${burn_in_bypass}" ]]; then
# burn_in_tags may generate new build variants, if we are running on one of those build variants
# we should use the build variant it is based on for determining the timeout. This is stored in
# the `burn_in_bypass` expansion.
build_variant_for_timeout=${burn_in_bypass}
fi
if [[ -n "${alias}" ]]; then
evg_alias=${alias}
else
evg_alias="evg-alias-absent"
evg_alias="evg-alias-absent"
fi
resmoke_test_flags=""
if [[ -n "${test_flags}" ]]; then
resmoke_test_flags="--test-flags='${test_flags}'"
fi
activate_venv
PATH=$PATH:$HOME:/ eval $python buildscripts/evergreen_task_timeout.py \
$timeout_factor \
$resmoke_test_flags \
--install-dir "${install_dir}" \
--task-name ${task_name} \
--suite-name ${suite_name} \
--project ${project} \
--build-variant $build_variant_for_timeout \
--evg-alias $evg_alias \
--timeout ${timeout_secs} \
--exec-timeout ${exec_timeout_secs} \
--evg-project-config ${evergreen_config_file_path} \
--out-file task_timeout_expansions.yml

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
set -o errexit

View File

@ -1,11 +1,11 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
if [ "$(uname)" != "Linux" ] && [ "$(uname)" != "Darwin" ]; then
echo "===== Skipping ulimit dump, OS is: $(uname)."
echo "===== Skipping ulimit dump, OS is: $(uname)."
else
echo "===== Collecting soft limits:"
ulimit -Sa
echo "===== Collecting hard limits:"
ulimit -Ha
echo "===== Collecting soft limits:"
ulimit -Sa
echo "===== Collecting hard limits:"
ulimit -Ha
fi

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude_python.sh"
set -o errexit
@ -11,36 +11,36 @@ popd
ARCH=$(uname -m)
if [[ "$ARCH" == "arm64" || "$ARCH" == "aarch64" ]]; then
ARCH="arm64"
ARCH="arm64"
elif [[ "$ARCH" == "ppc64le" || "$ARCH" == "ppc64" || "$ARCH" == "ppc" || "$ARCH" == "ppcle" ]]; then
ARCH="ppc64le"
ARCH="ppc64le"
elif [[ "$ARCH" == "s390x" || "$ARCH" == "s390" ]]; then
ARCH="s390x"
ARCH="s390x"
else
ARCH="x86_64"
ARCH="x86_64"
fi
# TODO SERVER-105520
# try using downloaded venv once more reliability has been built into venv upload/download
if [[ "$ARCH" == "ppc64le" ]]; then
rm -rf $venv_dir
source "$DIR/venv_setup.sh"
else
# Update virtual env directory in activate script
if [ "Windows_NT" = "$OS" ]; then
sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"$venv_dir\":" "$venv_dir/Scripts/activate"
else
sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"$venv_dir\":" "$venv_dir/bin/activate"
fi
# Add back python symlinks on linux platforms
if [ "Windows_NT" = "$OS" ]; then
exit 0
fi
cd "$venv_dir/bin"
rm python python3
ln -s "$python_loc" python3
ln -s python3 python
fi
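The `sed` rewrite works because a venv's `activate` script hard-codes the absolute `VIRTUAL_ENV` path from creation time; after the archive is unpacked elsewhere, repointing that variable (plus restoring the interpreter symlinks) is usually enough. A sketch with a hypothetical unpack location:

venv_dir=/data/new_home/venv # hypothetical unpack location
sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"$venv_dir\":" "$venv_dir/bin/activate"
. "$venv_dir/bin/activate"
python -c 'import sys; print(sys.prefix)' # should print the new venv path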

View File

@ -1,7 +1,7 @@
# exit immediately if virtualenv is not found
set -o errexit
evergreen_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)/.."
evergreen_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)/.."
. "$evergreen_dir/prelude_workdir.sh"
. "$evergreen_dir/prelude_python.sh"
@ -10,7 +10,7 @@ echo "python_loc set to $python_loc"
venv_dir="${workdir}/venv"
if [ -d "$venv_dir" ]; then
exit 0
fi
# We create a venv for poetry
@ -19,9 +19,9 @@ fi
# See issue SERVER-80781
POETRY_VENV="${workdir}/poetry_venv"
if [ "Windows_NT" = "$OS" ]; then
POETRY_VENV_PYTHON="$POETRY_VENV/Scripts/python.exe"
else
POETRY_VENV_PYTHON="$POETRY_VENV/bin/python3"
fi
"$python_loc" -m venv "$POETRY_VENV"
@ -36,20 +36,20 @@ export POETRY_CACHE_DIR="$poetry_dir/cache"
export PIP_CACHE_DIR="$poetry_dir/pip_cache"
pushd src
for i in {1..5}; do
$POETRY_VENV_PYTHON -m pip install -r poetry_requirements.txt && RET=0 && break || RET=$? && sleep 1
echo "Python failed to install poetry, retrying..."
done
popd
if [ $RET -ne 0 ]; then
echo "Pip install error for poetry"
exit $RET
echo "Pip install error for poetry"
exit $RET
fi
"$python_loc" -m venv "$venv_dir"
# Adding README file for using this venv locally
cat <<EOF >>venv_readme.txt
This is an archive of the Python venv generated by this Evergreen build.
You can use it locally to avoid needing to manually set up the Python environment.
@ -60,11 +60,11 @@ echo "Updating virtual env directory in activate script"
pushd venv; venv_dir=\$(pwd); popd
EOF
if [ "Windows_NT" = "$OS" ]; then
cat <<EOF >>venv_readme.txt
sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"\$venv_dir\":" "\$venv_dir/Scripts/activate"
EOF
else
cat <<EOF >>venv_readme.txt
sed -i -e "s:VIRTUAL_ENV=\".*\":VIRTUAL_ENV=\"\$venv_dir\":" "\$venv_dir/bin/activate"
echo "Adding back python symlinks"
@ -85,7 +85,7 @@ fi # End of README file
# cygwin bash does not like. dos2unix it
# (See https://bugs.python.org/issue32451)
if [ "Windows_NT" = "$OS" ]; then
dos2unix "${workdir}/venv/Scripts/activate"
fi
export VIRTUAL_ENV_DISABLE_PROMPT=yes
@ -101,13 +101,13 @@ echo "Upgrading pip to 21.0.1"
# We have seen weird network errors that can sometimes mess up the pip install
# By retrying we would like to only see errors that happen consistently
for i in {1..5}; do
python -m pip --disable-pip-version-check install "pip==21.0.1" "wheel==0.37.0" && RET=0 && break || RET=$? && sleep 1
echo "Python failed to install pip and wheel, retrying..."
done
if [ $RET -ne 0 ]; then
echo "Pip install error for wheel and pip version"
exit $RET
echo "Pip install error for wheel and pip version"
exit $RET
fi
cd src
@ -117,20 +117,20 @@ cd src
# By retrying we would like to only see errors that happen consistently
count=0
for i in {1..5}; do
yes | $POETRY_VENV_PYTHON -m poetry cache clear . --all
rm -rf $poetry_dir/*
$POETRY_VENV_PYTHON -m poetry install --no-root --sync && RET=0 && break || RET=$? && sleep 1
echo "Python failed to install required deps with poetry, retrying..."
sleep $((count * count * 20))
count=$((count + 1))
done
if [ $RET -ne 0 ]; then
echo "Poetry install error for full venv"
exit $RET
echo "Poetry install error for full venv"
exit $RET
fi
cd ..
python -m pip freeze >pip-requirements.txt

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
@ -10,13 +10,13 @@ MONGO_VERSION=$(git describe --abbrev=7)
# If the project is sys-perf (or related), add the string -sys-perf to the version
if [[ "${project}" == sys-perf* ]]; then
MONGO_VERSION="$MONGO_VERSION-sys-perf"
fi
# If this is a patch build, we add the patch version id to the version string so we know
# this build was a patch, and which evergreen task it came from
if [ "${is_patch}" = "true" ]; then
MONGO_VERSION="$MONGO_VERSION-patch-${version_id}"
fi
echo "MONGO_VERSION = ${MONGO_VERSION}"

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
cd src
@ -6,10 +6,10 @@ cd src
set -o errexit
set -o verbose
if [ "${use_wt_develop}" = "true" ]; then
echo "Using the wtdevelop module instead..."
cd src/third_party
for wtdir in dist examples ext lang src test tools; do
rm -rf wiredtiger/$wtdir
mv wtdevelop/$wtdir wiredtiger/
done
echo "Using the wtdevelop module instead..."
cd src/third_party
for wtdir in dist examples ext lang src test tools; do
rm -rf wiredtiger/$wtdir
mv wtdevelop/$wtdir wiredtiger/
done
fi

View File

@ -1,10 +1,10 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
echo "GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_80}" >> "signing-envfile"
echo "GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_80}" >> "signing-envfile"
echo "GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_80}" >>"signing-envfile"
echo "GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_80}" >>"signing-envfile"
set -o errexit
set -o verbose
@ -20,14 +20,14 @@ shasum -a 256 $crypt_file_name | tee $crypt_file_name.sha256
md5sum $crypt_file_name | tee $crypt_file_name.md5
# signing crypt linux artifact with gpg
cat << EOF >> gpg_signing_commands.sh
cat <<EOF >>gpg_signing_commands.sh
gpgloader # loading gpg keys.
gpg --yes -v --armor -o $crypt_file_name.sig --detach-sign $crypt_file_name
EOF
podman run \
--env-file=signing-envfile \
--rm \
-v $(pwd):$(pwd) -w $(pwd) \
${garasign_gpg_image_ecr} \
/bin/bash -c "$(cat ./gpg_signing_commands.sh)"
--env-file=signing-envfile \
--rm \
-v $(pwd):$(pwd) -w $(pwd) \
${garasign_gpg_image_ecr} \
/bin/bash -c "$(cat ./gpg_signing_commands.sh)"
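The heredoc above runs inside the garasign container and leaves an armored detached signature next to the artifact. Assuming the signing public key has been imported into the local keyring, a consumer could verify it with:

# --verify takes the detached signature first, then the signed file
gpg --verify "$crypt_file_name.sig" "$crypt_file_name"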

View File

@ -1,17 +1,17 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
echo "GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_80}" >> "signing-envfile"
echo "GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_80}" >> "signing-envfile"
echo "GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_80}" >>"signing-envfile"
echo "GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_80}" >>"signing-envfile"
set -o errexit
set -o verbose
long_ext=${ext}
if [ "$long_ext" == "tgz" ]; then
long_ext="tar.gz"
long_ext="tar.gz"
fi
mv mongo-binaries.tgz mongodb-${push_name}-${push_arch}-${suffix}.${ext}
@ -22,13 +22,13 @@ mv distsrc.${ext} mongodb-src-${src_suffix}.${long_ext} || true
# generating checksums
function gen_checksums() {
if [ -e $1 ]; then
shasum -a 1 $1 | tee $1.sha1
shasum -a 256 $1 | tee $1.sha256
md5sum $1 | tee $1.md5
else
echo "$1 does not exist. Skipping checksum generation"
fi
if [ -e $1 ]; then
shasum -a 1 $1 | tee $1.sha1
shasum -a 256 $1 | tee $1.sha256
md5sum $1 | tee $1.md5
else
echo "$1 does not exist. Skipping checksum generation"
fi
}
gen_checksums mongodb-$push_name-$push_arch-$suffix.$ext
@ -37,7 +37,7 @@ gen_checksums mongodb-src-$src_suffix.$long_ext
gen_checksums mongodb-cryptd-$push_name-$push_arch-$suffix.$ext
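gen_checksums writes each digest in the standard "<hash>  <filename>" layout, so the sidecar files it produces can be verified later with the matching tools. A sketch, reusing the same variables:

artifact="mongodb-$push_name-$push_arch-$suffix.$ext"
shasum -a 256 -c "$artifact.sha256"
md5sum -c "$artifact.md5"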
# signing linux artifacts with gpg
cat << 'EOF' > gpg_signing_commands.sh
cat <<'EOF' >gpg_signing_commands.sh
gpgloader # loading gpg keys.
function sign(){
if [ -e $1 ]
@ -50,7 +50,7 @@ function sign(){
EOF
cat << EOF >> gpg_signing_commands.sh
cat <<EOF >>gpg_signing_commands.sh
sign mongodb-$push_name-$push_arch-$suffix.$ext
sign mongodb-$push_name-$push_arch-debugsymbols-$suffix.$ext
sign mongodb-src-$src_suffix.$long_ext
@ -58,8 +58,8 @@ sign mongodb-cryptd-$push_name-$push_arch-$suffix.$ext
EOF
podman run \
--env-file=signing-envfile \
--rm \
-v $(pwd):$(pwd) -w $(pwd) \
${garasign_gpg_image_ecr} \
/bin/bash -c "$(cat ./gpg_signing_commands.sh)"
--env-file=signing-envfile \
--rm \
-v $(pwd):$(pwd) -w $(pwd) \
${garasign_gpg_image_ecr} \
/bin/bash -c "$(cat ./gpg_signing_commands.sh)"
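Note the two heredoc styles used to assemble gpg_signing_commands.sh: <<'EOF' (quoted delimiter) copies the sign() definition verbatim so $1 is expanded later by the container shell, while the following <<EOF (unquoted) lets the outer script substitute $push_name, $suffix, and friends as the commands are written. A minimal sketch of the difference, with a hypothetical demo.sh:

cat <<'EOF' >demo.sh
echo "$HOME" # written literally; expands only when demo.sh runs
EOF
cat <<EOF >>demo.sh
echo "$HOME" # substituted now, while demo.sh is being written
EOF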

View File

@ -1,14 +1,14 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
if [ "${push_name}" != "windows" ]; then
exit 0
exit 0
fi
cd src
echo "GRS_CONFIG_USER1_USERNAME=${garasign_jsign_username}" >> "signing-envfile"
echo "GRS_CONFIG_USER1_PASSWORD=${garasign_jsign_password}" >> "signing-envfile"
echo "GRS_CONFIG_USER1_USERNAME=${garasign_jsign_username}" >>"signing-envfile"
echo "GRS_CONFIG_USER1_PASSWORD=${garasign_jsign_password}" >>"signing-envfile"
set -o errexit
set -o verbose
@ -17,8 +17,8 @@ msi_filename=mongodb-${push_name}-${push_arch}-${suffix}.msi
cp bazel-bin/src/mongo/installer/msi/mongodb-win32-x86_64-windows-${version}.msi $msi_filename
if [ "${is_patch}" != "true" ]; then
# signing windows artifacts with jsign
cat << 'EOF' > jsign_signing_commands.sh
# signing windows artifacts with jsign
cat <<'EOF' >jsign_signing_commands.sh
function sign(){
if [ -e $1 ]
then
@ -28,25 +28,25 @@ function sign(){
fi
}
EOF
cat << EOF >> jsign_signing_commands.sh
cat <<EOF >>jsign_signing_commands.sh
sign $msi_filename
EOF
podman run \
--env-file=signing-envfile \
--rm \
-v $(pwd):$(pwd) -w $(pwd) \
${garasign_jsign_image_ecr} \
/bin/bash -c "$(cat ./jsign_signing_commands.sh)"
podman run \
--env-file=signing-envfile \
--rm \
-v $(pwd):$(pwd) -w $(pwd) \
${garasign_jsign_image_ecr} \
/bin/bash -c "$(cat ./jsign_signing_commands.sh)"
else
echo "Not signing windows msi due to it being a patch build"
echo "Not signing windows msi due to it being a patch build"
fi
# generating checksums
if [ -e $msi_filename ]; then
shasum -a 1 $msi_filename | tee $msi_filename.sha1
shasum -a 256 $msi_filename | tee $msi_filename.sha256
md5sum $msi_filename | tee $msi_filename.md5
shasum -a 1 $msi_filename | tee $msi_filename.sha1
shasum -a 256 $msi_filename | tee $msi_filename.sha256
md5sum $msi_filename | tee $msi_filename.md5
else
echo "$msi_filename does not exist. Skipping checksum generation"
echo "$msi_filename does not exist. Skipping checksum generation"
fi

View File

@ -1,10 +1,10 @@
cd src
# Find all core files and move to src
core_files=$(/usr/bin/find -H .. \( -name "*.core" -o -name "*.mdmp" \) 2> /dev/null)
core_files=$(/usr/bin/find -H .. \( -name "*.core" -o -name "*.mdmp" \) 2>/dev/null)
for core_file in $core_files; do
base_name=$(echo $core_file | sed "s/.*\///")
# Move file if it does not already exist
if [ ! -f $base_name ]; then
mv $core_file .
fi
base_name=$(echo $core_file | sed "s/.*\///")
# Move file if it does not already exist
if [ ! -f $base_name ]; then
mv $core_file .
fi
done

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -8,79 +8,79 @@ set -o verbose
activate_venv
if [ -z "${build_patch_id}" ] || [ -z "${reuse_compile_from}" ] || [ "${is_patch:-false}" = "false" ]; then
# Create target folder
mkdir -p mongodb/
# Create target folder
mkdir -p mongodb/
# Generate feature flag list
$python buildscripts/idl/gen_all_feature_flag_list.py turned-off-by-default
mkdir -p mongodb/feature_flags
cp ./all_feature_flags.txt mongodb/feature_flags
# Generate feature flag list
$python buildscripts/idl/gen_all_feature_flag_list.py turned-off-by-default
mkdir -p mongodb/feature_flags
cp ./all_feature_flags.txt mongodb/feature_flags
# Generate server params list
$python buildscripts/idl/gen_all_server_params_list.py
mkdir -p mongodb/server_params
cp ./all_server_params.txt mongodb/server_params
# Generate server params list
$python buildscripts/idl/gen_all_server_params_list.py
mkdir -p mongodb/server_params
cp ./all_server_params.txt mongodb/server_params
# Download mongo tools
arch=$(uname -m)
if [ -f /etc/os-release ]; then
. /etc/os-release
if [ "$ID" == "amzn" ]; then
case $arch in
"x86_64" | "aarch64")
case $VERSION_ID in
"2" | "2023")
binary_url="https://fastdl.mongodb.org/tools/db/mongodb-database-tools-amazon${VERSION_ID}-${arch}-100.9.4.tgz"
;;
*)
echo "Unsupported Amazon Linux version: $VERSION_ID"
exit 1
;;
esac
;;
*)
echo "Unsupported architecture: $arch"
exit 1
;;
esac
# Download mongo tools
arch=$(uname -m)
if [ -f /etc/os-release ]; then
. /etc/os-release
if [ "$ID" == "amzn" ]; then
case $arch in
"x86_64" | "aarch64")
case $VERSION_ID in
"2" | "2023")
binary_url="https://fastdl.mongodb.org/tools/db/mongodb-database-tools-amazon${VERSION_ID}-${arch}-100.9.4.tgz"
;;
*)
echo "Unsupported Amazon Linux version: $VERSION_ID"
exit 1
;;
esac
;;
*)
echo "Unsupported architecture: $arch"
exit 1
;;
esac
else
echo "Unsupported Linux distribution: $ID"
exit 1
fi
else
echo "Unsupported Linux distribution: $ID"
exit 1
echo "Unable to determine Linux distribution"
exit 1
fi
else
echo "Unable to determine Linux distribution"
exit 1
fi
wget "$binary_url" -O mongo-tools.tar.gz
tar -xzvf mongo-tools.tar.gz -C mongodb/ --strip-components=1 "mong*/bin"
wget "$binary_url" -O mongo-tools.tar.gz
tar -xzvf mongo-tools.tar.gz -C mongodb/ --strip-components=1 "mong*/bin"
# generate atlas info
uarch=$(uname -p)
os=$(uname -r)
json="{ \"version\": \"${version}\", \"gitVersion\": \"${revision}\", \"uarch\": \"$uarch\", \"os\": \"$os\" }"
echo $json | jq '.' > mongodb/atlas_info.json
# generate atlas info
uarch=$(uname -p)
os=$(uname -r)
json="{ \"version\": \"${version}\", \"gitVersion\": \"${revision}\", \"uarch\": \"$uarch\", \"os\": \"$os\" }"
echo $json | jq '.' >mongodb/atlas_info.json
# Add custom run_validate_collections.js wrapper
mv jstests/hooks/run_validate_collections.js jstests/hooks/run_validate_collections.actual.js
cat << EOF > jstests/hooks/run_validate_collections.js
# Add custom run_validate_collections.js wrapper
mv jstests/hooks/run_validate_collections.js jstests/hooks/run_validate_collections.actual.js
cat <<EOF >jstests/hooks/run_validate_collections.js
print("NOTE: run_validate_collections.js will skip the oplog!");
TestData = { skipValidationNamespaces: ['local.oplog.rs'] };
await import("jstests/hooks/run_validate_collections.actual.js");
EOF
# Copy the js tests
mkdir -p mongodb/jstests/hooks
cp -a jstests/* mongodb/jstests
# Copy the js tests
mkdir -p mongodb/jstests/hooks
cp -a jstests/* mongodb/jstests
# Copy the build scripts
mkdir -p mongodb/buildscripts
cp -a buildscripts/* mongodb/buildscripts
# Copy the build scripts
mkdir -p mongodb/buildscripts
cp -a buildscripts/* mongodb/buildscripts
# Create the final archive
tar czf supplementary-data.tgz mongodb
# Create the final archive
tar czf supplementary-data.tgz mongodb
else
# Evergreen does not handle nested escaped expansions well
version_to_reuse_from=$(if [ -n "${build_patch_id}" ]; then echo "${build_patch_id}"; else echo "${reuse_compile_from}"; fi)
curl -o supplementary-data.tgz https://s3.amazonaws.com/mciuploads/"${project}"/"${compile_variant}"/"${version_to_reuse_from}"/dsi/supplementary-data.tgz
# Evergreen does not handle nested escaped expansions well
version_to_reuse_from=$(if [ -n "${build_patch_id}" ]; then echo "${build_patch_id}"; else echo "${reuse_compile_from}"; fi)
curl -o supplementary-data.tgz https://s3.amazonaws.com/mciuploads/"${project}"/"${compile_variant}"/"${version_to_reuse_from}"/dsi/supplementary-data.tgz
fi
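The atlas_info step above interpolates shell variables directly into a JSON string and uses jq only for pretty-printing. A sketch of an alternative that lets jq do the quoting, under the same variables:

# --arg passes values in pre-escaped, so embedded quotes cannot break the JSON
jq -n --arg version "$version" --arg gitVersion "$revision" \
    --arg uarch "$uarch" --arg os "$os" \
    '{version: $version, gitVersion: $gitVersion, uarch: $uarch, os: $os}' \
    >mongodb/atlas_info.json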

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -7,5 +7,5 @@ set -o errexit
activate_venv
$python buildscripts/evergreen_activate_gen_tasks.py \
--expansion-file ../expansions.yml \
--verbose
--expansion-file ../expansions.yml \
--verbose

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -8,19 +8,19 @@ set -o verbose
build_patch_id="${build_patch_id:-${reuse_compile_from}}"
if [ -n "${build_patch_id}" ]; then
exit 0
exit 0
fi
is_san_variant_arg=""
if [[ -n "${san_options}" ]]; then
is_san_variant_arg="--is-san-variant"
is_san_variant_arg="--is-san-variant"
fi
activate_venv
$python buildscripts/debugsymb_mapper.py \
--version "${version_id}" \
--client-id "${symbolizer_client_id}" \
--client-secret "${symbolizer_client_secret}" \
--variant "${build_variant}" \
$is_san_variant_arg
--version "${version_id}" \
--client-id "${symbolizer_client_id}" \
--client-secret "${symbolizer_client_secret}" \
--variant "${build_variant}" \
$is_san_variant_arg
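is_san_variant_arg is deliberately left unquoted so that, when empty, it expands to nothing rather than to an empty positional argument. A bash array expresses the same intent explicitly; a sketch:

extra_args=()
if [[ -n "${san_options}" ]]; then
    extra_args+=(--is-san-variant)
fi
# expands to zero words when empty (bash >= 4.4 if set -u is in effect)
$python buildscripts/debugsymb_mapper.py "${extra_args[@]}"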

View File

@ -1,10 +1,10 @@
set -o errexit
curl --fail-with-body \
--header "Api-User: ${EVERGREEN_API_USER}" \
--header "Api-Key: ${EVERGREEN_API_KEY}" \
-L https://evergreen.mongodb.com/rest/v2/tasks/${PROMOTE_TASK_ID} \
--output ./task_data.json
--header "Api-User: ${EVERGREEN_API_USER}" \
--header "Api-Key: ${EVERGREEN_API_KEY}" \
-L https://evergreen.mongodb.com/rest/v2/tasks/${PROMOTE_TASK_ID} \
--output ./task_data.json
echo ".................."
echo "task data"
@ -20,7 +20,7 @@ promote_revision=$(cat task_data.json | jq -r ".revision")
artifact_address="https://internal-downloads.mongodb.com/server-custom-builds/${promote_project_id}/${promote_version_id}/${promote_build_variant}/mongo-${promote_build_id}.tgz"
cat << EOT > ./promote-expansions.yml
cat <<EOT >./promote-expansions.yml
promote_project_id: "$promote_project_id"
promote_version_id: "$promote_version_id"
promote_build_id: "$promote_build_id"

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -9,48 +9,48 @@ set -o verbose
# Use the Evergreen temp directory to avoid filling up the disk.
mkdir -p $TMPDIR
if [[ "$OSTYPE" == "cygwin" ]] || [[ "$OSTYPE" == "win32" ]]; then
mkdir -p Z:/bazel_tmp
touch Z:/bazel_tmp/mci_path
# TODO(SERVER-94605): remove when Windows temp directory is cleared between task runs
if [[ "$PWD" != "$(cat Z:/bazel_tmp/mci_path)" ]]; then
echo "Clearing bazel output root from previous task mci '$(cat Z:/bazel_tmp/mci_path)'"
rm -rf Z:/bazel_tmp/* || true
echo $PWD > Z:/bazel_tmp/mci_path
fi
mkdir -p Z:/bazel_tmp
touch Z:/bazel_tmp/mci_path
# TODO(SERVER-94605): remove when Windows temp directory is cleared between task runs
if [[ "$PWD" != "$(cat Z:/bazel_tmp/mci_path)" ]]; then
echo "Clearing bazel output root from previous task mci '$(cat Z:/bazel_tmp/mci_path)'"
rm -rf Z:/bazel_tmp/* || true
echo $PWD >Z:/bazel_tmp/mci_path
fi
# Z:/ path is necessary to avoid running into MSVC's file length limit,
# see https://jira.mongodb.org/browse/DEVPROD-11126
abs_path=$(cygpath -w "$TMPDIR" | tr '\\' '/')
echo "startup --output_user_root=Z:/bazel_tmp" > .bazelrc.evergreen
echo "common --action_env=TMP=Z:/bazel_tmp" >> .bazelrc.evergreen
echo "common --action_env=TEMP=Z:/bazel_tmp" >> .bazelrc.evergreen
echo "BAZELISK_HOME=${abs_path}/bazelisk_home" >> .bazeliskrc
# echo "common --define GIT_COMMIT_HASH=$(git rev-parse HEAD)" >> .bazelrc.git
echo "common --define GIT_COMMIT_HASH=nogitversion" >> .bazelrc.git
# Z:/ path is necessary to avoid running into MSVC's file length limit,
# see https://jira.mongodb.org/browse/DEVPROD-11126
abs_path=$(cygpath -w "$TMPDIR" | tr '\\' '/')
echo "startup --output_user_root=Z:/bazel_tmp" >.bazelrc.evergreen
echo "common --action_env=TMP=Z:/bazel_tmp" >>.bazelrc.evergreen
echo "common --action_env=TEMP=Z:/bazel_tmp" >>.bazelrc.evergreen
echo "BAZELISK_HOME=${abs_path}/bazelisk_home" >>.bazeliskrc
# echo "common --define GIT_COMMIT_HASH=$(git rev-parse HEAD)" >> .bazelrc.git
echo "common --define GIT_COMMIT_HASH=nogitversion" >>.bazelrc.git
else
echo "startup --output_user_root=${TMPDIR}/bazel-output-root" > .bazelrc.evergreen
echo "BAZELISK_HOME=${TMPDIR}/bazelisk_home" >> .bazeliskrc
echo "common --define GIT_COMMIT_HASH=$(git rev-parse HEAD)" >> .bazelrc.git
echo "startup --output_user_root=${TMPDIR}/bazel-output-root" >.bazelrc.evergreen
echo "BAZELISK_HOME=${TMPDIR}/bazelisk_home" >>.bazeliskrc
echo "common --define GIT_COMMIT_HASH=$(git rev-parse HEAD)" >>.bazelrc.git
fi
if [[ "${evergreen_remote_exec}" != "on" ]]; then
# Temporarily disable remote exec and only use remote cache
echo "common --remote_executor=" >> .bazelrc.evergreen
echo "common --modify_execution_info=.*=+no-remote-exec" >> .bazelrc.evergreen
echo "common --jobs=auto" >> .bazelrc.evergreen
# Temporarily disable remote exec and only use remote cache
echo "common --remote_executor=" >>.bazelrc.evergreen
echo "common --modify_execution_info=.*=+no-remote-exec" >>.bazelrc.evergreen
echo "common --jobs=auto" >>.bazelrc.evergreen
fi
uri="https://spruce.mongodb.com/task/${task_id:?}?execution=${execution:?}"
echo "common --tls_client_certificate=./engflow.cert" >> .bazelrc.evergreen
echo "common --tls_client_key=./engflow.key" >> .bazelrc.evergreen
echo "common --bes_keywords=engflow:CiCdPipelineName=${build_variant:?}" >> .bazelrc.evergreen
echo "common --bes_keywords=engflow:CiCdJobName=${task_name:?}" >> .bazelrc.evergreen
echo "common --bes_keywords=engflow:CiCdUri=${uri:?}" >> .bazelrc.evergreen
echo "common --bes_keywords=evg:project=${project:?}" >> .bazelrc.evergreen
echo "common --remote_upload_local_results=True" >> .bazelrc.evergreen
echo "common --test_output=summary" >> .bazelrc.evergreen
echo "common --tls_client_certificate=./engflow.cert" >>.bazelrc.evergreen
echo "common --tls_client_key=./engflow.key" >>.bazelrc.evergreen
echo "common --bes_keywords=engflow:CiCdPipelineName=${build_variant:?}" >>.bazelrc.evergreen
echo "common --bes_keywords=engflow:CiCdJobName=${task_name:?}" >>.bazelrc.evergreen
echo "common --bes_keywords=engflow:CiCdUri=${uri:?}" >>.bazelrc.evergreen
echo "common --bes_keywords=evg:project=${project:?}" >>.bazelrc.evergreen
echo "common --remote_upload_local_results=True" >>.bazelrc.evergreen
echo "common --test_output=summary" >>.bazelrc.evergreen
# Disable remote execution in evergreen only since it runs on every PR, but we still
# want it to be fast on workstations
echo "coverage --config=no-remote-exec" >> .bazelrc.evergreen
echo "coverage --config=no-remote-exec" >>.bazelrc.evergreen

View File

@ -9,19 +9,19 @@ virtualenv -p python3.12 .venv
source .venv/bin/activate
pip install -r sast_reporting/requirements.txt
if [ -z "${TRIGGERED_BY_GIT_TAG}" ]; then
echo "Evergreen version was NOT triggered by a git tag"
echo "Setting Google Drive folder ID for non-release"
google_drive_folder_id="${SAST_REPORT_TEST_GOOGLE_DRIVE_FOLDER_ID}"
echo "Evergreen version was NOT triggered by a git tag"
echo "Setting Google Drive folder ID for non-release"
google_drive_folder_id="${SAST_REPORT_TEST_GOOGLE_DRIVE_FOLDER_ID}"
else
echo "Evergreen version was triggered by git tag '${TRIGGERED_BY_GIT_TAG}'"
echo "Setting Google Drive folder ID for release"
google_drive_folder_id="${SAST_REPORT_RELEASES_GOOGLE_DRIVE_FOLDER_ID}"
echo "Evergreen version was triggered by git tag '${TRIGGERED_BY_GIT_TAG}'"
echo "Setting Google Drive folder ID for release"
google_drive_folder_id="${SAST_REPORT_RELEASES_GOOGLE_DRIVE_FOLDER_ID}"
fi
python3 -m sast_reporting.src.mongodb_server \
--version ${MONGODB_VERSION} \
--branch ${MONGODB_RELEASE_BRANCH} \
--commit-date $commit_datetime \
--output-path ${MODULE_PATH}/sast_report_${MONGODB_VERSION}.xlsx \
--upload-file-name "[${MONGODB_VERSION}] MongoDB Server Enterprise SAST Report" \
--google-drive-folder-id $google_drive_folder_id \
--env-file ${WORK_DIR}/sast_report_generation_credentials.env
--version ${MONGODB_VERSION} \
--branch ${MONGODB_RELEASE_BRANCH} \
--commit-date $commit_datetime \
--output-path ${MODULE_PATH}/sast_report_${MONGODB_VERSION}.xlsx \
--upload-file-name "[${MONGODB_VERSION}] MongoDB Server Enterprise SAST Report" \
--google-drive-folder-id $google_drive_folder_id \
--env-file ${WORK_DIR}/sast_report_generation_credentials.env

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -9,11 +9,11 @@ set -o verbose
setup_mongo_task_generator
activate_venv
RUST_BACKTRACE=full PATH=$PATH:$HOME:/ ./mongo-task-generator \
--expansion-file ../expansions.yml \
--evg-auth-file ./.evergreen.yml \
--evg-project-file ${evergreen_config_file_path} \
--generate-sub-tasks-config etc/generate_subtasks_config.yml \
--s3-test-stats-bucket mongo-test-stats \
--include-fully-disabled-feature-tests \
--bazel-suite-configs resmoke_suite_configs.yml \
$@
--expansion-file ../expansions.yml \
--evg-auth-file ./.evergreen.yml \
--evg-project-file ${evergreen_config_file_path} \
--generate-sub-tasks-config etc/generate_subtasks_config.yml \
--s3-test-stats-bucket mongo-test-stats \
--include-fully-disabled-feature-tests \
--bazel-suite-configs resmoke_suite_configs.yml \
$@
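The trailing $@ forwards this wrapper's own arguments to mongo-task-generator (here and in the burn-in variant below), but unquoted it re-splits any argument containing spaces. That is harmless if callers only pass simple flags; the splitting-proof spelling is the quoted form:

# hypothetical wrapper: "$@" preserves each argument as a single word
exec ./mongo-task-generator "$@"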

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -15,12 +15,12 @@ echo "Base patch revision: $base_revision"
$python buildscripts/burn_in_tests.py generate-test-membership-map-file-for-ci
RUST_BACKTRACE=full PATH=$PATH:$HOME:/ ./mongo-task-generator \
--expansion-file ../expansions.yml \
--evg-auth-file ./.evergreen.yml \
--evg-project-file ${evergreen_config_file_path} \
--generate-sub-tasks-config etc/generate_subtasks_config.yml \
--s3-test-stats-bucket mongo-test-stats \
--include-fully-disabled-feature-tests \
--burn-in \
--burn-in-tests-command "python buildscripts/burn_in_tests.py run --origin-rev=$base_revision" \
$@
--expansion-file ../expansions.yml \
--evg-auth-file ./.evergreen.yml \
--evg-project-file ${evergreen_config_file_path} \
--generate-sub-tasks-config etc/generate_subtasks_config.yml \
--s3-test-stats-bucket mongo-test-stats \
--include-fully-disabled-feature-tests \
--burn-in \
--burn-in-tests-command "python buildscripts/burn_in_tests.py run --origin-rev=$base_revision" \
$@

View File

@ -4,7 +4,7 @@
# Usage:
# bash get_all_resmoke_suite_configs.sh
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -20,5 +20,5 @@ BAZEL_BINARY=$(bazel_get_binary_path)
# str(target.label).replace('@@','') -> the target name, like //buildscripts/resmokeconfig:core_config
# f.path for f in target.files.to_list() -> the path to the config file, like bazel-out/k8-fastbuild/bin/buildscripts/resmokeconfig/core.yml
${BAZEL_BINARY} cquery ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} \
--define=MONGO_VERSION=${version} ${patch_compile_flags} "kind(resmoke_config, //...)" \
--output=starlark --starlark:expr "': '.join([str(target.label).replace('@@','')] + [f.path for f in target.files.to_list()])" > resmoke_suite_configs.yml
--define=MONGO_VERSION=${version} ${patch_compile_flags} "kind(resmoke_config, //...)" \
--output=starlark --starlark:expr "': '.join([str(target.label).replace('@@','')] + [f.path for f in target.files.to_list()])" >resmoke_suite_configs.yml
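Combining the two Starlark expressions described in the comments above, each line of resmoke_suite_configs.yml pairs a target label with its generated config file, e.g. (hypothetical entry):

//buildscripts/resmokeconfig:core_config: bazel-out/k8-fastbuild/bin/buildscripts/resmokeconfig/core.yml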

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src

View File

@ -6,7 +6,7 @@
# Required environment variables:
# * ${suite} - Resmoke bazel target, like //buildscripts/resmokeconfig:core
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -18,4 +18,4 @@ source ./evergreen/bazel_utility_functions.sh
BAZEL_BINARY=$(bazel_get_binary_path)
echo "suite_config: $(${BAZEL_BINARY} cquery ${bazel_args} ${bazel_compile_flags} ${task_compile_flags} \
--define=MONGO_VERSION=${version} ${patch_compile_flags} ${suite}_config --output files)" > suite_config_expansion.yml
--define=MONGO_VERSION=${version} ${patch_compile_flags} ${suite}_config --output files)" >suite_config_expansion.yml

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src
@ -8,9 +8,9 @@ set -o verbose
# Set what processes to look for. For most tasks, we rely on resmoke to figure out its subprocesses
# and run the hang analyzer on those. For non-resmoke tasks, we enumerate the process list here.
if [[ ${task_name} == *"jepsen"* ]]; then
hang_analyzer_option="-o file -o stdout -p dbtest,java,mongo,mongod,mongos,python,_test"
hang_analyzer_option="-o file -o stdout -p dbtest,java,mongo,mongod,mongos,python,_test"
else
hang_analyzer_option="-o file -o stdout -m exact -p python"
hang_analyzer_option="-o file -o stdout -m exact -p python"
fi
activate_venv
@ -19,5 +19,5 @@ $python buildscripts/resmoke.py hang-analyzer $hang_analyzer_option
# Call hang analyzer for tasks that are running remote mongo processes
if [ -n "${private_ip_address}" ]; then
$python buildscripts/resmoke.py powercycle remote-hang-analyzer
$python buildscripts/resmoke.py powercycle remote-hang-analyzer
fi

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/prelude.sh"
cd src

View File

@ -1,8 +1,8 @@
set -euo pipefail
if [ -d jepsen ]; then
echo "Cleanup docker containers"
# docker ps -q fails when no containers are running
sudo docker container kill $(docker ps -q) || true
sudo docker system prune -f
echo "Cleanup docker containers"
# docker ps -q fails when no containers are running
sudo docker container kill $(docker ps -q) || true
sudo docker system prune -f
fi
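As the comment says, docker ps -q prints nothing when no containers are running, which makes kill fail for lack of arguments; || true swallows that expected failure. An equivalent sketch that avoids the empty call altogether:

running=$(sudo docker ps -q)
if [ -n "$running" ]; then
    # $running is intentionally unquoted so each container ID is its own word
    sudo docker container kill $running
fi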

View File

@ -1,4 +1,4 @@
set -euo pipefail
cd jepsen/docker
./bin/up -n 9 -d 2>&1 > docker.log
./bin/up -n 9 -d 2>&1 >docker.log
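One caveat worth knowing: redirections apply left to right, so 2>&1 >docker.log points stderr at the terminal (the current stdout) and only then sends stdout to the file, leaving stderr uncaptured. If the intent were to log both streams, the order would be reversed:

./bin/up -n 9 -d >docker.log 2>&1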

View File

@ -1,4 +1,4 @@
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)"
DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
. "$DIR/../prelude.sh"
# this file does not use set -euo pipefail because we determine test success or
@ -13,9 +13,9 @@ start_time=$(date +%s)
# However, to preserve all the mongod and mongos logs for each failed test,
# we run the test 30 times in a for loop.
for i in {1..30}; do
cd jepsen/docker
cd jepsen/docker
sudo docker exec jepsen-control bash --login -c "\
sudo docker exec jepsen-control bash --login -c "\
cd /jepsen/mongodb && \
lein run test-all -w list-append \
-n n1 -n n2 -n n3 -n n4 -n n5 -n n6 -n n7 -n n8 -n n9 \
@ -31,47 +31,47 @@ for i in {1..30}; do
--nemesis partition \
--test-count 1" | tee jepsen_test_${i}.log
cd ../../
cd ../../
# copy files to expected locations for archiving
mkdir -p src/jepsen-mongodb/store/test-index${i}
sudo docker cp jepsen-control:/jepsen/mongodb/store src/jepsen-mongodb/store/test-index${i}
cp jepsen/docker/jepsen_test_${i}.log src/jepsen-mongodb/
sudo docker cp jepsen-control:/jepsen/mongodb src/jepsen-workdir
# copy files to expected locations for archiving
mkdir -p src/jepsen-mongodb/store/test-index${i}
sudo docker cp jepsen-control:/jepsen/mongodb/store src/jepsen-mongodb/store/test-index${i}
cp jepsen/docker/jepsen_test_${i}.log src/jepsen-mongodb/
sudo docker cp jepsen-control:/jepsen/mongodb src/jepsen-workdir
# Get the last five lines of the log file, like the example below
# 1 successes
# 0 unknown
# 0 crashed
# 0 failures
#
last_five_lines=$(tail -n 5 "jepsen/docker/jepsen_test_${i}.log")
# Get the last five lines of the log file, like the example below
# 1 successes
# 0 unknown
# 0 crashed
# 0 failures
#
last_five_lines=$(tail -n 5 "jepsen/docker/jepsen_test_${i}.log")
# Check if the "1 successes" string is in the last five lines
if echo "$last_five_lines" | grep -q "1 successes"; then
echo "Test is successful, no additional logs will be spared."
else
echo "Test is not successful. Sparing mongod and mongos logs into 'src/jepsen-mongodb/mongodlogs/' and 'src/jepsen-mongodb/mongoslogs/' directories."
# Check if the "1 successes" string is in the last five lines
if echo "$last_five_lines" | grep -q "1 successes"; then
echo "Test is successful, no additional logs will be spared."
else
echo "Test is not successful. Sparing mongod and mongos logs into 'src/jepsen-mongodb/mongodlogs/' and 'src/jepsen-mongodb/mongoslogs/' directories."
# copy mongod logs
mkdir -p src/jepsen-mongodb/mongodlogs/test_${i}
# loop over the 9 docker containers
for n in {1..9}; do
sudo docker cp jepsen-n${n}:/var/log/mongodb/mongod.log src/jepsen-mongodb/mongodlogs/test_${i}/jepsen-n${n}-mongod.log
done
sudo chmod +r src/jepsen-mongodb/mongodlogs/test_${i}/*.log
# copy mongod logs
mkdir -p src/jepsen-mongodb/mongodlogs/test_${i}
# loop over the 9 docker containers
for n in {1..9}; do
sudo docker cp jepsen-n${n}:/var/log/mongodb/mongod.log src/jepsen-mongodb/mongodlogs/test_${i}/jepsen-n${n}-mongod.log
done
sudo chmod +r src/jepsen-mongodb/mongodlogs/test_${i}/*.log
# copy mongos logs
mkdir -p src/jepsen-mongodb/mongoslogs/test_${i}
for n in {1..9}; do
sudo docker cp jepsen-n${n}:/var/log/mongodb/mongos.stdout src/jepsen-mongodb/mongoslogs/test_${i}/jepsen-n${n}-mongos.stdout
done
sudo chmod +r src/jepsen-mongodb/mongoslogs/test_${i}/*.stdout
fi
# copy mongos logs
mkdir -p src/jepsen-mongodb/mongoslogs/test_${i}
for n in {1..9}; do
sudo docker cp jepsen-n${n}:/var/log/mongodb/mongos.stdout src/jepsen-mongodb/mongoslogs/test_${i}/jepsen-n${n}-mongos.stdout
done
sudo chmod +r src/jepsen-mongodb/mongoslogs/test_${i}/*.stdout
fi
done
# Merge all jepsen_test_${i}.log into a single file
cat src/jepsen-mongodb/jepsen_test_*.log > src/jepsen-mongodb/jepsen_${task_name}_${execution}.log
cat src/jepsen-mongodb/jepsen_test_*.log >src/jepsen-mongodb/jepsen_${task_name}_${execution}.log
end_time=$(date +%s)
elapsed_secs=$((end_time - start_time))
@ -82,8 +82,8 @@ $python buildscripts/jepsen_report.py --start_time=$start_time --end_time=$end_t
exit_code=$?
if [ -f "jepsen_system_fail.txt" ]; then
mv jepsen_system_fail.txt jepsen-mongodb/jepsen_system_failure_${task_name}_${execution}
exit 0
mv jepsen_system_fail.txt jepsen-mongodb/jepsen_system_failure_${task_name}_${execution}
exit 0
fi
exit $exit_code

Some files were not shown because too many files have changed in this diff