Merge branch 'main' into d417-refactor

This commit is contained in:
augustelalande 2025-11-12 17:58:08 -05:00
commit 0ca36a4557
473 changed files with 18890 additions and 5536 deletions

View File

@ -231,6 +231,8 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: | run: |
rustup component add clippy rustup component add clippy
@ -251,20 +253,23 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
shared-key: ruff-linux-debug
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install mold" - name: "Install mold"
uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1 uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
- name: "Install cargo nextest" - name: "Install cargo nextest"
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-nextest tool: cargo-nextest
- name: "Install cargo insta" - name: "Install cargo insta"
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-insta tool: cargo-insta
- name: "Install uv" - name: "Install uv"
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with: with:
enable-cache: "true" enable-cache: "true"
- name: ty mdtests (GitHub annotations) - name: ty mdtests (GitHub annotations)
@ -277,8 +282,8 @@ jobs:
run: cargo test -p ty_python_semantic --test mdtest || true run: cargo test -p ty_python_semantic --test mdtest || true
- name: "Run tests" - name: "Run tests"
run: cargo insta test --all-features --unreferenced reject --test-runner nextest run: cargo insta test --all-features --unreferenced reject --test-runner nextest
# Dogfood ty on py-fuzzer - name: Dogfood ty on py-fuzzer
- run: uv run --project=./python/py-fuzzer cargo run -p ty check --project=./python/py-fuzzer run: uv run --project=./python/py-fuzzer cargo run -p ty check --project=./python/py-fuzzer
# Check for broken links in the documentation. # Check for broken links in the documentation.
- run: cargo doc --all --no-deps - run: cargo doc --all --no-deps
env: env:
@ -291,14 +296,6 @@ jobs:
env: env:
# Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025). # Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
RUSTDOCFLAGS: "-D warnings" RUSTDOCFLAGS: "-D warnings"
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: ruff
path: target/debug/ruff
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: ty
path: target/debug/ty
cargo-test-linux-release: cargo-test-linux-release:
name: "cargo test (linux, release)" name: "cargo test (linux, release)"
@ -315,20 +312,22 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install mold" - name: "Install mold"
uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1 uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
- name: "Install cargo nextest" - name: "Install cargo nextest"
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-nextest tool: cargo-nextest
- name: "Install cargo insta" - name: "Install cargo insta"
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-insta tool: cargo-insta
- name: "Install uv" - name: "Install uv"
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with: with:
enable-cache: "true" enable-cache: "true"
- name: "Run tests" - name: "Run tests"
@ -350,14 +349,16 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install cargo nextest" - name: "Install cargo nextest"
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-nextest tool: cargo-nextest
- name: "Install uv" - name: "Install uv"
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with: with:
enable-cache: "true" enable-cache: "true"
- name: "Run tests" - name: "Run tests"
@ -376,9 +377,11 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with: with:
node-version: 22 node-version: 22
cache: "npm" cache: "npm"
@ -411,6 +414,8 @@ jobs:
file: "Cargo.toml" file: "Cargo.toml"
field: "workspace.package.rust-version" field: "workspace.package.rust-version"
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
env: env:
MSRV: ${{ steps.msrv.outputs.value }} MSRV: ${{ steps.msrv.outputs.value }}
@ -435,10 +440,13 @@ jobs:
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with: with:
workspaces: "fuzz -> target" workspaces: "fuzz -> target"
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
- name: "Install cargo-binstall" - name: "Install cargo-binstall"
uses: cargo-bins/cargo-binstall@afcf9780305558bcc9e4bc94b7589ab2bb8b6106 # v1.15.9 uses: cargo-bins/cargo-binstall@ae04fb5e853ae6cd3ad7de4a1d554a8b646d12aa # v1.15.11
- name: "Install cargo-fuzz" - name: "Install cargo-fuzz"
# Download the latest version from quick install and not the github releases because github releases only has MUSL targets. # Download the latest version from quick install and not the github releases because github releases only has MUSL targets.
run: cargo binstall cargo-fuzz --force --disable-strategies crate-meta-data --no-confirm run: cargo binstall cargo-fuzz --force --disable-strategies crate-meta-data --no-confirm
@ -447,9 +455,7 @@ jobs:
fuzz-parser: fuzz-parser:
name: "fuzz parser" name: "fuzz parser"
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: needs: determine_changes
- cargo-test-linux
- determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.parser == 'true' || needs.determine_changes.outputs.py-fuzzer == 'true') }} if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.parser == 'true' || needs.determine_changes.outputs.py-fuzzer == 'true') }}
timeout-minutes: 20 timeout-minutes: 20
env: env:
@ -458,27 +464,24 @@ jobs:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with: with:
persist-credentials: false persist-credentials: false
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
name: Download Ruff binary to test
id: download-cached-binary
with: with:
name: ruff shared-key: ruff-linux-debug
path: ruff-to-test save-if: false
- name: "Install Rust toolchain"
run: rustup show
- name: Build Ruff binary
run: cargo build --bin ruff
- name: Fuzz - name: Fuzz
env:
DOWNLOAD_PATH: ${{ steps.download-cached-binary.outputs.download-path }}
run: | run: |
# Make executable, since artifact download doesn't preserve this
chmod +x "${DOWNLOAD_PATH}/ruff"
( (
uv run \ uv run \
--python="${PYTHON_VERSION}" \ --python="${PYTHON_VERSION}" \
--project=./python/py-fuzzer \ --project=./python/py-fuzzer \
--locked \ --locked \
fuzz \ fuzz \
--test-executable="${DOWNLOAD_PATH}/ruff" \ --test-executable=target/debug/ruff \
--bin=ruff \ --bin=ruff \
0-500 0-500
) )
@ -494,7 +497,9 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup component add rustfmt run: rustup component add rustfmt
# Run all code generation scripts, and verify that the current output is # Run all code generation scripts, and verify that the current output is
@ -518,9 +523,7 @@ jobs:
ecosystem: ecosystem:
name: "ecosystem" name: "ecosystem"
runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-latest-8' || 'ubuntu-latest' }} runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-latest-8' || 'ubuntu-latest' }}
needs: needs: determine_changes
- cargo-test-linux
- determine_changes
# Only runs on pull requests, since that is the only we way we can find the base version for comparison. # Only runs on pull requests, since that is the only we way we can find the base version for comparison.
# Ecosystem check needs linter and/or formatter changes. # Ecosystem check needs linter and/or formatter changes.
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }} if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
@ -528,26 +531,37 @@ jobs:
steps: steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with: with:
ref: ${{ github.event.pull_request.base.ref }}
persist-credentials: false persist-credentials: false
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
- uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
activate-environment: true activate-environment: true
- uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - name: "Install Rust toolchain"
name: Download comparison Ruff binary run: rustup show
id: ruff-target
with:
name: ruff
path: target/debug
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8 - name: "Install mold"
name: Download baseline Ruff binary uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with: with:
name: ruff shared-key: ruff-linux-debug
branch: ${{ github.event.pull_request.base.ref }} save-if: false
workflow: "ci.yaml"
check_artifacts: true - name: Build baseline version
run: |
cargo build --bin ruff
mv target/debug/ruff target/debug/ruff-baseline
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
persist-credentials: false
clean: false
- name: Build comparison version
run: cargo build --bin ruff
- name: Install ruff-ecosystem - name: Install ruff-ecosystem
run: | run: |
@ -555,16 +569,11 @@ jobs:
- name: Run `ruff check` stable ecosystem check - name: Run `ruff check` stable ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }} if: ${{ needs.determine_changes.outputs.linter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: | run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
# Set pipefail to avoid hiding errors with tee # Set pipefail to avoid hiding errors with tee
set -eo pipefail set -eo pipefail
ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable ruff-ecosystem check ./target/debug/ruff-baseline ./target/debug/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
cat ecosystem-result-check-stable > "$GITHUB_STEP_SUMMARY" cat ecosystem-result-check-stable > "$GITHUB_STEP_SUMMARY"
echo "### Linter (stable)" > ecosystem-result echo "### Linter (stable)" > ecosystem-result
@ -573,16 +582,11 @@ jobs:
- name: Run `ruff check` preview ecosystem check - name: Run `ruff check` preview ecosystem check
if: ${{ needs.determine_changes.outputs.linter == 'true' }} if: ${{ needs.determine_changes.outputs.linter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: | run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
# Set pipefail to avoid hiding errors with tee # Set pipefail to avoid hiding errors with tee
set -eo pipefail set -eo pipefail
ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview ruff-ecosystem check ./target/debug/ruff-baseline ./target/debug/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
cat ecosystem-result-check-preview > "$GITHUB_STEP_SUMMARY" cat ecosystem-result-check-preview > "$GITHUB_STEP_SUMMARY"
echo "### Linter (preview)" >> ecosystem-result echo "### Linter (preview)" >> ecosystem-result
@ -591,16 +595,11 @@ jobs:
- name: Run `ruff format` stable ecosystem check - name: Run `ruff format` stable ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }} if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: | run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
# Set pipefail to avoid hiding errors with tee # Set pipefail to avoid hiding errors with tee
set -eo pipefail set -eo pipefail
ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable ruff-ecosystem format ./target/debug/ruff-baseline ./target/debug/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
cat ecosystem-result-format-stable > "$GITHUB_STEP_SUMMARY" cat ecosystem-result-format-stable > "$GITHUB_STEP_SUMMARY"
echo "### Formatter (stable)" >> ecosystem-result echo "### Formatter (stable)" >> ecosystem-result
@ -609,32 +608,19 @@ jobs:
- name: Run `ruff format` preview ecosystem check - name: Run `ruff format` preview ecosystem check
if: ${{ needs.determine_changes.outputs.formatter == 'true' }} if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: | run: |
# Make executable, since artifact download doesn't preserve this
chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
# Set pipefail to avoid hiding errors with tee # Set pipefail to avoid hiding errors with tee
set -eo pipefail set -eo pipefail
ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview ruff-ecosystem format ./target/debug/ruff-baseline ./target/debug/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
cat ecosystem-result-format-preview > "$GITHUB_STEP_SUMMARY" cat ecosystem-result-format-preview > "$GITHUB_STEP_SUMMARY"
echo "### Formatter (preview)" >> ecosystem-result echo "### Formatter (preview)" >> ecosystem-result
cat ecosystem-result-format-preview >> ecosystem-result cat ecosystem-result-format-preview >> ecosystem-result
echo "" >> ecosystem-result echo "" >> ecosystem-result
- name: Export pull request number # NOTE: astral-sh-bot uses this artifact to post comments on PRs.
run: | # Make sure to update the bot if you rename the artifact.
echo ${{ github.event.number }} > pr-number
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
name: Upload PR Number
with:
name: pr-number
path: pr-number
- uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
name: Upload Results name: Upload Results
with: with:
@ -645,36 +631,38 @@ jobs:
name: "Fuzz for new ty panics" name: "Fuzz for new ty panics"
runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-22.04-16' || 'ubuntu-latest' }} runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-22.04-16' || 'ubuntu-latest' }}
needs: needs:
- cargo-test-linux
- determine_changes - determine_changes
# Only runs on pull requests, since that is the only we way we can find the base version for comparison. # Only runs on pull requests, since that is the only we way we can find the base version for comparison.
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && (needs.determine_changes.outputs.ty == 'true' || needs.determine_changes.outputs.py-fuzzer == 'true') }} if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && (needs.determine_changes.outputs.ty == 'true' || needs.determine_changes.outputs.py-fuzzer == 'true') }}
timeout-minutes: ${{ github.repository == 'astral-sh/ruff' && 5 || 20 }} timeout-minutes: ${{ github.repository == 'astral-sh/ruff' && 10 || 20 }}
steps: steps:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with: with:
fetch-depth: 0
persist-credentials: false persist-credentials: false
- uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
name: Download new ty binary - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
id: ty-new
with: with:
name: ty save-if: ${{ github.ref == 'refs/heads/main' }}
path: target/debug - name: "Install Rust toolchain"
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8 run: rustup show
name: Download baseline ty binary - name: "Install mold"
with: uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
name: ty
branch: ${{ github.event.pull_request.base.ref }}
workflow: "ci.yaml"
check_artifacts: true
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
- name: Fuzz - name: Fuzz
env: env:
FORCE_COLOR: 1 FORCE_COLOR: 1
NEW_TY: ${{ steps.ty-new.outputs.download-path }}
run: | run: |
# Make executable, since artifact download doesn't preserve this echo "new commit"
chmod +x "${PWD}/ty" "${NEW_TY}/ty" git rev-list --format=%s --max-count=1 "$GITHUB_SHA"
cargo build --profile=profiling --bin=ty
mv target/profiling/ty ty-new
MERGE_BASE="$(git merge-base "$GITHUB_SHA" "origin/$GITHUB_BASE_REF")"
git checkout -b old_commit "$MERGE_BASE"
echo "old commit (merge base)"
git rev-list --format=%s --max-count=1 old_commit
cargo build --profile=profiling --bin=ty
mv target/profiling/ty ty-old
( (
uv run \ uv run \
@ -682,8 +670,8 @@ jobs:
--project=./python/py-fuzzer \ --project=./python/py-fuzzer \
--locked \ --locked \
fuzz \ fuzz \
--test-executable="${NEW_TY}/ty" \ --test-executable=ty-new \
--baseline-executable="${PWD}/ty" \ --baseline-executable=ty-old \
--only-new-bugs \ --only-new-bugs \
--bin=ty \ --bin=ty \
0-1000 0-1000
@ -698,7 +686,7 @@ jobs:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with: with:
persist-credentials: false persist-credentials: false
- uses: cargo-bins/cargo-binstall@afcf9780305558bcc9e4bc94b7589ab2bb8b6106 # v1.15.9 - uses: cargo-bins/cargo-binstall@ae04fb5e853ae6cd3ad7de4a1d554a8b646d12aa # v1.15.11
- run: cargo binstall --no-confirm cargo-shear - run: cargo binstall --no-confirm cargo-shear
- run: cargo shear - run: cargo shear
@ -711,12 +699,16 @@ jobs:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with: with:
persist-credentials: false persist-credentials: false
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install mold"
uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
- name: "Run ty completion evaluation" - name: "Run ty completion evaluation"
run: cargo run --release --package ty_completion_eval -- all --threshold 0.4 --tasks /tmp/completion-evaluation-tasks.csv run: cargo run --profile profiling --package ty_completion_eval -- all --threshold 0.4 --tasks /tmp/completion-evaluation-tasks.csv
- name: "Ensure there are no changes" - name: "Ensure there are no changes"
run: diff ./crates/ty_completion_eval/completion-evaluation-tasks.csv /tmp/completion-evaluation-tasks.csv run: diff ./crates/ty_completion_eval/completion-evaluation-tasks.csv /tmp/completion-evaluation-tasks.csv
@ -734,6 +726,8 @@ jobs:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
architecture: x64 architecture: x64
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Prep README.md" - name: "Prep README.md"
run: python scripts/transform_readme.py --target pypi run: python scripts/transform_readme.py --target pypi
- name: "Build wheels" - name: "Build wheels"
@ -756,9 +750,11 @@ jobs:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with: with:
persist-credentials: false persist-credentials: false
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with: with:
node-version: 22 node-version: 22
- name: "Cache pre-commit" - name: "Cache pre-commit"
@ -788,6 +784,8 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Add SSH key" - name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }} if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@a6f90b1f127823b31d4d4a8d96047790581349bd # v0.9.1 uses: webfactory/ssh-agent@a6f90b1f127823b31d4d4a8d96047790581349bd # v0.9.1
@ -796,7 +794,7 @@ jobs:
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: Install uv - name: Install uv
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with: with:
python-version: 3.13 python-version: 3.13
activate-environment: true activate-environment: true
@ -830,6 +828,8 @@ jobs:
with: with:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Run checks" - name: "Run checks"
@ -843,9 +843,7 @@ jobs:
name: "test ruff-lsp" name: "test ruff-lsp"
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 5 timeout-minutes: 5
needs: needs: determine_changes
- cargo-test-linux
- determine_changes
if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }} if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
steps: steps:
- uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3.0.0 - uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3.0.0
@ -853,37 +851,46 @@ jobs:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
name: "Download ruff-lsp source" name: "Checkout ruff source"
with:
persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with:
shared-key: ruff-linux-debug
save-if: false
- name: "Install Rust toolchain"
run: rustup show
- name: Build Ruff binary
run: cargo build -p ruff --bin ruff
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
name: "Checkout ruff-lsp source"
with: with:
persist-credentials: false persist-credentials: false
repository: "astral-sh/ruff-lsp" repository: "astral-sh/ruff-lsp"
path: ruff-lsp
- uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
with: with:
# installation fails on 3.13 and newer # installation fails on 3.13 and newer
python-version: "3.12" python-version: "3.12"
- uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
name: Download development ruff binary
id: ruff-target
with:
name: ruff
path: target/debug
- name: Install ruff-lsp dependencies - name: Install ruff-lsp dependencies
run: | run: |
cd ruff-lsp
just install just install
- name: Run ruff-lsp tests - name: Run ruff-lsp tests
env:
DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
run: | run: |
# Setup development binary # Setup development binary
pip uninstall --yes ruff pip uninstall --yes ruff
chmod +x "${DOWNLOAD_PATH}/ruff" export PATH="${PWD}/target/debug:${PATH}"
export PATH="${DOWNLOAD_PATH}:${PATH}"
ruff version ruff version
cd ruff-lsp
just test just test
check-playground: check-playground:
@ -900,7 +907,9 @@ jobs:
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown run: rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with: with:
node-version: 22 node-version: 22
cache: "npm" cache: "npm"
@ -938,21 +947,23 @@ jobs:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install codspeed" - name: "Install codspeed"
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-codspeed tool: cargo-codspeed
- name: "Build benchmarks" - name: "Build benchmarks"
run: cargo codspeed build --features "codspeed,instrumented" --no-default-features -p ruff_benchmark --bench formatter --bench lexer --bench linter --bench parser run: cargo codspeed build --features "codspeed,instrumented" --profile profiling --no-default-features -p ruff_benchmark --bench formatter --bench lexer --bench linter --bench parser
- name: "Run benchmarks" - name: "Run benchmarks"
uses: CodSpeedHQ/action@6b43a0cd438f6ca5ad26f9ed03ed159ed2df7da9 # v4.1.1 uses: CodSpeedHQ/action@bb005fe1c1eea036d3894f02c049cb6b154a1c27 # v4.3.3
with: with:
mode: instrumentation mode: instrumentation
run: cargo codspeed run run: cargo codspeed run
@ -976,21 +987,23 @@ jobs:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install codspeed" - name: "Install codspeed"
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-codspeed tool: cargo-codspeed
- name: "Build benchmarks" - name: "Build benchmarks"
run: cargo codspeed build --features "codspeed,instrumented" --no-default-features -p ruff_benchmark --bench ty run: cargo codspeed build --features "codspeed,instrumented" --profile profiling --no-default-features -p ruff_benchmark --bench ty
- name: "Run benchmarks" - name: "Run benchmarks"
uses: CodSpeedHQ/action@6b43a0cd438f6ca5ad26f9ed03ed159ed2df7da9 # v4.1.1 uses: CodSpeedHQ/action@bb005fe1c1eea036d3894f02c049cb6b154a1c27 # v4.3.3
with: with:
mode: instrumentation mode: instrumentation
run: cargo codspeed run run: cargo codspeed run
@ -1014,21 +1027,23 @@ jobs:
persist-credentials: false persist-credentials: false
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 with:
save-if: ${{ github.ref == 'refs/heads/main' }}
- uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install codspeed" - name: "Install codspeed"
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-codspeed tool: cargo-codspeed
- name: "Build benchmarks" - name: "Build benchmarks"
run: cargo codspeed build --features "codspeed,walltime" --no-default-features -p ruff_benchmark run: cargo codspeed build --features "codspeed,walltime" --profile profiling --no-default-features -p ruff_benchmark
- name: "Run benchmarks" - name: "Run benchmarks"
uses: CodSpeedHQ/action@6b43a0cd438f6ca5ad26f9ed03ed159ed2df7da9 # v4.1.1 uses: CodSpeedHQ/action@bb005fe1c1eea036d3894f02c049cb6b154a1c27 # v4.3.3
env: env:
# enabling walltime flamegraphs adds ~6 minutes to the CI time, and they don't # enabling walltime flamegraphs adds ~6 minutes to the CI time, and they don't
# appear to provide much useful insight for our walltime benchmarks right now # appear to provide much useful insight for our walltime benchmarks right now

View File

@ -34,7 +34,7 @@ jobs:
- uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with: with:
persist-credentials: false persist-credentials: false
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install mold" - name: "Install mold"

View File

@ -43,7 +43,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: Install the latest version of uv - name: Install the latest version of uv
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with: with:
@ -59,20 +59,15 @@ jobs:
run: | run: |
cd ruff cd ruff
scripts/mypy_primer.sh scripts/mypy_primer.sh
echo ${{ github.event.number }} > ../pr-number
# NOTE: astral-sh-bot uses this artifact to post comments on PRs.
# Make sure to update the bot if you rename the artifact.
- name: Upload diff - name: Upload diff
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with: with:
name: mypy_primer_diff name: mypy_primer_diff
path: mypy_primer.diff path: mypy_primer.diff
- name: Upload pr-number
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: pr-number
path: pr-number
memory_usage: memory_usage:
name: Run memory statistics name: Run memory statistics
runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-22.04-32' || 'ubuntu-latest' }} runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-22.04-32' || 'ubuntu-latest' }}
@ -85,7 +80,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: Install the latest version of uv - name: Install the latest version of uv
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
with: with:

View File

@ -1,122 +0,0 @@
name: PR comment (mypy_primer)
on: # zizmor: ignore[dangerous-triggers]
workflow_run:
workflows: [Run mypy_primer]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The mypy_primer workflow that triggers the workflow run
required: true
jobs:
comment:
runs-on: ubuntu-24.04
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download PR number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download mypy_primer results"
id: download-mypy_primer_diff
if: steps.pr-number.outputs.pr-number
with:
name: mypy_primer_diff
workflow: mypy_primer.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/mypy_primer_diff
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download mypy_primer memory results"
id: download-mypy_primer_memory_diff
if: steps.pr-number.outputs.pr-number
with:
name: mypy_primer_memory_diff
workflow: mypy_primer.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/mypy_primer_memory_diff
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: ${{ steps.download-mypy_primer_diff.outputs.found_artifact == 'true' && steps.download-mypy_primer_memory_diff.outputs.found_artifact == 'true' }}
run: |
# Guard against malicious mypy_primer results that symlink to a secret
# file on this runner
if [[ -L pr/mypy_primer_diff/mypy_primer.diff ]] || [[ -L pr/mypy_primer_memory_diff/mypy_primer_memory.diff ]]
then
echo "Error: mypy_primer.diff and mypy_primer_memory.diff cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment mypy_primer -->' >> comment.txt
echo '## `mypy_primer` results' >> comment.txt
if [ -s "pr/mypy_primer_diff/mypy_primer.diff" ]; then
echo '<details>' >> comment.txt
echo '<summary>Changes were detected when running on open source projects</summary>' >> comment.txt
echo '' >> comment.txt
echo '```diff' >> comment.txt
cat pr/mypy_primer_diff/mypy_primer.diff >> comment.txt
echo '```' >> comment.txt
echo '</details>' >> comment.txt
else
echo 'No ecosystem changes detected ✅' >> comment.txt
fi
if [ -s "pr/mypy_primer_memory_diff/mypy_primer_memory.diff" ]; then
echo '<details>' >> comment.txt
echo '<summary>Memory usage changes were detected when running on open source projects</summary>' >> comment.txt
echo '' >> comment.txt
echo '```diff' >> comment.txt
cat pr/mypy_primer_memory_diff/mypy_primer_memory.diff >> comment.txt
echo '```' >> comment.txt
echo '</details>' >> comment.txt
else
echo 'No memory usage changes detected ✅' >> comment.txt
fi
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment mypy_primer -->"
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}
body-path: comment.txt
edit-mode: replace

View File

@ -1,88 +0,0 @@
name: Ecosystem check comment
on:
workflow_run:
workflows: [CI]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The ecosystem workflow that triggers the workflow run
required: true
jobs:
comment:
runs-on: ubuntu-latest
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download pull request number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download ecosystem results"
id: download-ecosystem-result
if: steps.pr-number.outputs.pr-number
with:
name: ecosystem-result
workflow: ci.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/ecosystem
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: steps.download-ecosystem-result.outputs.found_artifact == 'true'
run: |
# Guard against malicious ecosystem results that symlink to a secret
# file on this runner
if [[ -L pr/ecosystem/ecosystem-result ]]
then
echo "Error: ecosystem-result cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment ecosystem -->' >> comment.txt
echo '## `ruff-ecosystem` results' >> comment.txt
cat pr/ecosystem/ecosystem-result >> comment.txt
echo "" >> comment.txt
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment ecosystem -->"
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}
body-path: comment.txt
edit-mode: replace

View File

@ -31,7 +31,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with: with:
node-version: 22 node-version: 22
package-manager-cache: false package-manager-cache: false

View File

@ -22,7 +22,7 @@ jobs:
id-token: write id-token: write
steps: steps:
- name: "Install uv" - name: "Install uv"
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0 - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
with: with:
pattern: wheels-* pattern: wheels-*

View File

@ -35,7 +35,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with: with:
node-version: 22 node-version: 22
package-manager-cache: false package-manager-cache: false

View File

@ -45,7 +45,7 @@ jobs:
jq '.name="@astral-sh/ruff-wasm-${{ matrix.target }}"' crates/ruff_wasm/pkg/package.json > /tmp/package.json jq '.name="@astral-sh/ruff-wasm-${{ matrix.target }}"' crates/ruff_wasm/pkg/package.json > /tmp/package.json
mv /tmp/package.json crates/ruff_wasm/pkg mv /tmp/package.json crates/ruff_wasm/pkg
- run: cp LICENSE crates/ruff_wasm/pkg # wasm-pack does not put the LICENSE file in the pkg - run: cp LICENSE crates/ruff_wasm/pkg # wasm-pack does not put the LICENSE file in the pkg
- uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0 - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
with: with:
node-version: 22 node-version: 22
registry-url: "https://registry.npmjs.org" registry-url: "https://registry.npmjs.org"

View File

@ -68,7 +68,7 @@ jobs:
# we specify bash to get pipefail; it guards against the `curl` command # we specify bash to get pipefail; it guards against the `curl` command
# failing. otherwise `sh` won't catch that `curl` returned non-0 # failing. otherwise `sh` won't catch that `curl` returned non-0
shell: bash shell: bash
run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.30.0/cargo-dist-installer.sh | sh" run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.30.2/cargo-dist-installer.sh | sh"
- name: Cache dist - name: Cache dist
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
with: with:
@ -166,8 +166,8 @@ jobs:
- custom-build-binaries - custom-build-binaries
- custom-build-docker - custom-build-docker
- build-global-artifacts - build-global-artifacts
# Only run if we're "publishing", and only if local and global didn't fail (skipped is fine) # Only run if we're "publishing", and only if plan, local and global didn't fail (skipped is fine)
if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }} if: ${{ always() && needs.plan.result == 'success' && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }}
env: env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
runs-on: "depot-ubuntu-latest-4" runs-on: "depot-ubuntu-latest-4"

View File

@ -77,7 +77,7 @@ jobs:
run: | run: |
git config --global user.name typeshedbot git config --global user.name typeshedbot
git config --global user.email '<>' git config --global user.email '<>'
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- name: Sync typeshed stubs - name: Sync typeshed stubs
run: | run: |
rm -rf "ruff/${VENDORED_TYPESHED}" rm -rf "ruff/${VENDORED_TYPESHED}"
@ -131,7 +131,7 @@ jobs:
with: with:
persist-credentials: true persist-credentials: true
ref: ${{ env.UPSTREAM_BRANCH}} ref: ${{ env.UPSTREAM_BRANCH}}
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- name: Setup git - name: Setup git
run: | run: |
git config --global user.name typeshedbot git config --global user.name typeshedbot
@ -170,7 +170,7 @@ jobs:
with: with:
persist-credentials: true persist-credentials: true
ref: ${{ env.UPSTREAM_BRANCH}} ref: ${{ env.UPSTREAM_BRANCH}}
- uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 - uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
- name: Setup git - name: Setup git
run: | run: |
git config --global user.name typeshedbot git config --global user.name typeshedbot
@ -207,12 +207,12 @@ jobs:
uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1 uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
- name: "Install cargo nextest" - name: "Install cargo nextest"
if: ${{ success() }} if: ${{ success() }}
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-nextest tool: cargo-nextest
- name: "Install cargo insta" - name: "Install cargo insta"
if: ${{ success() }} if: ${{ success() }}
uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21 uses: taiki-e/install-action@44c6d64aa62cd779e873306675c7a58e86d6d532 # v2.62.49
with: with:
tool: cargo-insta tool: cargo-insta
- name: Update snapshots - name: Update snapshots

View File

@ -33,7 +33,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: Install the latest version of uv - name: Install the latest version of uv
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with: with:
enable-cache: true # zizmor: ignore[cache-poisoning] acceptable risk for CloudFlare pages artifact enable-cache: true # zizmor: ignore[cache-poisoning] acceptable risk for CloudFlare pages artifact
@ -112,8 +112,6 @@ jobs:
cat diff-statistics.md >> "$GITHUB_STEP_SUMMARY" cat diff-statistics.md >> "$GITHUB_STEP_SUMMARY"
echo ${{ github.event.number }} > pr-number
- name: "Deploy to Cloudflare Pages" - name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }} if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
id: deploy id: deploy
@ -131,18 +129,14 @@ jobs:
echo >> comment.md echo >> comment.md
echo "**[Full report with detailed diff]($DEPLOYMENT_URL/diff)** ([timing results]($DEPLOYMENT_URL/timing))" >> comment.md echo "**[Full report with detailed diff]($DEPLOYMENT_URL/diff)** ([timing results]($DEPLOYMENT_URL/timing))" >> comment.md
# NOTE: astral-sh-bot uses this artifact to post comments on PRs.
# Make sure to update the bot if you rename the artifact.
- name: Upload comment - name: Upload comment
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with: with:
name: comment.md name: comment.md
path: comment.md path: comment.md
- name: Upload pr-number
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: pr-number
path: pr-number
- name: Upload diagnostics diff - name: Upload diagnostics diff
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with: with:

View File

@ -1,85 +0,0 @@
name: PR comment (ty ecosystem-analyzer)
on: # zizmor: ignore[dangerous-triggers]
workflow_run:
workflows: [ty ecosystem-analyzer]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The ty ecosystem-analyzer workflow that triggers the workflow run
required: true
jobs:
comment:
runs-on: ubuntu-24.04
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download PR number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download comment.md"
id: download-comment
if: steps.pr-number.outputs.pr-number
with:
name: comment.md
workflow: ty-ecosystem-analyzer.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/comment
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: ${{ steps.download-comment.outputs.found_artifact == 'true' }}
run: |
# Guard against malicious ty ecosystem-analyzer results that symlink to a secret
# file on this runner
if [[ -L pr/comment/comment.md ]]
then
echo "Error: comment.md cannot be a symlink"
exit 1
fi
# Note: this identifier is used to find the comment to update on subsequent runs
echo '<!-- generated-comment ty ecosystem-analyzer -->' > comment.md
echo >> comment.md
cat pr/comment/comment.md >> comment.md
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.md >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment ty ecosystem-analyzer -->"
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}
body-path: comment.md
edit-mode: replace

View File

@ -29,7 +29,7 @@ jobs:
persist-credentials: false persist-credentials: false
- name: Install the latest version of uv - name: Install the latest version of uv
uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0 uses: astral-sh/setup-uv@85856786d1ce8acfbcc2f13a5f3fbd6b938f9f41 # v7.1.2
with: with:
enable-cache: true # zizmor: ignore[cache-poisoning] acceptable risk for CloudFlare pages artifact enable-cache: true # zizmor: ignore[cache-poisoning] acceptable risk for CloudFlare pages artifact

View File

@ -24,7 +24,7 @@ env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10 RUSTUP_MAX_RETRIES: 10
RUST_BACKTRACE: 1 RUST_BACKTRACE: 1
CONFORMANCE_SUITE_COMMIT: d4f39b27a4a47aac8b6d4019e1b0b5b3156fabdc CONFORMANCE_SUITE_COMMIT: 9f6d8ced7cd1c8d92687a4e9c96d7716452e471e
jobs: jobs:
typing_conformance: typing_conformance:
@ -94,21 +94,18 @@ jobs:
touch typing_conformance_diagnostics.diff touch typing_conformance_diagnostics.diff
fi fi
echo ${{ github.event.number }} > pr-number
echo "${CONFORMANCE_SUITE_COMMIT}" > conformance-suite-commit echo "${CONFORMANCE_SUITE_COMMIT}" > conformance-suite-commit
# NOTE: astral-sh-bot uses this artifact to post comments on PRs.
# Make sure to update the bot if you rename the artifact.
- name: Upload diff - name: Upload diff
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with: with:
name: typing_conformance_diagnostics_diff name: typing_conformance_diagnostics_diff
path: typing_conformance_diagnostics.diff path: typing_conformance_diagnostics.diff
- name: Upload pr-number # NOTE: astral-sh-bot uses this artifact to post comments on PRs.
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 # Make sure to update the bot if you rename the artifact.
with:
name: pr-number
path: pr-number
- name: Upload conformance suite commit - name: Upload conformance suite commit
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2 uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with: with:

View File

@ -1,112 +0,0 @@
name: PR comment (typing_conformance)
on: # zizmor: ignore[dangerous-triggers]
workflow_run:
workflows: [Run typing conformance]
types: [completed]
workflow_dispatch:
inputs:
workflow_run_id:
description: The typing_conformance workflow that triggers the workflow run
required: true
jobs:
comment:
runs-on: ubuntu-24.04
permissions:
pull-requests: write
steps:
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download PR number
with:
name: pr-number
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- name: Parse pull request number
id: pr-number
run: |
if [[ -f pr-number ]]
then
echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: Download typing conformance suite commit
with:
name: conformance-suite-commit
run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
if_no_artifact_found: ignore
allow_forks: true
- uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
name: "Download typing_conformance results"
id: download-typing_conformance_diff
if: steps.pr-number.outputs.pr-number
with:
name: typing_conformance_diagnostics_diff
workflow: typing_conformance.yaml
pr: ${{ steps.pr-number.outputs.pr-number }}
path: pr/typing_conformance_diagnostics_diff
workflow_conclusion: completed
if_no_artifact_found: ignore
allow_forks: true
- name: Generate comment content
id: generate-comment
if: ${{ steps.download-typing_conformance_diff.outputs.found_artifact == 'true' }}
run: |
# Guard against malicious typing_conformance results that symlink to a secret
# file on this runner
if [[ -L pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff ]]
then
echo "Error: typing_conformance_diagnostics.diff cannot be a symlink"
exit 1
fi
# Note this identifier is used to find the comment to update on
# subsequent runs
echo '<!-- generated-comment typing_conformance_diagnostics_diff -->' >> comment.txt
if [[ -f conformance-suite-commit ]]
then
echo "## Diagnostic diff on [typing conformance tests](https://github.com/python/typing/tree/$(<conformance-suite-commit)/conformance)" >> comment.txt
else
echo "conformance-suite-commit file not found"
echo "## Diagnostic diff on typing conformance tests" >> comment.txt
fi
if [ -s "pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff" ]; then
echo '<details>' >> comment.txt
echo '<summary>Changes were detected when running ty on typing conformance tests</summary>' >> comment.txt
echo '' >> comment.txt
echo '```diff' >> comment.txt
cat pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff >> comment.txt
echo '```' >> comment.txt
echo '</details>' >> comment.txt
else
echo 'No changes detected when running ty on typing conformance tests ✅' >> comment.txt
fi
echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
cat comment.txt >> "$GITHUB_OUTPUT"
echo 'EOF' >> "$GITHUB_OUTPUT"
- name: Find existing comment
uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
if: steps.generate-comment.outcome == 'success'
id: find-comment
with:
issue-number: ${{ steps.pr-number.outputs.pr-number }}
comment-author: "github-actions[bot]"
body-includes: "<!-- generated-comment typing_conformance_diagnostics_diff -->"
- name: Create or update comment
if: steps.find-comment.outcome == 'success'
uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
with:
comment-id: ${{ steps.find-comment.outputs.comment-id }}
issue-number: ${{ steps.pr-number.outputs.pr-number }}
body-path: comment.txt
edit-mode: replace

3
.github/zizmor.yml vendored
View File

@ -3,9 +3,6 @@
# #
# TODO: can we remove the ignores here so that our workflows are more secure? # TODO: can we remove the ignores here so that our workflows are more secure?
rules: rules:
dangerous-triggers:
ignore:
- pr-comment.yaml
cache-poisoning: cache-poisoning:
ignore: ignore:
- build-docker.yml - build-docker.yml

View File

@ -1,5 +1,105 @@
# Changelog # Changelog
## 0.14.4
Released on 2025-11-06.
### Preview features
- [formatter] Allow newlines after function headers without docstrings ([#21110](https://github.com/astral-sh/ruff/pull/21110))
- [formatter] Avoid extra parentheses for long `match` patterns with `as` captures ([#21176](https://github.com/astral-sh/ruff/pull/21176))
- \[`refurb`\] Expand fix safety for keyword arguments and `Decimal`s (`FURB164`) ([#21259](https://github.com/astral-sh/ruff/pull/21259))
- \[`refurb`\] Preserve argument ordering in autofix (`FURB103`) ([#20790](https://github.com/astral-sh/ruff/pull/20790))
### Bug fixes
- [server] Fix missing diagnostics for notebooks ([#21156](https://github.com/astral-sh/ruff/pull/21156))
- \[`flake8-bugbear`\] Ignore non-NFKC attribute names in `B009` and `B010` ([#21131](https://github.com/astral-sh/ruff/pull/21131))
- \[`refurb`\] Fix false negative for underscores before sign in `Decimal` constructor (`FURB157`) ([#21190](https://github.com/astral-sh/ruff/pull/21190))
- \[`ruff`\] Fix false positives on starred arguments (`RUF057`) ([#21256](https://github.com/astral-sh/ruff/pull/21256))
### Rule changes
- \[`airflow`\] Extend deprecated argument `concurrency` in `airflow..DAG` (`AIR301`) ([#21220](https://github.com/astral-sh/ruff/pull/21220))
### Documentation
- Improve `extend` docs ([#21135](https://github.com/astral-sh/ruff/pull/21135))
- \[`flake8-comprehensions`\] Fix typo in `C416` documentation ([#21184](https://github.com/astral-sh/ruff/pull/21184))
- Revise Ruff setup instructions for Zed editor ([#20935](https://github.com/astral-sh/ruff/pull/20935))
### Other changes
- Make `ruff analyze graph` work with jupyter notebooks ([#21161](https://github.com/astral-sh/ruff/pull/21161))
### Contributors
- [@chirizxc](https://github.com/chirizxc)
- [@Lee-W](https://github.com/Lee-W)
- [@musicinmybrain](https://github.com/musicinmybrain)
- [@MichaReiser](https://github.com/MichaReiser)
- [@tjkuson](https://github.com/tjkuson)
- [@danparizher](https://github.com/danparizher)
- [@renovate](https://github.com/renovate)
- [@ntBre](https://github.com/ntBre)
- [@gauthsvenkat](https://github.com/gauthsvenkat)
- [@LoicRiegel](https://github.com/LoicRiegel)
## 0.14.3
Released on 2025-10-30.
### Preview features
- Respect `--output-format` with `--watch` ([#21097](https://github.com/astral-sh/ruff/pull/21097))
- \[`pydoclint`\] Fix false positive on explicit exception re-raising (`DOC501`, `DOC502`) ([#21011](https://github.com/astral-sh/ruff/pull/21011))
- \[`pyflakes`\] Revert to stable behavior if imports for module lie in alternate branches for `F401` ([#20878](https://github.com/astral-sh/ruff/pull/20878))
- \[`pylint`\] Implement `stop-iteration-return` (`PLR1708`) ([#20733](https://github.com/astral-sh/ruff/pull/20733))
- \[`ruff`\] Add support for additional eager conversion patterns (`RUF065`) ([#20657](https://github.com/astral-sh/ruff/pull/20657))
### Bug fixes
- Fix finding keyword range for clause header after statement ending with semicolon ([#21067](https://github.com/astral-sh/ruff/pull/21067))
- Fix syntax error false positive on nested alternative patterns ([#21104](https://github.com/astral-sh/ruff/pull/21104))
- \[`ISC001`\] Fix panic when string literals are unclosed ([#21034](https://github.com/astral-sh/ruff/pull/21034))
- \[`flake8-django`\] Apply `DJ001` to annotated fields ([#20907](https://github.com/astral-sh/ruff/pull/20907))
- \[`flake8-pyi`\] Fix `PYI034` to not trigger on metaclasses (`PYI034`) ([#20881](https://github.com/astral-sh/ruff/pull/20881))
- \[`flake8-type-checking`\] Fix `TC003` false positive with `future-annotations` ([#21125](https://github.com/astral-sh/ruff/pull/21125))
- \[`pyflakes`\] Fix false positive for `__class__` in lambda expressions within class definitions (`F821`) ([#20564](https://github.com/astral-sh/ruff/pull/20564))
- \[`pyupgrade`\] Fix false positive for `TypeVar` with default on Python \<3.13 (`UP046`,`UP047`) ([#21045](https://github.com/astral-sh/ruff/pull/21045))
### Rule changes
- Add missing docstring sections to the numpy list ([#20931](https://github.com/astral-sh/ruff/pull/20931))
- \[`airflow`\] Extend `airflow.models..Param` check (`AIR311`) ([#21043](https://github.com/astral-sh/ruff/pull/21043))
- \[`airflow`\] Warn that `airflow....DAG.create_dagrun` has been removed (`AIR301`) ([#21093](https://github.com/astral-sh/ruff/pull/21093))
- \[`refurb`\] Preserve digit separators in `Decimal` constructor (`FURB157`) ([#20588](https://github.com/astral-sh/ruff/pull/20588))
### Server
- Avoid sending an unnecessary "clear diagnostics" message for clients supporting pull diagnostics ([#21105](https://github.com/astral-sh/ruff/pull/21105))
### Documentation
- \[`flake8-bandit`\] Fix correct example for `S308` ([#21128](https://github.com/astral-sh/ruff/pull/21128))
### Other changes
- Clearer error message when `line-length` goes beyond threshold ([#21072](https://github.com/astral-sh/ruff/pull/21072))
### Contributors
- [@danparizher](https://github.com/danparizher)
- [@jvacek](https://github.com/jvacek)
- [@ntBre](https://github.com/ntBre)
- [@augustelalande](https://github.com/augustelalande)
- [@prakhar1144](https://github.com/prakhar1144)
- [@TaKO8Ki](https://github.com/TaKO8Ki)
- [@dylwil3](https://github.com/dylwil3)
- [@fatelei](https://github.com/fatelei)
- [@ShaharNaveh](https://github.com/ShaharNaveh)
- [@Lee-W](https://github.com/Lee-W)
## 0.14.2 ## 0.14.2
Released on 2025-10-23. Released on 2025-10-23.

View File

@ -280,6 +280,55 @@ Note that plugin-specific configuration options are defined in their own modules
Finally, regenerate the documentation and generated code with `cargo dev generate-all`. Finally, regenerate the documentation and generated code with `cargo dev generate-all`.
### Opening a PR
After you finish your changes, the next step is to open a PR. By default, two
sections will be filled into the PR body: the summary and the test plan.
#### The summary
The summary is intended to give us as maintainers information about your PR.
This should typically include a link to the relevant issue(s) you're addressing
in your PR, as well as a summary of the issue and your approach to fixing it. If
you have any questions about your approach or design, or if you considered
alternative approaches, that can also be helpful to include.
AI can be helpful in generating both the code and summary of your PR, but a
successful contribution should still be carefully reviewed by you and the
summary editorialized before submitting a PR. A great summary is thorough but
also succinct and gives us the context we need to review your PR.
You can find examples of excellent issues and PRs by searching for the
[`great writeup`](https://github.com/astral-sh/ruff/issues?q=label%3A%22great%20writeup%22)
label.
#### The test plan
The test plan is likely to be shorter than the summary and can be as simple as
"Added new snapshot tests for `RUF123`," at least for rule bugs. For LSP or some
types of CLI changes, in particular, it can also be helpful to include
screenshots or recordings of your change in action.
#### Ecosystem report
After opening the PR, an ecosystem report will be run as part of CI. This shows
a diff of linter and formatter behavior before and after the changes in your PR.
Going through these changes and reporting your findings in the PR summary or an
additional comment help us to review your PR more efficiently. It's also a great
way to find new test cases to incorporate into your PR if you identify any
issues.
#### PR status
To help us know when your PR is ready for review again, please either move your
PR back to a draft while working on it (marking it ready for review afterwards
will ping the previous reviewers) or explicitly re-request a review. This helps
us to avoid re-reviewing a PR while you're still working on it and also to
prioritize PRs that are definitely ready for review.
You can also thumbs-up or mark as resolved any comments we leave to let us know
you addressed them.
## MkDocs ## MkDocs
> [!NOTE] > [!NOTE]

414
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -5,7 +5,7 @@ resolver = "2"
[workspace.package] [workspace.package]
# Please update rustfmt.toml when bumping the Rust edition # Please update rustfmt.toml when bumping the Rust edition
edition = "2024" edition = "2024"
rust-version = "1.88" rust-version = "1.89"
homepage = "https://docs.astral.sh/ruff" homepage = "https://docs.astral.sh/ruff"
documentation = "https://docs.astral.sh/ruff" documentation = "https://docs.astral.sh/ruff"
repository = "https://github.com/astral-sh/ruff" repository = "https://github.com/astral-sh/ruff"
@ -84,7 +84,7 @@ dashmap = { version = "6.0.1" }
dir-test = { version = "0.4.0" } dir-test = { version = "0.4.0" }
dunce = { version = "1.0.5" } dunce = { version = "1.0.5" }
drop_bomb = { version = "0.1.5" } drop_bomb = { version = "0.1.5" }
etcetera = { version = "0.10.0" } etcetera = { version = "0.11.0" }
fern = { version = "0.7.0" } fern = { version = "0.7.0" }
filetime = { version = "0.2.23" } filetime = { version = "0.2.23" }
getrandom = { version = "0.3.1" } getrandom = { version = "0.3.1" }
@ -103,7 +103,7 @@ hashbrown = { version = "0.16.0", default-features = false, features = [
"inline-more", "inline-more",
] } ] }
heck = "0.5.0" heck = "0.5.0"
ignore = { version = "0.4.22" } ignore = { version = "0.4.24" }
imara-diff = { version = "0.1.5" } imara-diff = { version = "0.1.5" }
imperative = { version = "1.0.4" } imperative = { version = "1.0.4" }
indexmap = { version = "2.6.0" } indexmap = { version = "2.6.0" }
@ -124,7 +124,7 @@ lsp-server = { version = "0.7.6" }
lsp-types = { git = "https://github.com/astral-sh/lsp-types.git", rev = "3512a9f", features = [ lsp-types = { git = "https://github.com/astral-sh/lsp-types.git", rev = "3512a9f", features = [
"proposed", "proposed",
] } ] }
matchit = { version = "0.8.1" } matchit = { version = "0.9.0" }
memchr = { version = "2.7.1" } memchr = { version = "2.7.1" }
mimalloc = { version = "0.1.39" } mimalloc = { version = "0.1.39" }
natord = { version = "1.0.9" } natord = { version = "1.0.9" }
@ -146,7 +146,7 @@ regex-automata = { version = "0.4.9" }
rustc-hash = { version = "2.0.0" } rustc-hash = { version = "2.0.0" }
rustc-stable-hash = { version = "0.1.2" } rustc-stable-hash = { version = "0.1.2" }
# When updating salsa, make sure to also update the revision in `fuzz/Cargo.toml` # When updating salsa, make sure to also update the revision in `fuzz/Cargo.toml`
salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "cdd0b85516a52c18b8a6d17a2279a96ed6c3e198", default-features = false, features = [ salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "05a9af7f554b64b8aadc2eeb6f2caf73d0408d09", default-features = false, features = [
"compact_str", "compact_str",
"macros", "macros",
"salsa_unstable", "salsa_unstable",

View File

@ -147,8 +147,8 @@ curl -LsSf https://astral.sh/ruff/install.sh | sh
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex" powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version. # For a specific version.
curl -LsSf https://astral.sh/ruff/0.14.2/install.sh | sh curl -LsSf https://astral.sh/ruff/0.14.4/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.14.2/install.ps1 | iex" powershell -c "irm https://astral.sh/ruff/0.14.4/install.ps1 | iex"
``` ```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff), You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@ -181,7 +181,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml ```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit - repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version. # Ruff version.
rev: v0.14.2 rev: v0.14.4
hooks: hooks:
# Run the linter. # Run the linter.
- id: ruff-check - id: ruff-check
@ -491,6 +491,7 @@ Ruff is used by a number of major open-source projects and companies, including:
- [PyTorch](https://github.com/pytorch/pytorch) - [PyTorch](https://github.com/pytorch/pytorch)
- [Pydantic](https://github.com/pydantic/pydantic) - [Pydantic](https://github.com/pydantic/pydantic)
- [Pylint](https://github.com/PyCQA/pylint) - [Pylint](https://github.com/PyCQA/pylint)
- [PyScripter](https://github.com/pyscripter/pyscripter)
- [PyVista](https://github.com/pyvista/pyvista) - [PyVista](https://github.com/pyvista/pyvista)
- [Reflex](https://github.com/reflex-dev/reflex) - [Reflex](https://github.com/reflex-dev/reflex)
- [River](https://github.com/online-ml/river) - [River](https://github.com/online-ml/river)

View File

@ -1,6 +1,6 @@
[package] [package]
name = "ruff" name = "ruff"
version = "0.14.2" version = "0.14.4"
publish = true publish = true
authors = { workspace = true } authors = { workspace = true }
edition = { workspace = true } edition = { workspace = true }

View File

@ -7,6 +7,8 @@ use std::sync::Arc;
use crate::commands::completions::config::{OptionString, OptionStringParser}; use crate::commands::completions::config::{OptionString, OptionStringParser};
use anyhow::bail; use anyhow::bail;
use clap::builder::Styles;
use clap::builder::styling::{AnsiColor, Effects};
use clap::builder::{TypedValueParser, ValueParserFactory}; use clap::builder::{TypedValueParser, ValueParserFactory};
use clap::{Parser, Subcommand, command}; use clap::{Parser, Subcommand, command};
use colored::Colorize; use colored::Colorize;
@ -78,6 +80,13 @@ impl GlobalConfigArgs {
} }
} }
// Configures Clap v3-style help menu colors
const STYLES: Styles = Styles::styled()
.header(AnsiColor::Green.on_default().effects(Effects::BOLD))
.usage(AnsiColor::Green.on_default().effects(Effects::BOLD))
.literal(AnsiColor::Cyan.on_default().effects(Effects::BOLD))
.placeholder(AnsiColor::Cyan.on_default());
#[derive(Debug, Parser)] #[derive(Debug, Parser)]
#[command( #[command(
author, author,
@ -86,6 +95,7 @@ impl GlobalConfigArgs {
after_help = "For help with a specific command, see: `ruff help <command>`." after_help = "For help with a specific command, see: `ruff help <command>`."
)] )]
#[command(version)] #[command(version)]
#[command(styles = STYLES)]
pub struct Args { pub struct Args {
#[command(subcommand)] #[command(subcommand)]
pub(crate) command: Command, pub(crate) command: Command,
@ -405,8 +415,13 @@ pub struct CheckCommand {
)] )]
pub statistics: bool, pub statistics: bool,
/// Enable automatic additions of `noqa` directives to failing lines. /// Enable automatic additions of `noqa` directives to failing lines.
/// Optionally provide a reason to append after the codes.
#[arg( #[arg(
long, long,
value_name = "REASON",
default_missing_value = "",
num_args = 0..=1,
require_equals = true,
// conflicts_with = "add_noqa", // conflicts_with = "add_noqa",
conflicts_with = "show_files", conflicts_with = "show_files",
conflicts_with = "show_settings", conflicts_with = "show_settings",
@ -418,7 +433,7 @@ pub struct CheckCommand {
conflicts_with = "fix", conflicts_with = "fix",
conflicts_with = "diff", conflicts_with = "diff",
)] )]
pub add_noqa: bool, pub add_noqa: Option<String>,
/// See the files Ruff will be run against with the current settings. /// See the files Ruff will be run against with the current settings.
#[arg( #[arg(
long, long,
@ -1047,7 +1062,7 @@ Possible choices:
/// etc.). /// etc.).
#[expect(clippy::struct_excessive_bools)] #[expect(clippy::struct_excessive_bools)]
pub struct CheckArguments { pub struct CheckArguments {
pub add_noqa: bool, pub add_noqa: Option<String>,
pub diff: bool, pub diff: bool,
pub exit_non_zero_on_fix: bool, pub exit_non_zero_on_fix: bool,
pub exit_zero: bool, pub exit_zero: bool,

View File

@ -21,6 +21,7 @@ pub(crate) fn add_noqa(
files: &[PathBuf], files: &[PathBuf],
pyproject_config: &PyprojectConfig, pyproject_config: &PyprojectConfig,
config_arguments: &ConfigArguments, config_arguments: &ConfigArguments,
reason: Option<&str>,
) -> Result<usize> { ) -> Result<usize> {
// Collect all the files to check. // Collect all the files to check.
let start = Instant::now(); let start = Instant::now();
@ -76,7 +77,14 @@ pub(crate) fn add_noqa(
return None; return None;
} }
}; };
match add_noqa_to_path(path, package, &source_kind, source_type, &settings.linter) { match add_noqa_to_path(
path,
package,
&source_kind,
source_type,
&settings.linter,
reason,
) {
Ok(count) => Some(count), Ok(count) => Some(count),
Err(e) => { Err(e) => {
error!("Failed to add noqa to {}: {e}", path.display()); error!("Failed to add noqa to {}: {e}", path.display());

View File

@ -7,6 +7,7 @@ use path_absolutize::CWD;
use ruff_db::system::{SystemPath, SystemPathBuf}; use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_graph::{Direction, ImportMap, ModuleDb, ModuleImports}; use ruff_graph::{Direction, ImportMap, ModuleDb, ModuleImports};
use ruff_linter::package::PackageRoot; use ruff_linter::package::PackageRoot;
use ruff_linter::source_kind::SourceKind;
use ruff_linter::{warn_user, warn_user_once}; use ruff_linter::{warn_user, warn_user_once};
use ruff_python_ast::{PySourceType, SourceType}; use ruff_python_ast::{PySourceType, SourceType};
use ruff_workspace::resolver::{ResolvedFile, match_exclusion, python_files_in_path}; use ruff_workspace::resolver::{ResolvedFile, match_exclusion, python_files_in_path};
@ -127,10 +128,6 @@ pub(crate) fn analyze_graph(
}, },
Some(language) => PySourceType::from(language), Some(language) => PySourceType::from(language),
}; };
if matches!(source_type, PySourceType::Ipynb) {
debug!("Ignoring Jupyter notebook: {}", path.display());
continue;
}
// Convert to system paths. // Convert to system paths.
let Ok(package) = package.map(SystemPathBuf::from_path_buf).transpose() else { let Ok(package) = package.map(SystemPathBuf::from_path_buf).transpose() else {
@ -147,13 +144,34 @@ pub(crate) fn analyze_graph(
let root = root.clone(); let root = root.clone();
let result = inner_result.clone(); let result = inner_result.clone();
scope.spawn(move |_| { scope.spawn(move |_| {
// Extract source code (handles both .py and .ipynb files)
let source_kind = match SourceKind::from_path(path.as_std_path(), source_type) {
Ok(Some(source_kind)) => source_kind,
Ok(None) => {
debug!("Skipping non-Python notebook: {path}");
return;
}
Err(err) => {
warn!("Failed to read source for {path}: {err}");
return;
}
};
let source_code = source_kind.source_code();
// Identify any imports via static analysis. // Identify any imports via static analysis.
let mut imports = let mut imports = ModuleImports::detect(
ModuleImports::detect(&db, &path, package.as_deref(), string_imports) &db,
.unwrap_or_else(|err| { source_code,
warn!("Failed to generate import map for {path}: {err}"); source_type,
ModuleImports::default() &path,
}); package.as_deref(),
string_imports,
)
.unwrap_or_else(|err| {
warn!("Failed to generate import map for {path}: {err}");
ModuleImports::default()
});
debug!("Discovered {} imports for {}", imports.len(), path); debug!("Discovered {} imports for {}", imports.len(), path);

View File

@ -370,7 +370,7 @@ pub(crate) fn format_source(
let line_index = LineIndex::from_source_text(unformatted); let line_index = LineIndex::from_source_text(unformatted);
let byte_range = range.to_text_range(unformatted, &line_index); let byte_range = range.to_text_range(unformatted, &line_index);
format_range(unformatted, byte_range, options).map(|formatted_range| { format_range(unformatted, byte_range, options).map(|formatted_range| {
let mut formatted = unformatted.to_string(); let mut formatted = unformatted.clone();
formatted.replace_range( formatted.replace_range(
std::ops::Range::<usize>::from(formatted_range.source_range()), std::ops::Range::<usize>::from(formatted_range.source_range()),
formatted_range.as_code(), formatted_range.as_code(),

View File

@ -16,6 +16,8 @@ struct LinterInfo {
prefix: &'static str, prefix: &'static str,
name: &'static str, name: &'static str,
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
url: Option<&'static str>,
#[serde(skip_serializing_if = "Option::is_none")]
categories: Option<Vec<LinterCategoryInfo>>, categories: Option<Vec<LinterCategoryInfo>>,
} }
@ -50,6 +52,7 @@ pub(crate) fn linter(format: HelpFormat) -> Result<()> {
.map(|linter_info| LinterInfo { .map(|linter_info| LinterInfo {
prefix: linter_info.common_prefix(), prefix: linter_info.common_prefix(),
name: linter_info.name(), name: linter_info.name(),
url: linter_info.url(),
categories: linter_info.upstream_categories().map(|cats| { categories: linter_info.upstream_categories().map(|cats| {
cats.iter() cats.iter()
.map(|c| LinterCategoryInfo { .map(|c| LinterCategoryInfo {

View File

@ -319,12 +319,20 @@ pub fn check(args: CheckCommand, global_options: GlobalConfigArgs) -> Result<Exi
warn_user!("Detected debug build without --no-cache."); warn_user!("Detected debug build without --no-cache.");
} }
if cli.add_noqa { if let Some(reason) = &cli.add_noqa {
if !fix_mode.is_generate() { if !fix_mode.is_generate() {
warn_user!("--fix is incompatible with --add-noqa."); warn_user!("--fix is incompatible with --add-noqa.");
} }
if reason.contains(['\n', '\r']) {
return Err(anyhow::anyhow!(
"--add-noqa <reason> cannot contain newline characters"
));
}
let reason_opt = (!reason.is_empty()).then_some(reason.as_str());
let modifications = let modifications =
commands::add_noqa::add_noqa(&files, &pyproject_config, &config_arguments)?; commands::add_noqa::add_noqa(&files, &pyproject_config, &config_arguments, reason_opt)?;
if modifications > 0 && config_arguments.log_level >= LogLevel::Default { if modifications > 0 && config_arguments.log_level >= LogLevel::Default {
let s = if modifications == 1 { "" } else { "s" }; let s = if modifications == 1 { "" } else { "s" };
#[expect(clippy::print_stderr)] #[expect(clippy::print_stderr)]

View File

@ -653,3 +653,133 @@ fn venv() -> Result<()> {
Ok(()) Ok(())
} }
#[test]
fn notebook_basic() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
def helper():
pass
"#})?;
// Create a basic notebook with a simple import
root.child("notebook.ipynb").write_str(indoc::indoc! {r#"
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from ruff.a import helper"
]
}
],
"metadata": {
"language_info": {
"name": "python",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"notebook.ipynb": [
"ruff/a.py"
],
"ruff/__init__.py": [],
"ruff/a.py": []
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn notebook_with_magic() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
def helper():
pass
"#})?;
// Create a notebook with IPython magic commands and imports
root.child("notebook.ipynb").write_str(indoc::indoc! {r#"
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from ruff.a import helper"
]
}
],
"metadata": {
"language_info": {
"name": "python",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"notebook.ipynb": [
"ruff/a.py"
],
"ruff/__init__.py": [],
"ruff/a.py": []
}
----- stderr -----
"###);
});
Ok(())
}

View File

@ -1760,6 +1760,64 @@ from foo import ( # noqa: F401
Ok(()) Ok(())
} }
#[test]
fn add_noqa_with_reason() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"test.py",
r#"import os
def foo():
x = 1
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--add-noqa=TODO: fix")
.arg("--select=F401,F841")
.arg("test.py"), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
Added 2 noqa directives.
");
let content = fs::read_to_string(fixture.root().join("test.py"))?;
insta::assert_snapshot!(content, @r"
import os # noqa: F401 TODO: fix
def foo():
x = 1 # noqa: F841 TODO: fix
");
Ok(())
}
#[test]
fn add_noqa_with_newline_in_reason() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file("test.py", "import os\n")?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--add-noqa=line1\nline2")
.arg("--select=F401")
.arg("test.py"), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: --add-noqa <reason> cannot contain newline characters
"###);
Ok(())
}
/// Infer `3.11` from `requires-python` in `pyproject.toml`. /// Infer `3.11` from `requires-python` in `pyproject.toml`.
#[test] #[test]
fn requires_python() -> Result<()> { fn requires_python() -> Result<()> {

View File

@ -71,16 +71,13 @@ impl Display for Benchmark<'_> {
} }
} }
fn check_project(db: &ProjectDatabase, max_diagnostics: usize) { fn check_project(db: &ProjectDatabase, project_name: &str, max_diagnostics: usize) {
let result = db.check(); let result = db.check();
let diagnostics = result.len(); let diagnostics = result.len();
assert!( assert!(
diagnostics > 1 && diagnostics <= max_diagnostics, diagnostics > 1 && diagnostics <= max_diagnostics,
"Expected between {} and {} diagnostics but got {}", "Expected between 1 and {max_diagnostics} diagnostics on project '{project_name}' but got {diagnostics}",
1,
max_diagnostics,
diagnostics
); );
} }
@ -146,7 +143,7 @@ static FREQTRADE: Benchmark = Benchmark::new(
max_dep_date: "2025-06-17", max_dep_date: "2025-06-17",
python_version: PythonVersion::PY312, python_version: PythonVersion::PY312,
}, },
400, 525,
); );
static PANDAS: Benchmark = Benchmark::new( static PANDAS: Benchmark = Benchmark::new(
@ -184,7 +181,7 @@ static PYDANTIC: Benchmark = Benchmark::new(
max_dep_date: "2025-06-17", max_dep_date: "2025-06-17",
python_version: PythonVersion::PY39, python_version: PythonVersion::PY39,
}, },
1000, 5000,
); );
static SYMPY: Benchmark = Benchmark::new( static SYMPY: Benchmark = Benchmark::new(
@ -226,7 +223,7 @@ static STATIC_FRAME: Benchmark = Benchmark::new(
max_dep_date: "2025-08-09", max_dep_date: "2025-08-09",
python_version: PythonVersion::PY311, python_version: PythonVersion::PY311,
}, },
800, 900,
); );
#[track_caller] #[track_caller]
@ -234,11 +231,11 @@ fn run_single_threaded(bencher: Bencher, benchmark: &Benchmark) {
bencher bencher
.with_inputs(|| benchmark.setup_iteration()) .with_inputs(|| benchmark.setup_iteration())
.bench_local_refs(|db| { .bench_local_refs(|db| {
check_project(db, benchmark.max_diagnostics); check_project(db, benchmark.project.name, benchmark.max_diagnostics);
}); });
} }
#[bench(args=[&ALTAIR, &FREQTRADE, &PYDANTIC, &TANJUN], sample_size=2, sample_count=3)] #[bench(args=[&ALTAIR, &FREQTRADE, &TANJUN], sample_size=2, sample_count=3)]
fn small(bencher: Bencher, benchmark: &Benchmark) { fn small(bencher: Bencher, benchmark: &Benchmark) {
run_single_threaded(bencher, benchmark); run_single_threaded(bencher, benchmark);
} }
@ -248,12 +245,12 @@ fn medium(bencher: Bencher, benchmark: &Benchmark) {
run_single_threaded(bencher, benchmark); run_single_threaded(bencher, benchmark);
} }
#[bench(args=[&SYMPY], sample_size=1, sample_count=2)] #[bench(args=[&SYMPY, &PYDANTIC], sample_size=1, sample_count=2)]
fn large(bencher: Bencher, benchmark: &Benchmark) { fn large(bencher: Bencher, benchmark: &Benchmark) {
run_single_threaded(bencher, benchmark); run_single_threaded(bencher, benchmark);
} }
#[bench(args=[&PYDANTIC], sample_size=3, sample_count=8)] #[bench(args=[&ALTAIR], sample_size=3, sample_count=8)]
fn multithreaded(bencher: Bencher, benchmark: &Benchmark) { fn multithreaded(bencher: Bencher, benchmark: &Benchmark) {
let thread_pool = ThreadPoolBuilder::new().build().unwrap(); let thread_pool = ThreadPoolBuilder::new().build().unwrap();
@ -261,7 +258,7 @@ fn multithreaded(bencher: Bencher, benchmark: &Benchmark) {
.with_inputs(|| benchmark.setup_iteration()) .with_inputs(|| benchmark.setup_iteration())
.bench_local_values(|db| { .bench_local_values(|db| {
thread_pool.install(|| { thread_pool.install(|| {
check_project(&db, benchmark.max_diagnostics); check_project(&db, benchmark.project.name, benchmark.max_diagnostics);
db db
}) })
}); });
@ -285,7 +282,7 @@ fn main() {
// branch when looking up the ingredient index. // branch when looking up the ingredient index.
{ {
let db = TANJUN.setup_iteration(); let db = TANJUN.setup_iteration();
check_project(&db, TANJUN.max_diagnostics); check_project(&db, TANJUN.project.name, TANJUN.max_diagnostics);
} }
divan::main(); divan::main();

View File

@ -112,16 +112,16 @@ impl std::fmt::Display for Diff<'_> {
// `None`, indicating a regular script file, all the lines will be in one "cell" under the // `None`, indicating a regular script file, all the lines will be in one "cell" under the
// `None` key. // `None` key.
let cells = if let Some(notebook_index) = &self.notebook_index { let cells = if let Some(notebook_index) = &self.notebook_index {
let mut last_cell = OneIndexed::MIN; let mut last_cell_index = OneIndexed::MIN;
let mut cells: Vec<(Option<OneIndexed>, TextSize)> = Vec::new(); let mut cells: Vec<(Option<OneIndexed>, TextSize)> = Vec::new();
for (row, cell) in notebook_index.iter() { for cell in notebook_index.iter() {
if cell != last_cell { if cell.cell_index() != last_cell_index {
let offset = source_code.line_start(row); let offset = source_code.line_start(cell.start_row());
cells.push((Some(last_cell), offset)); cells.push((Some(last_cell_index), offset));
last_cell = cell; last_cell_index = cell.cell_index();
} }
} }
cells.push((Some(last_cell), source_text.text_len())); cells.push((Some(last_cell_index), source_text.text_len()));
cells cells
} else { } else {
vec![(None, source_text.text_len())] vec![(None, source_text.text_len())]

View File

@ -470,6 +470,17 @@ impl File {
self.source_type(db).is_stub() self.source_type(db).is_stub()
} }
/// Returns `true` if the file is an `__init__.pyi`
pub fn is_package_stub(self, db: &dyn Db) -> bool {
self.path(db).as_str().ends_with("__init__.pyi")
}
/// Returns `true` if the file is an `__init__.pyi`
pub fn is_package(self, db: &dyn Db) -> bool {
let path = self.path(db).as_str();
path.ends_with("__init__.pyi") || path.ends_with("__init__.py")
}
pub fn source_type(self, db: &dyn Db) -> PySourceType { pub fn source_type(self, db: &dyn Db) -> PySourceType {
match self.path(db) { match self.path(db) {
FilePath::System(path) => path FilePath::System(path) => path

View File

@ -723,10 +723,11 @@ impl ruff_cache::CacheKey for SystemPathBuf {
/// A slice of a virtual path on [`System`](super::System) (akin to [`str`]). /// A slice of a virtual path on [`System`](super::System) (akin to [`str`]).
#[repr(transparent)] #[repr(transparent)]
#[derive(Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct SystemVirtualPath(str); pub struct SystemVirtualPath(str);
impl SystemVirtualPath { impl SystemVirtualPath {
pub fn new(path: &str) -> &SystemVirtualPath { pub const fn new(path: &str) -> &SystemVirtualPath {
// SAFETY: SystemVirtualPath is marked as #[repr(transparent)] so the conversion from a // SAFETY: SystemVirtualPath is marked as #[repr(transparent)] so the conversion from a
// *const str to a *const SystemVirtualPath is valid. // *const str to a *const SystemVirtualPath is valid.
unsafe { &*(path as *const str as *const SystemVirtualPath) } unsafe { &*(path as *const str as *const SystemVirtualPath) }
@ -767,8 +768,8 @@ pub struct SystemVirtualPathBuf(String);
impl SystemVirtualPathBuf { impl SystemVirtualPathBuf {
#[inline] #[inline]
pub fn as_path(&self) -> &SystemVirtualPath { pub const fn as_path(&self) -> &SystemVirtualPath {
SystemVirtualPath::new(&self.0) SystemVirtualPath::new(self.0.as_str())
} }
} }
@ -852,6 +853,12 @@ impl ruff_cache::CacheKey for SystemVirtualPathBuf {
} }
} }
impl Borrow<SystemVirtualPath> for SystemVirtualPathBuf {
fn borrow(&self) -> &SystemVirtualPath {
self.as_path()
}
}
/// Deduplicates identical paths and removes nested paths. /// Deduplicates identical paths and removes nested paths.
/// ///
/// # Examples /// # Examples

View File

@ -62,7 +62,7 @@ fn generate_set(output: &mut String, set: Set, parents: &mut Vec<Set>) {
generate_set( generate_set(
output, output,
Set::Named { Set::Named {
name: set_name.to_string(), name: set_name.clone(),
set: *sub_set, set: *sub_set,
}, },
parents, parents,

View File

@ -104,7 +104,7 @@ fn generate_set(output: &mut String, set: Set, parents: &mut Vec<Set>) {
generate_set( generate_set(
output, output,
Set::Named { Set::Named {
name: set_name.to_string(), name: set_name.clone(),
set: *sub_set, set: *sub_set,
}, },
parents, parents,

View File

@ -1006,7 +1006,7 @@ impl<Context> std::fmt::Debug for Align<'_, Context> {
/// Block indents indent a block of code, such as in a function body, and therefore insert a line /// Block indents indent a block of code, such as in a function body, and therefore insert a line
/// break before and after the content. /// break before and after the content.
/// ///
/// Doesn't create an indentation if the passed in content is [`FormatElement.is_empty`]. /// Doesn't create an indentation if the passed in content is empty.
/// ///
/// # Examples /// # Examples
/// ///

View File

@ -487,7 +487,7 @@ pub trait FormatElements {
/// Represents the width by adding 1 to the actual width so that the width can be represented by a [`NonZeroU32`], /// Represents the width by adding 1 to the actual width so that the width can be represented by a [`NonZeroU32`],
/// allowing [`TextWidth`] or [`Option<Width>`] fit in 4 bytes rather than 8. /// allowing [`TextWidth`] or [`Option<Width>`] fit in 4 bytes rather than 8.
/// ///
/// This means that 2^32 can not be precisely represented and instead has the same value as 2^32-1. /// This means that 2^32 cannot be precisely represented and instead has the same value as 2^32-1.
/// This imprecision shouldn't matter in practice because either text are longer than any configured line width /// This imprecision shouldn't matter in practice because either text are longer than any configured line width
/// and thus, the text should break. /// and thus, the text should break.
#[derive(Copy, Clone, Debug, Eq, PartialEq)] #[derive(Copy, Clone, Debug, Eq, PartialEq)]

View File

@ -3,8 +3,9 @@ use std::collections::{BTreeMap, BTreeSet};
use anyhow::Result; use anyhow::Result;
use ruff_db::system::{SystemPath, SystemPathBuf}; use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_python_ast::PySourceType;
use ruff_python_ast::helpers::to_module_path; use ruff_python_ast::helpers::to_module_path;
use ruff_python_parser::{Mode, ParseOptions, parse}; use ruff_python_parser::{ParseOptions, parse};
use crate::collector::Collector; use crate::collector::Collector;
pub use crate::db::ModuleDb; pub use crate::db::ModuleDb;
@ -24,13 +25,14 @@ impl ModuleImports {
/// Detect the [`ModuleImports`] for a given Python file. /// Detect the [`ModuleImports`] for a given Python file.
pub fn detect( pub fn detect(
db: &ModuleDb, db: &ModuleDb,
source: &str,
source_type: PySourceType,
path: &SystemPath, path: &SystemPath,
package: Option<&SystemPath>, package: Option<&SystemPath>,
string_imports: StringImports, string_imports: StringImports,
) -> Result<Self> { ) -> Result<Self> {
// Read and parse the source code. // Parse the source code.
let source = std::fs::read_to_string(path)?; let parsed = parse(source, ParseOptions::from(source_type))?;
let parsed = parse(&source, ParseOptions::from(Mode::Module))?;
let module_path = let module_path =
package.and_then(|package| to_module_path(package.as_std_path(), path.as_std_path())); package.and_then(|package| to_module_path(package.as_std_path(), path.as_std_path()));

View File

@ -1,6 +1,6 @@
[package] [package]
name = "ruff_linter" name = "ruff_linter"
version = "0.14.2" version = "0.14.4"
publish = false publish = false
authors = { workspace = true } authors = { workspace = true }
edition = { workspace = true } edition = { workspace = true }

View File

@ -22,6 +22,7 @@ DAG(dag_id="class_schedule_interval", schedule_interval="@hourly")
DAG(dag_id="class_timetable", timetable=NullTimetable()) DAG(dag_id="class_timetable", timetable=NullTimetable())
DAG(dag_id="class_concurrency", concurrency=12)
DAG(dag_id="class_fail_stop", fail_stop=True) DAG(dag_id="class_fail_stop", fail_stop=True)

View File

@ -70,3 +70,12 @@ builtins.getattr(foo, "bar")
# Regression test for: https://github.com/astral-sh/ruff/issues/18353 # Regression test for: https://github.com/astral-sh/ruff/issues/18353
setattr(foo, "__debug__", 0) setattr(foo, "__debug__", 0)
# Regression test for: https://github.com/astral-sh/ruff/issues/21126
# Non-NFKC attribute names should be marked as unsafe. Python normalizes identifiers in
# attribute access (obj.attr) using NFKC, but does not normalize string
# arguments passed to getattr/setattr. Rewriting `getattr(ns, "ſ")` to
# `ns.ſ` would be interpreted as `ns.s` at runtime, changing behavior.
# Example: the long s character "ſ" normalizes to "s" under NFKC.
getattr(foo, "ſ")
setattr(foo, "ſ", 1)

View File

@ -46,7 +46,8 @@ def func():
def func(): def func():
# OK (index doesn't start at 0 # SIM113
# https://github.com/astral-sh/ruff/pull/21395
idx = 10 idx = 10
for x in range(5): for x in range(5):
g(x, idx) g(x, idx)

View File

@ -204,3 +204,15 @@ x = 1
print(f"{x=}" or "bar") # SIM222 print(f"{x=}" or "bar") # SIM222
(lambda: 1) or True # SIM222 (lambda: 1) or True # SIM222
(i for i in range(1)) or "bar" # SIM222 (i for i in range(1)) or "bar" # SIM222
# https://github.com/astral-sh/ruff/issues/21136
def get_items():
return tuple(item for item in Item.objects.all()) or None # OK
def get_items_list():
return tuple([item for item in items]) or None # OK
def get_items_set():
return tuple({item for item in items}) or None # OK

View File

@ -371,6 +371,61 @@ class Foo:
""" """
return return
# DOC102 - Test case from issue #20959: comma-separated parameters
def leq(x: object, y: object) -> bool:
"""Compare two objects for loose equality.
Parameters
----------
x1, x2 : object
Objects.
Returns
-------
bool
Whether the objects are identical or equal.
"""
return x is y or x == y
# OK - comma-separated parameters that match function signature
def compare_values(x1: int, x2: int) -> bool:
"""Compare two integer values.
Parameters
----------
x1, x2 : int
Values to compare.
Returns
-------
bool
True if values are equal.
"""
return x1 == x2
# DOC102 - mixed comma-separated and regular parameters
def process_data(data, x1: str, x2: str) -> str:
"""Process data with multiple string parameters.
Parameters
----------
data : list
Input data to process.
x1, x2 : str
String parameters for processing.
extra_param : str
Extra parameter not in signature.
Returns
-------
str
Processed result.
"""
return f"{x1}{x2}{len(data)}"
# OK # OK
def baz(x: int) -> int: def baz(x: int) -> int:
""" """
@ -389,3 +444,21 @@ def baz(x: int) -> int:
int int
""" """
return x return x
# OK - comma-separated parameters without type annotations
def add_numbers(a, b):
"""
Adds two numbers and returns the result.
Parameters
----------
a, b
The numbers to add.
Returns
-------
int
The sum of the two numbers.
"""
return a + b

View File

@ -83,6 +83,37 @@ def calculate_speed(distance: float, time: float) -> float:
raise raise
# DOC502 regression for Sphinx directive after Raises (issue #18959)
def foo():
"""First line.
Raises:
ValueError:
some text
.. versionadded:: 0.7.0
The ``init_kwargs`` argument.
"""
raise ValueError
# DOC502 regression for following section with colons
def example_with_following_section():
"""Summary.
Returns:
str: The resulting expression.
Raises:
ValueError: If the unit is not valid.
Relation to `time_range_lookup`:
- Handles the "start of" modifier.
- Example: "start of month" `DATETRUNC()`.
"""
raise ValueError
# This should NOT trigger DOC502 because OSError is explicitly re-raised # This should NOT trigger DOC502 because OSError is explicitly re-raised
def f(): def f():
"""Do nothing. """Do nothing.

View File

@ -117,3 +117,33 @@ def calculate_speed(distance: float, time: float) -> float:
except TypeError: except TypeError:
print("Not a number? Shame on you!") print("Not a number? Shame on you!")
raise raise
# DOC502 regression for Sphinx directive after Raises (issue #18959)
def foo():
"""First line.
Raises
------
ValueError
some text
.. versionadded:: 0.7.0
The ``init_kwargs`` argument.
"""
raise ValueError
# Make sure we don't bail out on a Sphinx directive in the description of one
# of the exceptions
def foo():
"""First line.
Raises
------
ValueError
some text
.. math:: e^{xception}
ZeroDivisionError
Will not be raised, DOC502
"""
raise ValueError

View File

@ -0,0 +1,5 @@
from .builtins import next
from ..builtins import str
from ...builtins import int
from .builtins import next as _next

View File

@ -125,3 +125,18 @@ with open(*filename, mode="r") as f:
# `buffering`. # `buffering`.
with open(*filename, file="file.txt", mode="r") as f: with open(*filename, file="file.txt", mode="r") as f:
x = f.read() x = f.read()
# FURB101
with open("file.txt", encoding="utf-8") as f:
contents: str = f.read()
# FURB101 but no fix because it would remove the assignment to `x`
with open("file.txt", encoding="utf-8") as f:
contents, x = f.read(), 2
# FURB101 but no fix because it would remove the `process_contents` call
with open("file.txt", encoding="utf-8") as f:
contents = process_contents(f.read())
with open("file.txt", encoding="utf-8") as f:
contents: str = process_contents(f.read())

View File

@ -145,3 +145,11 @@ with open("file.txt", "w") as f:
with open("file.txt", "w") as f: with open("file.txt", "w") as f:
for line in text: for line in text:
f.write(line) f.write(line)
# See: https://github.com/astral-sh/ruff/issues/20785
import json
data = {"price": 100}
with open("test.json", "wb") as f:
f.write(json.dumps(data, indent=4).encode("utf-8"))

View File

@ -19,6 +19,9 @@ print("", *args, sep="")
print("", **kwargs) print("", **kwargs)
print(sep="\t") print(sep="\t")
print(sep=print(1)) print(sep=print(1))
print(f"")
print(f"", sep=",")
print(f"", end="bar")
# OK. # OK.
@ -33,3 +36,4 @@ print("foo", "", sep=",")
print("foo", "", "bar", "", sep=",") print("foo", "", "bar", "", sep=",")
print("", "", **kwargs) print("", "", **kwargs)
print(*args, sep=",") print(*args, sep=",")
print(f"foo")

View File

@ -85,3 +85,9 @@ Decimal("1234_5678") # Safe fix: preserves non-thousands separators
Decimal("0001_2345") Decimal("0001_2345")
Decimal("000_1_2345") Decimal("000_1_2345")
Decimal("000_000") Decimal("000_000")
# Test cases for underscores before sign
# https://github.com/astral-sh/ruff/issues/21186
Decimal("_-1") # Should flag as verbose
Decimal("_+1") # Should flag as verbose
Decimal("_-1_000") # Should flag as verbose

View File

@ -64,3 +64,8 @@ _ = Decimal.from_float(True)
_ = Decimal.from_float(float("-nan")) _ = Decimal.from_float(float("-nan"))
_ = Decimal.from_float(float("\x2dnan")) _ = Decimal.from_float(float("\x2dnan"))
_ = Decimal.from_float(float("\N{HYPHEN-MINUS}nan")) _ = Decimal.from_float(float("\N{HYPHEN-MINUS}nan"))
# See: https://github.com/astral-sh/ruff/issues/21257
# fixes must be safe
_ = Fraction.from_float(f=4.2)
_ = Fraction.from_decimal(dec=4)

View File

@ -81,3 +81,7 @@ round(# a comment
round( round(
17 # a comment 17 # a comment
) )
# See: https://github.com/astral-sh/ruff/issues/21209
print(round(125, **{"ndigits": -2}))
print(round(125, *[-2]))

View File

@ -0,0 +1,18 @@
import logging
# Test cases for str() that should NOT be flagged (issue #21315)
# str() with no arguments - should not be flagged
logging.warning("%s", str())
# str() with multiple arguments - should not be flagged
logging.warning("%s", str(b"\xe2\x9a\xa0", "utf-8"))
# str() with starred arguments - should not be flagged
logging.warning("%s", str(*(b"\xf0\x9f\x9a\xa7", "utf-8")))
# str() with keyword unpacking - should not be flagged
logging.warning("%s", str(**{"object": b"\xf0\x9f\x9a\xa8", "encoding": "utf-8"}))
# str() with single keyword argument - should be flagged (equivalent to str("!"))
logging.warning("%s", str(object="!"))

View File

@ -43,9 +43,6 @@ pub(crate) fn statement(stmt: &Stmt, checker: &mut Checker) {
pycodestyle::rules::ambiguous_variable_name(checker, name, name.range()); pycodestyle::rules::ambiguous_variable_name(checker, name, name.range());
} }
} }
if checker.is_rule_enabled(Rule::NonlocalWithoutBinding) {
pylint::rules::nonlocal_without_binding(checker, nonlocal);
}
if checker.is_rule_enabled(Rule::NonlocalAndGlobal) { if checker.is_rule_enabled(Rule::NonlocalAndGlobal) {
pylint::rules::nonlocal_and_global(checker, nonlocal); pylint::rules::nonlocal_and_global(checker, nonlocal);
} }
@ -720,7 +717,9 @@ pub(crate) fn statement(stmt: &Stmt, checker: &mut Checker) {
} }
if checker.is_rule_enabled(Rule::UnnecessaryBuiltinImport) { if checker.is_rule_enabled(Rule::UnnecessaryBuiltinImport) {
if let Some(module) = module { if let Some(module) = module {
pyupgrade::rules::unnecessary_builtin_import(checker, stmt, module, names); pyupgrade::rules::unnecessary_builtin_import(
checker, stmt, module, names, level,
);
} }
} }
if checker.any_rule_enabled(&[ if checker.any_rule_enabled(&[

View File

@ -73,7 +73,8 @@ use crate::rules::pyflakes::rules::{
UndefinedLocalWithNestedImportStarUsage, YieldOutsideFunction, UndefinedLocalWithNestedImportStarUsage, YieldOutsideFunction,
}; };
use crate::rules::pylint::rules::{ use crate::rules::pylint::rules::{
AwaitOutsideAsync, LoadBeforeGlobalDeclaration, YieldFromInAsyncFunction, AwaitOutsideAsync, LoadBeforeGlobalDeclaration, NonlocalWithoutBinding,
YieldFromInAsyncFunction,
}; };
use crate::rules::{flake8_pyi, flake8_type_checking, pyflakes, pyupgrade}; use crate::rules::{flake8_pyi, flake8_type_checking, pyflakes, pyupgrade};
use crate::settings::rule_table::RuleTable; use crate::settings::rule_table::RuleTable;
@ -641,6 +642,10 @@ impl SemanticSyntaxContext for Checker<'_> {
self.semantic.global(name) self.semantic.global(name)
} }
fn has_nonlocal_binding(&self, name: &str) -> bool {
self.semantic.nonlocal(name).is_some()
}
fn report_semantic_error(&self, error: SemanticSyntaxError) { fn report_semantic_error(&self, error: SemanticSyntaxError) {
match error.kind { match error.kind {
SemanticSyntaxErrorKind::LateFutureImport => { SemanticSyntaxErrorKind::LateFutureImport => {
@ -717,6 +722,12 @@ impl SemanticSyntaxContext for Checker<'_> {
self.report_diagnostic(pyflakes::rules::ContinueOutsideLoop, error.range); self.report_diagnostic(pyflakes::rules::ContinueOutsideLoop, error.range);
} }
} }
SemanticSyntaxErrorKind::NonlocalWithoutBinding(name) => {
// PLE0117
if self.is_rule_enabled(Rule::NonlocalWithoutBinding) {
self.report_diagnostic(NonlocalWithoutBinding { name }, error.range);
}
}
SemanticSyntaxErrorKind::ReboundComprehensionVariable SemanticSyntaxErrorKind::ReboundComprehensionVariable
| SemanticSyntaxErrorKind::DuplicateTypeParameter | SemanticSyntaxErrorKind::DuplicateTypeParameter
| SemanticSyntaxErrorKind::MultipleCaseAssignment(_) | SemanticSyntaxErrorKind::MultipleCaseAssignment(_)

View File

@ -51,13 +51,17 @@ impl<'de> serde::Deserialize<'de> for LineLength {
where where
D: serde::Deserializer<'de>, D: serde::Deserializer<'de>,
{ {
let value = u16::deserialize(deserializer)?; let value = i64::deserialize(deserializer)?;
Self::try_from(value).map_err(|_| {
serde::de::Error::custom(format!( u16::try_from(value)
"line-length must be between 1 and {} (got {value})", .ok()
Self::MAX, .and_then(|u16_value| Self::try_from(u16_value).ok())
)) .ok_or_else(|| {
}) serde::de::Error::custom(format!(
"line-length must be between 1 and {} (got {value})",
Self::MAX,
))
})
} }
} }

View File

@ -377,6 +377,7 @@ pub fn add_noqa_to_path(
source_kind: &SourceKind, source_kind: &SourceKind,
source_type: PySourceType, source_type: PySourceType,
settings: &LinterSettings, settings: &LinterSettings,
reason: Option<&str>,
) -> Result<usize> { ) -> Result<usize> {
// Parse once. // Parse once.
let target_version = settings.resolve_target_version(path); let target_version = settings.resolve_target_version(path);
@ -425,6 +426,7 @@ pub fn add_noqa_to_path(
&settings.external, &settings.external,
&directives.noqa_line_for, &directives.noqa_line_for,
stylist.line_ending(), stylist.line_ending(),
reason,
) )
} }

View File

@ -4,4 +4,4 @@ expression: content
--- ---
syntax_errors.py: syntax_errors.py:
1:15 invalid-syntax: Expected one or more symbol names after import 1:15 invalid-syntax: Expected one or more symbol names after import
3:12 invalid-syntax: Expected ')', found newline 3:12 invalid-syntax: Expected `)`, found newline

View File

@ -39,7 +39,7 @@ pub fn generate_noqa_edits(
let exemption = FileExemption::from(&file_directives); let exemption = FileExemption::from(&file_directives);
let directives = NoqaDirectives::from_commented_ranges(comment_ranges, external, path, locator); let directives = NoqaDirectives::from_commented_ranges(comment_ranges, external, path, locator);
let comments = find_noqa_comments(diagnostics, locator, &exemption, &directives, noqa_line_for); let comments = find_noqa_comments(diagnostics, locator, &exemption, &directives, noqa_line_for);
build_noqa_edits_by_diagnostic(comments, locator, line_ending) build_noqa_edits_by_diagnostic(comments, locator, line_ending, None)
} }
/// A directive to ignore a set of rules either for a given line of Python source code or an entire file (e.g., /// A directive to ignore a set of rules either for a given line of Python source code or an entire file (e.g.,
@ -715,6 +715,7 @@ impl Display for LexicalError {
impl Error for LexicalError {} impl Error for LexicalError {}
/// Adds noqa comments to suppress all messages of a file. /// Adds noqa comments to suppress all messages of a file.
#[expect(clippy::too_many_arguments)]
pub(crate) fn add_noqa( pub(crate) fn add_noqa(
path: &Path, path: &Path,
diagnostics: &[Diagnostic], diagnostics: &[Diagnostic],
@ -723,6 +724,7 @@ pub(crate) fn add_noqa(
external: &[String], external: &[String],
noqa_line_for: &NoqaMapping, noqa_line_for: &NoqaMapping,
line_ending: LineEnding, line_ending: LineEnding,
reason: Option<&str>,
) -> Result<usize> { ) -> Result<usize> {
let (count, output) = add_noqa_inner( let (count, output) = add_noqa_inner(
path, path,
@ -732,12 +734,14 @@ pub(crate) fn add_noqa(
external, external,
noqa_line_for, noqa_line_for,
line_ending, line_ending,
reason,
); );
fs::write(path, output)?; fs::write(path, output)?;
Ok(count) Ok(count)
} }
#[expect(clippy::too_many_arguments)]
fn add_noqa_inner( fn add_noqa_inner(
path: &Path, path: &Path,
diagnostics: &[Diagnostic], diagnostics: &[Diagnostic],
@ -746,6 +750,7 @@ fn add_noqa_inner(
external: &[String], external: &[String],
noqa_line_for: &NoqaMapping, noqa_line_for: &NoqaMapping,
line_ending: LineEnding, line_ending: LineEnding,
reason: Option<&str>,
) -> (usize, String) { ) -> (usize, String) {
let mut count = 0; let mut count = 0;
@ -757,7 +762,7 @@ fn add_noqa_inner(
let comments = find_noqa_comments(diagnostics, locator, &exemption, &directives, noqa_line_for); let comments = find_noqa_comments(diagnostics, locator, &exemption, &directives, noqa_line_for);
let edits = build_noqa_edits_by_line(comments, locator, line_ending); let edits = build_noqa_edits_by_line(comments, locator, line_ending, reason);
let contents = locator.contents(); let contents = locator.contents();
@ -783,6 +788,7 @@ fn build_noqa_edits_by_diagnostic(
comments: Vec<Option<NoqaComment>>, comments: Vec<Option<NoqaComment>>,
locator: &Locator, locator: &Locator,
line_ending: LineEnding, line_ending: LineEnding,
reason: Option<&str>,
) -> Vec<Option<Edit>> { ) -> Vec<Option<Edit>> {
let mut edits = Vec::default(); let mut edits = Vec::default();
for comment in comments { for comment in comments {
@ -794,6 +800,7 @@ fn build_noqa_edits_by_diagnostic(
FxHashSet::from_iter([comment.code]), FxHashSet::from_iter([comment.code]),
locator, locator,
line_ending, line_ending,
reason,
) { ) {
edits.push(Some(noqa_edit.into_edit())); edits.push(Some(noqa_edit.into_edit()));
} }
@ -808,6 +815,7 @@ fn build_noqa_edits_by_line<'a>(
comments: Vec<Option<NoqaComment<'a>>>, comments: Vec<Option<NoqaComment<'a>>>,
locator: &Locator, locator: &Locator,
line_ending: LineEnding, line_ending: LineEnding,
reason: Option<&'a str>,
) -> BTreeMap<TextSize, NoqaEdit<'a>> { ) -> BTreeMap<TextSize, NoqaEdit<'a>> {
let mut comments_by_line = BTreeMap::default(); let mut comments_by_line = BTreeMap::default();
for comment in comments.into_iter().flatten() { for comment in comments.into_iter().flatten() {
@ -831,6 +839,7 @@ fn build_noqa_edits_by_line<'a>(
.collect(), .collect(),
locator, locator,
line_ending, line_ending,
reason,
) { ) {
edits.insert(offset, edit); edits.insert(offset, edit);
} }
@ -927,6 +936,7 @@ struct NoqaEdit<'a> {
noqa_codes: FxHashSet<&'a SecondaryCode>, noqa_codes: FxHashSet<&'a SecondaryCode>,
codes: Option<&'a Codes<'a>>, codes: Option<&'a Codes<'a>>,
line_ending: LineEnding, line_ending: LineEnding,
reason: Option<&'a str>,
} }
impl NoqaEdit<'_> { impl NoqaEdit<'_> {
@ -954,6 +964,9 @@ impl NoqaEdit<'_> {
push_codes(writer, self.noqa_codes.iter().sorted_unstable()); push_codes(writer, self.noqa_codes.iter().sorted_unstable());
} }
} }
if let Some(reason) = self.reason {
write!(writer, " {reason}").unwrap();
}
write!(writer, "{}", self.line_ending.as_str()).unwrap(); write!(writer, "{}", self.line_ending.as_str()).unwrap();
} }
} }
@ -970,6 +983,7 @@ fn generate_noqa_edit<'a>(
noqa_codes: FxHashSet<&'a SecondaryCode>, noqa_codes: FxHashSet<&'a SecondaryCode>,
locator: &Locator, locator: &Locator,
line_ending: LineEnding, line_ending: LineEnding,
reason: Option<&'a str>,
) -> Option<NoqaEdit<'a>> { ) -> Option<NoqaEdit<'a>> {
let line_range = locator.full_line_range(offset); let line_range = locator.full_line_range(offset);
@ -999,6 +1013,7 @@ fn generate_noqa_edit<'a>(
noqa_codes, noqa_codes,
codes, codes,
line_ending, line_ending,
reason,
}) })
} }
@ -2832,6 +2847,7 @@ mod tests {
&[], &[],
&noqa_line_for, &noqa_line_for,
LineEnding::Lf, LineEnding::Lf,
None,
); );
assert_eq!(count, 0); assert_eq!(count, 0);
assert_eq!(output, format!("{contents}")); assert_eq!(output, format!("{contents}"));
@ -2855,6 +2871,7 @@ mod tests {
&[], &[],
&noqa_line_for, &noqa_line_for,
LineEnding::Lf, LineEnding::Lf,
None,
); );
assert_eq!(count, 1); assert_eq!(count, 1);
assert_eq!(output, "x = 1 # noqa: F841\n"); assert_eq!(output, "x = 1 # noqa: F841\n");
@ -2885,6 +2902,7 @@ mod tests {
&[], &[],
&noqa_line_for, &noqa_line_for,
LineEnding::Lf, LineEnding::Lf,
None,
); );
assert_eq!(count, 1); assert_eq!(count, 1);
assert_eq!(output, "x = 1 # noqa: E741, F841\n"); assert_eq!(output, "x = 1 # noqa: E741, F841\n");
@ -2915,6 +2933,7 @@ mod tests {
&[], &[],
&noqa_line_for, &noqa_line_for,
LineEnding::Lf, LineEnding::Lf,
None,
); );
assert_eq!(count, 0); assert_eq!(count, 0);
assert_eq!(output, "x = 1 # noqa"); assert_eq!(output, "x = 1 # noqa");

View File

@ -261,16 +261,6 @@ pub(crate) const fn is_b006_unsafe_fix_preserve_assignment_expr_enabled(
settings.preview.is_enabled() settings.preview.is_enabled()
} }
// https://github.com/astral-sh/ruff/pull/20520
pub(crate) const fn is_fix_read_whole_file_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}
// https://github.com/astral-sh/ruff/pull/20520
pub(crate) const fn is_fix_write_whole_file_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}
pub(crate) const fn is_typing_extensions_str_alias_enabled(settings: &LinterSettings) -> bool { pub(crate) const fn is_typing_extensions_str_alias_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled() settings.preview.is_enabled()
} }
@ -279,3 +269,8 @@ pub(crate) const fn is_typing_extensions_str_alias_enabled(settings: &LinterSett
pub(crate) const fn is_extended_i18n_function_matching_enabled(settings: &LinterSettings) -> bool { pub(crate) const fn is_extended_i18n_function_matching_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled() settings.preview.is_enabled()
} }
// https://github.com/astral-sh/ruff/pull/21395
pub(crate) const fn is_enumerate_for_loop_int_index_enabled(settings: &LinterSettings) -> bool {
settings.preview.is_enabled()
}

View File

@ -196,6 +196,7 @@ fn check_call_arguments(checker: &Checker, qualified_name: &QualifiedName, argum
match qualified_name.segments() { match qualified_name.segments() {
["airflow", .., "DAG" | "dag"] => { ["airflow", .., "DAG" | "dag"] => {
// with replacement // with replacement
diagnostic_for_argument(checker, arguments, "concurrency", Some("max_active_tasks"));
diagnostic_for_argument(checker, arguments, "fail_stop", Some("fail_fast")); diagnostic_for_argument(checker, arguments, "fail_stop", Some("fail_fast"));
diagnostic_for_argument(checker, arguments, "schedule_interval", Some("schedule")); diagnostic_for_argument(checker, arguments, "schedule_interval", Some("schedule"));
diagnostic_for_argument(checker, arguments, "timetable", Some("schedule")); diagnostic_for_argument(checker, arguments, "timetable", Some("schedule"));

View File

@ -28,6 +28,8 @@ AIR301 [*] `timetable` is removed in Airflow 3.0
22 | 22 |
23 | DAG(dag_id="class_timetable", timetable=NullTimetable()) 23 | DAG(dag_id="class_timetable", timetable=NullTimetable())
| ^^^^^^^^^ | ^^^^^^^^^
24 |
25 | DAG(dag_id="class_concurrency", concurrency=12)
| |
help: Use `schedule` instead help: Use `schedule` instead
20 | 20 |
@ -36,249 +38,271 @@ help: Use `schedule` instead
- DAG(dag_id="class_timetable", timetable=NullTimetable()) - DAG(dag_id="class_timetable", timetable=NullTimetable())
23 + DAG(dag_id="class_timetable", schedule=NullTimetable()) 23 + DAG(dag_id="class_timetable", schedule=NullTimetable())
24 | 24 |
25 | 25 | DAG(dag_id="class_concurrency", concurrency=12)
26 | DAG(dag_id="class_fail_stop", fail_stop=True) 26 |
AIR301 [*] `fail_stop` is removed in Airflow 3.0 AIR301 [*] `concurrency` is removed in Airflow 3.0
--> AIR301_args.py:26:31 --> AIR301_args.py:25:33
| |
26 | DAG(dag_id="class_fail_stop", fail_stop=True)
| ^^^^^^^^^
27 |
28 | DAG(dag_id="class_default_view", default_view="dag_default_view")
|
help: Use `fail_fast` instead
23 | DAG(dag_id="class_timetable", timetable=NullTimetable()) 23 | DAG(dag_id="class_timetable", timetable=NullTimetable())
24 | 24 |
25 | 25 | DAG(dag_id="class_concurrency", concurrency=12)
| ^^^^^^^^^^^
26 |
27 | DAG(dag_id="class_fail_stop", fail_stop=True)
|
help: Use `max_active_tasks` instead
22 |
23 | DAG(dag_id="class_timetable", timetable=NullTimetable())
24 |
- DAG(dag_id="class_concurrency", concurrency=12)
25 + DAG(dag_id="class_concurrency", max_active_tasks=12)
26 |
27 | DAG(dag_id="class_fail_stop", fail_stop=True)
28 |
AIR301 [*] `fail_stop` is removed in Airflow 3.0
--> AIR301_args.py:27:31
|
25 | DAG(dag_id="class_concurrency", concurrency=12)
26 |
27 | DAG(dag_id="class_fail_stop", fail_stop=True)
| ^^^^^^^^^
28 |
29 | DAG(dag_id="class_default_view", default_view="dag_default_view")
|
help: Use `fail_fast` instead
24 |
25 | DAG(dag_id="class_concurrency", concurrency=12)
26 |
- DAG(dag_id="class_fail_stop", fail_stop=True) - DAG(dag_id="class_fail_stop", fail_stop=True)
26 + DAG(dag_id="class_fail_stop", fail_fast=True) 27 + DAG(dag_id="class_fail_stop", fail_fast=True)
27 | 28 |
28 | DAG(dag_id="class_default_view", default_view="dag_default_view") 29 | DAG(dag_id="class_default_view", default_view="dag_default_view")
29 | 30 |
AIR301 `default_view` is removed in Airflow 3.0 AIR301 `default_view` is removed in Airflow 3.0
--> AIR301_args.py:28:34 --> AIR301_args.py:29:34
| |
26 | DAG(dag_id="class_fail_stop", fail_stop=True) 27 | DAG(dag_id="class_fail_stop", fail_stop=True)
27 | 28 |
28 | DAG(dag_id="class_default_view", default_view="dag_default_view") 29 | DAG(dag_id="class_default_view", default_view="dag_default_view")
| ^^^^^^^^^^^^ | ^^^^^^^^^^^^
29 | 30 |
30 | DAG(dag_id="class_orientation", orientation="BT") 31 | DAG(dag_id="class_orientation", orientation="BT")
| |
AIR301 `orientation` is removed in Airflow 3.0 AIR301 `orientation` is removed in Airflow 3.0
--> AIR301_args.py:30:33 --> AIR301_args.py:31:33
| |
28 | DAG(dag_id="class_default_view", default_view="dag_default_view") 29 | DAG(dag_id="class_default_view", default_view="dag_default_view")
29 | 30 |
30 | DAG(dag_id="class_orientation", orientation="BT") 31 | DAG(dag_id="class_orientation", orientation="BT")
| ^^^^^^^^^^^ | ^^^^^^^^^^^
31 | 32 |
32 | allow_future_exec_dates_dag = DAG(dag_id="class_allow_future_exec_dates") 33 | allow_future_exec_dates_dag = DAG(dag_id="class_allow_future_exec_dates")
| |
AIR301 [*] `schedule_interval` is removed in Airflow 3.0 AIR301 [*] `schedule_interval` is removed in Airflow 3.0
--> AIR301_args.py:41:6 --> AIR301_args.py:42:6
| |
41 | @dag(schedule_interval="0 * * * *") 42 | @dag(schedule_interval="0 * * * *")
| ^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^
42 | def decorator_schedule_interval(): 43 | def decorator_schedule_interval():
43 | pass 44 | pass
| |
help: Use `schedule` instead help: Use `schedule` instead
38 | pass 39 | pass
39 |
40 | 40 |
41 |
- @dag(schedule_interval="0 * * * *") - @dag(schedule_interval="0 * * * *")
41 + @dag(schedule="0 * * * *") 42 + @dag(schedule="0 * * * *")
42 | def decorator_schedule_interval(): 43 | def decorator_schedule_interval():
43 | pass 44 | pass
44 | 45 |
AIR301 [*] `timetable` is removed in Airflow 3.0 AIR301 [*] `timetable` is removed in Airflow 3.0
--> AIR301_args.py:46:6 --> AIR301_args.py:47:6
| |
46 | @dag(timetable=NullTimetable()) 47 | @dag(timetable=NullTimetable())
| ^^^^^^^^^ | ^^^^^^^^^
47 | def decorator_timetable(): 48 | def decorator_timetable():
48 | pass 49 | pass
| |
help: Use `schedule` instead help: Use `schedule` instead
43 | pass 44 | pass
44 |
45 | 45 |
46 |
- @dag(timetable=NullTimetable()) - @dag(timetable=NullTimetable())
46 + @dag(schedule=NullTimetable()) 47 + @dag(schedule=NullTimetable())
47 | def decorator_timetable(): 48 | def decorator_timetable():
48 | pass 49 | pass
49 | 50 |
AIR301 [*] `execution_date` is removed in Airflow 3.0 AIR301 [*] `execution_date` is removed in Airflow 3.0
--> AIR301_args.py:54:62 --> AIR301_args.py:55:62
| |
52 | def decorator_deprecated_operator_args(): 53 | def decorator_deprecated_operator_args():
53 | trigger_dagrun_op = trigger_dagrun.TriggerDagRunOperator( 54 | trigger_dagrun_op = trigger_dagrun.TriggerDagRunOperator(
54 | task_id="trigger_dagrun_op1", trigger_dag_id="test", execution_date="2024-12-04" 55 | task_id="trigger_dagrun_op1", trigger_dag_id="test", execution_date="2024-12-04"
| ^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^
55 | ) 56 | )
56 | trigger_dagrun_op2 = TriggerDagRunOperator( 57 | trigger_dagrun_op2 = TriggerDagRunOperator(
| |
help: Use `logical_date` instead help: Use `logical_date` instead
51 | @dag() 52 | @dag()
52 | def decorator_deprecated_operator_args(): 53 | def decorator_deprecated_operator_args():
53 | trigger_dagrun_op = trigger_dagrun.TriggerDagRunOperator( 54 | trigger_dagrun_op = trigger_dagrun.TriggerDagRunOperator(
- task_id="trigger_dagrun_op1", trigger_dag_id="test", execution_date="2024-12-04" - task_id="trigger_dagrun_op1", trigger_dag_id="test", execution_date="2024-12-04"
54 + task_id="trigger_dagrun_op1", trigger_dag_id="test", logical_date="2024-12-04" 55 + task_id="trigger_dagrun_op1", trigger_dag_id="test", logical_date="2024-12-04"
55 | ) 56 | )
56 | trigger_dagrun_op2 = TriggerDagRunOperator( 57 | trigger_dagrun_op2 = TriggerDagRunOperator(
57 | task_id="trigger_dagrun_op2", trigger_dag_id="test", execution_date="2024-12-04" 58 | task_id="trigger_dagrun_op2", trigger_dag_id="test", execution_date="2024-12-04"
AIR301 [*] `execution_date` is removed in Airflow 3.0 AIR301 [*] `execution_date` is removed in Airflow 3.0
--> AIR301_args.py:57:62 --> AIR301_args.py:58:62
| |
55 | ) 56 | )
56 | trigger_dagrun_op2 = TriggerDagRunOperator( 57 | trigger_dagrun_op2 = TriggerDagRunOperator(
57 | task_id="trigger_dagrun_op2", trigger_dag_id="test", execution_date="2024-12-04" 58 | task_id="trigger_dagrun_op2", trigger_dag_id="test", execution_date="2024-12-04"
| ^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^
58 | ) 59 | )
| |
help: Use `logical_date` instead help: Use `logical_date` instead
54 | task_id="trigger_dagrun_op1", trigger_dag_id="test", execution_date="2024-12-04" 55 | task_id="trigger_dagrun_op1", trigger_dag_id="test", execution_date="2024-12-04"
55 | ) 56 | )
56 | trigger_dagrun_op2 = TriggerDagRunOperator( 57 | trigger_dagrun_op2 = TriggerDagRunOperator(
- task_id="trigger_dagrun_op2", trigger_dag_id="test", execution_date="2024-12-04" - task_id="trigger_dagrun_op2", trigger_dag_id="test", execution_date="2024-12-04"
57 + task_id="trigger_dagrun_op2", trigger_dag_id="test", logical_date="2024-12-04" 58 + task_id="trigger_dagrun_op2", trigger_dag_id="test", logical_date="2024-12-04"
58 | ) 59 | )
59 | 60 |
60 | branch_dt_op = datetime.BranchDateTimeOperator( 61 | branch_dt_op = datetime.BranchDateTimeOperator(
AIR301 [*] `use_task_execution_day` is removed in Airflow 3.0 AIR301 [*] `use_task_execution_day` is removed in Airflow 3.0
--> AIR301_args.py:61:33 --> AIR301_args.py:62:33
| |
60 | branch_dt_op = datetime.BranchDateTimeOperator( 61 | branch_dt_op = datetime.BranchDateTimeOperator(
61 | task_id="branch_dt_op", use_task_execution_day=True, task_concurrency=5 62 | task_id="branch_dt_op", use_task_execution_day=True, task_concurrency=5
| ^^^^^^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^^^^^^
62 | ) 63 | )
63 | branch_dt_op2 = BranchDateTimeOperator( 64 | branch_dt_op2 = BranchDateTimeOperator(
| |
help: Use `use_task_logical_date` instead help: Use `use_task_logical_date` instead
58 | ) 59 | )
59 | 60 |
60 | branch_dt_op = datetime.BranchDateTimeOperator( 61 | branch_dt_op = datetime.BranchDateTimeOperator(
- task_id="branch_dt_op", use_task_execution_day=True, task_concurrency=5 - task_id="branch_dt_op", use_task_execution_day=True, task_concurrency=5
61 + task_id="branch_dt_op", use_task_logical_date=True, task_concurrency=5 62 + task_id="branch_dt_op", use_task_logical_date=True, task_concurrency=5
62 | ) 63 | )
63 | branch_dt_op2 = BranchDateTimeOperator( 64 | branch_dt_op2 = BranchDateTimeOperator(
64 | task_id="branch_dt_op2", 65 | task_id="branch_dt_op2",
AIR301 [*] `task_concurrency` is removed in Airflow 3.0 AIR301 [*] `task_concurrency` is removed in Airflow 3.0
--> AIR301_args.py:61:62 --> AIR301_args.py:62:62
| |
60 | branch_dt_op = datetime.BranchDateTimeOperator( 61 | branch_dt_op = datetime.BranchDateTimeOperator(
61 | task_id="branch_dt_op", use_task_execution_day=True, task_concurrency=5 62 | task_id="branch_dt_op", use_task_execution_day=True, task_concurrency=5
| ^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^
62 | ) 63 | )
63 | branch_dt_op2 = BranchDateTimeOperator( 64 | branch_dt_op2 = BranchDateTimeOperator(
| |
help: Use `max_active_tis_per_dag` instead help: Use `max_active_tis_per_dag` instead
58 | ) 59 | )
59 | 60 |
60 | branch_dt_op = datetime.BranchDateTimeOperator( 61 | branch_dt_op = datetime.BranchDateTimeOperator(
- task_id="branch_dt_op", use_task_execution_day=True, task_concurrency=5 - task_id="branch_dt_op", use_task_execution_day=True, task_concurrency=5
61 + task_id="branch_dt_op", use_task_execution_day=True, max_active_tis_per_dag=5 62 + task_id="branch_dt_op", use_task_execution_day=True, max_active_tis_per_dag=5
62 | ) 63 | )
63 | branch_dt_op2 = BranchDateTimeOperator( 64 | branch_dt_op2 = BranchDateTimeOperator(
64 | task_id="branch_dt_op2", 65 | task_id="branch_dt_op2",
AIR301 [*] `use_task_execution_day` is removed in Airflow 3.0 AIR301 [*] `use_task_execution_day` is removed in Airflow 3.0
--> AIR301_args.py:65:9 --> AIR301_args.py:66:9
| |
63 | branch_dt_op2 = BranchDateTimeOperator( 64 | branch_dt_op2 = BranchDateTimeOperator(
64 | task_id="branch_dt_op2", 65 | task_id="branch_dt_op2",
65 | use_task_execution_day=True, 66 | use_task_execution_day=True,
| ^^^^^^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^^^^^^
66 | sla=timedelta(seconds=10), 67 | sla=timedelta(seconds=10),
67 | ) 68 | )
| |
help: Use `use_task_logical_date` instead help: Use `use_task_logical_date` instead
62 | ) 63 | )
63 | branch_dt_op2 = BranchDateTimeOperator( 64 | branch_dt_op2 = BranchDateTimeOperator(
64 | task_id="branch_dt_op2", 65 | task_id="branch_dt_op2",
- use_task_execution_day=True, - use_task_execution_day=True,
65 + use_task_logical_date=True, 66 + use_task_logical_date=True,
66 | sla=timedelta(seconds=10), 67 | sla=timedelta(seconds=10),
67 | ) 68 | )
68 | 69 |
AIR301 [*] `use_task_execution_day` is removed in Airflow 3.0 AIR301 [*] `use_task_execution_day` is removed in Airflow 3.0
--> AIR301_args.py:92:9 --> AIR301_args.py:93:9
| |
90 | follow_task_ids_if_true=None, 91 | follow_task_ids_if_true=None,
91 | week_day=1, 92 | week_day=1,
92 | use_task_execution_day=True, 93 | use_task_execution_day=True,
| ^^^^^^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^^^^^^
93 | ) 94 | )
| |
help: Use `use_task_logical_date` instead help: Use `use_task_logical_date` instead
89 | follow_task_ids_if_false=None, 90 | follow_task_ids_if_false=None,
90 | follow_task_ids_if_true=None, 91 | follow_task_ids_if_true=None,
91 | week_day=1, 92 | week_day=1,
- use_task_execution_day=True, - use_task_execution_day=True,
92 + use_task_logical_date=True, 93 + use_task_logical_date=True,
93 | ) 94 | )
94 | 95 |
95 | trigger_dagrun_op >> trigger_dagrun_op2 96 | trigger_dagrun_op >> trigger_dagrun_op2
AIR301 `filename_template` is removed in Airflow 3.0 AIR301 `filename_template` is removed in Airflow 3.0
--> AIR301_args.py:102:15 --> AIR301_args.py:103:15
| |
101 | # deprecated filename_template argument in FileTaskHandler 102 | # deprecated filename_template argument in FileTaskHandler
102 | S3TaskHandler(filename_template="/tmp/test") 103 | S3TaskHandler(filename_template="/tmp/test")
| ^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^
103 | HdfsTaskHandler(filename_template="/tmp/test") 104 | HdfsTaskHandler(filename_template="/tmp/test")
104 | ElasticsearchTaskHandler(filename_template="/tmp/test") 105 | ElasticsearchTaskHandler(filename_template="/tmp/test")
| |
AIR301 `filename_template` is removed in Airflow 3.0 AIR301 `filename_template` is removed in Airflow 3.0
--> AIR301_args.py:103:17 --> AIR301_args.py:104:17
| |
101 | # deprecated filename_template argument in FileTaskHandler 102 | # deprecated filename_template argument in FileTaskHandler
102 | S3TaskHandler(filename_template="/tmp/test") 103 | S3TaskHandler(filename_template="/tmp/test")
103 | HdfsTaskHandler(filename_template="/tmp/test") 104 | HdfsTaskHandler(filename_template="/tmp/test")
| ^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^
104 | ElasticsearchTaskHandler(filename_template="/tmp/test") 105 | ElasticsearchTaskHandler(filename_template="/tmp/test")
105 | GCSTaskHandler(filename_template="/tmp/test") 106 | GCSTaskHandler(filename_template="/tmp/test")
| |
AIR301 `filename_template` is removed in Airflow 3.0 AIR301 `filename_template` is removed in Airflow 3.0
--> AIR301_args.py:104:26 --> AIR301_args.py:105:26
| |
102 | S3TaskHandler(filename_template="/tmp/test") 103 | S3TaskHandler(filename_template="/tmp/test")
103 | HdfsTaskHandler(filename_template="/tmp/test") 104 | HdfsTaskHandler(filename_template="/tmp/test")
104 | ElasticsearchTaskHandler(filename_template="/tmp/test") 105 | ElasticsearchTaskHandler(filename_template="/tmp/test")
| ^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^
105 | GCSTaskHandler(filename_template="/tmp/test") 106 | GCSTaskHandler(filename_template="/tmp/test")
| |
AIR301 `filename_template` is removed in Airflow 3.0 AIR301 `filename_template` is removed in Airflow 3.0
--> AIR301_args.py:105:16 --> AIR301_args.py:106:16
| |
103 | HdfsTaskHandler(filename_template="/tmp/test") 104 | HdfsTaskHandler(filename_template="/tmp/test")
104 | ElasticsearchTaskHandler(filename_template="/tmp/test") 105 | ElasticsearchTaskHandler(filename_template="/tmp/test")
105 | GCSTaskHandler(filename_template="/tmp/test") 106 | GCSTaskHandler(filename_template="/tmp/test")
| ^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^
106 | 107 |
107 | FabAuthManager(None) 108 | FabAuthManager(None)
| |
AIR301 `appbuilder` is removed in Airflow 3.0 AIR301 `appbuilder` is removed in Airflow 3.0
--> AIR301_args.py:107:15 --> AIR301_args.py:108:15
| |
105 | GCSTaskHandler(filename_template="/tmp/test") 106 | GCSTaskHandler(filename_template="/tmp/test")
106 | 107 |
107 | FabAuthManager(None) 108 | FabAuthManager(None)
| ^^^^^^ | ^^^^^^
| |
help: The constructor takes no parameter now help: The constructor takes no parameter now

View File

@ -513,6 +513,9 @@ impl Violation for MissingReturnTypeClassMethod {
/// def foo(x: MyAny): ... /// def foo(x: MyAny): ...
/// ``` /// ```
/// ///
/// ## Options
/// - `lint.flake8-annotations.allow-star-arg-any`
///
/// ## References /// ## References
/// - [Typing spec: `Any`](https://typing.python.org/en/latest/spec/special-types.html#any) /// - [Typing spec: `Any`](https://typing.python.org/en/latest/spec/special-types.html#any)
/// - [Python documentation: `typing.Any`](https://docs.python.org/3/library/typing.html#typing.Any) /// - [Python documentation: `typing.Any`](https://docs.python.org/3/library/typing.html#typing.Any)

View File

@ -3,6 +3,7 @@ use ruff_python_ast::{self as ast, Expr};
use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private}; use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private};
use ruff_source_file::LineRanges; use ruff_source_file::LineRanges;
use ruff_text_size::Ranged; use ruff_text_size::Ranged;
use unicode_normalization::UnicodeNormalization;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::fix::edits::pad; use crate::fix::edits::pad;
@ -29,6 +30,21 @@ use crate::{AlwaysFixableViolation, Edit, Fix};
/// obj.foo /// obj.foo
/// ``` /// ```
/// ///
/// ## Fix safety
/// The fix is marked as unsafe for attribute names that are not in NFKC (Normalization Form KC)
/// normalization. Python normalizes identifiers using NFKC when using attribute access syntax
/// (e.g., `obj.attr`), but does not normalize string arguments passed to `getattr`. Rewriting
/// `getattr(obj, "ſ")` to `obj.ſ` would be interpreted as `obj.s` at runtime, changing behavior.
///
/// For example, the long s character `"ſ"` normalizes to `"s"` under NFKC, so:
/// ```python
/// # This accesses an attribute with the exact name "ſ" (if it exists)
/// value = getattr(obj, "ſ")
///
/// # But this would normalize to "s" and access a different attribute
/// obj.ſ # This is interpreted as obj.s, not obj.ſ
/// ```
///
/// ## References /// ## References
/// - [Python documentation: `getattr`](https://docs.python.org/3/library/functions.html#getattr) /// - [Python documentation: `getattr`](https://docs.python.org/3/library/functions.html#getattr)
#[derive(ViolationMetadata)] #[derive(ViolationMetadata)]
@ -69,8 +85,14 @@ pub(crate) fn getattr_with_constant(checker: &Checker, expr: &Expr, func: &Expr,
return; return;
} }
// Mark fixes as unsafe for non-NFKC attribute names. Python normalizes identifiers using NFKC, so using
// attribute syntax (e.g., `obj.attr`) would normalize the name and potentially change
// program behavior.
let attr_name = value.to_str();
let is_unsafe = attr_name.nfkc().collect::<String>() != attr_name;
let mut diagnostic = checker.report_diagnostic(GetAttrWithConstant, expr.range()); let mut diagnostic = checker.report_diagnostic(GetAttrWithConstant, expr.range());
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement( let edit = Edit::range_replacement(
pad( pad(
if matches!( if matches!(
obj, obj,
@ -88,5 +110,11 @@ pub(crate) fn getattr_with_constant(checker: &Checker, expr: &Expr, func: &Expr,
checker.locator(), checker.locator(),
), ),
expr.range(), expr.range(),
))); );
let fix = if is_unsafe {
Fix::unsafe_edit(edit)
} else {
Fix::safe_edit(edit)
};
diagnostic.set_fix(fix);
} }

View File

@ -4,6 +4,7 @@ use ruff_text_size::{Ranged, TextRange};
use ruff_macros::{ViolationMetadata, derive_message_formats}; use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_codegen::Generator; use ruff_python_codegen::Generator;
use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private}; use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private};
use unicode_normalization::UnicodeNormalization;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::{AlwaysFixableViolation, Edit, Fix}; use crate::{AlwaysFixableViolation, Edit, Fix};
@ -28,6 +29,23 @@ use crate::{AlwaysFixableViolation, Edit, Fix};
/// obj.foo = 42 /// obj.foo = 42
/// ``` /// ```
/// ///
/// ## Fix safety
/// The fix is marked as unsafe for attribute names that are not in NFKC (Normalization Form KC)
/// normalization. Python normalizes identifiers using NFKC when using attribute access syntax
/// (e.g., `obj.attr = value`), but does not normalize string arguments passed to `setattr`.
/// Rewriting `setattr(obj, "ſ", 1)` to `obj.ſ = 1` would be interpreted as `obj.s = 1` at
/// runtime, changing behavior.
///
/// For example, the long s character `"ſ"` normalizes to `"s"` under NFKC, so:
/// ```python
/// # This creates an attribute with the exact name "ſ"
/// setattr(obj, "ſ", 1)
/// getattr(obj, "ſ") # Returns 1
///
/// # But this would normalize to "s" and set a different attribute
/// obj.ſ = 1 # This is interpreted as obj.s = 1, not obj.ſ = 1
/// ```
///
/// ## References /// ## References
/// - [Python documentation: `setattr`](https://docs.python.org/3/library/functions.html#setattr) /// - [Python documentation: `setattr`](https://docs.python.org/3/library/functions.html#setattr)
#[derive(ViolationMetadata)] #[derive(ViolationMetadata)]
@ -89,6 +107,12 @@ pub(crate) fn setattr_with_constant(checker: &Checker, expr: &Expr, func: &Expr,
return; return;
} }
// Mark fixes as unsafe for non-NFKC attribute names. Python normalizes identifiers using NFKC, so using
// attribute syntax (e.g., `obj.attr = value`) would normalize the name and potentially change
// program behavior.
let attr_name = name.to_str();
let is_unsafe = attr_name.nfkc().collect::<String>() != attr_name;
// We can only replace a `setattr` call (which is an `Expr`) with an assignment // We can only replace a `setattr` call (which is an `Expr`) with an assignment
// (which is a `Stmt`) if the `Expr` is already being used as a `Stmt` // (which is a `Stmt`) if the `Expr` is already being used as a `Stmt`
// (i.e., it's directly within an `Stmt::Expr`). // (i.e., it's directly within an `Stmt::Expr`).
@ -100,10 +124,16 @@ pub(crate) fn setattr_with_constant(checker: &Checker, expr: &Expr, func: &Expr,
{ {
if expr == child.as_ref() { if expr == child.as_ref() {
let mut diagnostic = checker.report_diagnostic(SetAttrWithConstant, expr.range()); let mut diagnostic = checker.report_diagnostic(SetAttrWithConstant, expr.range());
diagnostic.set_fix(Fix::safe_edit(Edit::range_replacement( let edit = Edit::range_replacement(
assignment(obj, name.to_str(), value, checker.generator()), assignment(obj, name.to_str(), value, checker.generator()),
expr.range(), expr.range(),
))); );
let fix = if is_unsafe {
Fix::unsafe_edit(edit)
} else {
Fix::safe_edit(edit)
};
diagnostic.set_fix(fix);
} }
} }
} }

View File

@ -360,3 +360,21 @@ help: Replace `getattr` with attribute access
70 | 70 |
71 | # Regression test for: https://github.com/astral-sh/ruff/issues/18353 71 | # Regression test for: https://github.com/astral-sh/ruff/issues/18353
72 | setattr(foo, "__debug__", 0) 72 | setattr(foo, "__debug__", 0)
B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
--> B009_B010.py:80:1
|
78 | # `ns.ſ` would be interpreted as `ns.s` at runtime, changing behavior.
79 | # Example: the long s character "ſ" normalizes to "s" under NFKC.
80 | getattr(foo, "ſ")
| ^^^^^^^^^^^^^^^^^
81 | setattr(foo, "ſ", 1)
|
help: Replace `getattr` with attribute access
77 | # arguments passed to getattr/setattr. Rewriting `getattr(ns, "ſ")` to
78 | # `ns.ſ` would be interpreted as `ns.s` at runtime, changing behavior.
79 | # Example: the long s character "ſ" normalizes to "s" under NFKC.
- getattr(foo, "ſ")
80 + foo.ſ
81 | setattr(foo, "ſ", 1)
note: This is an unsafe fix and may change runtime behavior

View File

@ -118,3 +118,19 @@ help: Replace `setattr` with assignment
56 | 56 |
57 | # Regression test for: https://github.com/astral-sh/ruff/issues/7455#issuecomment-1722458885 57 | # Regression test for: https://github.com/astral-sh/ruff/issues/7455#issuecomment-1722458885
58 | assert getattr(func, '_rpc')is True 58 | assert getattr(func, '_rpc')is True
B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access.
--> B009_B010.py:81:1
|
79 | # Example: the long s character "ſ" normalizes to "s" under NFKC.
80 | getattr(foo, "ſ")
81 | setattr(foo, "ſ", 1)
| ^^^^^^^^^^^^^^^^^^^^
|
help: Replace `setattr` with assignment
78 | # `ns.ſ` would be interpreted as `ns.s` at runtime, changing behavior.
79 | # Example: the long s character "ſ" normalizes to "s" under NFKC.
80 | getattr(foo, "ſ")
- setattr(foo, "ſ", 1)
81 + foo.ſ = 1
note: This is an unsafe fix and may change runtime behavior

View File

@ -43,7 +43,7 @@ use crate::rules::flake8_comprehensions::fixes;
/// >>> {x: y for x, y in d1} # Iterates over the keys of a mapping /// >>> {x: y for x, y in d1} # Iterates over the keys of a mapping
/// {1: 2, 4: 5} /// {1: 2, 4: 5}
/// >>> dict(d1) # Ruff's incorrect suggested fix /// >>> dict(d1) # Ruff's incorrect suggested fix
/// (1, 2): 3, (4, 5): 6} /// {(1, 2): 3, (4, 5): 6}
/// >>> dict(d1.keys()) # Correct fix /// >>> dict(d1.keys()) # Correct fix
/// {1: 2, 4: 5} /// {1: 2, 4: 5}
/// ``` /// ```

View File

@ -78,7 +78,7 @@ pub(crate) fn unconventional_import_alias(
let mut diagnostic = checker.report_diagnostic( let mut diagnostic = checker.report_diagnostic(
UnconventionalImportAlias { UnconventionalImportAlias {
name: qualified_name, name: qualified_name,
asname: expected_alias.to_string(), asname: expected_alias.clone(),
}, },
binding.range(), binding.range(),
); );

View File

@ -6,21 +6,17 @@ use ruff_macros::CacheKey;
#[derive(Clone, Copy, Debug, CacheKey, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, CacheKey, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Default)]
pub enum ParametrizeNameType { pub enum ParametrizeNameType {
#[serde(rename = "csv")] #[serde(rename = "csv")]
Csv, Csv,
#[serde(rename = "tuple")] #[serde(rename = "tuple")]
#[default]
Tuple, Tuple,
#[serde(rename = "list")] #[serde(rename = "list")]
List, List,
} }
impl Default for ParametrizeNameType {
fn default() -> Self {
Self::Tuple
}
}
impl Display for ParametrizeNameType { impl Display for ParametrizeNameType {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self { match self {
@ -33,19 +29,15 @@ impl Display for ParametrizeNameType {
#[derive(Clone, Copy, Debug, CacheKey, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, CacheKey, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Default)]
pub enum ParametrizeValuesType { pub enum ParametrizeValuesType {
#[serde(rename = "tuple")] #[serde(rename = "tuple")]
Tuple, Tuple,
#[serde(rename = "list")] #[serde(rename = "list")]
#[default]
List, List,
} }
impl Default for ParametrizeValuesType {
fn default() -> Self {
Self::List
}
}
impl Display for ParametrizeValuesType { impl Display for ParametrizeValuesType {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self { match self {
@ -57,19 +49,15 @@ impl Display for ParametrizeValuesType {
#[derive(Clone, Copy, Debug, CacheKey, PartialEq, Eq, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, CacheKey, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Default)]
pub enum ParametrizeValuesRowType { pub enum ParametrizeValuesRowType {
#[serde(rename = "tuple")] #[serde(rename = "tuple")]
#[default]
Tuple, Tuple,
#[serde(rename = "list")] #[serde(rename = "list")]
List, List,
} }
impl Default for ParametrizeValuesRowType {
fn default() -> Self {
Self::Tuple
}
}
impl Display for ParametrizeValuesRowType { impl Display for ParametrizeValuesRowType {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
match self { match self {

View File

@ -9,19 +9,15 @@ use ruff_macros::CacheKey;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")] #[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Default)]
pub enum Quote { pub enum Quote {
/// Use double quotes. /// Use double quotes.
#[default]
Double, Double,
/// Use single quotes. /// Use single quotes.
Single, Single,
} }
impl Default for Quote {
fn default() -> Self {
Self::Double
}
}
impl From<ruff_python_ast::str::Quote> for Quote { impl From<ruff_python_ast::str::Quote> for Quote {
fn from(value: ruff_python_ast::str::Quote) -> Self { fn from(value: ruff_python_ast::str::Quote) -> Self {
match value { match value {

View File

@ -61,6 +61,7 @@ mod tests {
#[test_case(Rule::SplitStaticString, Path::new("SIM905.py"))] #[test_case(Rule::SplitStaticString, Path::new("SIM905.py"))]
#[test_case(Rule::DictGetWithNoneDefault, Path::new("SIM910.py"))] #[test_case(Rule::DictGetWithNoneDefault, Path::new("SIM910.py"))]
#[test_case(Rule::EnumerateForLoop, Path::new("SIM113.py"))]
fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> { fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!( let snapshot = format!(
"preview__{}_{}", "preview__{}_{}",

View File

@ -1,6 +1,8 @@
use crate::preview::is_enumerate_for_loop_int_index_enabled;
use ruff_macros::{ViolationMetadata, derive_message_formats}; use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast::statement_visitor::{StatementVisitor, walk_stmt}; use ruff_python_ast::statement_visitor::{StatementVisitor, walk_stmt};
use ruff_python_ast::{self as ast, Expr, Int, Number, Operator, Stmt}; use ruff_python_ast::{self as ast, Expr, Int, Number, Operator, Stmt};
use ruff_python_semantic::analyze::type_inference::{NumberLike, PythonType, ResolvedPythonType};
use ruff_python_semantic::analyze::typing; use ruff_python_semantic::analyze::typing;
use ruff_text_size::Ranged; use ruff_text_size::Ranged;
@ -11,6 +13,9 @@ use crate::checkers::ast::Checker;
/// Checks for `for` loops with explicit loop-index variables that can be replaced /// Checks for `for` loops with explicit loop-index variables that can be replaced
/// with `enumerate()`. /// with `enumerate()`.
/// ///
/// In [preview], this rule checks for index variables initialized with any integer rather than only
/// a literal zero.
///
/// ## Why is this bad? /// ## Why is this bad?
/// When iterating over a sequence, it's often desirable to keep track of the /// When iterating over a sequence, it's often desirable to keep track of the
/// index of each element alongside the element itself. Prefer the `enumerate` /// index of each element alongside the element itself. Prefer the `enumerate`
@ -35,6 +40,8 @@ use crate::checkers::ast::Checker;
/// ///
/// ## References /// ## References
/// - [Python documentation: `enumerate`](https://docs.python.org/3/library/functions.html#enumerate) /// - [Python documentation: `enumerate`](https://docs.python.org/3/library/functions.html#enumerate)
///
/// [preview]: https://docs.astral.sh/ruff/preview/
#[derive(ViolationMetadata)] #[derive(ViolationMetadata)]
#[violation_metadata(stable_since = "v0.2.0")] #[violation_metadata(stable_since = "v0.2.0")]
pub(crate) struct EnumerateForLoop { pub(crate) struct EnumerateForLoop {
@ -82,17 +89,21 @@ pub(crate) fn enumerate_for_loop(checker: &Checker, for_stmt: &ast::StmtFor) {
continue; continue;
} }
// Ensure that the index variable was initialized to 0. // Ensure that the index variable was initialized to 0 (or instance of `int` if preview is enabled).
let Some(value) = typing::find_binding_value(binding, checker.semantic()) else { let Some(value) = typing::find_binding_value(binding, checker.semantic()) else {
continue; continue;
}; };
if !matches!( if !(matches!(
value, value,
Expr::NumberLiteral(ast::ExprNumberLiteral { Expr::NumberLiteral(ast::ExprNumberLiteral {
value: Number::Int(Int::ZERO), value: Number::Int(Int::ZERO),
.. ..
}) })
) { ) || matches!(
ResolvedPythonType::from(value),
ResolvedPythonType::Atom(PythonType::Number(NumberLike::Integer))
) && is_enumerate_for_loop_int_index_enabled(checker.settings()))
{
continue; continue;
} }

View File

@ -116,7 +116,7 @@ pub(crate) fn convert_for_loop_to_any_all(checker: &Checker, stmt: &Stmt) {
let mut diagnostic = checker.report_diagnostic( let mut diagnostic = checker.report_diagnostic(
ReimplementedBuiltin { ReimplementedBuiltin {
replacement: contents.to_string(), replacement: contents.clone(),
}, },
TextRange::new(stmt.start(), terminal.stmt.end()), TextRange::new(stmt.start(), terminal.stmt.end()),
); );
@ -212,7 +212,7 @@ pub(crate) fn convert_for_loop_to_any_all(checker: &Checker, stmt: &Stmt) {
let mut diagnostic = checker.report_diagnostic( let mut diagnostic = checker.report_diagnostic(
ReimplementedBuiltin { ReimplementedBuiltin {
replacement: contents.to_string(), replacement: contents.clone(),
}, },
TextRange::new(stmt.start(), terminal.stmt.end()), TextRange::new(stmt.start(), terminal.stmt.end()),
); );

View File

@ -1101,6 +1101,7 @@ help: Replace with `f"{x=}"`
204 + print(f"{x=}") # SIM222 204 + print(f"{x=}") # SIM222
205 | (lambda: 1) or True # SIM222 205 | (lambda: 1) or True # SIM222
206 | (i for i in range(1)) or "bar" # SIM222 206 | (i for i in range(1)) or "bar" # SIM222
207 |
note: This is an unsafe fix and may change runtime behavior note: This is an unsafe fix and may change runtime behavior
SIM222 [*] Use `lambda: 1` instead of `lambda: 1 or ...` SIM222 [*] Use `lambda: 1` instead of `lambda: 1 or ...`
@ -1119,6 +1120,8 @@ help: Replace with `lambda: 1`
- (lambda: 1) or True # SIM222 - (lambda: 1) or True # SIM222
205 + lambda: 1 # SIM222 205 + lambda: 1 # SIM222
206 | (i for i in range(1)) or "bar" # SIM222 206 | (i for i in range(1)) or "bar" # SIM222
207 |
208 | # https://github.com/astral-sh/ruff/issues/21136
note: This is an unsafe fix and may change runtime behavior note: This is an unsafe fix and may change runtime behavior
SIM222 [*] Use `(i for i in range(1))` instead of `(i for i in range(1)) or ...` SIM222 [*] Use `(i for i in range(1))` instead of `(i for i in range(1)) or ...`
@ -1128,6 +1131,8 @@ SIM222 [*] Use `(i for i in range(1))` instead of `(i for i in range(1)) or ...`
205 | (lambda: 1) or True # SIM222 205 | (lambda: 1) or True # SIM222
206 | (i for i in range(1)) or "bar" # SIM222 206 | (i for i in range(1)) or "bar" # SIM222
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
207 |
208 | # https://github.com/astral-sh/ruff/issues/21136
| |
help: Replace with `(i for i in range(1))` help: Replace with `(i for i in range(1))`
203 | x = 1 203 | x = 1
@ -1135,4 +1140,7 @@ help: Replace with `(i for i in range(1))`
205 | (lambda: 1) or True # SIM222 205 | (lambda: 1) or True # SIM222
- (i for i in range(1)) or "bar" # SIM222 - (i for i in range(1)) or "bar" # SIM222
206 + (i for i in range(1)) # SIM222 206 + (i for i in range(1)) # SIM222
207 |
208 | # https://github.com/astral-sh/ruff/issues/21136
209 | def get_items():
note: This is an unsafe fix and may change runtime behavior note: This is an unsafe fix and may change runtime behavior

View File

@ -0,0 +1,60 @@
---
source: crates/ruff_linter/src/rules/flake8_simplify/mod.rs
---
SIM113 Use `enumerate()` for index variable `idx` in `for` loop
--> SIM113.py:6:9
|
4 | for x in range(5):
5 | g(x, idx)
6 | idx += 1
| ^^^^^^^^
7 | h(x)
|
SIM113 Use `enumerate()` for index variable `idx` in `for` loop
--> SIM113.py:17:9
|
15 | if g(x):
16 | break
17 | idx += 1
| ^^^^^^^^
18 | sum += h(x, idx)
|
SIM113 Use `enumerate()` for index variable `idx` in `for` loop
--> SIM113.py:27:9
|
25 | g(x)
26 | h(x, y)
27 | idx += 1
| ^^^^^^^^
|
SIM113 Use `enumerate()` for index variable `idx` in `for` loop
--> SIM113.py:36:9
|
34 | for x in range(5):
35 | sum += h(x, idx)
36 | idx += 1
| ^^^^^^^^
|
SIM113 Use `enumerate()` for index variable `idx` in `for` loop
--> SIM113.py:44:9
|
42 | for x in range(5):
43 | g(x, idx)
44 | idx += 1
| ^^^^^^^^
45 | h(x)
|
SIM113 Use `enumerate()` for index variable `idx` in `for` loop
--> SIM113.py:54:9
|
52 | for x in range(5):
53 | g(x, idx)
54 | idx += 1
| ^^^^^^^^
55 | h(x)
|

View File

@ -47,7 +47,7 @@ pub(crate) fn banned_api<T: Ranged>(checker: &Checker, policy: &NameMatchPolicy,
checker.report_diagnostic( checker.report_diagnostic(
BannedApi { BannedApi {
name: banned_module, name: banned_module,
message: reason.msg.to_string(), message: reason.msg.clone(),
}, },
node.range(), node.range(),
); );
@ -74,8 +74,8 @@ pub(crate) fn banned_attribute_access(checker: &Checker, expr: &Expr) {
{ {
checker.report_diagnostic( checker.report_diagnostic(
BannedApi { BannedApi {
name: banned_path.to_string(), name: banned_path.clone(),
message: ban.msg.to_string(), message: ban.msg.clone(),
}, },
expr.range(), expr.range(),
); );

View File

@ -20,21 +20,17 @@ use super::categorize::ImportSection;
#[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)] #[derive(Debug, Copy, Clone, PartialEq, Eq, Serialize, Deserialize, CacheKey)]
#[serde(deny_unknown_fields, rename_all = "kebab-case")] #[serde(deny_unknown_fields, rename_all = "kebab-case")]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))] #[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Default)]
pub enum RelativeImportsOrder { pub enum RelativeImportsOrder {
/// Place "closer" imports (fewer `.` characters, most local) before /// Place "closer" imports (fewer `.` characters, most local) before
/// "further" imports (more `.` characters, least local). /// "further" imports (more `.` characters, least local).
ClosestToFurthest, ClosestToFurthest,
/// Place "further" imports (more `.` characters, least local) imports /// Place "further" imports (more `.` characters, least local) imports
/// before "closer" imports (fewer `.` characters, most local). /// before "closer" imports (fewer `.` characters, most local).
#[default]
FurthestToClosest, FurthestToClosest,
} }
impl Default for RelativeImportsOrder {
fn default() -> Self {
Self::FurthestToClosest
}
}
impl Display for RelativeImportsOrder { impl Display for RelativeImportsOrder {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
match self { match self {

View File

@ -427,7 +427,7 @@ pub(crate) fn literal_comparisons(checker: &Checker, compare: &ast::ExprCompare)
for diagnostic in &mut diagnostics { for diagnostic in &mut diagnostics {
diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement( diagnostic.set_fix(Fix::unsafe_edit(Edit::range_replacement(
content.to_string(), content.clone(),
compare.range(), compare.range(),
))); )));
} }

View File

@ -1,7 +1,7 @@
--- ---
source: crates/ruff_linter/src/rules/pycodestyle/mod.rs source: crates/ruff_linter/src/rules/pycodestyle/mod.rs
--- ---
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:2:7 --> E23.py:2:7
| |
1 | #: E231 1 | #: E231
@ -18,7 +18,7 @@ help: Add missing whitespace
4 | a[b1,:] 4 | a[b1,:]
5 | #: E231 5 | #: E231
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:4:5 --> E23.py:4:5
| |
2 | a = (1,2) 2 | a = (1,2)
@ -38,7 +38,7 @@ help: Add missing whitespace
6 | a = [{'a':''}] 6 | a = [{'a':''}]
7 | #: Okay 7 | #: Okay
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:6:10 --> E23.py:6:10
| |
4 | a[b1,:] 4 | a[b1,:]
@ -58,7 +58,7 @@ help: Add missing whitespace
8 | a = (4,) 8 | a = (4,)
9 | b = (5, ) 9 | b = (5, )
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:19:10 --> E23.py:19:10
| |
17 | def foo() -> None: 17 | def foo() -> None:
@ -77,7 +77,7 @@ help: Add missing whitespace
21 | 21 |
22 | #: Okay 22 | #: Okay
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:29:20 --> E23.py:29:20
| |
27 | mdtypes_template = { 27 | mdtypes_template = {
@ -96,7 +96,7 @@ help: Add missing whitespace
31 | 31 |
32 | # E231 32 | # E231
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:33:6 --> E23.py:33:6
| |
32 | # E231 32 | # E231
@ -115,7 +115,7 @@ help: Add missing whitespace
35 | # Okay because it's hard to differentiate between the usages of a colon in a f-string 35 | # Okay because it's hard to differentiate between the usages of a colon in a f-string
36 | f"{a:=1}" 36 | f"{a:=1}"
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:47:37 --> E23.py:47:37
| |
46 | #: E231 46 | #: E231
@ -134,7 +134,7 @@ help: Add missing whitespace
49 | #: Okay 49 | #: Okay
50 | a = (1,) 50 | a = (1,)
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:60:13 --> E23.py:60:13
| |
58 | results = { 58 | results = {
@ -154,7 +154,7 @@ help: Add missing whitespace
62 | results_in_tuple = ( 62 | results_in_tuple = (
63 | { 63 | {
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:65:17 --> E23.py:65:17
| |
63 | { 63 | {
@ -174,7 +174,7 @@ help: Add missing whitespace
67 | ) 67 | )
68 | results_in_list = [ 68 | results_in_list = [
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:71:17 --> E23.py:71:17
| |
69 | { 69 | {
@ -194,7 +194,7 @@ help: Add missing whitespace
73 | ] 73 | ]
74 | results_in_list_first = [ 74 | results_in_list_first = [
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:76:17 --> E23.py:76:17
| |
74 | results_in_list_first = [ 74 | results_in_list_first = [
@ -214,7 +214,7 @@ help: Add missing whitespace
78 | ] 78 | ]
79 | 79 |
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:82:13 --> E23.py:82:13
| |
80 | x = [ 80 | x = [
@ -234,7 +234,7 @@ help: Add missing whitespace
84 | "k3":[2], # E231 84 | "k3":[2], # E231
85 | "k4": [2], 85 | "k4": [2],
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:84:13 --> E23.py:84:13
| |
82 | "k1":[2], # E231 82 | "k1":[2], # E231
@ -254,7 +254,7 @@ help: Add missing whitespace
86 | "k5": [2], 86 | "k5": [2],
87 | "k6": [1, 2, 3, 4,5,6,7] # E231 87 | "k6": [1, 2, 3, 4,5,6,7] # E231
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:87:26 --> E23.py:87:26
| |
85 | "k4": [2], 85 | "k4": [2],
@ -274,7 +274,7 @@ help: Add missing whitespace
89 | { 89 | {
90 | "k1": [ 90 | "k1": [
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:87:28 --> E23.py:87:28
| |
85 | "k4": [2], 85 | "k4": [2],
@ -294,7 +294,7 @@ help: Add missing whitespace
89 | { 89 | {
90 | "k1": [ 90 | "k1": [
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:87:30 --> E23.py:87:30
| |
85 | "k4": [2], 85 | "k4": [2],
@ -314,7 +314,7 @@ help: Add missing whitespace
89 | { 89 | {
90 | "k1": [ 90 | "k1": [
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:92:21 --> E23.py:92:21
| |
90 | "k1": [ 90 | "k1": [
@ -334,7 +334,7 @@ help: Add missing whitespace
94 | { 94 | {
95 | "kb": [2,3], # E231 95 | "kb": [2,3], # E231
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:92:24 --> E23.py:92:24
| |
90 | "k1": [ 90 | "k1": [
@ -354,7 +354,7 @@ help: Add missing whitespace
94 | { 94 | {
95 | "kb": [2,3], # E231 95 | "kb": [2,3], # E231
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:95:25 --> E23.py:95:25
| |
93 | }, 93 | },
@ -374,7 +374,7 @@ help: Add missing whitespace
97 | { 97 | {
98 | "ka":[2, 3], # E231 98 | "ka":[2, 3], # E231
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:98:21 --> E23.py:98:21
| |
96 | }, 96 | },
@ -394,7 +394,7 @@ help: Add missing whitespace
100 | "kc": [2, 3], # Ok 100 | "kc": [2, 3], # Ok
101 | "kd": [2,3], # E231 101 | "kd": [2,3], # E231
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:101:25 --> E23.py:101:25
| |
99 | "kb": [2, 3], # Ok 99 | "kb": [2, 3], # Ok
@ -414,7 +414,7 @@ help: Add missing whitespace
103 | }, 103 | },
104 | ] 104 | ]
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:102:21 --> E23.py:102:21
| |
100 | "kc": [2, 3], # Ok 100 | "kc": [2, 3], # Ok
@ -434,7 +434,7 @@ help: Add missing whitespace
104 | ] 104 | ]
105 | } 105 | }
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:102:24 --> E23.py:102:24
| |
100 | "kc": [2, 3], # Ok 100 | "kc": [2, 3], # Ok
@ -454,7 +454,7 @@ help: Add missing whitespace
104 | ] 104 | ]
105 | } 105 | }
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:109:18 --> E23.py:109:18
| |
108 | # Should be E231 errors on all of these type parameters and function parameters, but not on their (strange) defaults 108 | # Should be E231 errors on all of these type parameters and function parameters, but not on their (strange) defaults
@ -473,7 +473,7 @@ help: Add missing whitespace
111 | y:B = [[["foo", "bar"]]], 111 | y:B = [[["foo", "bar"]]],
112 | z:object = "fooo", 112 | z:object = "fooo",
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:109:40 --> E23.py:109:40
| |
108 | # Should be E231 errors on all of these type parameters and function parameters, but not on their (strange) defaults 108 | # Should be E231 errors on all of these type parameters and function parameters, but not on their (strange) defaults
@ -492,7 +492,7 @@ help: Add missing whitespace
111 | y:B = [[["foo", "bar"]]], 111 | y:B = [[["foo", "bar"]]],
112 | z:object = "fooo", 112 | z:object = "fooo",
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:109:70 --> E23.py:109:70
| |
108 | # Should be E231 errors on all of these type parameters and function parameters, but not on their (strange) defaults 108 | # Should be E231 errors on all of these type parameters and function parameters, but not on their (strange) defaults
@ -511,7 +511,7 @@ help: Add missing whitespace
111 | y:B = [[["foo", "bar"]]], 111 | y:B = [[["foo", "bar"]]],
112 | z:object = "fooo", 112 | z:object = "fooo",
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:110:6 --> E23.py:110:6
| |
108 | # Should be E231 errors on all of these type parameters and function parameters, but not on their (strange) defaults 108 | # Should be E231 errors on all of these type parameters and function parameters, but not on their (strange) defaults
@ -531,7 +531,7 @@ help: Add missing whitespace
112 | z:object = "fooo", 112 | z:object = "fooo",
113 | ): 113 | ):
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:111:6 --> E23.py:111:6
| |
109 | def pep_696_bad[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]( 109 | def pep_696_bad[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes](
@ -551,7 +551,7 @@ help: Add missing whitespace
113 | ): 113 | ):
114 | pass 114 | pass
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:112:6 --> E23.py:112:6
| |
110 | x:A = "foo"[::-1], 110 | x:A = "foo"[::-1],
@ -571,7 +571,7 @@ help: Add missing whitespace
114 | pass 114 | pass
115 | 115 |
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:116:18 --> E23.py:116:18
| |
114 | pass 114 | pass
@ -591,7 +591,7 @@ help: Add missing whitespace
118 | self, 118 | self,
119 | x:A = "foo"[::-1], 119 | x:A = "foo"[::-1],
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:116:40 --> E23.py:116:40
| |
114 | pass 114 | pass
@ -611,7 +611,7 @@ help: Add missing whitespace
118 | self, 118 | self,
119 | x:A = "foo"[::-1], 119 | x:A = "foo"[::-1],
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:116:70 --> E23.py:116:70
| |
114 | pass 114 | pass
@ -631,7 +631,7 @@ help: Add missing whitespace
118 | self, 118 | self,
119 | x:A = "foo"[::-1], 119 | x:A = "foo"[::-1],
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:117:29 --> E23.py:117:29
| |
116 | class PEP696Bad[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]: 116 | class PEP696Bad[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]:
@ -650,7 +650,7 @@ help: Add missing whitespace
119 | x:A = "foo"[::-1], 119 | x:A = "foo"[::-1],
120 | y:B = [[["foo", "bar"]]], 120 | y:B = [[["foo", "bar"]]],
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:117:51 --> E23.py:117:51
| |
116 | class PEP696Bad[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]: 116 | class PEP696Bad[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]:
@ -669,7 +669,7 @@ help: Add missing whitespace
119 | x:A = "foo"[::-1], 119 | x:A = "foo"[::-1],
120 | y:B = [[["foo", "bar"]]], 120 | y:B = [[["foo", "bar"]]],
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:117:81 --> E23.py:117:81
| |
116 | class PEP696Bad[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]: 116 | class PEP696Bad[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]:
@ -688,7 +688,7 @@ help: Add missing whitespace
119 | x:A = "foo"[::-1], 119 | x:A = "foo"[::-1],
120 | y:B = [[["foo", "bar"]]], 120 | y:B = [[["foo", "bar"]]],
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:119:10 --> E23.py:119:10
| |
117 | def pep_696_bad_method[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]( 117 | def pep_696_bad_method[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes](
@ -708,7 +708,7 @@ help: Add missing whitespace
121 | z:object = "fooo", 121 | z:object = "fooo",
122 | ): 122 | ):
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:120:10 --> E23.py:120:10
| |
118 | self, 118 | self,
@ -728,7 +728,7 @@ help: Add missing whitespace
122 | ): 122 | ):
123 | pass 123 | pass
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:121:10 --> E23.py:121:10
| |
119 | x:A = "foo"[::-1], 119 | x:A = "foo"[::-1],
@ -748,7 +748,7 @@ help: Add missing whitespace
123 | pass 123 | pass
124 | 124 |
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:125:32 --> E23.py:125:32
| |
123 | pass 123 | pass
@ -768,7 +768,7 @@ help: Add missing whitespace
127 | pass 127 | pass
128 | 128 |
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:125:54 --> E23.py:125:54
| |
123 | pass 123 | pass
@ -788,7 +788,7 @@ help: Add missing whitespace
127 | pass 127 | pass
128 | 128 |
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:125:84 --> E23.py:125:84
| |
123 | pass 123 | pass
@ -808,7 +808,7 @@ help: Add missing whitespace
127 | pass 127 | pass
128 | 128 |
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:126:47 --> E23.py:126:47
| |
125 | class PEP696BadWithEmptyBases[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes](): 125 | class PEP696BadWithEmptyBases[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]():
@ -826,7 +826,7 @@ help: Add missing whitespace
128 | 128 |
129 | # Should be no E231 errors on any of these: 129 | # Should be no E231 errors on any of these:
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:126:69 --> E23.py:126:69
| |
125 | class PEP696BadWithEmptyBases[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes](): 125 | class PEP696BadWithEmptyBases[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]():
@ -844,7 +844,7 @@ help: Add missing whitespace
128 | 128 |
129 | # Should be no E231 errors on any of these: 129 | # Should be no E231 errors on any of these:
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:126:99 --> E23.py:126:99
| |
125 | class PEP696BadWithEmptyBases[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes](): 125 | class PEP696BadWithEmptyBases[A:object="foo"[::-1], B:object =[[["foo", "bar"]]], C:object= bytes]():
@ -862,7 +862,7 @@ help: Add missing whitespace
128 | 128 |
129 | # Should be no E231 errors on any of these: 129 | # Should be no E231 errors on any of these:
E231 [*] Missing whitespace after ',' E231 [*] Missing whitespace after `,`
--> E23.py:147:6 --> E23.py:147:6
| |
146 | # E231 146 | # E231
@ -881,7 +881,7 @@ help: Add missing whitespace
149 | # Okay because it's hard to differentiate between the usages of a colon in a t-string 149 | # Okay because it's hard to differentiate between the usages of a colon in a t-string
150 | t"{a:=1}" 150 | t"{a:=1}"
E231 [*] Missing whitespace after ':' E231 [*] Missing whitespace after `:`
--> E23.py:161:37 --> E23.py:161:37
| |
160 | #: E231 160 | #: E231

View File

@ -1,7 +1,7 @@
--- ---
source: crates/ruff_linter/src/rules/pycodestyle/mod.rs source: crates/ruff_linter/src/rules/pycodestyle/mod.rs
--- ---
invalid-syntax: Expected ']', found '(' invalid-syntax: Expected `]`, found `(`
--> E30_syntax_error.py:4:15 --> E30_syntax_error.py:4:15
| |
2 | # parenthesis. 2 | # parenthesis.
@ -11,7 +11,7 @@ invalid-syntax: Expected ']', found '('
5 | pass 5 | pass
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:13:18 --> E30_syntax_error.py:13:18
| |
12 | class Foo: 12 | class Foo:
@ -32,7 +32,7 @@ E301 Expected 1 blank line, found 0
| |
help: Add missing blank line help: Add missing blank line
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:18:11 --> E30_syntax_error.py:18:11
| |
16 | pass 16 | pass
@ -41,7 +41,7 @@ invalid-syntax: Expected ')', found newline
| ^ | ^
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:21:9 --> E30_syntax_error.py:21:9
| |
21 | def top( 21 | def top(

View File

@ -1,7 +1,7 @@
--- ---
source: crates/ruff_linter/src/rules/pycodestyle/mod.rs source: crates/ruff_linter/src/rules/pycodestyle/mod.rs
--- ---
invalid-syntax: Expected ']', found '(' invalid-syntax: Expected `]`, found `(`
--> E30_syntax_error.py:4:15 --> E30_syntax_error.py:4:15
| |
2 | # parenthesis. 2 | # parenthesis.
@ -22,7 +22,7 @@ E302 Expected 2 blank lines, found 1
| |
help: Add missing blank line(s) help: Add missing blank line(s)
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:13:18 --> E30_syntax_error.py:13:18
| |
12 | class Foo: 12 | class Foo:
@ -32,7 +32,7 @@ invalid-syntax: Expected ')', found newline
15 | def method(): 15 | def method():
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:18:11 --> E30_syntax_error.py:18:11
| |
16 | pass 16 | pass
@ -41,7 +41,7 @@ invalid-syntax: Expected ')', found newline
| ^ | ^
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:21:9 --> E30_syntax_error.py:21:9
| |
21 | def top( 21 | def top(

View File

@ -1,7 +1,7 @@
--- ---
source: crates/ruff_linter/src/rules/pycodestyle/mod.rs source: crates/ruff_linter/src/rules/pycodestyle/mod.rs
--- ---
invalid-syntax: Expected ']', found '(' invalid-syntax: Expected `]`, found `(`
--> E30_syntax_error.py:4:15 --> E30_syntax_error.py:4:15
| |
2 | # parenthesis. 2 | # parenthesis.
@ -21,7 +21,7 @@ E303 Too many blank lines (3)
| |
help: Remove extraneous blank line(s) help: Remove extraneous blank line(s)
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:13:18 --> E30_syntax_error.py:13:18
| |
12 | class Foo: 12 | class Foo:
@ -31,7 +31,7 @@ invalid-syntax: Expected ')', found newline
15 | def method(): 15 | def method():
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:18:11 --> E30_syntax_error.py:18:11
| |
16 | pass 16 | pass
@ -40,7 +40,7 @@ invalid-syntax: Expected ')', found newline
| ^ | ^
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:21:9 --> E30_syntax_error.py:21:9
| |
21 | def top( 21 | def top(

View File

@ -1,7 +1,7 @@
--- ---
source: crates/ruff_linter/src/rules/pycodestyle/mod.rs source: crates/ruff_linter/src/rules/pycodestyle/mod.rs
--- ---
invalid-syntax: Expected ']', found '(' invalid-syntax: Expected `]`, found `(`
--> E30_syntax_error.py:4:15 --> E30_syntax_error.py:4:15
| |
2 | # parenthesis. 2 | # parenthesis.
@ -11,7 +11,7 @@ invalid-syntax: Expected ']', found '('
5 | pass 5 | pass
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:13:18 --> E30_syntax_error.py:13:18
| |
12 | class Foo: 12 | class Foo:
@ -31,7 +31,7 @@ E305 Expected 2 blank lines after class or function definition, found (1)
| |
help: Add missing blank line(s) help: Add missing blank line(s)
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:18:11 --> E30_syntax_error.py:18:11
| |
16 | pass 16 | pass
@ -40,7 +40,7 @@ invalid-syntax: Expected ')', found newline
| ^ | ^
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:21:9 --> E30_syntax_error.py:21:9
| |
21 | def top( 21 | def top(

View File

@ -1,7 +1,7 @@
--- ---
source: crates/ruff_linter/src/rules/pycodestyle/mod.rs source: crates/ruff_linter/src/rules/pycodestyle/mod.rs
--- ---
invalid-syntax: Expected ']', found '(' invalid-syntax: Expected `]`, found `(`
--> E30_syntax_error.py:4:15 --> E30_syntax_error.py:4:15
| |
2 | # parenthesis. 2 | # parenthesis.
@ -11,7 +11,7 @@ invalid-syntax: Expected ']', found '('
5 | pass 5 | pass
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:13:18 --> E30_syntax_error.py:13:18
| |
12 | class Foo: 12 | class Foo:
@ -21,7 +21,7 @@ invalid-syntax: Expected ')', found newline
15 | def method(): 15 | def method():
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:18:11 --> E30_syntax_error.py:18:11
| |
16 | pass 16 | pass
@ -30,7 +30,7 @@ invalid-syntax: Expected ')', found newline
| ^ | ^
| |
invalid-syntax: Expected ')', found newline invalid-syntax: Expected `)`, found newline
--> E30_syntax_error.py:21:9 --> E30_syntax_error.py:21:9
| |
21 | def top( 21 | def top(

View File

@ -705,20 +705,31 @@ fn parse_parameters_numpy(content: &str, content_start: TextSize) -> Vec<Paramet
.is_some_and(|first_char| !first_char.is_whitespace()) .is_some_and(|first_char| !first_char.is_whitespace())
{ {
if let Some(before_colon) = entry.split(':').next() { if let Some(before_colon) = entry.split(':').next() {
let param = before_colon.trim_end(); let param_line = before_colon.trim_end();
let param_name = param.trim_start_matches('*'); // Split on commas to handle comma-separated parameters
if is_identifier(param_name) { let mut current_offset = TextSize::from(0);
let param_start = line_start + indentation.text_len(); for param_part in param_line.split(',') {
let param_end = param_start + param.text_len(); let param_part_trimmed = param_part.trim();
let param_name = param_part_trimmed.trim_start_matches('*');
if is_identifier(param_name) {
// Calculate the position of this specific parameter part within the line
// Account for leading whitespace that gets trimmed
let param_start_in_line = current_offset
+ (param_part.text_len() - param_part_trimmed.text_len());
let param_start =
line_start + indentation.text_len() + param_start_in_line;
entries.push(ParameterEntry { entries.push(ParameterEntry {
name: param_name, name: param_name,
has_definition: true, has_definition: true,
range: TextRange::new( range: TextRange::at(
content_start + param_start, content_start + param_start,
content_start + param_end, param_part_trimmed.text_len(),
), ),
}); });
}
// Update offset for next iteration: add the part length plus comma length
current_offset = current_offset + param_part.text_len() + ','.text_len();
} }
} }
} }
@ -755,12 +766,30 @@ fn parse_raises(content: &str, style: Option<SectionStyle>) -> Vec<QualifiedName
/// ``` /// ```
fn parse_raises_google(content: &str) -> Vec<QualifiedName<'_>> { fn parse_raises_google(content: &str) -> Vec<QualifiedName<'_>> {
let mut entries: Vec<QualifiedName> = Vec::new(); let mut entries: Vec<QualifiedName> = Vec::new();
for potential in content.lines() { let mut lines = content.lines().peekable();
let Some(colon_idx) = potential.find(':') else { let Some(first) = lines.peek() else {
continue; return entries;
}; };
let entry = potential[..colon_idx].trim(); let indentation = &first[..first.len() - first.trim_start().len()];
entries.push(QualifiedName::user_defined(entry)); for potential in lines {
if let Some(entry) = potential.strip_prefix(indentation) {
if let Some(first_char) = entry.chars().next() {
if !first_char.is_whitespace() {
if let Some(colon_idx) = entry.find(':') {
let entry = entry[..colon_idx].trim();
if !entry.is_empty() {
entries.push(QualifiedName::user_defined(entry));
}
}
}
}
} else {
// If we can't strip the expected indentation, check if this is a dedented line
// (not blank) - if so, break early as we've reached the end of this section
if !potential.trim().is_empty() {
break;
}
}
} }
entries entries
} }
@ -788,6 +817,12 @@ fn parse_raises_numpy(content: &str) -> Vec<QualifiedName<'_>> {
let indentation = &dashes_line[..dashes_line.len() - dashes.len()]; let indentation = &dashes_line[..dashes_line.len() - dashes.len()];
for potential in lines { for potential in lines {
if let Some(entry) = potential.strip_prefix(indentation) { if let Some(entry) = potential.strip_prefix(indentation) {
// Check for Sphinx directives (lines starting with ..) - these indicate the end of the
// section. In numpy-style, exceptions are dedented to the same level as sphinx
// directives.
if entry.starts_with("..") {
break;
}
if let Some(first_char) = entry.chars().next() { if let Some(first_char) = entry.chars().next() {
if !first_char.is_whitespace() { if !first_char.is_whitespace() {
entries.push(QualifiedName::user_defined(entry.trim_end())); entries.push(QualifiedName::user_defined(entry.trim_end()));

View File

@ -71,17 +71,7 @@ D417 Missing argument description in the docstring for `test_missing_numpy_args`
399 | """Toggle the gizmo. 399 | """Toggle the gizmo.
| |
D417 Missing argument descriptions in the docstring for `test_method`: `another_test`, `test`, `x`, `y` D417 Missing argument descriptions in the docstring for `test_missing_args`: `test`, `y`, `z`
--> sections.py:413:9
|
411 | """Test class."""
412 |
413 | def test_method(self, test, another_test, z, _, x=1, y=2, _private_arg=1): # noqa: D213, D407
| ^^^^^^^^^^^
414 | """Test a valid args section.
|
D417 Missing argument descriptions in the docstring for `test_missing_args`: `t`, `test`, `x`, `y`, `z`
--> sections.py:434:9 --> sections.py:434:9
| |
432 | "(argument(s) test, y, z are missing descriptions in " 432 | "(argument(s) test, y, z are missing descriptions in "
@ -91,7 +81,7 @@ D417 Missing argument descriptions in the docstring for `test_missing_args`: `t`
435 | """Test a valid args section. 435 | """Test a valid args section.
| |
D417 Missing argument descriptions in the docstring for `test_missing_args_static_method`: `a`, `x`, `y`, `z` D417 Missing argument descriptions in the docstring for `test_missing_args_static_method`: `a`, `z`
--> sections.py:468:9 --> sections.py:468:9
| |
466 | "(argument(s) a, z are missing descriptions in " 466 | "(argument(s) a, z are missing descriptions in "

View File

@ -95,3 +95,23 @@ DOC502 Raised exception is not explicitly raised: `DivisionByZero`
82 | return distance / time 82 | return distance / time
| |
help: Remove `DivisionByZero` from the docstring help: Remove `DivisionByZero` from the docstring
DOC502 Raised exception is not explicitly raised: `ZeroDivisionError`
--> DOC502_numpy.py:139:5
|
137 | # of the exceptions
138 | def foo():
139 | / """First line.
140 | |
141 | | Raises
142 | | ------
143 | | ValueError
144 | | some text
145 | | .. math:: e^{xception}
146 | | ZeroDivisionError
147 | | Will not be raised, DOC502
148 | | """
| |_______^
149 | raise ValueError
|
help: Remove `ZeroDivisionError` from the docstring

View File

@ -187,3 +187,36 @@ DOC102 Documented parameter `a` is not in the function's signature
302 | b 302 | b
| |
help: Remove the extraneous parameter from the docstring help: Remove the extraneous parameter from the docstring
DOC102 Documented parameter `x1` is not in the function's signature
--> DOC102_numpy.py:380:5
|
378 | Parameters
379 | ----------
380 | x1, x2 : object
| ^^
381 | Objects.
|
help: Remove the extraneous parameter from the docstring
DOC102 Documented parameter `x2` is not in the function's signature
--> DOC102_numpy.py:380:9
|
378 | Parameters
379 | ----------
380 | x1, x2 : object
| ^^
381 | Objects.
|
help: Remove the extraneous parameter from the docstring
DOC102 Documented parameter `extra_param` is not in the function's signature
--> DOC102_numpy.py:418:5
|
416 | x1, x2 : str
417 | String parameters for processing.
418 | extra_param : str
| ^^^^^^^^^^^
419 | Extra parameter not in signature.
|
help: Remove the extraneous parameter from the docstring

View File

@ -94,7 +94,7 @@ pub(crate) fn capitalized(checker: &Checker, docstring: &Docstring) {
let mut diagnostic = checker.report_diagnostic( let mut diagnostic = checker.report_diagnostic(
FirstWordUncapitalized { FirstWordUncapitalized {
first_word: first_word.to_string(), first_word: first_word.to_string(),
capitalized_word: capitalized_word.to_string(), capitalized_word: capitalized_word.clone(),
}, },
docstring.range(), docstring.range(),
); );

View File

@ -1,9 +1,6 @@
use ruff_macros::{ViolationMetadata, derive_message_formats}; use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_text_size::Ranged;
use crate::Violation; use crate::Violation;
use crate::checkers::ast::Checker;
/// ## What it does /// ## What it does
/// Checks for `nonlocal` names without bindings. /// Checks for `nonlocal` names without bindings.
@ -46,19 +43,3 @@ impl Violation for NonlocalWithoutBinding {
format!("Nonlocal name `{name}` found without binding") format!("Nonlocal name `{name}` found without binding")
} }
} }
/// PLE0117
pub(crate) fn nonlocal_without_binding(checker: &Checker, nonlocal: &ast::StmtNonlocal) {
if !checker.semantic().scope_id.is_global() {
for name in &nonlocal.names {
if checker.semantic().nonlocal(name).is_none() {
checker.report_diagnostic(
NonlocalWithoutBinding {
name: name.to_string(),
},
name.range(),
);
}
}
}
}

View File

@ -99,6 +99,7 @@ mod tests {
#[test_case(Rule::UTF8EncodingDeclaration, Path::new("UP009_many_empty_lines.py"))] #[test_case(Rule::UTF8EncodingDeclaration, Path::new("UP009_many_empty_lines.py"))]
#[test_case(Rule::UnicodeKindPrefix, Path::new("UP025.py"))] #[test_case(Rule::UnicodeKindPrefix, Path::new("UP025.py"))]
#[test_case(Rule::UnnecessaryBuiltinImport, Path::new("UP029_0.py"))] #[test_case(Rule::UnnecessaryBuiltinImport, Path::new("UP029_0.py"))]
#[test_case(Rule::UnnecessaryBuiltinImport, Path::new("UP029_2.py"))]
#[test_case(Rule::UnnecessaryClassParentheses, Path::new("UP039.py"))] #[test_case(Rule::UnnecessaryClassParentheses, Path::new("UP039.py"))]
#[test_case(Rule::UnnecessaryDefaultTypeArgs, Path::new("UP043.py"))] #[test_case(Rule::UnnecessaryDefaultTypeArgs, Path::new("UP043.py"))]
#[test_case(Rule::UnnecessaryEncodeUTF8, Path::new("UP012.py"))] #[test_case(Rule::UnnecessaryEncodeUTF8, Path::new("UP012.py"))]

View File

@ -766,11 +766,12 @@ pub(crate) fn deprecated_import(checker: &Checker, import_from_stmt: &StmtImport
} }
for operation in fixer.with_renames() { for operation in fixer.with_renames() {
checker.report_diagnostic( let mut diagnostic = checker.report_diagnostic(
DeprecatedImport { DeprecatedImport {
deprecation: Deprecation::WithRename(operation), deprecation: Deprecation::WithRename(operation),
}, },
import_from_stmt.range(), import_from_stmt.range(),
); );
diagnostic.add_primary_tag(ruff_db::diagnostic::DiagnosticTag::Deprecated);
} }
} }

View File

@ -75,7 +75,13 @@ pub(crate) fn unnecessary_builtin_import(
stmt: &Stmt, stmt: &Stmt,
module: &str, module: &str,
names: &[Alias], names: &[Alias],
level: u32,
) { ) {
// Ignore relative imports (they're importing from local modules, not Python's builtins).
if level > 0 {
return;
}
// Ignore irrelevant modules. // Ignore irrelevant modules.
if !matches!( if !matches!(
module, module,

View File

@ -0,0 +1,4 @@
---
source: crates/ruff_linter/src/rules/pyupgrade/mod.rs
---

View File

@ -12,7 +12,6 @@ mod tests {
use test_case::test_case; use test_case::test_case;
use crate::registry::Rule; use crate::registry::Rule;
use crate::settings::types::PreviewMode;
use crate::test::test_path; use crate::test::test_path;
use crate::{assert_diagnostics, settings}; use crate::{assert_diagnostics, settings};
@ -63,25 +62,6 @@ mod tests {
Ok(()) Ok(())
} }
#[test_case(Rule::ReadWholeFile, Path::new("FURB101.py"))]
#[test_case(Rule::WriteWholeFile, Path::new("FURB103.py"))]
fn preview_rules(rule_code: Rule, path: &Path) -> Result<()> {
let snapshot = format!(
"preview_{}_{}",
rule_code.noqa_code(),
path.to_string_lossy()
);
let diagnostics = test_path(
Path::new("refurb").join(path).as_path(),
&settings::LinterSettings {
preview: PreviewMode::Enabled,
..settings::LinterSettings::for_rule(rule_code)
},
)?;
assert_diagnostics!(snapshot, diagnostics);
Ok(())
}
#[test] #[test]
fn write_whole_file_python_39() -> Result<()> { fn write_whole_file_python_39() -> Result<()> {
let diagnostics = test_path( let diagnostics = test_path(

Some files were not shown because too many files have changed in this diff Show More