Compare commits

No commits in common. "main" and "0.14.2" have entirely different histories.

main...0.14.2

1381 changed files with 24404 additions and 300572 deletions

View File

@@ -2,11 +2,12 @@
   $schema: "https://docs.renovatebot.com/renovate-schema.json",
   dependencyDashboard: true,
   suppressNotifications: ["prEditedNotification"],
-  extends: ["github>astral-sh/renovate-config"],
+  extends: ["config:recommended"],
   labels: ["internal"],
   schedule: ["before 4am on Monday"],
   semanticCommits: "disabled",
   separateMajorMinor: false,
+  prHourlyLimit: 10,
   enabledManagers: ["github-actions", "pre-commit", "cargo", "pep621", "pip_requirements", "npm"],
   cargo: {
     // See https://docs.renovatebot.com/configuration-options/#rangestrategy
@@ -15,7 +16,7 @@
   pep621: {
     // The default for this package manager is to only search for `pyproject.toml` files
     // found at the repository root: https://docs.renovatebot.com/modules/manager/pep621/#file-matching
-    managerFilePatterns: ["^(python|scripts)/.*pyproject\\.toml$"],
+    fileMatch: ["^(python|scripts)/.*pyproject\\.toml$"],
   },
   pip_requirements: {
     // The default for this package manager is to run on all requirements.txt files:
@@ -33,7 +34,7 @@
   npm: {
     // The default for this package manager is to only search for `package.json` files
     // found at the repository root: https://docs.renovatebot.com/modules/manager/npm/#file-matching
-    managerFilePatterns: ["^playground/.*package\\.json$"],
+    fileMatch: ["^playground/.*package\\.json$"],
   },
   "pre-commit": {
     enabled: true,
@@ -75,6 +76,14 @@
       matchManagers: ["cargo"],
       enabled: false,
     },
+    {
+      // `mkdocs-material` requires a manual update to keep the version in sync
+      // with `mkdocs-material-insider`.
+      // See: https://squidfunk.github.io/mkdocs-material/insiders/upgrade/
+      matchManagers: ["pip_requirements"],
+      matchPackageNames: ["mkdocs-material"],
+      enabled: false,
+    },
     {
       groupName: "pre-commit dependencies",
       matchManagers: ["pre-commit"],

View File

@@ -43,7 +43,7 @@ jobs:
         with:
           submodules: recursive
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
       - name: "Prep README.md"
@@ -72,7 +72,7 @@ jobs:
         with:
           submodules: recursive
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           architecture: x64
@@ -114,7 +114,7 @@ jobs:
         with:
           submodules: recursive
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           architecture: arm64
@@ -170,7 +170,7 @@ jobs:
         with:
           submodules: recursive
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           architecture: ${{ matrix.platform.arch }}
@@ -223,7 +223,7 @@ jobs:
         with:
           submodules: recursive
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           architecture: x64
@@ -300,7 +300,7 @@ jobs:
         with:
           submodules: recursive
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
       - name: "Prep README.md"
@@ -365,7 +365,7 @@ jobs:
         with:
           submodules: recursive
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           architecture: x64
@@ -431,7 +431,7 @@ jobs:
         with:
           submodules: recursive
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
       - name: "Prep README.md"

View File

@@ -24,8 +24,6 @@ env:
   PACKAGE_NAME: ruff
   PYTHON_VERSION: "3.14"
   NEXTEST_PROFILE: ci
-  # Enable mdtests that require external dependencies
-  MDTEST_EXTERNAL: "1"

 jobs:
   determine_changes:
@@ -232,9 +230,7 @@ jobs:
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Install Rust toolchain"
         run: |
           rustup component add clippy
@@ -254,24 +250,21 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          shared-key: ruff-linux-debug
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Install mold"
         uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
       - name: "Install cargo nextest"
-        uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+        uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
         with:
           tool: cargo-nextest
       - name: "Install cargo insta"
-        uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+        uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
         with:
           tool: cargo-insta
       - name: "Install uv"
-        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+        uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
         with:
           enable-cache: "true"
       - name: ty mdtests (GitHub annotations)
@@ -284,12 +277,8 @@
         run: cargo test -p ty_python_semantic --test mdtest || true
       - name: "Run tests"
         run: cargo insta test --all-features --unreferenced reject --test-runner nextest
-      - name: Dogfood ty on py-fuzzer
-        run: uv run --project=./python/py-fuzzer cargo run -p ty check --project=./python/py-fuzzer
-      - name: Dogfood ty on the scripts directory
-        run: uv run --project=./scripts cargo run -p ty check --project=./scripts
-      - name: Dogfood ty on ty_benchmark
-        run: uv run --project=./scripts/ty_benchmark cargo run -p ty check --project=./scripts/ty_benchmark
+      # Dogfood ty on py-fuzzer
+      - run: uv run --project=./python/py-fuzzer cargo run -p ty check --project=./python/py-fuzzer
       # Check for broken links in the documentation.
       - run: cargo doc --all --no-deps
         env:
@@ -298,10 +287,18 @@
       # sync, not just public items. Eventually we should do this for all
       # crates; for now add crates here as they are warning-clean to prevent
       # regression.
-      - run: cargo doc --no-deps -p ty_python_semantic -p ty -p ty_test -p ruff_db -p ruff_python_formatter --document-private-items
+      - run: cargo doc --no-deps -p ty_python_semantic -p ty -p ty_test -p ruff_db --document-private-items
         env:
           # Setting RUSTDOCFLAGS because `cargo doc --check` isn't yet implemented (https://github.com/rust-lang/cargo/issues/10025).
           RUSTDOCFLAGS: "-D warnings"
+      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        with:
+          name: ruff
+          path: target/debug/ruff
+      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        with:
+          name: ty
+          path: target/debug/ty

   cargo-test-linux-release:
     name: "cargo test (linux, release)"
@@ -317,25 +314,25 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Install mold"
         uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
       - name: "Install cargo nextest"
-        uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+        uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
         with:
           tool: cargo-nextest
+      - name: "Install cargo insta"
+        uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
+        with:
+          tool: cargo-insta
       - name: "Install uv"
-        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+        uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
         with:
           enable-cache: "true"
       - name: "Run tests"
-        run: cargo nextest run --cargo-profile profiling --all-features
-      - name: "Run doctests"
-        run: cargo test --doc --profile profiling --all-features
+        run: cargo insta test --release --all-features --unreferenced reject --test-runner nextest

   cargo-test-other:
     strategy:
@@ -352,17 +349,15 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Install cargo nextest"
-        uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+        uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
         with:
           tool: cargo-nextest
       - name: "Install uv"
-        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+        uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
         with:
           enable-cache: "true"
       - name: "Run tests"
@@ -380,12 +375,10 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Install Rust toolchain"
         run: rustup target add wasm32-unknown-unknown
-      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+      - uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
         with:
           node-version: 22
           cache: "npm"
@@ -417,9 +410,7 @@
         with:
           file: "Cargo.toml"
           field: "workspace.package.rust-version"
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Install Rust toolchain"
         env:
           MSRV: ${{ steps.msrv.outputs.value }}
@@ -441,16 +432,13 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
         with:
           workspaces: "fuzz -> target"
-          save-if: ${{ github.ref == 'refs/heads/main' }}
       - name: "Install Rust toolchain"
         run: rustup show
-      - name: "Install mold"
-        uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
       - name: "Install cargo-binstall"
-        uses: cargo-bins/cargo-binstall@3fc81674af4165a753833a94cae9f91d8849049f # v1.16.2
+        uses: cargo-bins/cargo-binstall@a66119fbb1c952daba62640c2609111fe0803621 # v1.15.7
       - name: "Install cargo-fuzz"
         # Download the latest version from quick install and not the github releases because github releases only has MUSL targets.
         run: cargo binstall cargo-fuzz --force --disable-strategies crate-meta-data --no-confirm
@@ -459,7 +447,9 @@
   fuzz-parser:
     name: "fuzz parser"
     runs-on: ubuntu-latest
-    needs: determine_changes
+    needs:
+      - cargo-test-linux
+      - determine_changes
     if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.parser == 'true' || needs.determine_changes.outputs.py-fuzzer == 'true') }}
     timeout-minutes: 20
     env:
@@ -468,24 +458,27 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
+      - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
+        name: Download Ruff binary to test
+        id: download-cached-binary
         with:
-          shared-key: ruff-linux-debug
-          save-if: false
-      - name: "Install Rust toolchain"
-        run: rustup show
-      - name: Build Ruff binary
-        run: cargo build --bin ruff
+          name: ruff
+          path: ruff-to-test
       - name: Fuzz
+        env:
+          DOWNLOAD_PATH: ${{ steps.download-cached-binary.outputs.download-path }}
         run: |
+          # Make executable, since artifact download doesn't preserve this
+          chmod +x "${DOWNLOAD_PATH}/ruff"
           (
             uv run \
               --python="${PYTHON_VERSION}" \
               --project=./python/py-fuzzer \
               --locked \
               fuzz \
-              --test-executable=target/debug/ruff \
+              --test-executable="${DOWNLOAD_PATH}/ruff" \
               --bin=ruff \
               0-500
           )
@@ -500,10 +493,8 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
       - name: "Install Rust toolchain"
         run: rustup component add rustfmt
       # Run all code generation scripts, and verify that the current output is
@@ -527,7 +518,9 @@
   ecosystem:
     name: "ecosystem"
     runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-latest-8' || 'ubuntu-latest' }}
-    needs: determine_changes
+    needs:
+      - cargo-test-linux
+      - determine_changes
     # Only runs on pull requests, since that is the only way we can find the base version for comparison.
     # Ecosystem check needs linter and/or formatter changes.
     if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && needs.determine_changes.outputs.code == 'true' }}
@@ -535,37 +528,27 @@
     steps:
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
-          ref: ${{ github.event.pull_request.base.ref }}
           persist-credentials: false
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
         with:
-          python-version: ${{ env.PYTHON_VERSION }}
+          # TODO: figure out why `ruff-ecosystem` crashes on Python 3.14
+          python-version: "3.13"
           activate-environment: true
-      - name: "Install Rust toolchain"
-        run: rustup show
-      - name: "Install mold"
-        uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          shared-key: ruff-linux-debug
-          save-if: false
-      - name: Build baseline version
-        run: |
-          cargo build --bin ruff
-          mv target/debug/ruff target/debug/ruff-baseline
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          persist-credentials: false
-          clean: false
-      - name: Build comparison version
-        run: cargo build --bin ruff
+      - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
+        name: Download comparison Ruff binary
+        id: ruff-target
+        with:
+          name: ruff
+          path: target/debug
+      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
+        name: Download baseline Ruff binary
+        with:
+          name: ruff
+          branch: ${{ github.event.pull_request.base.ref }}
+          workflow: "ci.yaml"
+          check_artifacts: true
       - name: Install ruff-ecosystem
         run: |
@@ -573,11 +556,16 @@
       - name: Run `ruff check` stable ecosystem check
         if: ${{ needs.determine_changes.outputs.linter == 'true' }}
+        env:
+          DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
         run: |
+          # Make executable, since artifact download doesn't preserve this
+          chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
           # Set pipefail to avoid hiding errors with tee
           set -eo pipefail
-          ruff-ecosystem check ./target/debug/ruff-baseline ./target/debug/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
+          ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-check-stable
           cat ecosystem-result-check-stable > "$GITHUB_STEP_SUMMARY"
           echo "### Linter (stable)" > ecosystem-result
@@ -586,11 +574,16 @@
       - name: Run `ruff check` preview ecosystem check
         if: ${{ needs.determine_changes.outputs.linter == 'true' }}
+        env:
+          DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
         run: |
+          # Make executable, since artifact download doesn't preserve this
+          chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
           # Set pipefail to avoid hiding errors with tee
           set -eo pipefail
-          ruff-ecosystem check ./target/debug/ruff-baseline ./target/debug/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
+          ruff-ecosystem check ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-check-preview
           cat ecosystem-result-check-preview > "$GITHUB_STEP_SUMMARY"
           echo "### Linter (preview)" >> ecosystem-result
@@ -599,11 +592,16 @@
       - name: Run `ruff format` stable ecosystem check
         if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
+        env:
+          DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
         run: |
+          # Make executable, since artifact download doesn't preserve this
+          chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
           # Set pipefail to avoid hiding errors with tee
           set -eo pipefail
-          ruff-ecosystem format ./target/debug/ruff-baseline ./target/debug/ruff --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
+          ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown | tee ecosystem-result-format-stable
           cat ecosystem-result-format-stable > "$GITHUB_STEP_SUMMARY"
           echo "### Formatter (stable)" >> ecosystem-result
@@ -612,19 +610,32 @@
       - name: Run `ruff format` preview ecosystem check
         if: ${{ needs.determine_changes.outputs.formatter == 'true' }}
+        env:
+          DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
         run: |
+          # Make executable, since artifact download doesn't preserve this
+          chmod +x ./ruff "${DOWNLOAD_PATH}/ruff"
           # Set pipefail to avoid hiding errors with tee
           set -eo pipefail
-          ruff-ecosystem format ./target/debug/ruff-baseline ./target/debug/ruff --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
+          ruff-ecosystem format ./ruff "${DOWNLOAD_PATH}/ruff" --cache ./checkouts --output-format markdown --force-preview | tee ecosystem-result-format-preview
           cat ecosystem-result-format-preview > "$GITHUB_STEP_SUMMARY"
           echo "### Formatter (preview)" >> ecosystem-result
           cat ecosystem-result-format-preview >> ecosystem-result
           echo "" >> ecosystem-result
-      # NOTE: astral-sh-bot uses this artifact to post comments on PRs.
-      # Make sure to update the bot if you rename the artifact.
+      - name: Export pull request number
+        run: |
+          echo ${{ github.event.number }} > pr-number
+      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        name: Upload PR Number
+        with:
+          name: pr-number
+          path: pr-number
       - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         name: Upload Results
         with:
@@ -635,38 +646,36 @@
     name: "Fuzz for new ty panics"
     runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-22.04-16' || 'ubuntu-latest' }}
     needs:
+      - cargo-test-linux
       - determine_changes
     # Only runs on pull requests, since that is the only way we can find the base version for comparison.
     if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && github.event_name == 'pull_request' && (needs.determine_changes.outputs.ty == 'true' || needs.determine_changes.outputs.py-fuzzer == 'true') }}
-    timeout-minutes: ${{ github.repository == 'astral-sh/ruff' && 10 || 20 }}
+    timeout-minutes: ${{ github.repository == 'astral-sh/ruff' && 5 || 20 }}
     steps:
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
-          fetch-depth: 0
           persist-credentials: false
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
+        name: Download new ty binary
+        id: ty-new
         with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - name: "Install Rust toolchain"
-        run: rustup show
-      - name: "Install mold"
-        uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
+          name: ty
+          path: target/debug
+      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
+        name: Download baseline ty binary
+        with:
+          name: ty
+          branch: ${{ github.event.pull_request.base.ref }}
+          workflow: "ci.yaml"
+          check_artifacts: true
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
       - name: Fuzz
         env:
           FORCE_COLOR: 1
+          NEW_TY: ${{ steps.ty-new.outputs.download-path }}
         run: |
-          echo "new commit"
-          git rev-list --format=%s --max-count=1 "$GITHUB_SHA"
-          cargo build --profile=profiling --bin=ty
-          mv target/profiling/ty ty-new
-          MERGE_BASE="$(git merge-base "$GITHUB_SHA" "origin/$GITHUB_BASE_REF")"
-          git checkout -b old_commit "$MERGE_BASE"
-          echo "old commit (merge base)"
-          git rev-list --format=%s --max-count=1 old_commit
-          cargo build --profile=profiling --bin=ty
-          mv target/profiling/ty ty-old
+          # Make executable, since artifact download doesn't preserve this
+          chmod +x "${PWD}/ty" "${NEW_TY}/ty"
           (
             uv run \
@@ -674,8 +683,8 @@
               --project=./python/py-fuzzer \
               --locked \
               fuzz \
-              --test-executable=ty-new \
-              --baseline-executable=ty-old \
+              --test-executable="${NEW_TY}/ty" \
+              --baseline-executable="${PWD}/ty" \
               --only-new-bugs \
               --bin=ty \
               0-1000
@@ -690,7 +699,7 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: cargo-bins/cargo-binstall@3fc81674af4165a753833a94cae9f91d8849049f # v1.16.2
+      - uses: cargo-bins/cargo-binstall@a66119fbb1c952daba62640c2609111fe0803621 # v1.15.7
       - run: cargo binstall --no-confirm cargo-shear
       - run: cargo shear
@@ -703,16 +712,12 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Install Rust toolchain"
         run: rustup show
-      - name: "Install mold"
-        uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
       - name: "Run ty completion evaluation"
-        run: cargo run --profile profiling --package ty_completion_eval -- all --threshold 0.4 --tasks /tmp/completion-evaluation-tasks.csv
+        run: cargo run --release --package ty_completion_eval -- all --threshold 0.4 --tasks /tmp/completion-evaluation-tasks.csv
       - name: "Ensure there are no changes"
         run: diff ./crates/ty_completion_eval/completion-evaluation-tasks.csv /tmp/completion-evaluation-tasks.csv
@@ -725,13 +730,11 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: ${{ env.PYTHON_VERSION }}
           architecture: x64
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Prep README.md"
         run: python scripts/transform_readme.py --target pypi
       - name: "Build wheels"
@@ -754,11 +757,9 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
         with:
           node-version: 22
       - name: "Cache pre-commit"
@@ -781,21 +782,30 @@
     name: "mkdocs"
     runs-on: ubuntu-latest
     timeout-minutes: 10
+    env:
+      MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
     steps:
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - name: "Add SSH key"
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
+        uses: webfactory/ssh-agent@a6f90b1f127823b31d4d4a8d96047790581349bd # v0.9.1
+        with:
+          ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
       - name: "Install Rust toolchain"
         run: rustup show
       - name: Install uv
-        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+        uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
         with:
           python-version: 3.13
           activate-environment: true
+      - name: "Install Insiders dependencies"
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
+        run: uv pip install -r docs/requirements-insiders.txt
       - name: "Install dependencies"
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
         run: uv pip install -r docs/requirements.txt
       - name: "Update README File"
         run: python scripts/transform_readme.py --target mkdocs
@@ -803,8 +813,12 @@
         run: python scripts/generate_mkdocs.py
       - name: "Check docs formatting"
         run: python scripts/check_docs_formatted.py
+      - name: "Build Insiders docs"
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
+        run: mkdocs build --strict -f mkdocs.insiders.yml
       - name: "Build docs"
-        run: mkdocs build --strict -f mkdocs.yml
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
+        run: mkdocs build --strict -f mkdocs.public.yml

   check-formatter-instability-and-black-similarity:
     name: "formatter instabilities and black similarity"
@@ -816,9 +830,7 @@
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Run checks"
@@ -832,7 +844,9 @@
     name: "test ruff-lsp"
     runs-on: ubuntu-latest
     timeout-minutes: 5
-    needs: determine_changes
+    needs:
+      - cargo-test-linux
+      - determine_changes
     if: ${{ !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.code == 'true' || github.ref == 'refs/heads/main') }}
     steps:
       - uses: extractions/setup-just@e33e0265a09d6d736e2ee1e0eb685ef1de4669ff # v3.0.0
@@ -840,46 +854,37 @@
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        name: "Checkout ruff source"
-        with:
-          persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          shared-key: ruff-linux-debug
-          save-if: false
-      - name: "Install Rust toolchain"
-        run: rustup show
-      - name: Build Ruff binary
-        run: cargo build -p ruff --bin ruff
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        name: "Checkout ruff-lsp source"
+        name: "Download ruff-lsp source"
         with:
           persist-credentials: false
           repository: "astral-sh/ruff-lsp"
-          path: ruff-lsp
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           # installation fails on 3.13 and newer
           python-version: "3.12"
+      - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
+        name: Download development ruff binary
+        id: ruff-target
+        with:
+          name: ruff
+          path: target/debug
       - name: Install ruff-lsp dependencies
         run: |
-          cd ruff-lsp
           just install
       - name: Run ruff-lsp tests
+        env:
+          DOWNLOAD_PATH: ${{ steps.ruff-target.outputs.download-path }}
         run: |
           # Setup development binary
           pip uninstall --yes ruff
-          export PATH="${PWD}/target/debug:${PATH}"
+          chmod +x "${DOWNLOAD_PATH}/ruff"
+          export PATH="${DOWNLOAD_PATH}:${PATH}"
           ruff version
-          cd ruff-lsp
           just test

   check-playground:
@@ -895,17 +900,15 @@
           persist-credentials: false
       - name: "Install Rust toolchain"
         run: rustup target add wasm32-unknown-unknown
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
         with:
           node-version: 22
           cache: "npm"
           cache-dependency-path: playground/package-lock.json
       - uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
       - name: "Install Node dependencies"
-        run: npm ci --ignore-scripts
+        run: npm ci
         working-directory: playground
       - name: "Build playgrounds"
         run: npm run dev:wasm
@@ -929,36 +932,32 @@
         needs.determine_changes.outputs.linter == 'true'
       )
     timeout-minutes: 20
-    permissions:
-      contents: read # required for actions/checkout
-      id-token: write # required for OIDC authentication with CodSpeed
     steps:
       - name: "Checkout Branch"
         uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Install codspeed"
-        uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+        uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
         with:
           tool: cargo-codspeed
       - name: "Build benchmarks"
-        run: cargo codspeed build --features "codspeed,instrumented" --profile profiling --no-default-features -p ruff_benchmark --bench formatter --bench lexer --bench linter --bench parser
+        run: cargo codspeed build --features "codspeed,instrumented" --no-default-features -p ruff_benchmark --bench formatter --bench lexer --bench linter --bench parser
       - name: "Run benchmarks"
-        uses: CodSpeedHQ/action@346a2d8a8d9d38909abd0bc3d23f773110f076ad # v4.4.1
+        uses: CodSpeedHQ/action@6b43a0cd438f6ca5ad26f9ed03ed159ed2df7da9 # v4.1.1
         with:
-          mode: simulation
+          mode: instrumentation
           run: cargo codspeed run
+          token: ${{ secrets.CODSPEED_TOKEN }}

   benchmarks-instrumented-ty:
     name: "benchmarks instrumented (ty)"
@@ -971,36 +970,32 @@
         needs.determine_changes.outputs.ty == 'true'
       )
     timeout-minutes: 20
-    permissions:
-      contents: read # required for actions/checkout
-      id-token: write # required for OIDC authentication with CodSpeed
     steps:
      - name: "Checkout Branch"
        uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Install codspeed"
-        uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+        uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
         with:
           tool: cargo-codspeed
       - name: "Build benchmarks"
-        run: cargo codspeed build --features "codspeed,instrumented" --profile profiling --no-default-features -p ruff_benchmark --bench ty
+        run: cargo codspeed build --features "codspeed,instrumented" --no-default-features -p ruff_benchmark --bench ty
       - name: "Run benchmarks"
-        uses: CodSpeedHQ/action@346a2d8a8d9d38909abd0bc3d23f773110f076ad # v4.4.1
+        uses: CodSpeedHQ/action@6b43a0cd438f6ca5ad26f9ed03ed159ed2df7da9 # v4.1.1
         with:
-          mode: simulation
+          mode: instrumentation
           run: cargo codspeed run
+          token: ${{ secrets.CODSPEED_TOKEN }}

   benchmarks-walltime:
     name: "benchmarks walltime (${{ matrix.benchmarks }})"
@@ -1008,9 +1003,6 @@
     needs: determine_changes
     if: ${{ github.repository == 'astral-sh/ruff' && !contains(github.event.pull_request.labels.*.name, 'no-test') && (needs.determine_changes.outputs.ty == 'true' || github.ref == 'refs/heads/main') }}
     timeout-minutes: 20
-    permissions:
-      contents: read # required for actions/checkout
-      id-token: write # required for OIDC authentication with CodSpeed
     strategy:
       matrix:
         benchmarks:
@@ -1022,24 +1014,22 @@
         with:
           persist-credentials: false
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          save-if: ${{ github.ref == 'refs/heads/main' }}
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Install codspeed"
-        uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+        uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
         with:
           tool: cargo-codspeed
       - name: "Build benchmarks"
-        run: cargo codspeed build --features "codspeed,walltime" --profile profiling --no-default-features -p ruff_benchmark
+        run: cargo codspeed build --features "codspeed,walltime" --no-default-features -p ruff_benchmark
       - name: "Run benchmarks"
-        uses: CodSpeedHQ/action@346a2d8a8d9d38909abd0bc3d23f773110f076ad # v4.4.1
+        uses: CodSpeedHQ/action@6b43a0cd438f6ca5ad26f9ed03ed159ed2df7da9 # v4.1.1
         env:
           # enabling walltime flamegraphs adds ~6 minutes to the CI time, and they don't
           # appear to provide much useful insight for our walltime benchmarks right now
@@ -1048,3 +1038,4 @@
         with:
           mode: walltime
           run: cargo codspeed run --bench ty_walltime "${{ matrix.benchmarks }}"
+          token: ${{ secrets.CODSPEED_TOKEN }}
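
The structural change running through this whole file: on the 0.14.2 side, `cargo-test-linux` built the debug `ruff` and `ty` binaries once and published them with `actions/upload-artifact`, and downstream jobs (`fuzz-parser`, `ecosystem`, the ty fuzz job, `test ruff-lsp`) declared `needs: cargo-test-linux` and fetched them with `actions/download-artifact` (or `dawidd6/action-download-artifact` for the base branch's binary). On the main side those jobs instead rebuild from a warm `Swatinem/rust-cache` keyed by `shared-key: ruff-linux-debug`, with `save-if: false` so only the producing job writes the cache. The recurring `chmod +x` steps exist because artifact download does not preserve the executable bit. A condensed, hypothetical sketch of the artifact-passing variant (job names and pins abbreviated):

    jobs:
      build:
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v5
          - run: cargo build --bin ruff
          - uses: actions/upload-artifact@v4
            with:
              name: ruff
              path: target/debug/ruff
      consume:
        needs: build  # guarantees the artifact exists before downloading
        runs-on: ubuntu-latest
        steps:
          - uses: actions/download-artifact@v5
            id: dl
            with:
              name: ruff
          - env:
              DOWNLOAD_PATH: ${{ steps.dl.outputs.download-path }}
            run: |
              # Artifacts drop the execute bit, so restore it first.
              chmod +x "${DOWNLOAD_PATH}/ruff"
              "${DOWNLOAD_PATH}/ruff" --version

The trade-off is roughly build-once-download-many (less CPU, but an artifact round-trip and a hard `needs` edge) versus cache-and-rebuild-everywhere (more job parallelism, no cross-job file transfer).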

View File

@@ -34,12 +34,12 @@ jobs:
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           persist-credentials: false
-      - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+      - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
       - name: "Install Rust toolchain"
         run: rustup show
       - name: "Install mold"
         uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
       - name: Build ruff
         # A debug build means the script runs slower once it gets started,
         # but this is outweighed by the fact that a release build takes *much* longer to compile in CI

View File

@@ -43,11 +43,10 @@
           persist-credentials: false
       - name: Install the latest version of uv
-        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+        uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
         with:
-          shared-key: "mypy-primer"
           workspaces: "ruff"
       - name: Install Rust toolchain
@@ -56,20 +55,24 @@
       - name: Run mypy_primer
         env:
           PRIMER_SELECTOR: crates/ty_python_semantic/resources/primer/good.txt
-          CLICOLOR_FORCE: "1"
           DIFF_FILE: mypy_primer.diff
         run: |
           cd ruff
           scripts/mypy_primer.sh
+          echo ${{ github.event.number }} > ../pr-number
-      # NOTE: astral-sh-bot uses this artifact to post comments on PRs.
-      # Make sure to update the bot if you rename the artifact.
       - name: Upload diff
         uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
         with:
           name: mypy_primer_diff
           path: mypy_primer.diff
+      - name: Upload pr-number
+        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+        with:
+          name: pr-number
+          path: pr-number

   memory_usage:
     name: Run memory statistics
     runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-22.04-32' || 'ubuntu-latest' }}
@@ -82,12 +85,11 @@
           persist-credentials: false
       - name: Install the latest version of uv
-        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+        uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
         with:
           workspaces: "ruff"
-          shared-key: "mypy-primer"
       - name: Install Rust toolchain
         run: rustup show
@@ -107,54 +109,3 @@
         with:
           name: mypy_primer_memory_diff
           path: mypy_primer_memory.diff
-  # Runs mypy_primer twice against the same ty version to catch any non-deterministic behavior (ideally).
-  # The job is disabled for now because there are some non-deterministic diagnostics.
-  mypy_primer_same_revision:
-    name: Run mypy_primer on same revision
-    runs-on: ${{ github.repository == 'astral-sh/ruff' && 'depot-ubuntu-22.04-32' || 'ubuntu-latest' }}
-    timeout-minutes: 20
-    # TODO: Enable once we fixed the non-deterministic diagnostics
-    if: false
-    steps:
-      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
-        with:
-          path: ruff
-          fetch-depth: 0
-          persist-credentials: false
-      - name: Install the latest version of uv
-        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
-        with:
-          workspaces: "ruff"
-          shared-key: "mypy-primer"
-      - name: Install Rust toolchain
-        run: rustup show
-      - name: Run determinism check
-        env:
-          BASE_REVISION: ${{ github.event.pull_request.head.sha }}
-          PRIMER_SELECTOR: crates/ty_python_semantic/resources/primer/good.txt
-          CLICOLOR_FORCE: "1"
-          DIFF_FILE: mypy_primer_determinism.diff
-        run: |
-          cd ruff
-          scripts/mypy_primer.sh
-      - name: Check for non-determinism
-        run: |
-          # Remove ANSI color codes for checking
-          sed -e 's/\x1b\[[0-9;]*m//g' mypy_primer_determinism.diff > mypy_primer_determinism_clean.diff
-          # Check if there are any differences (non-determinism)
-          if [ -s mypy_primer_determinism_clean.diff ]; then
-            echo "ERROR: Non-deterministic output detected!"
-            echo "The following differences were found when running ty twice on the same commit:"
-            cat mypy_primer_determinism_clean.diff
-            exit 1
-          else
-            echo "✓ Output is deterministic"
-          fi
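
Two details worth flagging in the main-side `mypy_primer_same_revision` job above: it diffs two runs of ty at the same revision, so any non-empty (ANSI-stripped) diff is by definition non-determinism; and it is parked with a job-level `if: false`, which keeps the job definition in the file while guaranteeing it never schedules. A minimal, hypothetical sketch of that disable pattern:

    jobs:
      determinism-check:
        runs-on: ubuntu-latest
        # TODO: re-enable once the known non-determinism is fixed.
        if: false  # job stays defined but is never scheduled
        steps:
          - run: echo "unreachable until the guard above is removed"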

View File

@@ -0,0 +1,122 @@
name: PR comment (mypy_primer)

on: # zizmor: ignore[dangerous-triggers]
  workflow_run:
    workflows: [Run mypy_primer]
    types: [completed]
  workflow_dispatch:
    inputs:
      workflow_run_id:
        description: The mypy_primer workflow that triggers the workflow run
        required: true

jobs:
  comment:
    runs-on: ubuntu-24.04
    permissions:
      pull-requests: write
    steps:
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: Download PR number
        with:
          name: pr-number
          run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
          if_no_artifact_found: ignore
          allow_forks: true
      - name: Parse pull request number
        id: pr-number
        run: |
          if [[ -f pr-number ]]
          then
            echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
          fi
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: "Download mypy_primer results"
        id: download-mypy_primer_diff
        if: steps.pr-number.outputs.pr-number
        with:
          name: mypy_primer_diff
          workflow: mypy_primer.yaml
          pr: ${{ steps.pr-number.outputs.pr-number }}
          path: pr/mypy_primer_diff
          workflow_conclusion: completed
          if_no_artifact_found: ignore
          allow_forks: true
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: "Download mypy_primer memory results"
        id: download-mypy_primer_memory_diff
        if: steps.pr-number.outputs.pr-number
        with:
          name: mypy_primer_memory_diff
          workflow: mypy_primer.yaml
          pr: ${{ steps.pr-number.outputs.pr-number }}
          path: pr/mypy_primer_memory_diff
          workflow_conclusion: completed
          if_no_artifact_found: ignore
          allow_forks: true
      - name: Generate comment content
        id: generate-comment
        if: ${{ steps.download-mypy_primer_diff.outputs.found_artifact == 'true' && steps.download-mypy_primer_memory_diff.outputs.found_artifact == 'true' }}
        run: |
          # Guard against malicious mypy_primer results that symlink to a secret
          # file on this runner
          if [[ -L pr/mypy_primer_diff/mypy_primer.diff ]] || [[ -L pr/mypy_primer_memory_diff/mypy_primer_memory.diff ]]
          then
            echo "Error: mypy_primer.diff and mypy_primer_memory.diff cannot be a symlink"
            exit 1
          fi

          # Note this identifier is used to find the comment to update on
          # subsequent runs
          echo '<!-- generated-comment mypy_primer -->' >> comment.txt
          echo '## `mypy_primer` results' >> comment.txt
          if [ -s "pr/mypy_primer_diff/mypy_primer.diff" ]; then
            echo '<details>' >> comment.txt
            echo '<summary>Changes were detected when running on open source projects</summary>' >> comment.txt
            echo '' >> comment.txt
            echo '```diff' >> comment.txt
            cat pr/mypy_primer_diff/mypy_primer.diff >> comment.txt
            echo '```' >> comment.txt
            echo '</details>' >> comment.txt
          else
            echo 'No ecosystem changes detected ✅' >> comment.txt
          fi
          if [ -s "pr/mypy_primer_memory_diff/mypy_primer_memory.diff" ]; then
            echo '<details>' >> comment.txt
            echo '<summary>Memory usage changes were detected when running on open source projects</summary>' >> comment.txt
            echo '' >> comment.txt
            echo '```diff' >> comment.txt
            cat pr/mypy_primer_memory_diff/mypy_primer_memory.diff >> comment.txt
            echo '```' >> comment.txt
            echo '</details>' >> comment.txt
          else
            echo 'No memory usage changes detected ✅' >> comment.txt
          fi

          echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
          cat comment.txt >> "$GITHUB_OUTPUT"
          echo 'EOF' >> "$GITHUB_OUTPUT"
      - name: Find existing comment
        uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
        if: steps.generate-comment.outcome == 'success'
        id: find-comment
        with:
          issue-number: ${{ steps.pr-number.outputs.pr-number }}
          comment-author: "github-actions[bot]"
          body-includes: "<!-- generated-comment mypy_primer -->"
      - name: Create or update comment
        if: steps.find-comment.outcome == 'success'
        uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
        with:
          comment-id: ${{ steps.find-comment.outputs.comment-id }}
          issue-number: ${{ steps.pr-number.outputs.pr-number }}
          body-path: comment.txt
          edit-mode: replace
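The `comment<<EOF` lines above use the GitHub Actions multiline-output syntax: the runner parses `$GITHUB_OUTPUT` as `KEY=VALUE` lines, and the `KEY<<DELIMITER` form lets the value span multiple lines until the delimiter recurs. A standalone sketch (using a temp file in place of the runner-provided one):

```shell
GITHUB_OUTPUT=$(mktemp)            # stand-in for the file the runner provides
{
  echo 'comment<<EOF'              # start a multiline value named "comment"
  cat comment.txt                  # body may contain quotes, backticks, newlines
  echo 'EOF'                       # terminate the value
} >> "$GITHUB_OUTPUT"
```

One caveat worth noting: a body that itself contains a line reading `EOF` would end the value early, which is why randomized delimiters are often recommended when the content is untrusted.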

.github/workflows/pr-comment.yaml (new file)

@@ -0,0 +1,88 @@
name: Ecosystem check comment

on:
  workflow_run:
    workflows: [CI]
    types: [completed]
  workflow_dispatch:
    inputs:
      workflow_run_id:
        description: The ecosystem workflow that triggers the workflow run
        required: true

jobs:
  comment:
    runs-on: ubuntu-latest
    permissions:
      pull-requests: write
    steps:
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: Download pull request number
        with:
          name: pr-number
          run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
          if_no_artifact_found: ignore
          allow_forks: true
      - name: Parse pull request number
        id: pr-number
        run: |
          if [[ -f pr-number ]]
          then
            echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
          fi
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: "Download ecosystem results"
        id: download-ecosystem-result
        if: steps.pr-number.outputs.pr-number
        with:
          name: ecosystem-result
          workflow: ci.yaml
          pr: ${{ steps.pr-number.outputs.pr-number }}
          path: pr/ecosystem
          workflow_conclusion: completed
          if_no_artifact_found: ignore
          allow_forks: true
      - name: Generate comment content
        id: generate-comment
        if: steps.download-ecosystem-result.outputs.found_artifact == 'true'
        run: |
          # Guard against malicious ecosystem results that symlink to a secret
          # file on this runner
          if [[ -L pr/ecosystem/ecosystem-result ]]
          then
            echo "Error: ecosystem-result cannot be a symlink"
            exit 1
          fi

          # Note this identifier is used to find the comment to update on
          # subsequent runs
          echo '<!-- generated-comment ecosystem -->' >> comment.txt
          echo '## `ruff-ecosystem` results' >> comment.txt
          cat pr/ecosystem/ecosystem-result >> comment.txt
          echo "" >> comment.txt

          echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
          cat comment.txt >> "$GITHUB_OUTPUT"
          echo 'EOF' >> "$GITHUB_OUTPUT"
      - name: Find existing comment
        uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
        if: steps.generate-comment.outcome == 'success'
        id: find-comment
        with:
          issue-number: ${{ steps.pr-number.outputs.pr-number }}
          comment-author: "github-actions[bot]"
          body-includes: "<!-- generated-comment ecosystem -->"
      - name: Create or update comment
        if: steps.find-comment.outcome == 'success'
        uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
        with:
          comment-id: ${{ steps.find-comment.outputs.comment-id }}
          issue-number: ${{ steps.pr-number.outputs.pr-number }}
          body-path: comment.txt
          edit-mode: replace
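The `pr-number` handoff exists because `workflow_run` workflows have no direct handle on the triggering PR: the first workflow writes the number to a plain file and uploads it as an artifact, and this workflow downloads it and re-exposes it as a step output. A sketch of the two halves (the PR number is illustrative):

```shell
# Writer side (in the workflow that runs on the pull request):
echo "1234" > pr-number            # e.g. ${{ github.event.number }}

# Reader side (here): $(<pr-number) is bash's builtin file read,
# equivalent to $(cat pr-number) without spawning a process.
if [[ -f pr-number ]]; then
  echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
fi
```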


@@ -20,13 +20,15 @@ on:
 jobs:
   mkdocs:
     runs-on: ubuntu-latest
+    env:
+      MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
     steps:
       - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
         with:
           ref: ${{ inputs.ref }}
           persist-credentials: true
-      - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0
+      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
         with:
           python-version: 3.12
@@ -57,12 +59,23 @@ jobs:
           echo "branch_name=update-docs-$branch_display_name-$timestamp" >> "$GITHUB_ENV"
           echo "timestamp=$timestamp" >> "$GITHUB_ENV"
+      - name: "Add SSH key"
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
+        uses: webfactory/ssh-agent@a6f90b1f127823b31d4d4a8d96047790581349bd # v0.9.1
+        with:
+          ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
       - name: "Install Rust toolchain"
         run: rustup show
-      - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+      - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
+      - name: "Install Insiders dependencies"
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
+        run: pip install -r docs/requirements-insiders.txt
       - name: "Install dependencies"
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
         run: pip install -r docs/requirements.txt
       - name: "Copy README File"
@@ -70,8 +83,13 @@ jobs:
           python scripts/transform_readme.py --target mkdocs
           python scripts/generate_mkdocs.py
+      - name: "Build Insiders docs"
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
+        run: mkdocs build --strict -f mkdocs.insiders.yml
       - name: "Build docs"
-        run: mkdocs build --strict -f mkdocs.yml
+        if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
+        run: mkdocs build --strict -f mkdocs.public.yml
       - name: "Clone docs repo"
         run: git clone https://${{ secrets.ASTRAL_DOCS_PAT }}@github.com/astral-sh/docs.git astral-docs


@@ -31,13 +31,13 @@ jobs:
           persist-credentials: false
       - name: "Install Rust toolchain"
         run: rustup target add wasm32-unknown-unknown
-      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+      - uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
         with:
           node-version: 22
           package-manager-cache: false
       - uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
       - name: "Install Node dependencies"
-        run: npm ci --ignore-scripts
+        run: npm ci
         working-directory: playground
       - name: "Run TypeScript checks"
         run: npm run check
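The `npm ci` change above is a supply-chain hardening measure on the main side: `--ignore-scripts` prevents dependencies from executing arbitrary lifecycle hooks (`preinstall`/`postinstall`) during installation, while scripts invoked explicitly still run. Sketch:

```shell
npm ci --ignore-scripts   # install exactly per package-lock.json, no lifecycle hooks
npm run check             # scripts you call explicitly still execute
```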


@@ -22,7 +22,7 @@ jobs:
       id-token: write
     steps:
       - name: "Install uv"
-        uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+        uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
       - uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
         with:
           pattern: wheels-*


@@ -35,13 +35,13 @@ jobs:
           persist-credentials: false
       - name: "Install Rust toolchain"
         run: rustup target add wasm32-unknown-unknown
-      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+      - uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
         with:
           node-version: 22
           package-manager-cache: false
       - uses: jetli/wasm-bindgen-action@20b33e20595891ab1a0ed73145d8a21fc96e7c29 # v0.2.0
       - name: "Install Node dependencies"
-        run: npm ci --ignore-scripts
+        run: npm ci
         working-directory: playground
       - name: "Run TypeScript checks"
         run: npm run check


@@ -45,7 +45,7 @@ jobs:
           jq '.name="@astral-sh/ruff-wasm-${{ matrix.target }}"' crates/ruff_wasm/pkg/package.json > /tmp/package.json
           mv /tmp/package.json crates/ruff_wasm/pkg
       - run: cp LICENSE crates/ruff_wasm/pkg # wasm-pack does not put the LICENSE file in the pkg
-      - uses: actions/setup-node@2028fbc5c25fe9cf00d9f06a71cc4710d4507903 # v6.0.0
+      - uses: actions/setup-node@a0853c24544627f65ddf259abe73b1d18a591444 # v5.0.0
         with:
           node-version: 22
           registry-url: "https://registry.npmjs.org"


@@ -60,7 +60,7 @@ jobs:
     env:
       GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
     steps:
-      - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
+      - uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493
        with:
          persist-credentials: false
          submodules: recursive
@@ -68,9 +68,9 @@ jobs:
        # we specify bash to get pipefail; it guards against the `curl` command
        # failing. otherwise `sh` won't catch that `curl` returned non-0
        shell: bash
-       run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.30.2/cargo-dist-installer.sh | sh"
+       run: "curl --proto '=https' --tlsv1.2 -LsSf https://github.com/axodotdev/cargo-dist/releases/download/v0.30.0/cargo-dist-installer.sh | sh"
      - name: Cache dist
-       uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+       uses: actions/upload-artifact@6027e3dd177782cd8ab9af838c04fd81a07f1d47
        with:
          name: cargo-dist-cache
          path: ~/.cargo/bin/dist
@@ -86,7 +86,7 @@ jobs:
          cat plan-dist-manifest.json
          echo "manifest=$(jq -c "." plan-dist-manifest.json)" >> "$GITHUB_OUTPUT"
      - name: "Upload dist-manifest.json"
-       uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+       uses: actions/upload-artifact@6027e3dd177782cd8ab9af838c04fd81a07f1d47
        with:
          name: artifacts-plan-dist-manifest
          path: plan-dist-manifest.json
@@ -123,19 +123,19 @@ jobs:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      BUILD_MANIFEST_NAME: target/distrib/global-dist-manifest.json
    steps:
-     - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
+     - uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493
        with:
          persist-credentials: false
          submodules: recursive
      - name: Install cached dist
-       uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53
+       uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0
        with:
          name: cargo-dist-cache
          path: ~/.cargo/bin/
      - run: chmod +x ~/.cargo/bin/dist
      # Get all the local artifacts for the global tasks to use (for e.g. checksums)
      - name: Fetch local artifacts
-       uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53
+       uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0
        with:
          pattern: artifacts-*
          path: target/distrib/
@@ -153,7 +153,7 @@ jobs:
          cp dist-manifest.json "$BUILD_MANIFEST_NAME"
      - name: "Upload artifacts"
-       uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+       uses: actions/upload-artifact@6027e3dd177782cd8ab9af838c04fd81a07f1d47
        with:
          name: artifacts-build-global
          path: |
@@ -166,27 +166,27 @@ jobs:
      - custom-build-binaries
      - custom-build-docker
      - build-global-artifacts
-   # Only run if we're "publishing", and only if plan, local and global didn't fail (skipped is fine)
-   if: ${{ always() && needs.plan.result == 'success' && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }}
+   # Only run if we're "publishing", and only if local and global didn't fail (skipped is fine)
+   if: ${{ always() && needs.plan.outputs.publishing == 'true' && (needs.build-global-artifacts.result == 'skipped' || needs.build-global-artifacts.result == 'success') && (needs.custom-build-binaries.result == 'skipped' || needs.custom-build-binaries.result == 'success') && (needs.custom-build-docker.result == 'skipped' || needs.custom-build-docker.result == 'success') }}
    env:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    runs-on: "depot-ubuntu-latest-4"
    outputs:
      val: ${{ steps.host.outputs.manifest }}
    steps:
-     - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
+     - uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493
        with:
          persist-credentials: false
          submodules: recursive
      - name: Install cached dist
-       uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53
+       uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0
        with:
          name: cargo-dist-cache
          path: ~/.cargo/bin/
      - run: chmod +x ~/.cargo/bin/dist
      # Fetch artifacts from scratch-storage
      - name: Fetch artifacts
-       uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53
+       uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0
        with:
          pattern: artifacts-*
          path: target/distrib/
@@ -200,7 +200,7 @@ jobs:
          cat dist-manifest.json
          echo "manifest=$(jq -c "." dist-manifest.json)" >> "$GITHUB_OUTPUT"
      - name: "Upload dist-manifest.json"
-       uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
+       uses: actions/upload-artifact@6027e3dd177782cd8ab9af838c04fd81a07f1d47
        with:
          # Overwrite the previous copy
          name: artifacts-dist-manifest
@@ -250,13 +250,13 @@ jobs:
    env:
      GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
    steps:
-     - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8
+     - uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493
        with:
          persist-credentials: false
          submodules: recursive
      # Create a GitHub Release while uploading all files to it
      - name: "Download GitHub Artifacts"
-       uses: actions/download-artifact@018cc2cf5baa6db3ef3c5f8a56943fffe632ef53
+       uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0
        with:
          pattern: artifacts-*
          path: artifacts


@@ -77,7 +77,7 @@ jobs:
        run: |
          git config --global user.name typeshedbot
          git config --global user.email '<>'
-     - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+     - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
      - name: Sync typeshed stubs
        run: |
          rm -rf "ruff/${VENDORED_TYPESHED}"
@@ -131,7 +131,7 @@ jobs:
        with:
          persist-credentials: true
          ref: ${{ env.UPSTREAM_BRANCH}}
-     - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+     - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
      - name: Setup git
        run: |
          git config --global user.name typeshedbot
@@ -170,7 +170,7 @@ jobs:
        with:
          persist-credentials: true
          ref: ${{ env.UPSTREAM_BRANCH}}
-     - uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+     - uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
      - name: Setup git
        run: |
          git config --global user.name typeshedbot
@@ -198,7 +198,7 @@ jobs:
        run: |
          rm "${VENDORED_TYPESHED}/pyproject.toml"
          git commit -am "Remove pyproject.toml file"
-     - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+     - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
      - name: "Install Rust toolchain"
        if: ${{ success() }}
        run: rustup show
@@ -207,22 +207,17 @@ jobs:
        uses: rui314/setup-mold@725a8794d15fc7563f59595bd9556495c0564878 # v1
      - name: "Install cargo nextest"
        if: ${{ success() }}
-       uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+       uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
        with:
          tool: cargo-nextest
      - name: "Install cargo insta"
        if: ${{ success() }}
-       uses: taiki-e/install-action@3575e532701a5fc614b0c842e4119af4cc5fd16d # v2.62.60
+       uses: taiki-e/install-action@522492a8c115f1b6d4d318581f09638e9442547b # v2.62.21
        with:
          tool: cargo-insta
      - name: Update snapshots
        if: ${{ success() }}
        run: |
-         cargo r \
-           --profile=profiling \
-           -p ty_completion_eval \
-           -- all --tasks ./crates/ty_completion_eval/completion-evaluation-tasks.csv
        # The `cargo insta` docs indicate that `--unreferenced=delete` might be a good option,
        # but from local testing it appears to just revert all changes made by `cargo insta test --accept`.
        #


@@ -33,11 +33,11 @@ jobs:
          persist-credentials: false
      - name: Install the latest version of uv
-       uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+       uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
        with:
          enable-cache: true # zizmor: ignore[cache-poisoning] acceptable risk for CloudFlare pages artifact
-     - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+     - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
        with:
          workspaces: "ruff"
          lookup-only: false # zizmor: ignore[cache-poisoning] acceptable risk for CloudFlare pages artifact
@@ -67,7 +67,7 @@ jobs:
          cd ..
-         uv tool install "git+https://github.com/astral-sh/ecosystem-analyzer@2e1816eac09c90140b1ba51d19afc5f59da460f5"
+         uv tool install "git+https://github.com/astral-sh/ecosystem-analyzer@908758da02a73ef3f3308e1dbb2248510029bbe4"
          ecosystem-analyzer \
            --repository ruff \
@@ -112,6 +112,8 @@ jobs:
          cat diff-statistics.md >> "$GITHUB_STEP_SUMMARY"
+         echo ${{ github.event.number }} > pr-number
      - name: "Deploy to Cloudflare Pages"
        if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
        id: deploy
@@ -129,14 +131,18 @@ jobs:
          echo >> comment.md
          echo "**[Full report with detailed diff]($DEPLOYMENT_URL/diff)** ([timing results]($DEPLOYMENT_URL/timing))" >> comment.md
-     # NOTE: astral-sh-bot uses this artifact to post comments on PRs.
-     # Make sure to update the bot if you rename the artifact.
      - name: Upload comment
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: comment.md
          path: comment.md
+     - name: Upload pr-number
+       uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+       with:
+         name: pr-number
+         path: pr-number
      - name: Upload diagnostics diff
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:

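The `cat diff-statistics.md >> "$GITHUB_STEP_SUMMARY"` line above relies on the job-summary mechanism: anything appended to the file named by `$GITHUB_STEP_SUMMARY` is rendered as Markdown on the workflow-run page. A minimal sketch:

```shell
{
  echo '## Ecosystem diff statistics'   # heading text is illustrative
  cat diff-statistics.md                # file produced earlier in the job
} >> "$GITHUB_STEP_SUMMARY"
```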

@@ -0,0 +1,85 @@
name: PR comment (ty ecosystem-analyzer)

on: # zizmor: ignore[dangerous-triggers]
  workflow_run:
    workflows: [ty ecosystem-analyzer]
    types: [completed]
  workflow_dispatch:
    inputs:
      workflow_run_id:
        description: The ty ecosystem-analyzer workflow that triggers the workflow run
        required: true

jobs:
  comment:
    runs-on: ubuntu-24.04
    permissions:
      pull-requests: write
    steps:
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: Download PR number
        with:
          name: pr-number
          run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
          if_no_artifact_found: ignore
          allow_forks: true
      - name: Parse pull request number
        id: pr-number
        run: |
          if [[ -f pr-number ]]
          then
            echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
          fi
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: "Download comment.md"
        id: download-comment
        if: steps.pr-number.outputs.pr-number
        with:
          name: comment.md
          workflow: ty-ecosystem-analyzer.yaml
          pr: ${{ steps.pr-number.outputs.pr-number }}
          path: pr/comment
          workflow_conclusion: completed
          if_no_artifact_found: ignore
          allow_forks: true
      - name: Generate comment content
        id: generate-comment
        if: ${{ steps.download-comment.outputs.found_artifact == 'true' }}
        run: |
          # Guard against malicious ty ecosystem-analyzer results that symlink to a secret
          # file on this runner
          if [[ -L pr/comment/comment.md ]]
          then
            echo "Error: comment.md cannot be a symlink"
            exit 1
          fi

          # Note: this identifier is used to find the comment to update on subsequent runs
          echo '<!-- generated-comment ty ecosystem-analyzer -->' > comment.md
          echo >> comment.md
          cat pr/comment/comment.md >> comment.md

          echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
          cat comment.md >> "$GITHUB_OUTPUT"
          echo 'EOF' >> "$GITHUB_OUTPUT"
      - name: Find existing comment
        uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
        if: steps.generate-comment.outcome == 'success'
        id: find-comment
        with:
          issue-number: ${{ steps.pr-number.outputs.pr-number }}
          comment-author: "github-actions[bot]"
          body-includes: "<!-- generated-comment ty ecosystem-analyzer -->"
      - name: Create or update comment
        if: steps.find-comment.outcome == 'success'
        uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
        with:
          comment-id: ${{ steps.find-comment.outputs.comment-id }}
          issue-number: ${{ steps.pr-number.outputs.pr-number }}
          body-path: comment.md
          edit-mode: replace
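The symlink guard in `Generate comment content` exists because artifacts from forks are untrusted: a hostile artifact could ship `comment.md` as a symlink pointing at a file on the runner, which the subsequent `cat` would then publish into the PR comment. `[[ -L path ]]` detects this. A sketch of the failure mode it blocks (paths hypothetical):

```shell
ln -s /home/runner/.secret comment.md    # what a malicious artifact could ship
if [[ -L comment.md ]]; then             # reject symlinks before reading
  echo "Error: comment.md cannot be a symlink" >&2
  exit 1
fi
cat comment.md                           # only reached for a regular file
```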


@@ -29,11 +29,11 @@ jobs:
          persist-credentials: false
      - name: Install the latest version of uv
-       uses: astral-sh/setup-uv@1e862dfacbd1d6d858c55d9b792c756523627244 # v7.1.4
+       uses: astral-sh/setup-uv@d0cc045d04ccac9d8b7881df0226f9e82c39688e # v6.8.0
        with:
          enable-cache: true # zizmor: ignore[cache-poisoning] acceptable risk for CloudFlare pages artifact
-     - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+     - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
        with:
          workspaces: "ruff"
          lookup-only: false # zizmor: ignore[cache-poisoning] acceptable risk for CloudFlare pages artifact
@@ -52,7 +52,7 @@ jobs:
          cd ..
-         uv tool install "git+https://github.com/astral-sh/ecosystem-analyzer@2e1816eac09c90140b1ba51d19afc5f59da460f5"
+         uv tool install "git+https://github.com/astral-sh/ecosystem-analyzer@908758da02a73ef3f3308e1dbb2248510029bbe4"
          ecosystem-analyzer \
            --verbose \


@@ -24,7 +24,7 @@ env:
  CARGO_TERM_COLOR: always
  RUSTUP_MAX_RETRIES: 10
  RUST_BACKTRACE: 1
- CONFORMANCE_SUITE_COMMIT: 9f6d8ced7cd1c8d92687a4e9c96d7716452e471e
+ CONFORMANCE_SUITE_COMMIT: d4f39b27a4a47aac8b6d4019e1b0b5b3156fabdc

 jobs:
   typing_conformance:
@@ -45,7 +45,7 @@ jobs:
          path: typing
          persist-credentials: false
-     - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2
+     - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1
        with:
          workspaces: "ruff"
@@ -94,18 +94,21 @@ jobs:
            touch typing_conformance_diagnostics.diff
          fi
+         echo ${{ github.event.number }} > pr-number
          echo "${CONFORMANCE_SUITE_COMMIT}" > conformance-suite-commit
-     # NOTE: astral-sh-bot uses this artifact to post comments on PRs.
-     # Make sure to update the bot if you rename the artifact.
      - name: Upload diff
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: typing_conformance_diagnostics_diff
          path: typing_conformance_diagnostics.diff
-     # NOTE: astral-sh-bot uses this artifact to post comments on PRs.
-     # Make sure to update the bot if you rename the artifact.
+     - name: Upload pr-number
+       uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
+       with:
+         name: pr-number
+         path: pr-number
      - name: Upload conformance suite commit
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:

@@ -0,0 +1,112 @@
name: PR comment (typing_conformance)

on: # zizmor: ignore[dangerous-triggers]
  workflow_run:
    workflows: [Run typing conformance]
    types: [completed]
  workflow_dispatch:
    inputs:
      workflow_run_id:
        description: The typing_conformance workflow that triggers the workflow run
        required: true

jobs:
  comment:
    runs-on: ubuntu-24.04
    permissions:
      pull-requests: write
    steps:
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: Download PR number
        with:
          name: pr-number
          run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
          if_no_artifact_found: ignore
          allow_forks: true
      - name: Parse pull request number
        id: pr-number
        run: |
          if [[ -f pr-number ]]
          then
            echo "pr-number=$(<pr-number)" >> "$GITHUB_OUTPUT"
          fi
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: Download typing conformance suite commit
        with:
          name: conformance-suite-commit
          run_id: ${{ github.event.workflow_run.id || github.event.inputs.workflow_run_id }}
          if_no_artifact_found: ignore
          allow_forks: true
      - uses: dawidd6/action-download-artifact@20319c5641d495c8a52e688b7dc5fada6c3a9fbc # v8
        name: "Download typing_conformance results"
        id: download-typing_conformance_diff
        if: steps.pr-number.outputs.pr-number
        with:
          name: typing_conformance_diagnostics_diff
          workflow: typing_conformance.yaml
          pr: ${{ steps.pr-number.outputs.pr-number }}
          path: pr/typing_conformance_diagnostics_diff
          workflow_conclusion: completed
          if_no_artifact_found: ignore
          allow_forks: true
      - name: Generate comment content
        id: generate-comment
        if: ${{ steps.download-typing_conformance_diff.outputs.found_artifact == 'true' }}
        run: |
          # Guard against malicious typing_conformance results that symlink to a secret
          # file on this runner
          if [[ -L pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff ]]
          then
            echo "Error: typing_conformance_diagnostics.diff cannot be a symlink"
            exit 1
          fi

          # Note this identifier is used to find the comment to update on
          # subsequent runs
          echo '<!-- generated-comment typing_conformance_diagnostics_diff -->' >> comment.txt

          if [[ -f conformance-suite-commit ]]
          then
            echo "## Diagnostic diff on [typing conformance tests](https://github.com/python/typing/tree/$(<conformance-suite-commit)/conformance)" >> comment.txt
          else
            echo "conformance-suite-commit file not found"
            echo "## Diagnostic diff on typing conformance tests" >> comment.txt
          fi

          if [ -s "pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff" ]; then
            echo '<details>' >> comment.txt
            echo '<summary>Changes were detected when running ty on typing conformance tests</summary>' >> comment.txt
            echo '' >> comment.txt
            echo '```diff' >> comment.txt
            cat pr/typing_conformance_diagnostics_diff/typing_conformance_diagnostics.diff >> comment.txt
            echo '```' >> comment.txt
            echo '</details>' >> comment.txt
          else
            echo 'No changes detected when running ty on typing conformance tests ✅' >> comment.txt
          fi

          echo 'comment<<EOF' >> "$GITHUB_OUTPUT"
          cat comment.txt >> "$GITHUB_OUTPUT"
          echo 'EOF' >> "$GITHUB_OUTPUT"
      - name: Find existing comment
        uses: peter-evans/find-comment@3eae4d37986fb5a8592848f6a574fdf654e61f9e # v3.1.0
        if: steps.generate-comment.outcome == 'success'
        id: find-comment
        with:
          issue-number: ${{ steps.pr-number.outputs.pr-number }}
          comment-author: "github-actions[bot]"
          body-includes: "<!-- generated-comment typing_conformance_diagnostics_diff -->"
      - name: Create or update comment
        if: steps.find-comment.outcome == 'success'
        uses: peter-evans/create-or-update-comment@71345be0265236311c031f5c7866368bd1eff043 # v4
        with:
          comment-id: ${{ steps.find-comment.outputs.comment-id }}
          issue-number: ${{ steps.pr-number.outputs.pr-number }}
          body-path: comment.txt
          edit-mode: replace
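The `find-comment`/`create-or-update-comment` pair implements an upsert keyed on the hidden `<!-- generated-comment ... -->` marker, so reruns edit one comment instead of stacking new ones. A hedged sketch of the same logic using the GitHub CLI (`OWNER`, `REPO`, and `PR` are placeholders; the actions above are the authoritative implementation):

```shell
marker='<!-- generated-comment typing_conformance_diagnostics_diff -->'
# Find the id of an existing comment containing the marker, if any.
existing=$(gh api "repos/$OWNER/$REPO/issues/$PR/comments" \
  --jq "[.[] | select(.body | contains(\"$marker\"))][0].id // empty")
if [[ -n "$existing" ]]; then
  gh api -X PATCH "repos/$OWNER/$REPO/issues/comments/$existing" -F body=@comment.txt
else
  gh api -X POST "repos/$OWNER/$REPO/issues/$PR/comments" -F body=@comment.txt
fi
```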

.github/zizmor.yml

@@ -1,8 +1,11 @@
 # Configuration for the zizmor static analysis tool, run via pre-commit in CI
-# https://docs.zizmor.sh/configuration/
+# https://woodruffw.github.io/zizmor/configuration/
 #
 # TODO: can we remove the ignores here so that our workflows are more secure?
 rules:
+  dangerous-triggers:
+    ignore:
+      - pr-comment.yaml
   cache-poisoning:
     ignore:
       - build-docker.yml


@@ -102,7 +102,7 @@ repos:
   # zizmor detects security vulnerabilities in GitHub Actions workflows.
   # Additional configuration for the tool is found in `.github/zizmor.yml`
   - repo: https://github.com/zizmorcore/zizmor-pre-commit
-    rev: v1.16.0
+    rev: v1.15.2
     hooks:
       - id: zizmor


@@ -5,6 +5,5 @@
   "rust-analyzer.check.command": "clippy",
   "search.exclude": {
     "**/*.snap": true
-  },
-  "ty.diagnosticMode": "openFilesOnly"
+  }
 }


@@ -1,307 +1,5 @@
# Changelog
## 0.14.9
Released on 2025-12-11.
### Preview features
- \[`ruff`\] New `RUF100` diagnostics for unused range suppressions ([#21783](https://github.com/astral-sh/ruff/pull/21783))
- \[`pylint`\] Detect subclasses of builtin exceptions (`PLW0133`) ([#21382](https://github.com/astral-sh/ruff/pull/21382))
### Bug fixes
- Fix comment placement in lambda parameters ([#21868](https://github.com/astral-sh/ruff/pull/21868))
- Skip over trivia tokens after re-lexing ([#21895](https://github.com/astral-sh/ruff/pull/21895))
- \[`flake8-bandit`\] Fix false positive when using non-standard `CSafeLoader` path (S506). ([#21830](https://github.com/astral-sh/ruff/pull/21830))
- \[`flake8-bugbear`\] Accept immutable slice default arguments (`B008`) ([#21823](https://github.com/astral-sh/ruff/pull/21823))
### Rule changes
- \[`pydocstyle`\] Suppress `D417` for parameters with `Unpack` annotations ([#21816](https://github.com/astral-sh/ruff/pull/21816))
### Performance
- Use `memchr` for computing line indexes ([#21838](https://github.com/astral-sh/ruff/pull/21838))
### Documentation
- Document `*.pyw` is included by default in preview ([#21885](https://github.com/astral-sh/ruff/pull/21885))
- Document range suppressions, reorganize suppression docs ([#21884](https://github.com/astral-sh/ruff/pull/21884))
- Update mkdocs-material to 9.7.0 (Insiders now free) ([#21797](https://github.com/astral-sh/ruff/pull/21797))
### Contributors
- [@Avasam](https://github.com/Avasam)
- [@MichaReiser](https://github.com/MichaReiser)
- [@charliermarsh](https://github.com/charliermarsh)
- [@amyreese](https://github.com/amyreese)
- [@phongddo](https://github.com/phongddo)
- [@prakhar1144](https://github.com/prakhar1144)
- [@mahiro72](https://github.com/mahiro72)
- [@ntBre](https://github.com/ntBre)
- [@LoicRiegel](https://github.com/LoicRiegel)
## 0.14.8
Released on 2025-12-04.
### Preview features
- \[`flake8-bugbear`\] Catch `yield` expressions within other statements (`B901`) ([#21200](https://github.com/astral-sh/ruff/pull/21200))
- \[`flake8-use-pathlib`\] Mark fixes unsafe for return type changes (`PTH104`, `PTH105`, `PTH109`, `PTH115`) ([#21440](https://github.com/astral-sh/ruff/pull/21440))
### Bug fixes
- Fix syntax error false positives for `await` outside functions ([#21763](https://github.com/astral-sh/ruff/pull/21763))
- \[`flake8-simplify`\] Fix truthiness assumption for non-iterable arguments in tuple/list/set calls (`SIM222`, `SIM223`) ([#21479](https://github.com/astral-sh/ruff/pull/21479))
### Documentation
- Suggest using `--output-file` option in GitLab integration ([#21706](https://github.com/astral-sh/ruff/pull/21706))
### Other changes
- [syntax-error] Default type parameter followed by non-default type parameter ([#21657](https://github.com/astral-sh/ruff/pull/21657))
### Contributors
- [@kieran-ryan](https://github.com/kieran-ryan)
- [@11happy](https://github.com/11happy)
- [@danparizher](https://github.com/danparizher)
- [@ntBre](https://github.com/ntBre)
## 0.14.7
Released on 2025-11-28.
### Preview features
- \[`flake8-bandit`\] Handle string literal bindings in suspicious-url-open-usage (`S310`) ([#21469](https://github.com/astral-sh/ruff/pull/21469))
- \[`pylint`\] Fix `PLR1708` false positives on nested functions ([#21177](https://github.com/astral-sh/ruff/pull/21177))
- \[`pylint`\] Fix suppression for empty dict without tuple key annotation (`PLE1141`) ([#21290](https://github.com/astral-sh/ruff/pull/21290))
- \[`ruff`\] Add rule `RUF066` to detect unnecessary class properties ([#21535](https://github.com/astral-sh/ruff/pull/21535))
- \[`ruff`\] Catch more dummy variable uses (`RUF052`) ([#19799](https://github.com/astral-sh/ruff/pull/19799))
### Bug fixes
- [server] Set severity for non-rule diagnostics ([#21559](https://github.com/astral-sh/ruff/pull/21559))
- \[`flake8-implicit-str-concat`\] Avoid invalid fix in (`ISC003`) ([#21517](https://github.com/astral-sh/ruff/pull/21517))
- \[`parser`\] Fix panic when parsing IPython escape command expressions ([#21480](https://github.com/astral-sh/ruff/pull/21480))
### CLI
- Show partial fixability indicator in statistics output ([#21513](https://github.com/astral-sh/ruff/pull/21513))
### Contributors
- [@mikeleppane](https://github.com/mikeleppane)
- [@senekor](https://github.com/senekor)
- [@ShaharNaveh](https://github.com/ShaharNaveh)
- [@JumboBear](https://github.com/JumboBear)
- [@prakhar1144](https://github.com/prakhar1144)
- [@tsvikas](https://github.com/tsvikas)
- [@danparizher](https://github.com/danparizher)
- [@chirizxc](https://github.com/chirizxc)
- [@AlexWaygood](https://github.com/AlexWaygood)
- [@MichaReiser](https://github.com/MichaReiser)
## 0.14.6
Released on 2025-11-21.
### Preview features
- \[`flake8-bandit`\] Support new PySNMP API paths (`S508`, `S509`) ([#21374](https://github.com/astral-sh/ruff/pull/21374))
### Bug fixes
- Adjust own-line comment placement between branches ([#21185](https://github.com/astral-sh/ruff/pull/21185))
- Avoid syntax error when formatting attribute expressions with outer parentheses, parenthesized value, and trailing comment on value ([#20418](https://github.com/astral-sh/ruff/pull/20418))
- Fix panic when formatting comments in unary expressions ([#21501](https://github.com/astral-sh/ruff/pull/21501))
- Respect `fmt: skip` for compound statements on a single line ([#20633](https://github.com/astral-sh/ruff/pull/20633))
- \[`refurb`\] Fix `FURB103` autofix ([#21454](https://github.com/astral-sh/ruff/pull/21454))
- \[`ruff`\] Fix false positive for complex conversion specifiers in `logging-eager-conversion` (`RUF065`) ([#21464](https://github.com/astral-sh/ruff/pull/21464))
### Rule changes
- \[`ruff`\] Avoid false positive on `ClassVar` reassignment (`RUF012`) ([#21478](https://github.com/astral-sh/ruff/pull/21478))
### CLI
- Render hyperlinks for lint errors ([#21514](https://github.com/astral-sh/ruff/pull/21514))
- Add a `ruff analyze` option to skip over imports in `TYPE_CHECKING` blocks ([#21472](https://github.com/astral-sh/ruff/pull/21472))
### Documentation
- Limit `eglot-format` hook to eglot-managed Python buffers ([#21459](https://github.com/astral-sh/ruff/pull/21459))
- Mention `force-exclude` in "Configuration > Python file discovery" ([#21500](https://github.com/astral-sh/ruff/pull/21500))
### Contributors
- [@ntBre](https://github.com/ntBre)
- [@dylwil3](https://github.com/dylwil3)
- [@gauthsvenkat](https://github.com/gauthsvenkat)
- [@MichaReiser](https://github.com/MichaReiser)
- [@thamer](https://github.com/thamer)
- [@Ruchir28](https://github.com/Ruchir28)
- [@thejcannon](https://github.com/thejcannon)
- [@danparizher](https://github.com/danparizher)
- [@chirizxc](https://github.com/chirizxc)
## 0.14.5
Released on 2025-11-13.
### Preview features
- \[`flake8-simplify`\] Apply `SIM113` when index variable is of type `int` ([#21395](https://github.com/astral-sh/ruff/pull/21395))
- \[`pydoclint`\] Fix false positive when Sphinx directives follow a "Raises" section (`DOC502`) ([#20535](https://github.com/astral-sh/ruff/pull/20535))
- \[`pydoclint`\] Support NumPy-style comma-separated parameters (`DOC102`) ([#20972](https://github.com/astral-sh/ruff/pull/20972))
- \[`refurb`\] Auto-fix annotated assignments (`FURB101`) ([#21278](https://github.com/astral-sh/ruff/pull/21278))
- \[`ruff`\] Ignore `str()` when not used for simple conversion (`RUF065`) ([#21330](https://github.com/astral-sh/ruff/pull/21330))
### Bug fixes
- Fix syntax error false positive on alternative `match` patterns ([#21362](https://github.com/astral-sh/ruff/pull/21362))
- \[`flake8-simplify`\] Fix false positive for iterable initializers with generator arguments (`SIM222`) ([#21187](https://github.com/astral-sh/ruff/pull/21187))
- \[`pyupgrade`\] Fix false positive on relative imports from local `.builtins` module (`UP029`) ([#21309](https://github.com/astral-sh/ruff/pull/21309))
- \[`pyupgrade`\] Consistently set the deprecated tag (`UP035`) ([#21396](https://github.com/astral-sh/ruff/pull/21396))
### Rule changes
- \[`refurb`\] Detect empty f-strings (`FURB105`) ([#21348](https://github.com/astral-sh/ruff/pull/21348))
### CLI
- Add option to provide a reason to `--add-noqa` ([#21294](https://github.com/astral-sh/ruff/pull/21294))
- Add upstream linter URL to `ruff linter --output-format=json` ([#21316](https://github.com/astral-sh/ruff/pull/21316))
- Add color to `--help` ([#21337](https://github.com/astral-sh/ruff/pull/21337))
### Documentation
- Add a new "Opening a PR" section to the contribution guide ([#21298](https://github.com/astral-sh/ruff/pull/21298))
- Added the PyScripter IDE to the list of "Who is using Ruff?" ([#21402](https://github.com/astral-sh/ruff/pull/21402))
- Update PyCharm setup instructions ([#21409](https://github.com/astral-sh/ruff/pull/21409))
- \[`flake8-annotations`\] Add link to `allow-star-arg-any` option (`ANN401`) ([#21326](https://github.com/astral-sh/ruff/pull/21326))
### Other changes
- \[`configuration`\] Improve error message when `line-length` exceeds `u16::MAX` ([#21329](https://github.com/astral-sh/ruff/pull/21329))
### Contributors
- [@njhearp](https://github.com/njhearp)
- [@11happy](https://github.com/11happy)
- [@hugovk](https://github.com/hugovk)
- [@Gankra](https://github.com/Gankra)
- [@ntBre](https://github.com/ntBre)
- [@pyscripter](https://github.com/pyscripter)
- [@danparizher](https://github.com/danparizher)
- [@MichaReiser](https://github.com/MichaReiser)
- [@henryiii](https://github.com/henryiii)
- [@charliecloudberry](https://github.com/charliecloudberry)
## 0.14.4
Released on 2025-11-06.
### Preview features
- [formatter] Allow newlines after function headers without docstrings ([#21110](https://github.com/astral-sh/ruff/pull/21110))
- [formatter] Avoid extra parentheses for long `match` patterns with `as` captures ([#21176](https://github.com/astral-sh/ruff/pull/21176))
- \[`refurb`\] Expand fix safety for keyword arguments and `Decimal`s (`FURB164`) ([#21259](https://github.com/astral-sh/ruff/pull/21259))
- \[`refurb`\] Preserve argument ordering in autofix (`FURB103`) ([#20790](https://github.com/astral-sh/ruff/pull/20790))
### Bug fixes
- [server] Fix missing diagnostics for notebooks ([#21156](https://github.com/astral-sh/ruff/pull/21156))
- \[`flake8-bugbear`\] Ignore non-NFKC attribute names in `B009` and `B010` ([#21131](https://github.com/astral-sh/ruff/pull/21131))
- \[`refurb`\] Fix false negative for underscores before sign in `Decimal` constructor (`FURB157`) ([#21190](https://github.com/astral-sh/ruff/pull/21190))
- \[`ruff`\] Fix false positives on starred arguments (`RUF057`) ([#21256](https://github.com/astral-sh/ruff/pull/21256))
### Rule changes
- \[`airflow`\] extend deprecated argument `concurrency` in `airflow..DAG` (`AIR301`) ([#21220](https://github.com/astral-sh/ruff/pull/21220))
### Documentation
- Improve `extend` docs ([#21135](https://github.com/astral-sh/ruff/pull/21135))
- \[`flake8-comprehensions`\] Fix typo in `C416` documentation ([#21184](https://github.com/astral-sh/ruff/pull/21184))
- Revise Ruff setup instructions for Zed editor ([#20935](https://github.com/astral-sh/ruff/pull/20935))
### Other changes
- Make `ruff analyze graph` work with jupyter notebooks ([#21161](https://github.com/astral-sh/ruff/pull/21161))
### Contributors
- [@chirizxc](https://github.com/chirizxc)
- [@Lee-W](https://github.com/Lee-W)
- [@musicinmybrain](https://github.com/musicinmybrain)
- [@MichaReiser](https://github.com/MichaReiser)
- [@tjkuson](https://github.com/tjkuson)
- [@danparizher](https://github.com/danparizher)
- [@renovate](https://github.com/renovate)
- [@ntBre](https://github.com/ntBre)
- [@gauthsvenkat](https://github.com/gauthsvenkat)
- [@LoicRiegel](https://github.com/LoicRiegel)
## 0.14.3
Released on 2025-10-30.
### Preview features
- Respect `--output-format` with `--watch` ([#21097](https://github.com/astral-sh/ruff/pull/21097))
- \[`pydoclint`\] Fix false positive on explicit exception re-raising (`DOC501`, `DOC502`) ([#21011](https://github.com/astral-sh/ruff/pull/21011))
- \[`pyflakes`\] Revert to stable behavior if imports for module lie in alternate branches for `F401` ([#20878](https://github.com/astral-sh/ruff/pull/20878))
- \[`pylint`\] Implement `stop-iteration-return` (`PLR1708`) ([#20733](https://github.com/astral-sh/ruff/pull/20733))
- \[`ruff`\] Add support for additional eager conversion patterns (`RUF065`) ([#20657](https://github.com/astral-sh/ruff/pull/20657))
### Bug fixes
- Fix finding keyword range for clause header after statement ending with semicolon ([#21067](https://github.com/astral-sh/ruff/pull/21067))
- Fix syntax error false positive on nested alternative patterns ([#21104](https://github.com/astral-sh/ruff/pull/21104))
- \[`ISC001`\] Fix panic when string literals are unclosed ([#21034](https://github.com/astral-sh/ruff/pull/21034))
- \[`flake8-django`\] Apply `DJ001` to annotated fields ([#20907](https://github.com/astral-sh/ruff/pull/20907))
- \[`flake8-pyi`\] Fix `PYI034` to not trigger on metaclasses (`PYI034`) ([#20881](https://github.com/astral-sh/ruff/pull/20881))
- \[`flake8-type-checking`\] Fix `TC003` false positive with `future-annotations` ([#21125](https://github.com/astral-sh/ruff/pull/21125))
- \[`pyflakes`\] Fix false positive for `__class__` in lambda expressions within class definitions (`F821`) ([#20564](https://github.com/astral-sh/ruff/pull/20564))
- \[`pyupgrade`\] Fix false positive for `TypeVar` with default on Python \<3.13 (`UP046`,`UP047`) ([#21045](https://github.com/astral-sh/ruff/pull/21045))
### Rule changes
- Add missing docstring sections to the numpy list ([#20931](https://github.com/astral-sh/ruff/pull/20931))
- \[`airflow`\] Extend `airflow.models..Param` check (`AIR311`) ([#21043](https://github.com/astral-sh/ruff/pull/21043))
- \[`airflow`\] Warn that `airflow....DAG.create_dagrun` has been removed (`AIR301`) ([#21093](https://github.com/astral-sh/ruff/pull/21093))
- \[`refurb`\] Preserve digit separators in `Decimal` constructor (`FURB157`) ([#20588](https://github.com/astral-sh/ruff/pull/20588))
### Server
- Avoid sending an unnecessary "clear diagnostics" message for clients supporting pull diagnostics ([#21105](https://github.com/astral-sh/ruff/pull/21105))
### Documentation
- \[`flake8-bandit`\] Fix correct example for `S308` ([#21128](https://github.com/astral-sh/ruff/pull/21128))
### Other changes
- Clearer error message when `line-length` goes beyond threshold ([#21072](https://github.com/astral-sh/ruff/pull/21072))
### Contributors
- [@danparizher](https://github.com/danparizher)
- [@jvacek](https://github.com/jvacek)
- [@ntBre](https://github.com/ntBre)
- [@augustelalande](https://github.com/augustelalande)
- [@prakhar1144](https://github.com/prakhar1144)
- [@TaKO8Ki](https://github.com/TaKO8Ki)
- [@dylwil3](https://github.com/dylwil3)
- [@fatelei](https://github.com/fatelei)
- [@ShaharNaveh](https://github.com/ShaharNaveh)
- [@Lee-W](https://github.com/Lee-W)
## 0.14.2
Released on 2025-10-23.


@@ -280,57 +280,15 @@ Note that plugin-specific configuration options are defined in their own modules
Finally, regenerate the documentation and generated code with `cargo dev generate-all`.
### Opening a PR
After you finish your changes, the next step is to open a PR. By default, two
sections will be filled into the PR body: the summary and the test plan.
#### The summary
The summary is intended to give us as maintainers information about your PR.
This should typically include a link to the relevant issue(s) you're addressing
in your PR, as well as a summary of the issue and your approach to fixing it. If
you have any questions about your approach or design, or if you considered
alternative approaches, that can also be helpful to include.
AI can be helpful in generating both the code and summary of your PR, but a
successful contribution should still be carefully reviewed by you and the
summary editorialized before submitting a PR. A great summary is thorough but
also succinct and gives us the context we need to review your PR.
You can find examples of excellent issues and PRs by searching for the
[`great writeup`](https://github.com/astral-sh/ruff/issues?q=label%3A%22great%20writeup%22)
label.
#### The test plan
The test plan is likely to be shorter than the summary and can be as simple as
"Added new snapshot tests for `RUF123`," at least for rule bugs. For LSP or some
types of CLI changes, in particular, it can also be helpful to include
screenshots or recordings of your change in action.
#### Ecosystem report
After opening the PR, an ecosystem report will be run as part of CI. This shows
a diff of linter and formatter behavior before and after the changes in your PR.
Going through these changes and reporting your findings in the PR summary or an
additional comment helps us review your PR more efficiently. It's also a great
way to find new test cases to incorporate into your PR if you identify any
issues.
#### PR status
To help us know when your PR is ready for review again, please either move your
PR back to a draft while working on it (marking it ready for review afterwards
will ping the previous reviewers) or explicitly re-request a review. This helps
us to avoid re-reviewing a PR while you're still working on it and also to
prioritize PRs that are definitely ready for review.
You can also thumbs-up or mark as resolved any comments we leave to let us know
you addressed them.
## MkDocs
> [!NOTE]
>
> The documentation uses Material for MkDocs Insiders, which is closed-source software.
> This means only members of the Astral organization can preview the documentation exactly as it
> will appear in production.
> Outside contributors can still preview the documentation, but there will be some differences. Consult [the Material for MkDocs documentation](https://squidfunk.github.io/mkdocs-material/insiders/benefits/#features) for which features are exclusively available in the insiders version.
To preview any changes to the documentation locally:

1. Install the [Rust toolchain](https://www.rust-lang.org/tools/install).
@@ -344,7 +302,11 @@ To preview any changes to the documentation locally:
1. Run the development server with:

   ```shell
-   uvx --with-requirements docs/requirements.txt -- mkdocs serve -f mkdocs.yml
+   # For contributors.
+   uvx --with-requirements docs/requirements.txt -- mkdocs serve -f mkdocs.public.yml
+   # For members of the Astral org, which has access to MkDocs Insiders via sponsorship.
+   uvx --with-requirements docs/requirements-insiders.txt -- mkdocs serve -f mkdocs.insiders.yml
   ```

The documentation should then be available locally at

Cargo.lock (generated): file diff suppressed because it is too large.

@@ -5,7 +5,7 @@ resolver = "2"
 [workspace.package]
 # Please update rustfmt.toml when bumping the Rust edition
 edition = "2024"
-rust-version = "1.90"
+rust-version = "1.88"
 homepage = "https://docs.astral.sh/ruff"
 documentation = "https://docs.astral.sh/ruff"
 repository = "https://github.com/astral-sh/ruff"
@@ -81,15 +81,14 @@ compact_str = "0.9.0"
 criterion = { version = "0.7.0", default-features = false }
 crossbeam = { version = "0.8.4" }
 dashmap = { version = "6.0.1" }
-datatest-stable = { version = "0.3.3" }
 dir-test = { version = "0.4.0" }
 dunce = { version = "1.0.5" }
 drop_bomb = { version = "0.1.5" }
-etcetera = { version = "0.11.0" }
+etcetera = { version = "0.10.0" }
 fern = { version = "0.7.0" }
 filetime = { version = "0.2.23" }
 getrandom = { version = "0.3.1" }
-get-size2 = { version = "0.7.3", features = [
+get-size2 = { version = "0.7.0", features = [
     "derive",
     "smallvec",
     "hashbrown",
@@ -104,7 +103,7 @@ hashbrown = { version = "0.16.0", default-features = false, features = [
     "inline-more",
 ] }
 heck = "0.5.0"
-ignore = { version = "0.4.24" }
+ignore = { version = "0.4.22" }
 imara-diff = { version = "0.1.5" }
 imperative = { version = "1.0.4" }
 indexmap = { version = "2.6.0" }
@@ -125,12 +124,12 @@ lsp-server = { version = "0.7.6" }
 lsp-types = { git = "https://github.com/astral-sh/lsp-types.git", rev = "3512a9f", features = [
     "proposed",
 ] }
-matchit = { version = "0.9.0" }
+matchit = { version = "0.8.1" }
 memchr = { version = "2.7.1" }
 mimalloc = { version = "0.1.39" }
 natord = { version = "1.0.9" }
 notify = { version = "8.0.0" }
-ordermap = { version = "1.0.0" }
+ordermap = { version = "0.5.0" }
 path-absolutize = { version = "3.1.1" }
 path-slash = { version = "0.2.1" }
 pathdiff = { version = "0.2.1" }
@@ -147,7 +146,7 @@ regex-automata = { version = "0.4.9" }
 rustc-hash = { version = "2.0.0" }
 rustc-stable-hash = { version = "0.1.2" }
 # When updating salsa, make sure to also update the revision in `fuzz/Cargo.toml`
-salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "55e5e7d32fa3fc189276f35bb04c9438f9aedbd1", default-features = false, features = [
+salsa = { git = "https://github.com/salsa-rs/salsa.git", rev = "d38145c29574758de7ffbe8a13cd4584c3b09161", default-features = false, features = [
     "compact_str",
     "macros",
     "salsa_unstable",
@@ -174,7 +173,6 @@ snapbox = { version = "0.6.0", features = [
 static_assertions = "1.1.0"
 strum = { version = "0.27.0", features = ["strum_macros"] }
 strum_macros = { version = "0.27.0" }
-supports-hyperlinks = { version = "3.1.0" }
 syn = { version = "2.0.55" }
 tempfile = { version = "3.9.0" }
 test-case = { version = "3.3.1" }
@@ -273,12 +271,6 @@ large_stack_arrays = "allow"
 lto = "fat"
 codegen-units = 16

-# Profile to build a minimally sized binary for ruff/ty
-[profile.minimal-size]
-inherits = "release"
-opt-level = "z"
-codegen-units = 1
-
 # Some crates don't change as much but benefit more from
 # more expensive optimization passes, so we selectively
 # decrease codegen-units in some cases.
@ -57,11 +57,8 @@ Ruff is extremely actively developed and used in major open-source projects like
...and [many more](#whos-using-ruff).
Ruff is backed by [Astral](https://astral.sh), the creators of
[uv](https://github.com/astral-sh/uv) and [ty](https://github.com/astral-sh/ty).
Read the [launch post](https://astral.sh/blog/announcing-astral-the-company-behind-ruff), or the
original [project announcement](https://notes.crmarsh.com/python-tooling-could-be-much-much-faster).
Ruff is backed by [Astral](https://astral.sh). Read the [launch post](https://astral.sh/blog/announcing-astral-the-company-behind-ruff),
or the original [project announcement](https://notes.crmarsh.com/python-tooling-could-be-much-much-faster).
## Testimonials
@ -150,8 +147,8 @@ curl -LsSf https://astral.sh/ruff/install.sh | sh
powershell -c "irm https://astral.sh/ruff/install.ps1 | iex"
# For a specific version.
curl -LsSf https://astral.sh/ruff/0.14.9/install.sh | sh
curl -LsSf https://astral.sh/ruff/0.14.2/install.sh | sh
powershell -c "irm https://astral.sh/ruff/0.14.9/install.ps1 | iex"
powershell -c "irm https://astral.sh/ruff/0.14.2/install.ps1 | iex"
```
You can also install Ruff via [Homebrew](https://formulae.brew.sh/formula/ruff), [Conda](https://anaconda.org/conda-forge/ruff),
@ -184,7 +181,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com/) hook via [`ruff
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.14.9
rev: v0.14.2
hooks:
# Run the linter.
- id: ruff-check
@ -494,7 +491,6 @@ Ruff is used by a number of major open-source projects and companies, including:
- [PyTorch](https://github.com/pytorch/pytorch)
- [Pydantic](https://github.com/pydantic/pydantic)
- [Pylint](https://github.com/PyCQA/pylint)
- [PyScripter](https://github.com/pyscripter/pyscripter)
- [PyVista](https://github.com/pyvista/pyvista)
- [Reflex](https://github.com/reflex-dev/reflex)
- [River](https://github.com/online-ml/river)
@ -1,6 +1,6 @@
[package]
name = "ruff"
version = "0.14.9"
version = "0.14.2"
publish = true
authors = { workspace = true }
edition = { workspace = true }
@ -7,10 +7,8 @@ use std::sync::Arc;
use crate::commands::completions::config::{OptionString, OptionStringParser};
use anyhow::bail;
use clap::builder::Styles;
use clap::builder::styling::{AnsiColor, Effects};
use clap::builder::{TypedValueParser, ValueParserFactory};
use clap::{Parser, Subcommand};
use clap::{Parser, Subcommand, command};
use colored::Colorize;
use itertools::Itertools;
use path_absolutize::path_dedot;
@ -80,13 +78,6 @@ impl GlobalConfigArgs {
}
}
// Configures Clap v3-style help menu colors
const STYLES: Styles = Styles::styled()
.header(AnsiColor::Green.on_default().effects(Effects::BOLD))
.usage(AnsiColor::Green.on_default().effects(Effects::BOLD))
.literal(AnsiColor::Cyan.on_default().effects(Effects::BOLD))
.placeholder(AnsiColor::Cyan.on_default());
#[derive(Debug, Parser)]
#[command(
author,
@ -95,7 +86,6 @@ const STYLES: Styles = Styles::styled()
after_help = "For help with a specific command, see: `ruff help <command>`."
)]
#[command(version)]
#[command(styles = STYLES)]
pub struct Args {
#[command(subcommand)]
pub(crate) command: Command,
@ -167,7 +157,6 @@ pub enum AnalyzeCommand {
}
#[derive(Clone, Debug, clap::Parser)]
#[expect(clippy::struct_excessive_bools)]
pub struct AnalyzeGraphCommand {
/// List of files or directories to include.
#[clap(help = "List of files or directories to include [default: .]")]
@ -194,12 +183,6 @@ pub struct AnalyzeGraphCommand {
/// Path to a virtual environment to use for resolving additional dependencies
#[arg(long)]
python: Option<PathBuf>,
/// Include imports that are only used for type checking (i.e., imports within `if TYPE_CHECKING:` blocks).
/// Use `--no-type-checking-imports` to exclude imports that are only used for type checking.
#[arg(long, overrides_with("no_type_checking_imports"))]
type_checking_imports: bool,
#[arg(long, overrides_with("type_checking_imports"), hide = true)]
no_type_checking_imports: bool,
}
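The removed flag pair above follows clap's `overrides_with` idiom: two mutually-cancelling boolean switches that are later collapsed into an `Option<bool>` by `resolve_bool_arg`. A minimal sketch of that pattern, with illustrative names rather than the real ruff options:

```rust
use clap::Parser;

#[derive(Debug, Parser)]
struct Cli {
    /// Enable the feature; a later `--no-feature` wins instead.
    #[arg(long, overrides_with = "no_feature")]
    feature: bool,
    #[arg(long, overrides_with = "feature", hide = true)]
    no_feature: bool,
}

/// Collapse the pair into `None` (unset), `Some(true)`, or `Some(false)`,
/// mirroring the `resolve_bool_arg` call sites in this diff.
fn resolve_bool_arg(yes: bool, no: bool) -> Option<bool> {
    match (yes, no) {
        (true, false) => Some(true),
        (false, true) => Some(false),
        (false, false) => None,
        (true, true) => unreachable!("`overrides_with` keeps only the last flag"),
    }
}

fn main() {
    let cli = Cli::parse();
    // `None` falls back to the configuration file / default.
    println!("{:?}", resolve_bool_arg(cli.feature, cli.no_feature));
}
```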
// The `Parser` derive is for ruff_dev, for ruff `Args` would be sufficient
@ -422,13 +405,8 @@ pub struct CheckCommand {
)]
pub statistics: bool,
/// Enable automatic additions of `noqa` directives to failing lines.
/// Optionally provide a reason to append after the codes.
#[arg(
long,
value_name = "REASON",
default_missing_value = "",
num_args = 0..=1,
require_equals = true,
// conflicts_with = "add_noqa",
conflicts_with = "show_files",
conflicts_with = "show_settings",
@ -440,7 +418,7 @@ pub struct CheckCommand {
conflicts_with = "fix",
conflicts_with = "diff",
)]
pub add_noqa: Option<String>,
pub add_noqa: bool,
/// See the files Ruff will be run against with the current settings.
#[arg(
long,
@ -846,10 +824,6 @@ impl AnalyzeGraphCommand {
string_imports_min_dots: self.min_dots,
preview: resolve_bool_arg(self.preview, self.no_preview).map(PreviewMode::from),
target_version: self.target_version.map(ast::PythonVersion::from),
type_checking_imports: resolve_bool_arg(
self.type_checking_imports,
self.no_type_checking_imports,
),
..ExplicitConfigOverrides::default()
};
@ -1073,7 +1047,7 @@ Possible choices:
/// etc.).
#[expect(clippy::struct_excessive_bools)]
pub struct CheckArguments {
pub add_noqa: Option<String>,
pub add_noqa: bool,
pub diff: bool,
pub exit_non_zero_on_fix: bool,
pub exit_zero: bool,
@ -1346,7 +1320,6 @@ struct ExplicitConfigOverrides {
extension: Option<Vec<ExtensionPair>>,
detect_string_imports: Option<bool>,
string_imports_min_dots: Option<usize>,
type_checking_imports: Option<bool>,
}
impl ConfigurationTransformer for ExplicitConfigOverrides {
@ -1437,9 +1410,6 @@ impl ConfigurationTransformer for ExplicitConfigOverrides {
if let Some(string_imports_min_dots) = &self.string_imports_min_dots {
config.analyze.string_imports_min_dots = Some(*string_imports_min_dots);
}
if let Some(type_checking_imports) = &self.type_checking_imports {
config.analyze.type_checking_imports = Some(*type_checking_imports);
}
config
}
@ -21,7 +21,6 @@ pub(crate) fn add_noqa(
files: &[PathBuf],
pyproject_config: &PyprojectConfig,
config_arguments: &ConfigArguments,
reason: Option<&str>,
) -> Result<usize> {
// Collect all the files to check.
let start = Instant::now();
@ -77,14 +76,7 @@ pub(crate) fn add_noqa(
return None;
}
};
match add_noqa_to_path(
path,
package,
&source_kind,
source_type,
&settings.linter,
reason,
) {
match add_noqa_to_path(path, package, &source_kind, source_type, &settings.linter) {
Ok(count) => Some(count),
Err(e) => {
error!("Failed to add noqa to {}: {e}", path.display());
@ -7,7 +7,6 @@ use path_absolutize::CWD;
use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_graph::{Direction, ImportMap, ModuleDb, ModuleImports};
use ruff_linter::package::PackageRoot;
use ruff_linter::source_kind::SourceKind;
use ruff_linter::{warn_user, warn_user_once};
use ruff_python_ast::{PySourceType, SourceType};
use ruff_workspace::resolver::{ResolvedFile, match_exclusion, python_files_in_path};
@ -105,7 +104,6 @@ pub(crate) fn analyze_graph(
let settings = resolver.resolve(path);
let string_imports = settings.analyze.string_imports;
let include_dependencies = settings.analyze.include_dependencies.get(path).cloned();
let type_checking_imports = settings.analyze.type_checking_imports;
// Skip excluded files.
if (settings.file_resolver.force_exclude || !resolved_file.is_root())
@ -129,6 +127,10 @@ pub(crate) fn analyze_graph(
},
Some(language) => PySourceType::from(language),
};
if matches!(source_type, PySourceType::Ipynb) {
debug!("Ignoring Jupyter notebook: {}", path.display());
continue;
}
// Convert to system paths.
let Ok(package) = package.map(SystemPathBuf::from_path_buf).transpose() else {
@ -145,31 +147,9 @@ pub(crate) fn analyze_graph(
let root = root.clone();
let result = inner_result.clone();
scope.spawn(move |_| {
// Extract source code (handles both .py and .ipynb files)
let source_kind = match SourceKind::from_path(path.as_std_path(), source_type) {
Ok(Some(source_kind)) => source_kind,
Ok(None) => {
debug!("Skipping non-Python notebook: {path}");
return;
}
Err(err) => {
warn!("Failed to read source for {path}: {err}");
return;
}
};
let source_code = source_kind.source_code();
// Identify any imports via static analysis.
let mut imports = ModuleImports::detect(
&db,
source_code,
source_type,
&path,
package.as_deref(),
string_imports,
type_checking_imports,
)
let mut imports =
ModuleImports::detect(&db, &path, package.as_deref(), string_imports)
.unwrap_or_else(|err| {
warn!("Failed to generate import map for {path}: {err}");
ModuleImports::default()
@ -370,7 +370,7 @@ pub(crate) fn format_source(
let line_index = LineIndex::from_source_text(unformatted);
let byte_range = range.to_text_range(unformatted, &line_index);
format_range(unformatted, byte_range, options).map(|formatted_range| {
let mut formatted = unformatted.clone();
let mut formatted = unformatted.to_string();
formatted.replace_range(
std::ops::Range::<usize>::from(formatted_range.source_range()),
formatted_range.as_code(),
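`format_range` formats only the requested byte range, and the caller splices the result back into a copy of the source with `String::replace_range`. A standalone sketch of that splice, with made-up strings:

```rust
fn main() {
    let unformatted = String::from("x = 1\ny   =   2\nz = 3\n");
    // Half-open byte range covering the middle line.
    let byte_range = 6..15;
    let formatted_range = "y = 2";

    let mut formatted = unformatted.clone();
    // Removes the range and inserts the replacement in place; panics if the
    // bounds do not fall on `char` boundaries.
    formatted.replace_range(byte_range, formatted_range);
    assert_eq!(formatted, "x = 1\ny = 2\nz = 3\n");
}
```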
@ -16,8 +16,6 @@ struct LinterInfo {
prefix: &'static str,
name: &'static str,
#[serde(skip_serializing_if = "Option::is_none")]
url: Option<&'static str>,
#[serde(skip_serializing_if = "Option::is_none")]
categories: Option<Vec<LinterCategoryInfo>>,
}
@ -52,7 +50,6 @@ pub(crate) fn linter(format: HelpFormat) -> Result<()> {
.map(|linter_info| LinterInfo {
prefix: linter_info.common_prefix(),
name: linter_info.name(),
url: linter_info.url(),
categories: linter_info.upstream_categories().map(|cats| {
cats.iter()
.map(|c| LinterCategoryInfo {
@ -9,7 +9,7 @@ use std::sync::mpsc::channel;
use anyhow::Result;
use clap::CommandFactory;
use colored::Colorize;
use log::error;
use log::{error, warn};
use notify::{RecursiveMode, Watcher, recommended_watcher};
use args::{GlobalConfigArgs, ServerCommand};
@ -319,20 +319,12 @@ pub fn check(args: CheckCommand, global_options: GlobalConfigArgs) -> Result<Exi
warn_user!("Detected debug build without --no-cache."); warn_user!("Detected debug build without --no-cache.");
} }
if let Some(reason) = &cli.add_noqa { if cli.add_noqa {
if !fix_mode.is_generate() { if !fix_mode.is_generate() {
warn_user!("--fix is incompatible with --add-noqa."); warn_user!("--fix is incompatible with --add-noqa.");
} }
if reason.contains(['\n', '\r']) {
return Err(anyhow::anyhow!(
"--add-noqa <reason> cannot contain newline characters"
));
}
let reason_opt = (!reason.is_empty()).then_some(reason.as_str());
let modifications = let modifications =
commands::add_noqa::add_noqa(&files, &pyproject_config, &config_arguments, reason_opt)?; commands::add_noqa::add_noqa(&files, &pyproject_config, &config_arguments)?;
if modifications > 0 && config_arguments.log_level >= LogLevel::Default { if modifications > 0 && config_arguments.log_level >= LogLevel::Default {
let s = if modifications == 1 { "" } else { "s" }; let s = if modifications == 1 { "" } else { "s" };
#[expect(clippy::print_stderr)] #[expect(clippy::print_stderr)]
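On the main side, `--add-noqa` takes an optional reason: `num_args = 0..=1` with an empty `default_missing_value` makes the bare flag parse as `Some("")`, which is mapped back to `None` before the directives are written, and a reason containing a newline is rejected up front. A hedged sketch of that optional-value flag, outside the real CLI:

```rust
use clap::Parser;

#[derive(Debug, Parser)]
struct Cli {
    /// `--add-noqa` -> Some(""), `--add-noqa=TODO: fix` -> Some("TODO: fix"),
    /// omitted -> None.
    #[arg(
        long,
        value_name = "REASON",
        default_missing_value = "",
        num_args = 0..=1,
        require_equals = true
    )]
    add_noqa: Option<String>,
}

fn main() -> Result<(), String> {
    let cli = Cli::parse();
    if let Some(reason) = &cli.add_noqa {
        // The reason is appended verbatim after the codes, so it must stay on one line.
        if reason.contains(['\n', '\r']) {
            return Err("--add-noqa <reason> cannot contain newline characters".into());
        }
        let reason_opt: Option<&str> = (!reason.is_empty()).then_some(reason.as_str());
        println!("adding noqa directives, reason: {reason_opt:?}");
    }
    Ok(())
}
```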
@ -9,7 +9,9 @@ use itertools::{Itertools, iterate};
use ruff_linter::linter::FixTable;
use serde::Serialize;
use ruff_db::diagnostic::{Diagnostic, DisplayDiagnosticConfig, SecondaryCode};
use ruff_db::diagnostic::{
Diagnostic, DiagnosticFormat, DisplayDiagnosticConfig, DisplayDiagnostics, SecondaryCode,
};
use ruff_linter::fs::relativize_path;
use ruff_linter::logging::LogLevel;
use ruff_linter::message::{EmitterContext, render_diagnostics};
@ -34,21 +36,9 @@ struct ExpandedStatistics<'a> {
code: Option<&'a SecondaryCode>,
name: &'static str,
count: usize,
#[serde(rename = "fixable")]
fixable: bool,
all_fixable: bool,
fixable_count: usize,
}
impl ExpandedStatistics<'_> {
fn any_fixable(&self) -> bool {
self.fixable_count > 0
}
}
/// Accumulator type for grouping diagnostics by code.
/// Format: (`code`, `representative_diagnostic`, `total_count`, `fixable_count`)
type DiagnosticGroup<'a> = (Option<&'a SecondaryCode>, &'a Diagnostic, usize, usize);
pub(crate) struct Printer {
format: OutputFormat,
log_level: LogLevel,
@ -145,7 +135,7 @@ impl Printer {
if fixables.applicable > 0 {
writeln!(
writer,
"{fix_prefix} {} fixable with the `--fix` option.",
"{fix_prefix} {} fixable with the --fix option.",
fixables.applicable
)?;
}
@ -268,41 +258,35 @@ impl Printer {
diagnostics: &Diagnostics,
writer: &mut dyn Write,
) -> Result<()> {
let required_applicability = self.unsafe_fixes.required_applicability();
let statistics: Vec<ExpandedStatistics> = diagnostics
.inner
.iter()
.sorted_by_key(|diagnostic| diagnostic.secondary_code())
.fold(vec![], |mut acc: Vec<DiagnosticGroup>, diagnostic| {
let is_fixable = diagnostic
.fix()
.is_some_and(|fix| fix.applies(required_applicability));
let code = diagnostic.secondary_code();
if let Some((prev_code, _prev_message, count, fixable_count)) = acc.last_mut() {
if *prev_code == code {
*count += 1;
if is_fixable {
*fixable_count += 1;
}
return acc;
}
}
acc.push((code, diagnostic, 1, usize::from(is_fixable)));
acc
})
.iter()
.map(
|&(code, message, count, fixable_count)| ExpandedStatistics {
code,
name: message.name(),
count,
// Backward compatibility: `fixable` is true only when all violations are fixable.
// See: https://github.com/astral-sh/ruff/pull/21513
all_fixable: fixable_count == count,
fixable_count,
},
)
.sorted_by_key(|statistic| Reverse(statistic.count))
.collect();
let statistics: Vec<ExpandedStatistics> = diagnostics
.inner
.iter()
.map(|message| (message.secondary_code(), message))
.sorted_by_key(|(code, message)| (*code, message.fixable()))
.fold(
vec![],
|mut acc: Vec<((Option<&SecondaryCode>, &Diagnostic), usize)>, (code, message)| {
if let Some(((prev_code, _prev_message), count)) = acc.last_mut() {
if *prev_code == code {
*count += 1;
return acc;
}
}
acc.push(((code, message), 1));
acc
},
)
.iter()
.map(|&((code, message), count)| ExpandedStatistics {
code,
name: message.name(),
count,
fixable: if let Some(fix) = message.fix() {
fix.applies(self.unsafe_fixes.required_applicability())
} else {
false
},
})
.sorted_by_key(|statistic| Reverse(statistic.count))
.collect();
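The fold on the main side depends on the diagnostics being pre-sorted by code, so each code forms a contiguous run and only the last accumulator entry is ever updated. The same single-pass grouping on plain data, as a sketch:

```rust
fn main() {
    // (code, is_fixable) pairs, already sorted by code.
    let diagnostics = [("E501", false), ("F401", true), ("F401", true), ("F841", false)];

    // Accumulator entries: (code, total_count, fixable_count).
    let mut groups: Vec<(&str, usize, usize)> = Vec::new();
    for (code, is_fixable) in diagnostics {
        if let Some((prev_code, count, fixable_count)) = groups.last_mut() {
            if *prev_code == code {
                *count += 1;
                *fixable_count += usize::from(is_fixable);
                continue;
            }
        }
        groups.push((code, 1, usize::from(is_fixable)));
    }

    assert_eq!(groups, vec![("E501", 1, 0), ("F401", 2, 2), ("F841", 1, 0)]);
    // "All fixable" vs. "partially fixable" falls out of the two counters.
    for (code, count, fixable) in &groups {
        println!("{code}: {count} total, {fixable} fixable, all_fixable={}", fixable == count);
    }
}
```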
@ -326,14 +310,13 @@ impl Printer {
.map(|statistic| statistic.code.map_or(0, |s| s.len()))
.max()
.unwrap();
let any_fixable = statistics.iter().any(ExpandedStatistics::any_fixable);
let any_fixable = statistics.iter().any(|statistic| statistic.fixable);
let all_fixable = format!("[{}] ", "*".cyan());
let fixable = format!("[{}] ", "*".cyan());
let partially_fixable = format!("[{}] ", "-".cyan());
let unfixable = "[ ] ";
// By default, we mimic Flake8's `--statistics` format.
for statistic in &statistics {
for statistic in statistics {
writeln!(
writer,
"{:>count_width$}\t{:<code_width$}\t{}{}",
@ -345,10 +328,8 @@ impl Printer {
.red()
.bold(),
if any_fixable {
if statistic.all_fixable {
if statistic.fixable {
&all_fixable
&fixable
} else if statistic.any_fixable() {
&partially_fixable
} else {
unfixable
}
@ -409,18 +390,21 @@ impl Printer {
let context = EmitterContext::new(&diagnostics.notebook_indexes);
let format = if preview {
self.format
} else {
OutputFormat::Concise
};
let config = DisplayDiagnosticConfig::default()
.hide_severity(true)
.color(!cfg!(test) && colored::control::SHOULD_COLORIZE.should_colorize())
.with_show_fix_status(show_fix_status(self.fix_mode, fixables.as_ref()))
.with_fix_applicability(self.unsafe_fixes.required_applicability())
.show_fix_diff(preview);
render_diagnostics(writer, format, config, &context, &diagnostics.inner)?;
let format = if preview {
DiagnosticFormat::Full
} else {
DiagnosticFormat::Concise
};
let config = DisplayDiagnosticConfig::default()
.preview(preview)
.hide_severity(true)
.color(!cfg!(test) && colored::control::SHOULD_COLORIZE.should_colorize())
.with_show_fix_status(show_fix_status(self.fix_mode, fixables.as_ref()))
.format(format)
.with_fix_applicability(self.unsafe_fixes.required_applicability());
write!(
writer,
"{}",
DisplayDiagnostics::new(&context, &config, &diagnostics.inner)
)?;
}
writer.flush()?;
@ -653,133 +653,3 @@ fn venv() -> Result<()> {
Ok(())
}
#[test]
fn notebook_basic() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
def helper():
pass
"#})?;
// Create a basic notebook with a simple import
root.child("notebook.ipynb").write_str(indoc::indoc! {r#"
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from ruff.a import helper"
]
}
],
"metadata": {
"language_info": {
"name": "python",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"notebook.ipynb": [
"ruff/a.py"
],
"ruff/__init__.py": [],
"ruff/a.py": []
}
----- stderr -----
"###);
});
Ok(())
}
#[test]
fn notebook_with_magic() -> Result<()> {
let tempdir = TempDir::new()?;
let root = ChildPath::new(tempdir.path());
root.child("ruff").child("__init__.py").write_str("")?;
root.child("ruff")
.child("a.py")
.write_str(indoc::indoc! {r#"
def helper():
pass
"#})?;
// Create a notebook with IPython magic commands and imports
root.child("notebook.ipynb").write_str(indoc::indoc! {r#"
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"%load_ext autoreload\n",
"%autoreload 2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from ruff.a import helper"
]
}
],
"metadata": {
"language_info": {
"name": "python",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 5
}
"#})?;
insta::with_settings!({
filters => INSTA_FILTERS.to_vec(),
}, {
assert_cmd_snapshot!(command().current_dir(&root), @r###"
success: true
exit_code: 0
----- stdout -----
{
"notebook.ipynb": [
"ruff/a.py"
],
"ruff/__init__.py": [],
"ruff/a.py": []
}
----- stderr -----
"###);
});
Ok(())
}
@ -1,193 +0,0 @@
use std::process::Command;
use insta_cmd::assert_cmd_snapshot;
use crate::CliTest;
#[test]
fn type_checking_imports() -> anyhow::Result<()> {
let test = AnalyzeTest::with_files([
("ruff/__init__.py", ""),
(
"ruff/a.py",
r#"
from typing import TYPE_CHECKING
import ruff.b
if TYPE_CHECKING:
import ruff.c
"#,
),
(
"ruff/b.py",
r#"
if TYPE_CHECKING:
from ruff import c
"#,
),
("ruff/c.py", ""),
])?;
assert_cmd_snapshot!(test.command(), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py",
"ruff/c.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": []
}
----- stderr -----
"###);
assert_cmd_snapshot!(
test.command()
.arg("--no-type-checking-imports"),
@r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [],
"ruff/c.py": []
}
----- stderr -----
"###
);
Ok(())
}
#[test]
fn type_checking_imports_from_config() -> anyhow::Result<()> {
let test = AnalyzeTest::with_files([
("ruff/__init__.py", ""),
(
"ruff/a.py",
r#"
from typing import TYPE_CHECKING
import ruff.b
if TYPE_CHECKING:
import ruff.c
"#,
),
(
"ruff/b.py",
r#"
if TYPE_CHECKING:
from ruff import c
"#,
),
("ruff/c.py", ""),
(
"ruff.toml",
r#"
[analyze]
type-checking-imports = false
"#,
),
])?;
assert_cmd_snapshot!(test.command(), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py"
],
"ruff/b.py": [],
"ruff/c.py": []
}
----- stderr -----
"###);
test.write_file(
"ruff.toml",
r#"
[analyze]
type-checking-imports = true
"#,
)?;
assert_cmd_snapshot!(test.command(), @r###"
success: true
exit_code: 0
----- stdout -----
{
"ruff/__init__.py": [],
"ruff/a.py": [
"ruff/b.py",
"ruff/c.py"
],
"ruff/b.py": [
"ruff/c.py"
],
"ruff/c.py": []
}
----- stderr -----
"###
);
Ok(())
}
struct AnalyzeTest {
cli_test: CliTest,
}
impl AnalyzeTest {
pub(crate) fn new() -> anyhow::Result<Self> {
Ok(Self {
cli_test: CliTest::with_settings(|_, mut settings| {
settings.add_filter(r#"\\\\"#, "/");
settings
})?,
})
}
fn with_files<'a>(files: impl IntoIterator<Item = (&'a str, &'a str)>) -> anyhow::Result<Self> {
let case = Self::new()?;
case.write_files(files)?;
Ok(case)
}
#[expect(unused)]
fn with_file(path: impl AsRef<std::path::Path>, content: &str) -> anyhow::Result<Self> {
let fixture = Self::new()?;
fixture.write_file(path, content)?;
Ok(fixture)
}
fn command(&self) -> Command {
let mut command = self.cli_test.command();
command.arg("analyze").arg("graph").arg("--preview");
command
}
}
impl std::ops::Deref for AnalyzeTest {
type Target = CliTest;
fn deref(&self) -> &Self::Target {
&self.cli_test
}
}
@ -1440,78 +1440,6 @@ def function():
Ok(())
}
#[test]
fn ignore_noqa() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"ruff.toml",
r#"
[lint]
select = ["F401"]
"#,
)?;
fixture.write_file(
"noqa.py",
r#"
import os # noqa: F401
# ruff: disable[F401]
import sys
"#,
)?;
// without --ignore-noqa
assert_cmd_snapshot!(fixture
.check_command()
.args(["--config", "ruff.toml"])
.arg("noqa.py"),
@r"
success: false
exit_code: 1
----- stdout -----
noqa.py:5:8: F401 [*] `sys` imported but unused
Found 1 error.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
assert_cmd_snapshot!(fixture
.check_command()
.args(["--config", "ruff.toml"])
.arg("noqa.py")
.args(["--preview"]),
@r"
success: true
exit_code: 0
----- stdout -----
All checks passed!
----- stderr -----
");
// with --ignore-noqa --preview
assert_cmd_snapshot!(fixture
.check_command()
.args(["--config", "ruff.toml"])
.arg("noqa.py")
.args(["--ignore-noqa", "--preview"]),
@r"
success: false
exit_code: 1
----- stdout -----
noqa.py:2:8: F401 [*] `os` imported but unused
noqa.py:5:8: F401 [*] `sys` imported but unused
Found 2 errors.
[*] 2 fixable with the `--fix` option.
----- stderr -----
");
Ok(())
}
#[test]
fn add_noqa() -> Result<()> {
let fixture = CliTest::new()?;
@ -1704,100 +1632,6 @@ def unused(x): # noqa: ANN001, ARG001, D103
Ok(())
}
#[test]
fn add_noqa_existing_file_level_noqa() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"ruff.toml",
r#"
[lint]
select = ["F401"]
"#,
)?;
fixture.write_file(
"noqa.py",
r#"
# ruff: noqa F401
import os
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command()
.args(["--config", "ruff.toml"])
.arg("noqa.py")
.arg("--preview")
.args(["--add-noqa"])
.arg("-")
.pass_stdin(r#"
"#), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
let test_code =
fs::read_to_string(fixture.root().join("noqa.py")).expect("should read test file");
insta::assert_snapshot!(test_code, @r"
# ruff: noqa F401
import os
");
Ok(())
}
#[test]
fn add_noqa_existing_range_suppression() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"ruff.toml",
r#"
[lint]
select = ["F401"]
"#,
)?;
fixture.write_file(
"noqa.py",
r#"
# ruff: disable[F401]
import os
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command()
.args(["--config", "ruff.toml"])
.arg("noqa.py")
.arg("--preview")
.args(["--add-noqa"])
.arg("-")
.pass_stdin(r#"
"#), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
");
let test_code =
fs::read_to_string(fixture.root().join("noqa.py")).expect("should read test file");
insta::assert_snapshot!(test_code, @r"
# ruff: disable[F401]
import os
");
Ok(())
}
#[test]
fn add_noqa_multiline_comment() -> Result<()> {
let fixture = CliTest::new()?;
@ -1926,64 +1760,6 @@ from foo import ( # noqa: F401
Ok(())
}
#[test]
fn add_noqa_with_reason() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file(
"test.py",
r#"import os
def foo():
x = 1
"#,
)?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--add-noqa=TODO: fix")
.arg("--select=F401,F841")
.arg("test.py"), @r"
success: true
exit_code: 0
----- stdout -----
----- stderr -----
Added 2 noqa directives.
");
let content = fs::read_to_string(fixture.root().join("test.py"))?;
insta::assert_snapshot!(content, @r"
import os # noqa: F401 TODO: fix
def foo():
x = 1 # noqa: F841 TODO: fix
");
Ok(())
}
#[test]
fn add_noqa_with_newline_in_reason() -> Result<()> {
let fixture = CliTest::new()?;
fixture.write_file("test.py", "import os\n")?;
assert_cmd_snapshot!(fixture
.check_command()
.arg("--add-noqa=line1\nline2")
.arg("--select=F401")
.arg("test.py"), @r###"
success: false
exit_code: 2
----- stdout -----
----- stderr -----
ruff failed
Cause: --add-noqa <reason> cannot contain newline characters
"###);
Ok(())
}
/// Infer `3.11` from `requires-python` in `pyproject.toml`.
#[test]
fn requires_python() -> Result<()> {
@ -15,7 +15,6 @@ use std::{
};
use tempfile::TempDir;
mod analyze_graph;
mod format;
mod lint;
@ -63,7 +62,9 @@ impl CliTest {
files: impl IntoIterator<Item = (&'a str, &'a str)>,
) -> anyhow::Result<Self> {
let case = Self::new()?;
case.write_files(files)?;
for file in files {
case.write_file(file.0, file.1)?;
}
Ok(case)
}
@ -152,16 +153,6 @@ impl CliTest {
Ok(())
}
pub(crate) fn write_files<'a>(
&self,
files: impl IntoIterator<Item = (&'a str, &'a str)>,
) -> Result<()> {
for file in files {
self.write_file(file.0, file.1)?;
}
Ok(())
}
/// Returns the path to the test directory root.
pub(crate) fn root(&self) -> &Path {
&self.project_dir
@ -9,6 +9,7 @@ info:
- concise
- "--show-settings"
- test.py
snapshot_kind: text
---
success: true
exit_code: 0
@ -283,6 +284,5 @@ analyze.target_version = 3.10
analyze.string_imports = disabled
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr -----
@ -12,6 +12,7 @@ info:
- UP007
- test.py
- "-"
snapshot_kind: text
---
success: true
exit_code: 0
@ -285,6 +286,5 @@ analyze.target_version = 3.11
analyze.string_imports = disabled
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr -----
@ -13,6 +13,7 @@ info:
- UP007
- test.py
- "-"
snapshot_kind: text
---
success: true
exit_code: 0
@ -287,6 +288,5 @@ analyze.target_version = 3.11
analyze.string_imports = disabled
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr -----
@ -14,6 +14,7 @@ info:
- py310
- test.py
- "-"
snapshot_kind: text
---
success: true
exit_code: 0
@ -287,6 +288,5 @@ analyze.target_version = 3.10
analyze.string_imports = disabled
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr -----
@ -11,6 +11,7 @@ info:
- "--select" - "--select"
- UP007 - UP007
- foo/test.py - foo/test.py
snapshot_kind: text
--- ---
success: true success: true
exit_code: 0 exit_code: 0
@ -284,6 +285,5 @@ analyze.target_version = 3.11
analyze.string_imports = disabled analyze.string_imports = disabled
analyze.extension = ExtensionMapping({}) analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {} analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr ----- ----- stderr -----
@ -11,6 +11,7 @@ info:
- "--select" - "--select"
- UP007 - UP007
- foo/test.py - foo/test.py
snapshot_kind: text
--- ---
success: true success: true
exit_code: 0 exit_code: 0
@ -284,6 +285,5 @@ analyze.target_version = 3.10
analyze.string_imports = disabled analyze.string_imports = disabled
analyze.extension = ExtensionMapping({}) analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {} analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr ----- ----- stderr -----
@ -283,6 +283,5 @@ analyze.target_version = 3.10
analyze.string_imports = disabled
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr -----
@ -283,6 +283,5 @@ analyze.target_version = 3.10
analyze.string_imports = disabled
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr -----
@ -9,6 +9,7 @@ info:
- concise
- test.py
- "--show-settings"
snapshot_kind: text
---
success: true
exit_code: 0
@ -283,6 +284,5 @@ analyze.target_version = 3.11
analyze.string_imports = disabled
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr -----
@ -1043,7 +1043,7 @@ def mvce(keys, values):
----- stdout -----
1 C416 [*] unnecessary-comprehension
Found 1 error.
[*] 1 fixable with the `--fix` option.
[*] 1 fixable with the --fix option.
----- stderr -----
");
@ -1073,8 +1073,7 @@ def mvce(keys, values):
"code": "C416",
"name": "unnecessary-comprehension",
"count": 1,
"fixable": false,
"fixable": false
"fixable_count": 0
}
]
@ -1107,8 +1106,7 @@ def mvce(keys, values):
"code": "C416",
"name": "unnecessary-comprehension",
"count": 1,
"fixable": true,
"fixable": true
"fixable_count": 1
}
]
@ -1116,54 +1114,6 @@ def mvce(keys, values):
"#);
}
#[test]
fn show_statistics_json_partial_fix() {
let mut cmd = RuffCheck::default()
.args([
"--select",
"UP035",
"--statistics",
"--output-format",
"json",
])
.build();
assert_cmd_snapshot!(cmd
.pass_stdin("from typing import List, AsyncGenerator"), @r#"
success: false
exit_code: 1
----- stdout -----
[
{
"code": "UP035",
"name": "deprecated-import",
"count": 2,
"fixable": false,
"fixable_count": 1
}
]
----- stderr -----
"#);
}
#[test]
fn show_statistics_partial_fix() {
let mut cmd = RuffCheck::default()
.args(["--select", "UP035", "--statistics"])
.build();
assert_cmd_snapshot!(cmd
.pass_stdin("from typing import List, AsyncGenerator"), @r"
success: false
exit_code: 1
----- stdout -----
2 UP035 [-] deprecated-import
Found 2 errors.
[*] 1 fixable with the `--fix` option.
----- stderr -----
");
}
#[test]
fn show_statistics_syntax_errors() {
let mut cmd = RuffCheck::default()
@ -1860,7 +1810,7 @@ fn check_no_hint_for_hidden_unsafe_fixes_when_disabled() {
--> -:1:1
Found 2 errors.
[*] 1 fixable with the `--fix` option.
[*] 1 fixable with the --fix option.
----- stderr -----
");
@ -1903,7 +1853,7 @@ fn check_shows_unsafe_fixes_with_opt_in() {
--> -:1:1
Found 2 errors.
[*] 2 fixable with the `--fix` option.
[*] 2 fixable with the --fix option.
----- stderr -----
");
@ -396,6 +396,5 @@ analyze.target_version = 3.7
analyze.string_imports = disabled
analyze.extension = ExtensionMapping({})
analyze.include_dependencies = {}
analyze.type_checking_imports = true
----- stderr -----
@ -31,7 +31,7 @@
//! styling.
//!
//! The above snippet has been built out of the following structure:
use crate::{Id, snippet};
use crate::snippet;
use std::cmp::{Reverse, max, min};
use std::collections::HashMap;
use std::fmt::Display;
@ -189,7 +189,6 @@ impl DisplaySet<'_> {
}
Ok(())
}
fn format_annotation(
&self,
line_offset: usize,
@ -200,13 +199,11 @@ impl DisplaySet<'_> {
) -> fmt::Result {
let hide_severity = annotation.annotation_type.is_none();
let color = get_annotation_style(&annotation.annotation_type, stylesheet);
let formatted_len = if let Some(id) = &annotation.id {
let id_len = id.id.len();
if hide_severity {
id_len
id.len()
} else {
2 + id_len + annotation_type_len(&annotation.annotation_type)
2 + id.len() + annotation_type_len(&annotation.annotation_type)
}
} else {
annotation_type_len(&annotation.annotation_type)
@ -259,20 +256,9 @@ impl DisplaySet<'_> {
let annotation_type = annotation_type_str(&annotation.annotation_type);
if let Some(id) = annotation.id {
if hide_severity {
buffer.append(
line_offset,
&format!("{id} ", id = fmt_with_hyperlink(id.id, id.url, stylesheet)),
*stylesheet.error(),
);
buffer.append(line_offset, &format!("{id} "), *stylesheet.error());
} else {
buffer.append(
line_offset,
&format!(
"{annotation_type}[{id}]",
id = fmt_with_hyperlink(id.id, id.url, stylesheet)
),
*color,
);
buffer.append(line_offset, &format!("{annotation_type}[{id}]"), *color);
}
} else {
buffer.append(line_offset, annotation_type, *color);
@ -721,7 +707,7 @@ impl DisplaySet<'_> {
let style =
get_annotation_style(&annotation.annotation_type, stylesheet);
let mut formatted_len = if let Some(id) = &annotation.annotation.id {
2 + id.id.len()
2 + id.len()
+ annotation_type_len(&annotation.annotation.annotation_type)
} else {
annotation_type_len(&annotation.annotation.annotation_type)
@ -738,10 +724,7 @@ impl DisplaySet<'_> {
} else if formatted_len != 0 {
formatted_len += 2;
let id = match &annotation.annotation.id {
Some(id) => format!(
"[{id}]",
id = fmt_with_hyperlink(&id.id, id.url, stylesheet)
),
Some(id) => format!("[{id}]"),
None => String::new(),
};
buffer.puts(
@ -844,7 +827,7 @@ impl DisplaySet<'_> {
#[derive(Clone, Debug, PartialEq)]
pub(crate) struct Annotation<'a> {
pub(crate) annotation_type: DisplayAnnotationType,
pub(crate) id: Option<Id<'a>>,
pub(crate) id: Option<&'a str>,
pub(crate) label: Vec<DisplayTextFragment<'a>>,
pub(crate) is_fixable: bool,
}
@ -1157,7 +1140,7 @@ fn format_message<'m>(
fn format_title<'a>(
level: crate::Level,
id: Option<Id<'a>>,
id: Option<&'a str>,
label: &'a str,
is_fixable: bool,
) -> DisplayLine<'a> {
@ -1175,7 +1158,7 @@ fn format_title<'a>(
fn format_footer<'a>(
level: crate::Level,
id: Option<Id<'a>>,
id: Option<&'a str>,
label: &'a str,
) -> Vec<DisplayLine<'a>> {
let mut result = vec![];
@ -1723,7 +1706,6 @@ fn format_body<'m>(
annotation: Annotation {
annotation_type,
id: None,
label: format_label(annotation.label, None),
is_fixable: false,
},
@ -1905,40 +1887,3 @@ fn char_width(c: char) -> Option<usize> {
unicode_width::UnicodeWidthChar::width(c)
}
}
pub(super) fn fmt_with_hyperlink<'a, T>(
content: T,
url: Option<&'a str>,
stylesheet: &Stylesheet,
) -> impl std::fmt::Display + 'a
where
T: std::fmt::Display + 'a,
{
struct FmtHyperlink<'a, T> {
content: T,
url: Option<&'a str>,
}
impl<T> std::fmt::Display for FmtHyperlink<'_, T>
where
T: std::fmt::Display,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(url) = self.url {
write!(f, "\x1B]8;;{url}\x1B\\")?;
}
self.content.fmt(f)?;
if self.url.is_some() {
f.write_str("\x1B]8;;\x1B\\")?;
}
Ok(())
}
}
let url = if stylesheet.hyperlink { url } else { None };
FmtHyperlink { content, url }
}
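The removed `fmt_with_hyperlink` wraps the rule code in OSC 8 escape sequences, the terminal convention for clickable hyperlinks: `ESC ] 8 ; ; URL ESC \` opens a link and the same sequence with an empty URL closes it. A minimal standalone sketch:

```rust
/// Wrap `text` in an OSC 8 hyperlink pointing at `url`.
/// Terminals without OSC 8 support fall back to printing plain `text`.
fn hyperlink(text: &str, url: &str) -> String {
    format!("\x1B]8;;{url}\x1B\\{text}\x1B]8;;\x1B\\")
}

fn main() {
    println!("{}", hyperlink("F401", "https://docs.astral.sh/ruff/rules/unused-import"));
}
```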
@ -76,7 +76,6 @@ impl Renderer {
}
.effects(Effects::BOLD),
none: Style::new(),
hyperlink: true,
},
..Self::plain()
}
@ -155,11 +154,6 @@ impl Renderer {
self
}
pub const fn hyperlink(mut self, hyperlink: bool) -> Self {
self.stylesheet.hyperlink = hyperlink;
self
}
/// Set the string used for when a long line is cut.
///
/// The default is `...` (three `U+002E` characters).
@ -10,7 +10,6 @@ pub(crate) struct Stylesheet {
pub(crate) line_no: Style,
pub(crate) emphasis: Style,
pub(crate) none: Style,
pub(crate) hyperlink: bool,
}
impl Default for Stylesheet {
@ -30,7 +29,6 @@ impl Stylesheet {
line_no: Style::new(),
emphasis: Style::new(),
none: Style::new(),
hyperlink: false,
}
}
}
@ -12,19 +12,13 @@
use std::ops::Range;
#[derive(Copy, Clone, Debug, Default, PartialEq)]
pub(crate) struct Id<'a> {
pub(crate) id: &'a str,
pub(crate) url: Option<&'a str>,
}
/// Primary structure provided for formatting
///
/// See [`Level::title`] to create a [`Message`]
#[derive(Debug)]
pub struct Message<'a> {
pub(crate) level: Level,
pub(crate) id: Option<Id<'a>>,
pub(crate) id: Option<&'a str>,
pub(crate) title: &'a str,
pub(crate) snippets: Vec<Snippet<'a>>,
pub(crate) footer: Vec<Message<'a>>,
@ -34,12 +28,7 @@ pub struct Message<'a> {
impl<'a> Message<'a> {
pub fn id(mut self, id: &'a str) -> Self {
self.id = Some(Id { id, url: None });
self.id = Some(id);
self
}
pub fn id_with_url(mut self, id: &'a str, url: Option<&'a str>) -> Self {
self.id = Some(Id { id, url });
self
}
@ -59,6 +59,8 @@ divan = { workspace = true, optional = true }
anyhow = { workspace = true }
codspeed-criterion-compat = { workspace = true, default-features = false, optional = true }
criterion = { workspace = true, default-features = false, optional = true }
rayon = { workspace = true }
rustc-hash = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true }
@ -86,7 +88,3 @@ mimalloc = { workspace = true }
[target.'cfg(all(not(target_os = "windows"), not(target_os = "openbsd"), any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64", target_arch = "riscv64")))'.dev-dependencies]
tikv-jemallocator = { workspace = true }
[dev-dependencies]
rustc-hash = { workspace = true }
rayon = { workspace = true }
@ -6,8 +6,7 @@ use criterion::{
use ruff_benchmark::{
LARGE_DATASET, NUMPY_CTYPESLIB, NUMPY_GLOBALS, PYDANTIC_TYPES, TestCase, UNICODE_PYPINYIN,
};
use ruff_python_ast::token::TokenKind;
use ruff_python_parser::{Mode, lexer};
use ruff_python_parser::{Mode, TokenKind, lexer};
#[cfg(target_os = "windows")]
#[global_allocator]
@ -667,7 +667,7 @@ fn attrs(criterion: &mut Criterion) {
max_dep_date: "2025-06-17",
python_version: PythonVersion::PY313,
},
120,
110,
);
bench_project(&benchmark, criterion);
@ -71,13 +71,16 @@ impl Display for Benchmark<'_> {
}
}
fn check_project(db: &ProjectDatabase, project_name: &str, max_diagnostics: usize) {
fn check_project(db: &ProjectDatabase, max_diagnostics: usize) {
let result = db.check();
let diagnostics = result.len();
assert!(
diagnostics > 1 && diagnostics <= max_diagnostics,
"Expected between 1 and {max_diagnostics} diagnostics on project '{project_name}' but got {diagnostics}",
"Expected between {} and {} diagnostics but got {}",
1,
max_diagnostics,
diagnostics
);
}
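The main-side assert message also switches from positional `{}` arguments to identifiers captured inline in the format string (available since Rust 1.58). A tiny illustration of the two styles:

```rust
fn main() {
    let max_diagnostics = 10;
    let diagnostics = 42;
    // Positional arguments, as on the 0.14.2 side:
    println!("Expected between {} and {} diagnostics but got {}", 1, max_diagnostics, diagnostics);
    // Inline captured identifiers, as on main:
    println!("Expected between 1 and {max_diagnostics} diagnostics but got {diagnostics}");
}
```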
@ -120,7 +123,7 @@ static COLOUR_SCIENCE: Benchmark = Benchmark::new(
max_dep_date: "2025-06-17",
python_version: PythonVersion::PY310,
},
1070,
600,
);
static FREQTRADE: Benchmark = Benchmark::new(
@ -143,7 +146,7 @@ static FREQTRADE: Benchmark = Benchmark::new(
max_dep_date: "2025-06-17",
python_version: PythonVersion::PY312,
},
600,
400,
);
static PANDAS: Benchmark = Benchmark::new(
@ -163,7 +166,7 @@ static PANDAS: Benchmark = Benchmark::new(
max_dep_date: "2025-06-17",
python_version: PythonVersion::PY312,
},
4000,
3000,
);
static PYDANTIC: Benchmark = Benchmark::new(
@ -181,7 +184,7 @@ static PYDANTIC: Benchmark = Benchmark::new(
max_dep_date: "2025-06-17",
python_version: PythonVersion::PY39,
},
7000,
1000,
);
static SYMPY: Benchmark = Benchmark::new(
@ -194,7 +197,7 @@ static SYMPY: Benchmark = Benchmark::new(
max_dep_date: "2025-06-17",
python_version: PythonVersion::PY312,
},
13100,
13000,
);
static TANJUN: Benchmark = Benchmark::new(
@ -223,7 +226,7 @@ static STATIC_FRAME: Benchmark = Benchmark::new(
max_dep_date: "2025-08-09",
python_version: PythonVersion::PY311,
},
1100,
750,
);
#[track_caller]
@ -231,11 +234,11 @@ fn run_single_threaded(bencher: Bencher, benchmark: &Benchmark) {
bencher
.with_inputs(|| benchmark.setup_iteration())
.bench_local_refs(|db| {
check_project(db, benchmark.project.name, benchmark.max_diagnostics);
check_project(db, benchmark.max_diagnostics);
});
}
#[bench(args=[&ALTAIR, &FREQTRADE, &TANJUN], sample_size=2, sample_count=3)]
#[bench(args=[&ALTAIR, &FREQTRADE, &PYDANTIC, &TANJUN], sample_size=2, sample_count=3)]
fn small(bencher: Bencher, benchmark: &Benchmark) {
run_single_threaded(bencher, benchmark);
}
@ -245,12 +248,12 @@ fn medium(bencher: Bencher, benchmark: &Benchmark) {
run_single_threaded(bencher, benchmark);
}
#[bench(args=[&SYMPY, &PYDANTIC], sample_size=1, sample_count=2)]
#[bench(args=[&SYMPY], sample_size=1, sample_count=2)]
fn large(bencher: Bencher, benchmark: &Benchmark) {
run_single_threaded(bencher, benchmark);
}
#[bench(args=[&ALTAIR], sample_size=3, sample_count=8)]
#[bench(args=[&PYDANTIC], sample_size=3, sample_count=8)]
fn multithreaded(bencher: Bencher, benchmark: &Benchmark) {
let thread_pool = ThreadPoolBuilder::new().build().unwrap();
@ -258,7 +261,7 @@ fn multithreaded(bencher: Bencher, benchmark: &Benchmark) {
.with_inputs(|| benchmark.setup_iteration())
.bench_local_values(|db| {
thread_pool.install(|| {
check_project(&db, benchmark.project.name, benchmark.max_diagnostics);
check_project(&db, benchmark.max_diagnostics);
db
})
});
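The multithreaded benchmark builds its own rayon pool and runs the check inside `install`, so any parallel work the checker spawns stays on that pool instead of the global one. A self-contained sketch of the same pattern:

```rust
use rayon::prelude::*;

fn main() {
    // A private pool, as with `ThreadPoolBuilder::new().build().unwrap()` above.
    let pool = rayon::ThreadPoolBuilder::new()
        .num_threads(4)
        .build()
        .unwrap();

    // Closures executed via `install` use this pool's worker threads.
    let sum: u64 = pool.install(|| (0..1_000u64).into_par_iter().map(|n| n * n).sum());
    assert_eq!(sum, 332_833_500);
}
```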
@ -282,7 +285,7 @@ fn main() {
// branch when looking up the ingredient index.
{
let db = TANJUN.setup_iteration();
check_project(&db, TANJUN.project.name, TANJUN.max_diagnostics);
check_project(&db, TANJUN.max_diagnostics);
}
divan::main();
@ -42,7 +42,6 @@ schemars = { workspace = true, optional = true }
serde = { workspace = true, optional = true }
serde_json = { workspace = true, optional = true }
similar = { workspace = true }
supports-hyperlinks = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true, optional = true }
@ -64,8 +64,6 @@ impl Diagnostic {
id,
severity,
message: message.into_diagnostic_message(),
custom_concise_message: None,
documentation_url: None,
annotations: vec![],
subs: vec![],
fix: None,
@ -166,8 +164,28 @@ impl Diagnostic {
/// Returns the primary message for this diagnostic.
///
/// A diagnostic always has a message, but it may be empty.
///
/// NOTE: At present, this routine will return the first primary
/// annotation's message as the primary message when the main diagnostic
/// message is empty. This is meant to facilitate an incremental migration
/// in ty over to the new diagnostic data model. (The old data model
/// didn't distinguish between messages on the entire diagnostic and
/// messages attached to a particular span.)
pub fn primary_message(&self) -> &str {
self.inner.message.as_str()
if !self.inner.message.as_str().is_empty() {
return self.inner.message.as_str();
}
// FIXME: As a special case, while we're migrating ty
// to the new diagnostic data model, we'll look for a primary
// message from the primary annotation. This is because most
// ty diagnostics are created with an empty diagnostic
// message and instead attach the message to the annotation.
// Fixing this will require touching basically every diagnostic
// in ty, so we do it this way for now to match the old
// semantics. ---AG
self.primary_annotation()
.and_then(|ann| ann.get_message())
.unwrap_or_default()
}
/// Introspects this diagnostic and returns what kind of "primary" message /// Introspects this diagnostic and returns what kind of "primary" message
@ -179,35 +197,35 @@ impl Diagnostic {
/// contains *essential* information or context for understanding the
/// diagnostic.
///
/// The reason why we don't just always return both the main diagnostic
/// message and the primary annotation message is because this was written
/// in the midst of an incremental migration of ty over to the new
/// diagnostic data model. At time of writing, diagnostics were still
/// constructed in the old model where the main diagnostic message and the
/// primary annotation message were not distinguished from each other. So
/// for now, we carefully return what kind of messages this diagnostic
/// contains. In effect, if this diagnostic has a non-empty main message
/// *and* a non-empty primary annotation message, then the diagnostic is
/// 100% using the new diagnostic data model and we can format things
/// appropriately.
///
/// The type returned implements the `std::fmt::Display` trait. In most
/// cases, just converting it to a string (or printing it) will do what
/// you want.
pub fn concise_message(&self) -> ConciseMessage<'_> {
if let Some(custom_message) = &self.inner.custom_concise_message {
return ConciseMessage::Custom(custom_message.as_str());
}
let main = self.inner.message.as_str();
let annotation = self
.primary_annotation()
.and_then(|ann| ann.get_message())
.unwrap_or_default();
if annotation.is_empty() {
ConciseMessage::MainDiagnostic(main)
} else {
ConciseMessage::Both { main, annotation }
}
match (main.is_empty(), annotation.is_empty()) {
(false, true) => ConciseMessage::MainDiagnostic(main),
(true, false) => ConciseMessage::PrimaryAnnotation(annotation),
(false, false) => ConciseMessage::Both { main, annotation },
(true, true) => ConciseMessage::Empty,
}
}
/// Set a custom message for the concise formatting of this diagnostic.
///
/// This overrides the default behavior of generating a concise message
/// from the main diagnostic message and the primary annotation.
pub fn set_concise_message(&mut self, message: impl IntoDiagnosticMessage) {
Arc::make_mut(&mut self.inner).custom_concise_message =
Some(message.into_diagnostic_message());
}
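
The setters above mutate through `Arc::make_mut`, i.e. copy-on-write over the shared `DiagnosticInner`; a minimal self-contained sketch of that pattern (toy `Inner` type):

    use std::sync::Arc;

    #[derive(Clone)]
    struct Inner {
        custom_concise_message: Option<String>,
    }

    fn main() {
        let shared = Arc::new(Inner { custom_concise_message: None });
        let mut mine = Arc::clone(&shared);
        // `make_mut` clones the inner value only because `shared` still holds
        // a second reference; a uniquely-owned Arc is mutated in place.
        Arc::make_mut(&mut mine).custom_concise_message = Some("override".to_string());
        assert!(shared.custom_concise_message.is_none());
        assert_eq!(mine.custom_concise_message.as_deref(), Some("override"));
    }
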
/// Returns the severity of this diagnostic. /// Returns the severity of this diagnostic.
/// ///
/// Note that this may be different than the severity of sub-diagnostics. /// Note that this may be different than the severity of sub-diagnostics.
@ -321,13 +339,6 @@ impl Diagnostic {
Arc::make_mut(&mut self.inner).fix = Some(fix); Arc::make_mut(&mut self.inner).fix = Some(fix);
} }
/// If `fix` is `Some`, set the fix for this diagnostic.
pub fn set_optional_fix(&mut self, fix: Option<Fix>) {
if let Some(fix) = fix {
self.set_fix(fix);
}
}
/// Remove the fix for this diagnostic. /// Remove the fix for this diagnostic.
pub fn remove_fix(&mut self) { pub fn remove_fix(&mut self) {
Arc::make_mut(&mut self.inner).fix = None; Arc::make_mut(&mut self.inner).fix = None;
@ -345,14 +356,6 @@ impl Diagnostic {
.is_some_and(|fix| fix.applies(config.fix_applicability)) .is_some_and(|fix| fix.applies(config.fix_applicability))
} }
pub fn documentation_url(&self) -> Option<&str> {
self.inner.documentation_url.as_deref()
}
pub fn set_documentation_url(&mut self, url: Option<String>) {
Arc::make_mut(&mut self.inner).documentation_url = url;
}
/// Returns the offset of the parent statement for this diagnostic if it exists. /// Returns the offset of the parent statement for this diagnostic if it exists.
/// ///
/// This is primarily used for checking noqa/secondary code suppressions. /// This is primarily used for checking noqa/secondary code suppressions.
@ -426,6 +429,28 @@ impl Diagnostic {
.map(|sub| sub.inner.message.as_str()) .map(|sub| sub.inner.message.as_str())
} }
/// Returns the URL for the rule documentation, if it exists.
pub fn to_ruff_url(&self) -> Option<String> {
match self.id() {
DiagnosticId::Panic
| DiagnosticId::Io
| DiagnosticId::InvalidSyntax
| DiagnosticId::RevealedType
| DiagnosticId::UnknownRule
| DiagnosticId::InvalidGlob
| DiagnosticId::EmptyInclude
| DiagnosticId::UnnecessaryOverridesSection
| DiagnosticId::UselessOverridesSection
| DiagnosticId::DeprecatedSetting
| DiagnosticId::Unformatted
| DiagnosticId::InvalidCliOption
| DiagnosticId::InternalError => None,
DiagnosticId::Lint(lint_name) => {
Some(format!("{}/rules/{lint_name}", env!("CARGO_PKG_HOMEPAGE")))
}
}
}
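
The lint arm builds the documentation URL by joining the crate homepage with a `rules/<lint-name>` path; a toy sketch of the shape (the `https://docs.astral.sh/ruff` base matches the URLs used in the tests further down this diff):

    // Toy version: lint rules get "<homepage>/rules/<name>"; non-lint
    // diagnostics (panics, IO errors, syntax errors, ...) get no URL.
    fn rule_url(homepage: &str, lint_name: Option<&str>) -> Option<String> {
        lint_name.map(|name| format!("{homepage}/rules/{name}"))
    }

    fn main() {
        assert_eq!(
            rule_url("https://docs.astral.sh/ruff", Some("unused-import")).as_deref(),
            Some("https://docs.astral.sh/ruff/rules/unused-import"),
        );
        assert_eq!(rule_url("https://docs.astral.sh/ruff", None), None);
    }
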
/// Returns the filename for the message. /// Returns the filename for the message.
/// ///
/// Panics if the diagnostic has no primary span, or if its file is not a `SourceFile`. /// Panics if the diagnostic has no primary span, or if its file is not a `SourceFile`.
@ -505,10 +530,8 @@ impl Diagnostic {
#[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)] #[derive(Debug, Clone, Eq, PartialEq, Hash, get_size2::GetSize)]
struct DiagnosticInner { struct DiagnosticInner {
id: DiagnosticId, id: DiagnosticId,
documentation_url: Option<String>,
severity: Severity, severity: Severity,
message: DiagnosticMessage, message: DiagnosticMessage,
custom_concise_message: Option<DiagnosticMessage>,
annotations: Vec<Annotation>, annotations: Vec<Annotation>,
subs: Vec<SubDiagnostic>, subs: Vec<SubDiagnostic>,
fix: Option<Fix>, fix: Option<Fix>,
@ -660,6 +683,18 @@ impl SubDiagnostic {
/// contains *essential* information or context for understanding the /// contains *essential* information or context for understanding the
/// diagnostic. /// diagnostic.
/// ///
/// The reason why we don't just always return both the main diagnostic
/// message and the primary annotation message is because this was written
/// in the midst of an incremental migration of ty over to the new
/// diagnostic data model. At time of writing, diagnostics were still
/// constructed in the old model where the main diagnostic message and the
/// primary annotation message were not distinguished from each other. So
/// for now, we carefully return what kind of messages this diagnostic
/// contains. In effect, if this diagnostic has a non-empty main message
/// *and* a non-empty primary annotation message, then the diagnostic is
/// 100% using the new diagnostic data model and we can format things
/// appropriately.
///
/// The type returned implements the `std::fmt::Display` trait. In most /// The type returned implements the `std::fmt::Display` trait. In most
/// cases, just converting it to a string (or printing it) will do what /// cases, just converting it to a string (or printing it) will do what
/// you want. /// you want.
@ -669,10 +704,11 @@ impl SubDiagnostic {
.primary_annotation() .primary_annotation()
.and_then(|ann| ann.get_message()) .and_then(|ann| ann.get_message())
.unwrap_or_default(); .unwrap_or_default();
if annotation.is_empty() { match (main.is_empty(), annotation.is_empty()) {
ConciseMessage::MainDiagnostic(main) (false, true) => ConciseMessage::MainDiagnostic(main),
} else { (true, false) => ConciseMessage::PrimaryAnnotation(annotation),
ConciseMessage::Both { main, annotation } (false, false) => ConciseMessage::Both { main, annotation },
(true, true) => ConciseMessage::Empty,
} }
} }
} }
@ -842,10 +878,6 @@ impl Annotation {
pub fn hide_snippet(&mut self, yes: bool) { pub fn hide_snippet(&mut self, yes: bool) {
self.hide_snippet = yes; self.hide_snippet = yes;
} }
pub fn is_primary(&self) -> bool {
self.is_primary
}
} }
/// Tags that can be associated with an annotation. /// Tags that can be associated with an annotation.
@ -1466,12 +1498,28 @@ pub enum DiagnosticFormat {
pub enum ConciseMessage<'a> { pub enum ConciseMessage<'a> {
/// A diagnostic contains a non-empty main message and an empty /// A diagnostic contains a non-empty main message and an empty
/// primary annotation message. /// primary annotation message.
///
/// This strongly suggests that the diagnostic is using the
/// "new" data model.
MainDiagnostic(&'a str), MainDiagnostic(&'a str),
/// A diagnostic contains an empty main message and a non-empty
/// primary annotation message.
///
/// This strongly suggests that the diagnostic is using the
/// "old" data model.
PrimaryAnnotation(&'a str),
/// A diagnostic contains a non-empty main message and a non-empty /// A diagnostic contains a non-empty main message and a non-empty
/// primary annotation message. /// primary annotation message.
///
/// This strongly suggests that the diagnostic is using the
/// "new" data model.
Both { main: &'a str, annotation: &'a str }, Both { main: &'a str, annotation: &'a str },
/// A custom concise message has been provided. /// A diagnostic contains an empty main message and an empty
Custom(&'a str), /// primary annotation message.
///
/// This indicates that the diagnostic is probably using the old
/// model.
Empty,
} }
impl std::fmt::Display for ConciseMessage<'_> { impl std::fmt::Display for ConciseMessage<'_> {
@ -1480,12 +1528,13 @@ impl std::fmt::Display for ConciseMessage<'_> {
ConciseMessage::MainDiagnostic(main) => { ConciseMessage::MainDiagnostic(main) => {
write!(f, "{main}") write!(f, "{main}")
} }
ConciseMessage::PrimaryAnnotation(annotation) => {
write!(f, "{annotation}")
}
ConciseMessage::Both { main, annotation } => { ConciseMessage::Both { main, annotation } => {
write!(f, "{main}: {annotation}") write!(f, "{main}: {annotation}")
} }
ConciseMessage::Custom(message) => { ConciseMessage::Empty => Ok(()),
write!(f, "{message}")
}
} }
} }
} }
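
Putting the new variant and the `Display` impl together, a self-contained sketch of the four-way classification (owned strings instead of the borrowed `&'a str` for brevity):

    enum ConciseMessage {
        MainDiagnostic(String),
        PrimaryAnnotation(String),
        Both { main: String, annotation: String },
        Empty,
    }

    fn classify(main: &str, annotation: &str) -> ConciseMessage {
        match (main.is_empty(), annotation.is_empty()) {
            (false, true) => ConciseMessage::MainDiagnostic(main.to_string()),
            (true, false) => ConciseMessage::PrimaryAnnotation(annotation.to_string()),
            (false, false) => ConciseMessage::Both {
                main: main.to_string(),
                annotation: annotation.to_string(),
            },
            (true, true) => ConciseMessage::Empty,
        }
    }

    impl std::fmt::Display for ConciseMessage {
        fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            match self {
                ConciseMessage::MainDiagnostic(main) => write!(f, "{main}"),
                ConciseMessage::PrimaryAnnotation(ann) => write!(f, "{ann}"),
                ConciseMessage::Both { main, annotation } => write!(f, "{main}: {annotation}"),
                ConciseMessage::Empty => Ok(()),
            }
        }
    }

    fn main() {
        assert_eq!(classify("msg", "").to_string(), "msg");
        assert_eq!(classify("", "ann").to_string(), "ann");
        assert_eq!(classify("msg", "ann").to_string(), "msg: ann");
        assert_eq!(classify("", "").to_string(), "");
    }
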

View File

@ -205,7 +205,6 @@ impl<'a> Resolved<'a> {
struct ResolvedDiagnostic<'a> { struct ResolvedDiagnostic<'a> {
level: AnnotateLevel, level: AnnotateLevel,
id: Option<String>, id: Option<String>,
documentation_url: Option<String>,
message: String, message: String,
annotations: Vec<ResolvedAnnotation<'a>>, annotations: Vec<ResolvedAnnotation<'a>>,
is_fixable: bool, is_fixable: bool,
@ -241,12 +240,12 @@ impl<'a> ResolvedDiagnostic<'a> {
// `DisplaySet::format_annotation` for both cases, but this is a small hack to improve // `DisplaySet::format_annotation` for both cases, but this is a small hack to improve
// the formatting of syntax errors for now. This should also be kept consistent with the // the formatting of syntax errors for now. This should also be kept consistent with the
// concise formatting. // concise formatting.
diag.secondary_code().map_or_else( Some(diag.secondary_code().map_or_else(
|| format!("{id}:", id = diag.inner.id), || format!("{id}:", id = diag.inner.id),
|code| code.to_string(), |code| code.to_string(),
) ))
} else { } else {
diag.inner.id.to_string() Some(diag.inner.id.to_string())
}; };
let level = if config.hide_severity { let level = if config.hide_severity {
@ -257,8 +256,7 @@ impl<'a> ResolvedDiagnostic<'a> {
ResolvedDiagnostic { ResolvedDiagnostic {
level, level,
id: Some(id), id,
documentation_url: diag.documentation_url().map(ToString::to_string),
message: diag.inner.message.as_str().to_string(), message: diag.inner.message.as_str().to_string(),
annotations, annotations,
is_fixable: config.show_fix_status && diag.has_applicable_fix(config), is_fixable: config.show_fix_status && diag.has_applicable_fix(config),
@ -289,7 +287,6 @@ impl<'a> ResolvedDiagnostic<'a> {
ResolvedDiagnostic { ResolvedDiagnostic {
level: diag.inner.severity.to_annotate(), level: diag.inner.severity.to_annotate(),
id: None, id: None,
documentation_url: None,
message: diag.inner.message.as_str().to_string(), message: diag.inner.message.as_str().to_string(),
annotations, annotations,
is_fixable: false, is_fixable: false,
@ -388,7 +385,6 @@ impl<'a> ResolvedDiagnostic<'a> {
RenderableDiagnostic { RenderableDiagnostic {
level: self.level, level: self.level,
id: self.id.as_deref(), id: self.id.as_deref(),
documentation_url: self.documentation_url.as_deref(),
message: &self.message, message: &self.message,
snippets_by_input, snippets_by_input,
is_fixable: self.is_fixable, is_fixable: self.is_fixable,
@ -489,7 +485,6 @@ struct RenderableDiagnostic<'r> {
/// An ID is always present for top-level diagnostics and always absent for /// An ID is always present for top-level diagnostics and always absent for
/// sub-diagnostics. /// sub-diagnostics.
id: Option<&'r str>, id: Option<&'r str>,
documentation_url: Option<&'r str>,
/// The message emitted with the diagnostic, before any snippets are /// The message emitted with the diagnostic, before any snippets are
/// rendered. /// rendered.
message: &'r str, message: &'r str,
@ -524,7 +519,7 @@ impl RenderableDiagnostic<'_> {
.is_fixable(self.is_fixable) .is_fixable(self.is_fixable)
.lineno_offset(self.header_offset); .lineno_offset(self.header_offset);
if let Some(id) = self.id { if let Some(id) = self.id {
message = message.id_with_url(id, self.documentation_url); message = message.id(id);
} }
message.snippets(snippets) message.snippets(snippets)
} }
@ -2881,12 +2876,6 @@ watermelon
self.diag.help(message); self.diag.help(message);
self self
} }
/// Set the documentation URL for the diagnostic.
pub(super) fn documentation_url(mut self, url: impl Into<String>) -> DiagnosticBuilder<'e> {
self.diag.set_documentation_url(Some(url.into()));
self
}
} }
/// A helper builder for tersely populating a `SubDiagnostic`. /// A helper builder for tersely populating a `SubDiagnostic`.
@ -3001,7 +2990,6 @@ def fibonacci(n):
TextSize::from(10), TextSize::from(10),
)))) ))))
.noqa_offset(TextSize::from(7)) .noqa_offset(TextSize::from(7))
.documentation_url("https://docs.astral.sh/ruff/rules/unused-import")
.build(), .build(),
env.builder( env.builder(
"unused-variable", "unused-variable",
@ -3016,13 +3004,11 @@ def fibonacci(n):
TextSize::from(99), TextSize::from(99),
))) )))
.noqa_offset(TextSize::from(94)) .noqa_offset(TextSize::from(94))
.documentation_url("https://docs.astral.sh/ruff/rules/unused-variable")
.build(), .build(),
env.builder("undefined-name", Severity::Error, "Undefined name `a`") env.builder("undefined-name", Severity::Error, "Undefined name `a`")
.primary("undef.py", "1:3", "1:4", "") .primary("undef.py", "1:3", "1:4", "")
.secondary_code("F821") .secondary_code("F821")
.noqa_offset(TextSize::from(3)) .noqa_offset(TextSize::from(3))
.documentation_url("https://docs.astral.sh/ruff/rules/undefined-name")
.build(), .build(),
]; ];
@ -3137,7 +3123,6 @@ if call(foo
TextSize::from(19), TextSize::from(19),
)))) ))))
.noqa_offset(TextSize::from(16)) .noqa_offset(TextSize::from(16))
.documentation_url("https://docs.astral.sh/ruff/rules/unused-import")
.build(), .build(),
env.builder( env.builder(
"unused-import", "unused-import",
@ -3152,7 +3137,6 @@ if call(foo
TextSize::from(40), TextSize::from(40),
)))) ))))
.noqa_offset(TextSize::from(35)) .noqa_offset(TextSize::from(35))
.documentation_url("https://docs.astral.sh/ruff/rules/unused-import")
.build(), .build(),
env.builder( env.builder(
"unused-variable", "unused-variable",
@ -3167,7 +3151,6 @@ if call(foo
TextSize::from(104), TextSize::from(104),
)))) ))))
.noqa_offset(TextSize::from(98)) .noqa_offset(TextSize::from(98))
.documentation_url("https://docs.astral.sh/ruff/rules/unused-variable")
.build(), .build(),
]; ];

View File

@ -1,6 +1,6 @@
use crate::diagnostic::{ use crate::diagnostic::{
Diagnostic, DisplayDiagnosticConfig, Severity, Diagnostic, DisplayDiagnosticConfig, Severity,
stylesheet::{DiagnosticStylesheet, fmt_styled, fmt_with_hyperlink}, stylesheet::{DiagnosticStylesheet, fmt_styled},
}; };
use super::FileResolver; use super::FileResolver;
@ -62,29 +62,18 @@ impl<'a> ConciseRenderer<'a> {
} }
write!(f, "{sep} ")?; write!(f, "{sep} ")?;
} }
if self.config.hide_severity { if self.config.hide_severity {
if let Some(code) = diag.secondary_code() { if let Some(code) = diag.secondary_code() {
write!( write!(
f, f,
"{code} ", "{code} ",
code = fmt_styled( code = fmt_styled(code, stylesheet.secondary_code)
fmt_with_hyperlink(&code, diag.documentation_url(), &stylesheet),
stylesheet.secondary_code
)
)?; )?;
} else { } else {
write!( write!(
f, f,
"{id}: ", "{id}: ",
id = fmt_styled( id = fmt_styled(diag.inner.id.as_str(), stylesheet.secondary_code)
fmt_with_hyperlink(
&diag.inner.id,
diag.documentation_url(),
&stylesheet
),
stylesheet.secondary_code
)
)?; )?;
} }
if self.config.show_fix_status { if self.config.show_fix_status {
@ -104,10 +93,7 @@ impl<'a> ConciseRenderer<'a> {
f, f,
"{severity}[{id}] ", "{severity}[{id}] ",
severity = fmt_styled(severity, severity_style), severity = fmt_styled(severity, severity_style),
id = fmt_styled( id = fmt_styled(diag.id(), stylesheet.emphasis)
fmt_with_hyperlink(&diag.id(), diag.documentation_url(), &stylesheet),
stylesheet.emphasis
)
)?; )?;
} }

View File

@ -49,8 +49,7 @@ impl<'a> FullRenderer<'a> {
.help(stylesheet.help) .help(stylesheet.help)
.line_no(stylesheet.line_no) .line_no(stylesheet.line_no)
.emphasis(stylesheet.emphasis) .emphasis(stylesheet.emphasis)
.none(stylesheet.none) .none(stylesheet.none);
.hyperlink(stylesheet.hyperlink);
for diag in diagnostics { for diag in diagnostics {
let resolved = Resolved::new(self.resolver, diag, self.config); let resolved = Resolved::new(self.resolver, diag, self.config);
@ -113,16 +112,16 @@ impl std::fmt::Display for Diff<'_> {
// `None`, indicating a regular script file, all the lines will be in one "cell" under the // `None`, indicating a regular script file, all the lines will be in one "cell" under the
// `None` key. // `None` key.
let cells = if let Some(notebook_index) = &self.notebook_index { let cells = if let Some(notebook_index) = &self.notebook_index {
let mut last_cell_index = OneIndexed::MIN; let mut last_cell = OneIndexed::MIN;
let mut cells: Vec<(Option<OneIndexed>, TextSize)> = Vec::new(); let mut cells: Vec<(Option<OneIndexed>, TextSize)> = Vec::new();
for cell in notebook_index.iter() { for (row, cell) in notebook_index.iter() {
if cell.cell_index() != last_cell_index { if cell != last_cell {
let offset = source_code.line_start(cell.start_row()); let offset = source_code.line_start(row);
cells.push((Some(last_cell_index), offset)); cells.push((Some(last_cell), offset));
last_cell_index = cell.cell_index(); last_cell = cell;
} }
} }
cells.push((Some(last_cell_index), source_text.text_len())); cells.push((Some(last_cell), source_text.text_len()));
cells cells
} else { } else {
vec![(None, source_text.text_len())] vec![(None, source_text.text_len())]
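
The loop above records one `(cell, end_offset)` pair per notebook cell by watching for changes in the per-row cell index; a toy sketch with plain integers standing in for `OneIndexed` and `TextSize`:

    // rows[i] = cell index owning source row i; line_start[i] = byte offset
    // of row i. Emits (cell, end_offset) pairs, one per cell.
    fn cell_boundaries(rows: &[u32], line_start: &[usize], text_len: usize) -> Vec<(u32, usize)> {
        let mut last_cell = rows.first().copied().unwrap_or(1);
        let mut cells = Vec::new();
        for (row, &cell) in rows.iter().enumerate() {
            if cell != last_cell {
                cells.push((last_cell, line_start[row]));
                last_cell = cell;
            }
        }
        cells.push((last_cell, text_len));
        cells
    }

    fn main() {
        // Three rows in cell 1, two rows in cell 2.
        let rows = [1, 1, 1, 2, 2];
        let line_start = [0, 10, 20, 30, 40];
        assert_eq!(cell_boundaries(&rows, &line_start, 50), vec![(1, 30), (2, 50)]);
    }
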
@ -704,7 +703,52 @@ print()
env.show_fix_status(true); env.show_fix_status(true);
env.fix_applicability(Applicability::DisplayOnly); env.fix_applicability(Applicability::DisplayOnly);
insta::assert_snapshot!(env.render_diagnostics(&diagnostics)); insta::assert_snapshot!(env.render_diagnostics(&diagnostics), @r"
error[unused-import][*]: `os` imported but unused
--> notebook.ipynb:cell 1:2:8
|
1 | # cell 1
2 | import os
| ^^
|
help: Remove unused import: `os`
::: cell 1
1 | # cell 1
- import os
error[unused-import][*]: `math` imported but unused
--> notebook.ipynb:cell 2:2:8
|
1 | # cell 2
2 | import math
| ^^^^
3 |
4 | print('hello world')
|
help: Remove unused import: `math`
::: cell 2
1 | # cell 2
- import math
2 |
3 | print('hello world')
error[unused-variable][*]: Local variable `x` is assigned to but never used
--> notebook.ipynb:cell 3:4:5
|
2 | def foo():
3 | print()
4 | x = 1
| ^
|
help: Remove assignment to unused variable `x`
::: cell 3
1 | # cell 3
2 | def foo():
3 | print()
- x = 1
4 |
note: This is an unsafe fix and may change runtime behavior
");
} }
#[test] #[test]
@ -724,7 +768,31 @@ print()
} }
*fix = Fix::unsafe_edits(edits.remove(0), edits); *fix = Fix::unsafe_edits(edits.remove(0), edits);
insta::assert_snapshot!(env.render(&diagnostic)); insta::assert_snapshot!(env.render(&diagnostic), @r"
error[unused-import][*]: `os` imported but unused
--> notebook.ipynb:cell 1:2:8
|
1 | # cell 1
2 | import os
| ^^
|
help: Remove unused import: `os`
::: cell 1
1 | # cell 1
- import os
::: cell 2
1 | # cell 2
- import math
2 |
3 | print('hello world')
::: cell 3
1 | # cell 3
2 | def foo():
3 | print()
- x = 1
4 |
note: This is an unsafe fix and may change runtime behavior
");
} }
/// Carriage return (`\r`) is a valid line-ending in Python, so we should normalize this to a /// Carriage return (`\r`) is a valid line-ending in Python, so we should normalize this to a

View File

@ -100,7 +100,7 @@ pub(super) fn diagnostic_to_json<'a>(
if config.preview { if config.preview {
JsonDiagnostic { JsonDiagnostic {
code: diagnostic.secondary_code_or_id(), code: diagnostic.secondary_code_or_id(),
url: diagnostic.documentation_url(), url: diagnostic.to_ruff_url(),
message: diagnostic.body(), message: diagnostic.body(),
fix, fix,
cell: notebook_cell_index, cell: notebook_cell_index,
@ -112,7 +112,7 @@ pub(super) fn diagnostic_to_json<'a>(
} else { } else {
JsonDiagnostic { JsonDiagnostic {
code: diagnostic.secondary_code_or_id(), code: diagnostic.secondary_code_or_id(),
url: diagnostic.documentation_url(), url: diagnostic.to_ruff_url(),
message: diagnostic.body(), message: diagnostic.body(),
fix, fix,
cell: notebook_cell_index, cell: notebook_cell_index,
@ -228,7 +228,7 @@ pub(crate) struct JsonDiagnostic<'a> {
location: Option<JsonLocation>, location: Option<JsonLocation>,
message: &'a str, message: &'a str,
noqa_row: Option<OneIndexed>, noqa_row: Option<OneIndexed>,
url: Option<&'a str>, url: Option<String>,
} }
#[derive(Serialize)] #[derive(Serialize)]
@ -294,10 +294,7 @@ mod tests {
env.format(DiagnosticFormat::Json); env.format(DiagnosticFormat::Json);
env.preview(false); env.preview(false);
let diag = env let diag = env.err().build();
.err()
.documentation_url("https://docs.astral.sh/ruff/rules/test-diagnostic")
.build();
insta::assert_snapshot!( insta::assert_snapshot!(
env.render(&diag), env.render(&diag),
@ -331,10 +328,7 @@ mod tests {
env.format(DiagnosticFormat::Json); env.format(DiagnosticFormat::Json);
env.preview(true); env.preview(true);
let diag = env let diag = env.err().build();
.err()
.documentation_url("https://docs.astral.sh/ruff/rules/test-diagnostic")
.build();
insta::assert_snapshot!( insta::assert_snapshot!(
env.render(&diag), env.render(&diag),

View File

@ -82,7 +82,7 @@ fn diagnostic_to_rdjson<'a>(
value: diagnostic value: diagnostic
.secondary_code() .secondary_code()
.map_or_else(|| diagnostic.name(), |code| code.as_str()), .map_or_else(|| diagnostic.name(), |code| code.as_str()),
url: diagnostic.documentation_url(), url: diagnostic.to_ruff_url(),
}, },
suggestions: rdjson_suggestions( suggestions: rdjson_suggestions(
edits, edits,
@ -182,7 +182,7 @@ impl RdjsonRange {
#[derive(Serialize)] #[derive(Serialize)]
struct RdjsonCode<'a> { struct RdjsonCode<'a> {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
url: Option<&'a str>, url: Option<String>,
value: &'a str, value: &'a str,
} }
@ -217,10 +217,7 @@ mod tests {
env.format(DiagnosticFormat::Rdjson); env.format(DiagnosticFormat::Rdjson);
env.preview(false); env.preview(false);
let diag = env let diag = env.err().build();
.err()
.documentation_url("https://docs.astral.sh/ruff/rules/test-diagnostic")
.build();
insta::assert_snapshot!(env.render(&diag)); insta::assert_snapshot!(env.render(&diag));
} }
@ -231,10 +228,7 @@ mod tests {
env.format(DiagnosticFormat::Rdjson); env.format(DiagnosticFormat::Rdjson);
env.preview(true); env.preview(true);
let diag = env let diag = env.err().build();
.err()
.documentation_url("https://docs.astral.sh/ruff/rules/test-diagnostic")
.build();
insta::assert_snapshot!(env.render(&diag)); insta::assert_snapshot!(env.render(&diag));
} }

View File

@ -1,48 +0,0 @@
---
source: crates/ruff_db/src/diagnostic/render/full.rs
expression: env.render_diagnostics(&diagnostics)
---
error[unused-import][*]: `os` imported but unused
--> notebook.ipynb:cell 1:2:8
|
1 | # cell 1
2 | import os
| ^^
|
help: Remove unused import: `os`
::: cell 1
1 | # cell 1
- import os
error[unused-import][*]: `math` imported but unused
--> notebook.ipynb:cell 2:2:8
|
1 | # cell 2
2 | import math
| ^^^^
3 |
4 | print('hello world')
|
help: Remove unused import: `math`
::: cell 2
1 | # cell 2
- import math
2 |
3 | print('hello world')
error[unused-variable][*]: Local variable `x` is assigned to but never used
--> notebook.ipynb:cell 3:4:5
|
2 | def foo():
3 | print()
4 | x = 1
| ^
|
help: Remove assignment to unused variable `x`
::: cell 3
1 | # cell 3
2 | def foo():
3 | print()
- x = 1
4 |
note: This is an unsafe fix and may change runtime behavior

View File

@ -1,27 +0,0 @@
---
source: crates/ruff_db/src/diagnostic/render/full.rs
expression: env.render(&diagnostic)
---
error[unused-import][*]: `os` imported but unused
--> notebook.ipynb:cell 1:2:8
|
1 | # cell 1
2 | import os
| ^^
|
help: Remove unused import: `os`
::: cell 1
1 | # cell 1
- import os
::: cell 2
1 | # cell 2
- import math
2 |
3 | print('hello world')
::: cell 3
1 | # cell 3
2 | def foo():
3 | print()
- x = 1
4 |
note: This is an unsafe fix and may change runtime behavior

View File

@ -31,43 +31,6 @@ where
FmtStyled { content, style } FmtStyled { content, style }
} }
pub(super) fn fmt_with_hyperlink<'a, T>(
content: T,
url: Option<&'a str>,
stylesheet: &DiagnosticStylesheet,
) -> impl std::fmt::Display + 'a
where
T: std::fmt::Display + 'a,
{
struct FmtHyperlink<'a, T> {
content: T,
url: Option<&'a str>,
}
impl<T> std::fmt::Display for FmtHyperlink<'_, T>
where
T: std::fmt::Display,
{
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
if let Some(url) = self.url {
write!(f, "\x1B]8;;{url}\x1B\\")?;
}
self.content.fmt(f)?;
if self.url.is_some() {
f.write_str("\x1B]8;;\x1B\\")?;
}
Ok(())
}
}
let url = if stylesheet.hyperlink { url } else { None };
FmtHyperlink { content, url }
}
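
The escape sequences in the removed helper are OSC 8 terminal hyperlinks: `ESC ] 8 ; ; <url> ESC \` opens a link, and the same sequence with an empty URL closes it. A minimal standalone sketch:

    // OSC 8 hyperlink: ESC ] 8 ; ; <url> ST <text> ESC ] 8 ; ; ST
    fn hyperlink(text: &str, url: &str) -> String {
        format!("\x1B]8;;{url}\x1B\\{text}\x1B]8;;\x1B\\")
    }

    fn main() {
        // On an OSC 8-capable terminal this renders as a clickable rule name.
        println!(
            "{}",
            hyperlink("unused-import", "https://docs.astral.sh/ruff/rules/unused-import")
        );
    }
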
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct DiagnosticStylesheet { pub struct DiagnosticStylesheet {
pub(crate) error: Style, pub(crate) error: Style,
@ -84,7 +47,6 @@ pub struct DiagnosticStylesheet {
pub(crate) deletion: Style, pub(crate) deletion: Style,
pub(crate) insertion_line_no: Style, pub(crate) insertion_line_no: Style,
pub(crate) deletion_line_no: Style, pub(crate) deletion_line_no: Style,
pub(crate) hyperlink: bool,
} }
impl Default for DiagnosticStylesheet { impl Default for DiagnosticStylesheet {
@ -97,8 +59,6 @@ impl DiagnosticStylesheet {
/// Default terminal styling /// Default terminal styling
pub fn styled() -> Self { pub fn styled() -> Self {
let bright_blue = AnsiColor::BrightBlue.on_default(); let bright_blue = AnsiColor::BrightBlue.on_default();
let hyperlink = supports_hyperlinks::supports_hyperlinks();
Self { Self {
error: AnsiColor::BrightRed.on_default().effects(Effects::BOLD), error: AnsiColor::BrightRed.on_default().effects(Effects::BOLD),
warning: AnsiColor::Yellow.on_default().effects(Effects::BOLD), warning: AnsiColor::Yellow.on_default().effects(Effects::BOLD),
@ -114,7 +74,6 @@ impl DiagnosticStylesheet {
deletion: AnsiColor::Red.on_default(), deletion: AnsiColor::Red.on_default(),
insertion_line_no: AnsiColor::Green.on_default().effects(Effects::BOLD), insertion_line_no: AnsiColor::Green.on_default().effects(Effects::BOLD),
deletion_line_no: AnsiColor::Red.on_default().effects(Effects::BOLD), deletion_line_no: AnsiColor::Red.on_default().effects(Effects::BOLD),
hyperlink,
} }
} }
@ -134,7 +93,6 @@ impl DiagnosticStylesheet {
deletion: Style::new(), deletion: Style::new(),
insertion_line_no: Style::new(), insertion_line_no: Style::new(),
deletion_line_no: Style::new(), deletion_line_no: Style::new(),
hyperlink: false,
} }
} }
} }

View File

@ -470,17 +470,6 @@ impl File {
self.source_type(db).is_stub() self.source_type(db).is_stub()
} }
/// Returns `true` if the file is an `__init__.pyi`
pub fn is_package_stub(self, db: &dyn Db) -> bool {
self.path(db).as_str().ends_with("__init__.pyi")
}
/// Returns `true` if the file is an `__init__.pyi`
pub fn is_package(self, db: &dyn Db) -> bool {
let path = self.path(db).as_str();
path.ends_with("__init__.pyi") || path.ends_with("__init__.py")
}
pub fn source_type(self, db: &dyn Db) -> PySourceType { pub fn source_type(self, db: &dyn Db) -> PySourceType {
match self.path(db) { match self.path(db) {
FilePath::System(path) => path FilePath::System(path) => path

View File

@ -21,11 +21,7 @@ use crate::source::source_text;
/// reflected in the changed AST offsets. /// reflected in the changed AST offsets.
/// The other reason is that Ruff's AST doesn't implement `Eq` which Salsa requires /// The other reason is that Ruff's AST doesn't implement `Eq` which Salsa requires
/// for determining if a query result is unchanged. /// for determining if a query result is unchanged.
/// #[salsa::tracked(returns(ref), no_eq, heap_size=ruff_memory_usage::heap_size)]
/// The LRU capacity of 200 was picked without any empirical evidence that it's optimal,
/// instead it's a wild guess that it should be unlikely that incremental changes involve
/// more than 200 modules. Parsed ASTs within the same revision are never evicted by Salsa.
#[salsa::tracked(returns(ref), no_eq, heap_size=ruff_memory_usage::heap_size, lru=200)]
pub fn parsed_module(db: &dyn Db, file: File) -> ParsedModule { pub fn parsed_module(db: &dyn Db, file: File) -> ParsedModule {
let _span = tracing::trace_span!("parsed_module", ?file).entered(); let _span = tracing::trace_span!("parsed_module", ?file).entered();
@ -96,9 +92,14 @@ impl ParsedModule {
self.inner.store(None); self.inner.store(None);
} }
/// Returns the file to which this module belongs. /// Returns the pointer address of this [`ParsedModule`].
pub fn file(&self) -> File { ///
self.file /// The pointer uniquely identifies the module within the current Salsa revision,
/// regardless of whether particular [`ParsedModuleRef`] instances are garbage collected.
pub fn addr(&self) -> usize {
// Note that the outer `Arc` in `inner` is stable across garbage collection, while the inner
// `Arc` within the `ArcSwap` may change.
Arc::as_ptr(&self.inner).addr()
} }
} }
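
A small sketch of the pointer-identity idea behind `addr`: identity comes from the outer `Arc` allocation, which stays stable even when the inner value is swapped (plain nested `Arc`s here instead of the real `ArcSwap`):

    use std::sync::Arc;

    fn main() {
        let outer = Arc::new(Arc::new(String::from("parsed module")));
        // `pointer::addr()` (a strict-provenance API, stable in recent Rust)
        // yields the raw address of the outer allocation.
        let addr = Arc::as_ptr(&outer).addr();
        let clone = Arc::clone(&outer);
        assert_eq!(Arc::as_ptr(&clone).addr(), addr);
    }
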

View File

@ -7,7 +7,6 @@ use ruff_source_file::LineIndex;
use crate::Db; use crate::Db;
use crate::files::{File, FilePath}; use crate::files::{File, FilePath};
use crate::system::System;
/// Reads the source text of a python text file (must be valid UTF8) or notebook. /// Reads the source text of a python text file (must be valid UTF8) or notebook.
#[salsa::tracked(heap_size=ruff_memory_usage::heap_size)] #[salsa::tracked(heap_size=ruff_memory_usage::heap_size)]
@ -16,7 +15,7 @@ pub fn source_text(db: &dyn Db, file: File) -> SourceText {
let _span = tracing::trace_span!("source_text", file = %path).entered(); let _span = tracing::trace_span!("source_text", file = %path).entered();
let mut read_error = None; let mut read_error = None;
let kind = if is_notebook(db.system(), path) { let kind = if is_notebook(file.path(db)) {
file.read_to_notebook(db) file.read_to_notebook(db)
.unwrap_or_else(|error| { .unwrap_or_else(|error| {
tracing::debug!("Failed to read notebook '{path}': {error}"); tracing::debug!("Failed to read notebook '{path}': {error}");
@ -41,17 +40,18 @@ pub fn source_text(db: &dyn Db, file: File) -> SourceText {
} }
} }
fn is_notebook(system: &dyn System, path: &FilePath) -> bool { fn is_notebook(path: &FilePath) -> bool {
let source_type = match path { match path {
FilePath::System(path) => system.source_type(path), FilePath::System(system) => system.extension().is_some_and(|extension| {
FilePath::SystemVirtual(system_virtual) => system.virtual_path_source_type(system_virtual), PySourceType::try_from_extension(extension) == Some(PySourceType::Ipynb)
FilePath::Vendored(_) => return false, }),
}; FilePath::SystemVirtual(system_virtual) => {
system_virtual.extension().is_some_and(|extension| {
let with_extension_fallback = PySourceType::try_from_extension(extension) == Some(PySourceType::Ipynb)
source_type.or_else(|| PySourceType::try_from_extension(path.extension()?)); })
}
with_extension_fallback == Some(PySourceType::Ipynb) FilePath::Vendored(_) => false,
}
} }
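
The 0.14.2 side reduces notebook detection to a pure extension check; a toy sketch of that logic using `std::path` in place of `FilePath` (the real matching goes through `PySourceType::try_from_extension`):

    fn is_notebook_path(path: &str) -> bool {
        std::path::Path::new(path)
            .extension()
            .and_then(|ext| ext.to_str())
            .is_some_and(|ext| ext == "ipynb")
    }

    fn main() {
        assert!(is_notebook_path("analysis/notebook.ipynb"));
        assert!(!is_notebook_path("src/module.py"));
        assert!(!is_notebook_path("vendored.zip"));
    }
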
/// The source text of a file containing python code. /// The source text of a file containing python code.

View File

@ -9,7 +9,6 @@ pub use os::OsSystem;
use filetime::FileTime; use filetime::FileTime;
use ruff_notebook::{Notebook, NotebookError}; use ruff_notebook::{Notebook, NotebookError};
use ruff_python_ast::PySourceType;
use std::error::Error; use std::error::Error;
use std::fmt::{Debug, Formatter}; use std::fmt::{Debug, Formatter};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@ -17,11 +16,12 @@ use std::{fmt, io};
pub use test::{DbWithTestSystem, DbWithWritableSystem, InMemorySystem, TestSystem}; pub use test::{DbWithTestSystem, DbWithWritableSystem, InMemorySystem, TestSystem};
use walk_directory::WalkDirectoryBuilder; use walk_directory::WalkDirectoryBuilder;
use crate::file_revision::FileRevision;
pub use self::path::{ pub use self::path::{
DeduplicatedNestedPathsIter, SystemPath, SystemPathBuf, SystemVirtualPath, DeduplicatedNestedPathsIter, SystemPath, SystemPathBuf, SystemVirtualPath,
SystemVirtualPathBuf, deduplicate_nested_paths, SystemVirtualPathBuf, deduplicate_nested_paths,
}; };
use crate::file_revision::FileRevision;
mod memory_fs; mod memory_fs;
#[cfg(feature = "os")] #[cfg(feature = "os")]
@ -66,35 +66,6 @@ pub trait System: Debug + Sync + Send {
/// See [dunce::canonicalize] for more information. /// See [dunce::canonicalize] for more information.
fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf>; fn canonicalize_path(&self, path: &SystemPath) -> Result<SystemPathBuf>;
/// Returns the source type for `path` if known or `None`.
///
/// The default is to always return `None`, assuming the system
/// has no additional information and that the caller should
/// rely on the file extension instead.
///
/// This is primarily used for the LSP integration to respect
/// the chosen language (or the fact that it is a notebook) in
/// the editor.
fn source_type(&self, path: &SystemPath) -> Option<PySourceType> {
let _ = path;
None
}
/// Returns the source type for `path` if known or `None`.
///
/// The default is to always return `None`, assuming the system
/// has no additional information and that the caller should
/// rely on the file extension instead.
///
/// This is primarily used for the LSP integration to respect
/// the chosen language (or the fact that it is a notebook) in
/// the editor.
fn virtual_path_source_type(&self, path: &SystemVirtualPath) -> Option<PySourceType> {
let _ = path;
None
}
/// Reads the content of the file at `path` into a [`String`]. /// Reads the content of the file at `path` into a [`String`].
fn read_to_string(&self, path: &SystemPath) -> Result<String>; fn read_to_string(&self, path: &SystemPath) -> Result<String>;

View File

@ -200,12 +200,7 @@ impl System for OsSystem {
/// The walker ignores files according to [`ignore::WalkBuilder::standard_filters`] /// The walker ignores files according to [`ignore::WalkBuilder::standard_filters`]
/// when setting [`WalkDirectoryBuilder::standard_filters`] to true. /// when setting [`WalkDirectoryBuilder::standard_filters`] to true.
fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder { fn walk_directory(&self, path: &SystemPath) -> WalkDirectoryBuilder {
WalkDirectoryBuilder::new( WalkDirectoryBuilder::new(path, OsDirectoryWalker {})
path,
OsDirectoryWalker {
cwd: self.current_directory().to_path_buf(),
},
)
} }
fn glob( fn glob(
@ -459,9 +454,7 @@ struct ListedDirectory {
} }
#[derive(Debug)] #[derive(Debug)]
struct OsDirectoryWalker { struct OsDirectoryWalker;
cwd: SystemPathBuf,
}
impl DirectoryWalker for OsDirectoryWalker { impl DirectoryWalker for OsDirectoryWalker {
fn walk( fn walk(
@ -480,7 +473,6 @@ impl DirectoryWalker for OsDirectoryWalker {
}; };
let mut builder = ignore::WalkBuilder::new(first.as_std_path()); let mut builder = ignore::WalkBuilder::new(first.as_std_path());
builder.current_dir(self.cwd.as_std_path());
builder.standard_filters(standard_filters); builder.standard_filters(standard_filters);
builder.hidden(hidden); builder.hidden(hidden);

View File

@ -667,13 +667,6 @@ impl Deref for SystemPathBuf {
} }
} }
impl AsRef<Path> for SystemPathBuf {
#[inline]
fn as_ref(&self) -> &Path {
self.0.as_std_path()
}
}
impl<P: AsRef<SystemPath>> FromIterator<P> for SystemPathBuf { impl<P: AsRef<SystemPath>> FromIterator<P> for SystemPathBuf {
fn from_iter<I: IntoIterator<Item = P>>(iter: I) -> Self { fn from_iter<I: IntoIterator<Item = P>>(iter: I) -> Self {
let mut buf = SystemPathBuf::new(); let mut buf = SystemPathBuf::new();
@ -730,11 +723,10 @@ impl ruff_cache::CacheKey for SystemPathBuf {
/// A slice of a virtual path on [`System`](super::System) (akin to [`str`]). /// A slice of a virtual path on [`System`](super::System) (akin to [`str`]).
#[repr(transparent)] #[repr(transparent)]
#[derive(Eq, PartialEq, Hash, PartialOrd, Ord)]
pub struct SystemVirtualPath(str); pub struct SystemVirtualPath(str);
impl SystemVirtualPath { impl SystemVirtualPath {
pub const fn new(path: &str) -> &SystemVirtualPath { pub fn new(path: &str) -> &SystemVirtualPath {
// SAFETY: SystemVirtualPath is marked as #[repr(transparent)] so the conversion from a // SAFETY: SystemVirtualPath is marked as #[repr(transparent)] so the conversion from a
// *const str to a *const SystemVirtualPath is valid. // *const str to a *const SystemVirtualPath is valid.
unsafe { &*(path as *const str as *const SystemVirtualPath) } unsafe { &*(path as *const str as *const SystemVirtualPath) }
@ -775,8 +767,8 @@ pub struct SystemVirtualPathBuf(String);
impl SystemVirtualPathBuf { impl SystemVirtualPathBuf {
#[inline] #[inline]
pub const fn as_path(&self) -> &SystemVirtualPath { pub fn as_path(&self) -> &SystemVirtualPath {
SystemVirtualPath::new(self.0.as_str()) SystemVirtualPath::new(&self.0)
} }
} }
@ -860,12 +852,6 @@ impl ruff_cache::CacheKey for SystemVirtualPathBuf {
} }
} }
impl Borrow<SystemVirtualPath> for SystemVirtualPathBuf {
fn borrow(&self) -> &SystemVirtualPath {
self.as_path()
}
}
/// Deduplicates identical paths and removes nested paths. /// Deduplicates identical paths and removes nested paths.
/// ///
/// # Examples /// # Examples

View File

@ -62,7 +62,7 @@ fn generate_set(output: &mut String, set: Set, parents: &mut Vec<Set>) {
generate_set( generate_set(
output, output,
Set::Named { Set::Named {
name: set_name.clone(), name: set_name.to_string(),
set: *sub_set, set: *sub_set,
}, },
parents, parents,

View File

@ -104,7 +104,7 @@ fn generate_set(output: &mut String, set: Set, parents: &mut Vec<Set>) {
generate_set( generate_set(
output, output,
Set::Named { Set::Named {
name: set_name.clone(), name: set_name.to_string(),
set: *sub_set, set: *sub_set,
}, },
parents, parents,
@ -144,8 +144,8 @@ fn emit_field(output: &mut String, name: &str, field: &OptionField, parents: &[S
output.push('\n'); output.push('\n');
if let Some(deprecated) = &field.deprecated { if let Some(deprecated) = &field.deprecated {
output.push_str("!!! warning \"Deprecated\"\n"); output.push_str("> [!WARN] \"Deprecated\"\n");
output.push_str(" This option has been deprecated"); output.push_str("> This option has been deprecated");
if let Some(since) = deprecated.since { if let Some(since) = deprecated.since {
write!(output, " in {since}").unwrap(); write!(output, " in {since}").unwrap();
@ -166,9 +166,8 @@ fn emit_field(output: &mut String, name: &str, field: &OptionField, parents: &[S
output.push('\n'); output.push('\n');
let _ = writeln!(output, "**Type**: `{}`", field.value_type); let _ = writeln!(output, "**Type**: `{}`", field.value_type);
output.push('\n'); output.push('\n');
output.push_str("**Example usage**:\n\n"); output.push_str("**Example usage** (`pyproject.toml`):\n\n");
output.push_str(&format_example( output.push_str(&format_example(
"pyproject.toml",
&format_header( &format_header(
field.scope, field.scope,
field.example, field.example,
@ -180,11 +179,11 @@ fn emit_field(output: &mut String, name: &str, field: &OptionField, parents: &[S
output.push('\n'); output.push('\n');
} }
fn format_example(title: &str, header: &str, content: &str) -> String { fn format_example(header: &str, content: &str) -> String {
if header.is_empty() { if header.is_empty() {
format!("```toml title=\"{title}\"\n{content}\n```\n",) format!("```toml\n{content}\n```\n",)
} else { } else {
format!("```toml title=\"{title}\"\n{header}\n{content}\n```\n",) format!("```toml\n{header}\n{content}\n```\n",)
} }
} }
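
To see what the two branches emit, a quick reconstruction of the main-branch variant as a standalone function, with a hypothetical option example:

    fn format_example(title: &str, header: &str, content: &str) -> String {
        if header.is_empty() {
            format!("```toml title=\"{title}\"\n{content}\n```\n")
        } else {
            format!("```toml title=\"{title}\"\n{header}\n{content}\n```\n")
        }
    }

    fn main() {
        // Hypothetical option docs: a scoped header plus the example body.
        let block = format_example("pyproject.toml", "[tool.ruff]", "line-length = 100");
        assert!(block.starts_with("```toml title=\"pyproject.toml\"\n[tool.ruff]"));
    }
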

View File

@ -39,7 +39,7 @@ impl Edit {
/// Creates an edit that replaces the content in `range` with `content`. /// Creates an edit that replaces the content in `range` with `content`.
pub fn range_replacement(content: String, range: TextRange) -> Self { pub fn range_replacement(content: String, range: TextRange) -> Self {
debug_assert!(!content.is_empty(), "Prefer `Edit::deletion`"); debug_assert!(!content.is_empty(), "Prefer `Fix::deletion`");
Self { Self {
content: Some(Box::from(content)), content: Some(Box::from(content)),

View File

@ -149,10 +149,6 @@ impl Fix {
&self.edits &self.edits
} }
pub fn into_edits(self) -> Vec<Edit> {
self.edits
}
/// Return the [`Applicability`] of the [`Fix`]. /// Return the [`Applicability`] of the [`Fix`].
pub fn applicability(&self) -> Applicability { pub fn applicability(&self) -> Applicability {
self.applicability self.applicability

View File

@ -1006,7 +1006,7 @@ impl<Context> std::fmt::Debug for Align<'_, Context> {
/// Block indents indent a block of code, such as in a function body, and therefore insert a line /// Block indents indent a block of code, such as in a function body, and therefore insert a line
/// break before and after the content. /// break before and after the content.
/// ///
/// Doesn't create an indentation if the passed in content is empty. /// Doesn't create an indentation if the passed in content is [`FormatElement.is_empty`].
/// ///
/// # Examples /// # Examples
/// ///

View File

@ -337,7 +337,7 @@ macro_rules! best_fitting {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use crate::prelude::*; use crate::prelude::*;
use crate::{FormatState, SimpleFormatOptions, VecBuffer}; use crate::{FormatState, SimpleFormatOptions, VecBuffer, write};
struct TestFormat; struct TestFormat;
@ -385,8 +385,8 @@ mod tests {
#[test] #[test]
fn best_fitting_variants_print_as_lists() { fn best_fitting_variants_print_as_lists() {
use crate::Formatted;
use crate::prelude::*; use crate::prelude::*;
use crate::{Formatted, format, format_args};
// The second variant below should be selected when printing at a width of 30 // The second variant below should be selected when printing at a width of 30
let formatted_best_fitting = format!( let formatted_best_fitting = format!(

View File

@ -14,21 +14,14 @@ pub(crate) struct Collector<'a> {
string_imports: StringImports, string_imports: StringImports,
/// The collected imports from the Python AST. /// The collected imports from the Python AST.
imports: Vec<CollectedImport>, imports: Vec<CollectedImport>,
/// Whether to detect type checking imports
type_checking_imports: bool,
} }
impl<'a> Collector<'a> { impl<'a> Collector<'a> {
pub(crate) fn new( pub(crate) fn new(module_path: Option<&'a [String]>, string_imports: StringImports) -> Self {
module_path: Option<&'a [String]>,
string_imports: StringImports,
type_checking_imports: bool,
) -> Self {
Self { Self {
module_path, module_path,
string_imports, string_imports,
imports: Vec::new(), imports: Vec::new(),
type_checking_imports,
} }
} }
@ -98,25 +91,10 @@ impl<'ast> SourceOrderVisitor<'ast> for Collector<'_> {
} }
} }
} }
Stmt::If(ast::StmtIf {
test,
body,
elif_else_clauses,
range: _,
node_index: _,
}) => {
// Skip TYPE_CHECKING blocks if not requested
if self.type_checking_imports || !is_type_checking_condition(test) {
self.visit_body(body);
}
for clause in elif_else_clauses {
self.visit_elif_else_clause(clause);
}
}
Stmt::FunctionDef(_) Stmt::FunctionDef(_)
| Stmt::ClassDef(_) | Stmt::ClassDef(_)
| Stmt::While(_) | Stmt::While(_)
| Stmt::If(_)
| Stmt::With(_) | Stmt::With(_)
| Stmt::Match(_) | Stmt::Match(_)
| Stmt::Try(_) | Stmt::Try(_)
@ -174,30 +152,6 @@ impl<'ast> SourceOrderVisitor<'ast> for Collector<'_> {
} }
} }
/// Check if an expression is a `TYPE_CHECKING` condition.
///
/// Returns `true` for:
/// - `TYPE_CHECKING`
/// - `typing.TYPE_CHECKING`
///
/// NOTE: Aliased `TYPE_CHECKING`, i.e. `import typing.TYPE_CHECKING as TC; if TC: ...`
/// will not be detected!
fn is_type_checking_condition(expr: &Expr) -> bool {
match expr {
// `if TYPE_CHECKING:`
Expr::Name(ast::ExprName { id, .. }) => id.as_str() == "TYPE_CHECKING",
// `if typing.TYPE_CHECKING:`
Expr::Attribute(ast::ExprAttribute { value, attr, .. }) => {
attr.as_str() == "TYPE_CHECKING"
&& matches!(
value.as_ref(),
Expr::Name(ast::ExprName { id, .. }) if id.as_str() == "typing"
)
}
_ => false,
}
}
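
A self-contained sketch of the same matching logic over a toy expression AST (the real code matches `ruff_python_ast` nodes):

    // Toy expression AST mirroring the two shapes matched above.
    enum Expr {
        Name(String),
        Attribute { value: Box<Expr>, attr: String },
    }

    fn is_type_checking_condition(expr: &Expr) -> bool {
        match expr {
            // `if TYPE_CHECKING:`
            Expr::Name(id) => id == "TYPE_CHECKING",
            // `if typing.TYPE_CHECKING:`
            Expr::Attribute { value, attr } => {
                attr == "TYPE_CHECKING"
                    && matches!(value.as_ref(), Expr::Name(id) if id == "typing")
            }
        }
    }

    fn main() {
        let bare = Expr::Name("TYPE_CHECKING".to_string());
        let qualified = Expr::Attribute {
            value: Box::new(Expr::Name("typing".to_string())),
            attr: "TYPE_CHECKING".to_string(),
        };
        assert!(is_type_checking_condition(&bare));
        assert!(is_type_checking_condition(&qualified));
    }
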
#[derive(Debug)] #[derive(Debug)]
pub(crate) enum CollectedImport { pub(crate) enum CollectedImport {
/// The import was part of an `import` statement. /// The import was part of an `import` statement.

View File

@ -3,9 +3,8 @@ use std::collections::{BTreeMap, BTreeSet};
use anyhow::Result; use anyhow::Result;
use ruff_db::system::{SystemPath, SystemPathBuf}; use ruff_db::system::{SystemPath, SystemPathBuf};
use ruff_python_ast::PySourceType;
use ruff_python_ast::helpers::to_module_path; use ruff_python_ast::helpers::to_module_path;
use ruff_python_parser::{ParseOptions, parse}; use ruff_python_parser::{Mode, ParseOptions, parse};
use crate::collector::Collector; use crate::collector::Collector;
pub use crate::db::ModuleDb; pub use crate::db::ModuleDb;
@ -25,31 +24,25 @@ impl ModuleImports {
/// Detect the [`ModuleImports`] for a given Python file. /// Detect the [`ModuleImports`] for a given Python file.
pub fn detect( pub fn detect(
db: &ModuleDb, db: &ModuleDb,
source: &str,
source_type: PySourceType,
path: &SystemPath, path: &SystemPath,
package: Option<&SystemPath>, package: Option<&SystemPath>,
string_imports: StringImports, string_imports: StringImports,
type_checking_imports: bool,
) -> Result<Self> { ) -> Result<Self> {
// Parse the source code. // Read and parse the source code.
let parsed = parse(source, ParseOptions::from(source_type))?; let source = std::fs::read_to_string(path)?;
let parsed = parse(&source, ParseOptions::from(Mode::Module))?;
let module_path = let module_path =
package.and_then(|package| to_module_path(package.as_std_path(), path.as_std_path())); package.and_then(|package| to_module_path(package.as_std_path(), path.as_std_path()));
// Collect the imports. // Collect the imports.
let imports = Collector::new( let imports =
module_path.as_deref(), Collector::new(module_path.as_deref(), string_imports).collect(parsed.syntax());
string_imports,
type_checking_imports,
)
.collect(parsed.syntax());
// Resolve the imports. // Resolve the imports.
let mut resolved_imports = ModuleImports::default(); let mut resolved_imports = ModuleImports::default();
for import in imports { for import in imports {
for resolved in Resolver::new(db, path).resolve(import) { for resolved in Resolver::new(db).resolve(import) {
if let Some(path) = resolved.as_system_path() { if let Some(path) = resolved.as_system_path() {
resolved_imports.insert(path.to_path_buf()); resolved_imports.insert(path.to_path_buf());
} }

View File

@ -1,9 +1,5 @@
use ruff_db::files::{File, FilePath, system_path_to_file}; use ruff_db::files::FilePath;
use ruff_db::system::SystemPath; use ty_python_semantic::{ModuleName, resolve_module, resolve_real_module};
use ty_python_semantic::{
ModuleName, resolve_module, resolve_module_confident, resolve_real_module,
resolve_real_module_confident,
};
use crate::ModuleDb; use crate::ModuleDb;
use crate::collector::CollectedImport; use crate::collector::CollectedImport;
@ -11,15 +7,12 @@ use crate::collector::CollectedImport;
/// Collect all imports for a given Python file. /// Collect all imports for a given Python file.
pub(crate) struct Resolver<'a> { pub(crate) struct Resolver<'a> {
db: &'a ModuleDb, db: &'a ModuleDb,
file: Option<File>,
} }
impl<'a> Resolver<'a> { impl<'a> Resolver<'a> {
/// Initialize a [`Resolver`] with a given [`ModuleDb`]. /// Initialize a [`Resolver`] with a given [`ModuleDb`].
pub(crate) fn new(db: &'a ModuleDb, path: &SystemPath) -> Self { pub(crate) fn new(db: &'a ModuleDb) -> Self {
// If we know the importing file we can potentially resolve more imports Self { db }
let file = system_path_to_file(db, path).ok();
Self { db, file }
} }
/// Resolve the [`CollectedImport`] into a [`FilePath`]. /// Resolve the [`CollectedImport`] into a [`FilePath`].
@ -77,21 +70,13 @@ impl<'a> Resolver<'a> {
/// Resolves a module name to a module. /// Resolves a module name to a module.
pub(crate) fn resolve_module(&self, module_name: &ModuleName) -> Option<&'a FilePath> { pub(crate) fn resolve_module(&self, module_name: &ModuleName) -> Option<&'a FilePath> {
let module = if let Some(file) = self.file { let module = resolve_module(self.db, module_name)?;
resolve_module(self.db, file, module_name)?
} else {
resolve_module_confident(self.db, module_name)?
};
Some(module.file(self.db)?.path(self.db)) Some(module.file(self.db)?.path(self.db))
} }
/// Resolves a module name to a module (stubs not allowed). /// Resolves a module name to a module (stubs not allowed).
fn resolve_real_module(&self, module_name: &ModuleName) -> Option<&'a FilePath> { fn resolve_real_module(&self, module_name: &ModuleName) -> Option<&'a FilePath> {
let module = if let Some(file) = self.file { let module = resolve_real_module(self.db, module_name)?;
resolve_real_module(self.db, file, module_name)?
} else {
resolve_real_module_confident(self.db, module_name)?
};
Some(module.file(self.db)?.path(self.db)) Some(module.file(self.db)?.path(self.db))
} }
} }

View File

@ -6,7 +6,7 @@ use std::collections::BTreeMap;
use std::fmt; use std::fmt;
use std::path::PathBuf; use std::path::PathBuf;
#[derive(Debug, Clone, CacheKey)] #[derive(Debug, Default, Clone, CacheKey)]
pub struct AnalyzeSettings { pub struct AnalyzeSettings {
pub exclude: FilePatternSet, pub exclude: FilePatternSet,
pub preview: PreviewMode, pub preview: PreviewMode,
@ -14,21 +14,6 @@ pub struct AnalyzeSettings {
pub string_imports: StringImports, pub string_imports: StringImports,
pub include_dependencies: BTreeMap<PathBuf, (PathBuf, Vec<String>)>, pub include_dependencies: BTreeMap<PathBuf, (PathBuf, Vec<String>)>,
pub extension: ExtensionMapping, pub extension: ExtensionMapping,
pub type_checking_imports: bool,
}
impl Default for AnalyzeSettings {
fn default() -> Self {
Self {
exclude: FilePatternSet::default(),
preview: PreviewMode::default(),
target_version: PythonVersion::default(),
string_imports: StringImports::default(),
include_dependencies: BTreeMap::default(),
extension: ExtensionMapping::default(),
type_checking_imports: true,
}
}
} }
impl fmt::Display for AnalyzeSettings { impl fmt::Display for AnalyzeSettings {
@ -44,7 +29,6 @@ impl fmt::Display for AnalyzeSettings {
self.string_imports, self.string_imports,
self.extension | debug, self.extension | debug,
self.include_dependencies | debug, self.include_dependencies | debug,
self.type_checking_imports,
] ]
} }
Ok(()) Ok(())

View File

@ -1,6 +1,6 @@
[package] [package]
name = "ruff_linter" name = "ruff_linter"
version = "0.14.9" version = "0.14.2"
publish = false publish = false
authors = { workspace = true } authors = { workspace = true }
edition = { workspace = true } edition = { workspace = true }
@ -35,7 +35,6 @@ anyhow = { workspace = true }
bitflags = { workspace = true } bitflags = { workspace = true }
clap = { workspace = true, features = ["derive", "string"], optional = true } clap = { workspace = true, features = ["derive", "string"], optional = true }
colored = { workspace = true } colored = { workspace = true }
compact_str = { workspace = true }
fern = { workspace = true } fern = { workspace = true }
glob = { workspace = true } glob = { workspace = true }
globset = { workspace = true } globset = { workspace = true }

View File

@ -22,7 +22,6 @@ DAG(dag_id="class_schedule_interval", schedule_interval="@hourly")
DAG(dag_id="class_timetable", timetable=NullTimetable()) DAG(dag_id="class_timetable", timetable=NullTimetable())
DAG(dag_id="class_concurrency", concurrency=12)
DAG(dag_id="class_fail_stop", fail_stop=True) DAG(dag_id="class_fail_stop", fail_stop=True)

View File

@ -10,7 +10,6 @@ from airflow.datasets import (
) )
from airflow.datasets.manager import DatasetManager from airflow.datasets.manager import DatasetManager
from airflow.lineage.hook import DatasetLineageInfo, HookLineageCollector from airflow.lineage.hook import DatasetLineageInfo, HookLineageCollector
from airflow.models.dag import DAG
from airflow.providers.amazon.aws.auth_manager.aws_auth_manager import AwsAuthManager from airflow.providers.amazon.aws.auth_manager.aws_auth_manager import AwsAuthManager
from airflow.providers.apache.beam.hooks import BeamHook, NotAir302HookError from airflow.providers.apache.beam.hooks import BeamHook, NotAir302HookError
from airflow.providers.google.cloud.secrets.secret_manager import ( from airflow.providers.google.cloud.secrets.secret_manager import (
@ -21,7 +20,6 @@ from airflow.providers_manager import ProvidersManager
from airflow.secrets.base_secrets import BaseSecretsBackend from airflow.secrets.base_secrets import BaseSecretsBackend
from airflow.secrets.local_filesystem import LocalFilesystemBackend from airflow.secrets.local_filesystem import LocalFilesystemBackend
# airflow.Dataset # airflow.Dataset
dataset_from_root = DatasetFromRoot() dataset_from_root = DatasetFromRoot()
dataset_from_root.iter_datasets() dataset_from_root.iter_datasets()
@ -58,10 +56,6 @@ hlc.add_input_dataset()
hlc.add_output_dataset() hlc.add_output_dataset()
hlc.collected_datasets() hlc.collected_datasets()
# airflow.models.dag.DAG
test_dag = DAG(dag_id="test_dag")
test_dag.create_dagrun()
# airflow.providers.amazon.auth_manager.aws_auth_manager # airflow.providers.amazon.auth_manager.aws_auth_manager
aam = AwsAuthManager() aam = AwsAuthManager()
aam.is_authorized_dataset() aam.is_authorized_dataset()
@ -102,15 +96,3 @@ base_secret_backend.get_connections()
# airflow.secrets.local_filesystem # airflow.secrets.local_filesystem
lfb = LocalFilesystemBackend() lfb = LocalFilesystemBackend()
lfb.get_connections() lfb.get_connections()
from airflow.models import DAG
# airflow.DAG
test_dag = DAG(dag_id="test_dag")
test_dag.create_dagrun()
from airflow import DAG
# airflow.DAG
test_dag = DAG(dag_id="test_dag")
test_dag.create_dagrun()

View File

@ -91,20 +91,10 @@ get_unique_task_id()
task_decorator_factory() task_decorator_factory()
from airflow.models import DagParam, Param, ParamsDict from airflow.models import Param
# airflow.models # airflow.models
Param() Param()
DagParam()
ParamsDict()
from airflow.models.param import DagParam, Param, ParamsDict
# airflow.models.param
Param()
DagParam()
ParamsDict()
from airflow.sensors.base import ( from airflow.sensors.base import (

View File

@ -45,22 +45,3 @@ urllib.request.urlopen(urllib.request.Request(url))
# https://github.com/astral-sh/ruff/issues/15522 # https://github.com/astral-sh/ruff/issues/15522
map(urllib.request.urlopen, []) map(urllib.request.urlopen, [])
foo = urllib.request.urlopen foo = urllib.request.urlopen
# https://github.com/astral-sh/ruff/issues/21462
path = "https://example.com/data.csv"
urllib.request.urlretrieve(path, "data.csv")
url = "https://example.com/api"
urllib.request.Request(url)
# Test resolved f-strings and concatenated string literals
fstring_url = f"https://example.com/data.csv"
urllib.request.urlopen(fstring_url)
urllib.request.Request(fstring_url)
concatenated_url = "https://" + "example.com/data.csv"
urllib.request.urlopen(concatenated_url)
urllib.request.Request(concatenated_url)
nested_concatenated = "http://" + "example.com" + "/data.csv"
urllib.request.urlopen(nested_concatenated)
urllib.request.Request(nested_concatenated)

View File

@ -28,11 +28,9 @@ yaml.load("{}", SafeLoader)
yaml.load("{}", yaml.SafeLoader) yaml.load("{}", yaml.SafeLoader)
yaml.load("{}", CSafeLoader) yaml.load("{}", CSafeLoader)
yaml.load("{}", yaml.CSafeLoader) yaml.load("{}", yaml.CSafeLoader)
yaml.load("{}", yaml.cyaml.CSafeLoader)
yaml.load("{}", NewSafeLoader) yaml.load("{}", NewSafeLoader)
yaml.load("{}", Loader=SafeLoader) yaml.load("{}", Loader=SafeLoader)
yaml.load("{}", Loader=yaml.SafeLoader) yaml.load("{}", Loader=yaml.SafeLoader)
yaml.load("{}", Loader=CSafeLoader) yaml.load("{}", Loader=CSafeLoader)
yaml.load("{}", Loader=yaml.CSafeLoader) yaml.load("{}", Loader=yaml.CSafeLoader)
yaml.load("{}", Loader=yaml.cyaml.CSafeLoader)
yaml.load("{}", Loader=NewSafeLoader) yaml.load("{}", Loader=NewSafeLoader)

View File

@ -4,31 +4,3 @@ CommunityData("public", mpModel=0) # S508
CommunityData("public", mpModel=1) # S508 CommunityData("public", mpModel=1) # S508
CommunityData("public", mpModel=2) # OK CommunityData("public", mpModel=2) # OK
# New API paths
import pysnmp.hlapi.asyncio
import pysnmp.hlapi.v1arch
import pysnmp.hlapi.v1arch.asyncio
import pysnmp.hlapi.v1arch.asyncio.auth
import pysnmp.hlapi.v3arch
import pysnmp.hlapi.v3arch.asyncio
import pysnmp.hlapi.v3arch.asyncio.auth
import pysnmp.hlapi.auth
pysnmp.hlapi.asyncio.CommunityData("public", mpModel=0) # S508
pysnmp.hlapi.v1arch.asyncio.auth.CommunityData("public", mpModel=0) # S508
pysnmp.hlapi.v1arch.asyncio.CommunityData("public", mpModel=0) # S508
pysnmp.hlapi.v1arch.CommunityData("public", mpModel=0) # S508
pysnmp.hlapi.v3arch.asyncio.auth.CommunityData("public", mpModel=0) # S508
pysnmp.hlapi.v3arch.asyncio.CommunityData("public", mpModel=0) # S508
pysnmp.hlapi.v3arch.CommunityData("public", mpModel=0) # S508
pysnmp.hlapi.auth.CommunityData("public", mpModel=0) # S508
pysnmp.hlapi.asyncio.CommunityData("public", mpModel=2) # OK
pysnmp.hlapi.v1arch.asyncio.auth.CommunityData("public", mpModel=2) # OK
pysnmp.hlapi.v1arch.asyncio.CommunityData("public", mpModel=2) # OK
pysnmp.hlapi.v1arch.CommunityData("public", mpModel=2) # OK
pysnmp.hlapi.v3arch.asyncio.auth.CommunityData("public", mpModel=2) # OK
pysnmp.hlapi.v3arch.asyncio.CommunityData("public", mpModel=2) # OK
pysnmp.hlapi.v3arch.CommunityData("public", mpModel=2) # OK
pysnmp.hlapi.auth.CommunityData("public", mpModel=2) # OK
View File
@ -5,19 +5,3 @@ insecure = UsmUserData("securityName") # S509
auth_no_priv = UsmUserData("securityName", "authName") # S509 auth_no_priv = UsmUserData("securityName", "authName") # S509
less_insecure = UsmUserData("securityName", "authName", "privName") # OK less_insecure = UsmUserData("securityName", "authName", "privName") # OK
# New API paths
import pysnmp.hlapi.asyncio
import pysnmp.hlapi.v3arch.asyncio
import pysnmp.hlapi.v3arch.asyncio.auth
import pysnmp.hlapi.auth
pysnmp.hlapi.asyncio.UsmUserData("user") # S509
pysnmp.hlapi.v3arch.asyncio.UsmUserData("user") # S509
pysnmp.hlapi.v3arch.asyncio.auth.UsmUserData("user") # S509
pysnmp.hlapi.auth.UsmUserData("user") # S509
pysnmp.hlapi.asyncio.UsmUserData("user", "authkey", "privkey") # OK
pysnmp.hlapi.v3arch.asyncio.UsmUserData("user", "authkey", "privkey") # OK
pysnmp.hlapi.v3arch.asyncio.auth.UsmUserData("user", "authkey", "privkey") # OK
pysnmp.hlapi.auth.UsmUserData("user", "authkey", "privkey") # OK
View File
@ -199,9 +199,6 @@ def bytes_okay(value=bytes(1)):
def int_okay(value=int("12")): def int_okay(value=int("12")):
pass pass
# Allow immutable slice()
def slice_okay(value=slice(1,2)):
pass
# Allow immutable complex() value # Allow immutable complex() value
def complex_okay(value=complex(1,2)): def complex_okay(value=complex(1,2)):
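slice() joins the immutable-call allowlist because its result cannot change between calls. The hazard the rule guards against only bites for mutable defaults, as this short sketch shows:

def appender(values=[]):            # one list object shared by every call
    values.append(1)
    return values

appender()                          # [1]
appender()                          # [1, 1] - state leaked across calls

def slice_okay(value=slice(1, 2)):  # fine: slices are immutable
    return value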
View File
@ -70,12 +70,3 @@ builtins.getattr(foo, "bar")
# Regression test for: https://github.com/astral-sh/ruff/issues/18353 # Regression test for: https://github.com/astral-sh/ruff/issues/18353
setattr(foo, "__debug__", 0) setattr(foo, "__debug__", 0)
# Regression test for: https://github.com/astral-sh/ruff/issues/21126
# Fixes for non-NFKC attribute names should be marked as unsafe. Python normalizes identifiers in
# attribute access (obj.attr) using NFKC, but does not normalize string
# arguments passed to getattr/setattr. Rewriting `getattr(ns, "ſ")` to
# `ns.ſ` would be interpreted as `ns.s` at runtime, changing behavior.
# Example: the long s character "ſ" normalizes to "s" under NFKC.
getattr(foo, "ſ")
setattr(foo, "ſ", 1)
View File
@ -52,16 +52,16 @@ def not_broken5():
yield inner() yield inner()
def broken3(): def not_broken6():
return (yield from []) return (yield from [])
def broken4(): def not_broken7():
x = yield from [] x = yield from []
return x return x
def broken5(): def not_broken8():
x = None x = None
def inner(ex): def inner(ex):
@ -76,13 +76,3 @@ class NotBroken9(object):
def __await__(self): def __await__(self):
yield from function() yield from function()
return 42 return 42
async def broken6():
yield 1
return foo()
async def broken7():
yield 1
return [1, 2, 3]
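The renames (not_broken6 through not_broken8 becoming broken3 through broken5) and the new async cases track what the rule now reports. The language-level background the broken6/broken7 cases lean on, as a quick sketch:

def sync_gen():
    yield 1
    return 42     # legal Python: surfaces as StopIteration(42).value

# async def async_gen():
#     yield 1
#     return 42   # SyntaxError: 'return' with value inside an async generator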
View File
@ -46,9 +46,3 @@ class CorrectModel(models.Model):
max_length=255, null=True, blank=True, unique=True max_length=255, null=True, blank=True, unique=True
) )
urlfieldu = models.URLField(max_length=255, null=True, blank=True, unique=True) urlfieldu = models.URLField(max_length=255, null=True, blank=True, unique=True)
class IncorrectModelWithSimpleAnnotations(models.Model):
charfield: models.CharField = models.CharField(max_length=255, null=True)
textfield: models.TextField = models.TextField(max_length=255, null=True)
slugfield: models.SlugField = models.SlugField(max_length=255, null=True)
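For text-backed Django fields the usual remedy is blank=True without null=True, since Django stores empty strings rather than NULL for these columns, and null=True introduces a second "no data" state. A hedged sketch of the corrected model (names are illustrative):

from django.db import models

class FixedModel(models.Model):
    # blank=True permits empty form input; the column stays NOT NULL,
    # so "" is the single empty value.
    charfield = models.CharField(max_length=255, blank=True)
    textfield = models.TextField(blank=True)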
View File
@ -208,17 +208,3 @@ _ = t"b {f"c" f"d {t"e" t"f"} g"} h"
_ = f"b {t"abc" \ _ = f"b {t"abc" \
t"def"} g" t"def"} g"
# Explicit concatenation where either operand is a string literal
# that wraps across multiple lines (in parentheses) reports a
# diagnostic but offers no autofix.
# See https://github.com/astral-sh/ruff/issues/19757
_ = "abc" + (
"def"
"ghi"
)
_ = (
"abc"
"def"
) + "ghi"
View File
@ -1,7 +0,0 @@
# Regression test for https://github.com/astral-sh/ruff/issues/21023
'' '
"" ""
'' '' '
"" "" "
f"" f"
f"" f"" f"
View File
@ -359,29 +359,3 @@ class Generic5(list[PotentialTypeVar]):
def __new__(cls: type[Generic5]) -> Generic5: ... def __new__(cls: type[Generic5]) -> Generic5: ...
def __enter__(self: Generic5) -> Generic5: ... def __enter__(self: Generic5) -> Generic5: ...
# Test cases based on issue #20781 - metaclasses that trigger IsMetaclass::Maybe
class MetaclassInWhichSelfCannotBeUsed5(type(Protocol)):
def __new__(
cls, name: str, bases: tuple[type[Any], ...], attrs: dict[str, Any], **kwargs: Any
) -> MetaclassInWhichSelfCannotBeUsed5:
new_class = super().__new__(cls, name, bases, attrs, **kwargs)
return new_class
import django.db.models.base
class MetaclassInWhichSelfCannotBeUsed6(django.db.models.base.ModelBase):
def __new__(cls, name: str, bases: tuple[Any, ...], attrs: dict[str, Any], **kwargs: Any) -> MetaclassInWhichSelfCannotBeUsed6:
...
class MetaclassInWhichSelfCannotBeUsed7(django.db.models.base.ModelBase):
def __new__(cls, /, name: str, bases: tuple[object, ...], attrs: dict[str, object], **kwds: object) -> MetaclassInWhichSelfCannotBeUsed7:
...
class MetaclassInWhichSelfCannotBeUsed8(django.db.models.base.ModelBase):
def __new__(cls, name: builtins.str, bases: tuple, attributes: dict, /, **kw) -> MetaclassInWhichSelfCannotBeUsed8:
...
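For ordinary classes the rule prefers typing.Self over repeating the class name in __new__ and __enter__; the cases above are exempt because their bases only possibly make them metaclasses (IsMetaclass::Maybe). A sketch of the preferred non-metaclass form, as assumed from the rule's intent:

from typing import Self

class Ordinary:
    def __new__(cls) -> Self: ...    # preferred over "-> Ordinary"
    def __enter__(self) -> Self: ...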
View File
@ -252,28 +252,3 @@ from some_module import PotentialTypeVar
class Generic5(list[PotentialTypeVar]): class Generic5(list[PotentialTypeVar]):
def __new__(cls: type[Generic5]) -> Generic5: ... def __new__(cls: type[Generic5]) -> Generic5: ...
def __enter__(self: Generic5) -> Generic5: ... def __enter__(self: Generic5) -> Generic5: ...
# Test case based on issue #20781 - metaclass that triggers IsMetaclass::Maybe
class MetaclassInWhichSelfCannotBeUsed5(type(Protocol)):
def __new__(
cls, name: str, bases: tuple[type[Any], ...], attrs: dict[str, Any], **kwargs: Any
) -> MetaclassInWhichSelfCannotBeUsed5: ...
import django.db.models.base
class MetaclassInWhichSelfCannotBeUsed6(django.db.models.base.ModelBase):
def __new__(cls, name: str, bases: tuple[Any, ...], attrs: dict[str, Any], **kwargs: Any) -> MetaclassInWhichSelfCannotBeUsed6:
...
class MetaclassInWhichSelfCannotBeUsed7(django.db.models.base.ModelBase):
def __new__(cls, /, name: str, bases: tuple[object, ...], attrs: dict[str, object], **kwds: object) -> MetaclassInWhichSelfCannotBeUsed7:
...
class MetaclassInWhichSelfCannotBeUsed8(django.db.models.base.ModelBase):
def __new__(cls, name: builtins.str, bases: tuple, attributes: dict, /, **kw) -> MetaclassInWhichSelfCannotBeUsed8:
...
Some files were not shown because too many files have changed in this diff