Merge branch 'main' into pylint-too-many-public-methods

This commit is contained in:
Charlie Marsh 2023-09-13 20:23:19 -04:00
commit 3d1ae648ad
648 changed files with 30347 additions and 19639 deletions

View File

@ -10,7 +10,7 @@ indent_style = space
insert_final_newline = true insert_final_newline = true
indent_size = 2 indent_size = 2
[*.{rs,py}] [*.{rs,py,pyi}]
indent_size = 4 indent_size = 4
[*.snap] [*.snap]

View File

@ -4,8 +4,10 @@ updates:
directory: "/" directory: "/"
schedule: schedule:
interval: "weekly" interval: "weekly"
day: "monday" labels: ["internal"]
time: "12:00"
timezone: "America/New_York" - package-ecosystem: "cargo"
commit-message: directory: "/"
prefix: "ci(deps)" schedule:
interval: "daily"
labels: ["internal"]

3
.github/release.yml vendored
View File

@ -20,6 +20,9 @@ changelog:
- title: Bug Fixes - title: Bug Fixes
labels: labels:
- bug - bug
- title: Preview
labels:
- preview
- title: Other Changes - title: Other Changes
labels: labels:
- "*" - "*"

View File

@ -26,11 +26,11 @@ jobs:
linter: ${{ steps.changed.outputs.linter_any_changed }} linter: ${{ steps.changed.outputs.linter_any_changed }}
formatter: ${{ steps.changed.outputs.formatter_any_changed }} formatter: ${{ steps.changed.outputs.formatter_any_changed }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
- uses: tj-actions/changed-files@v38 - uses: tj-actions/changed-files@v39
id: changed id: changed
with: with:
files_yaml: | files_yaml: |
@ -62,7 +62,7 @@ jobs:
name: "cargo fmt" name: "cargo fmt"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup component add rustfmt run: rustup component add rustfmt
- run: cargo fmt --all --check - run: cargo fmt --all --check
@ -71,7 +71,7 @@ jobs:
name: "cargo clippy" name: "cargo clippy"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: | run: |
rustup component add clippy rustup component add clippy
@ -89,7 +89,7 @@ jobs:
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
name: "cargo test | ${{ matrix.os }}" name: "cargo test | ${{ matrix.os }}"
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Install cargo insta" - name: "Install cargo insta"
@ -125,7 +125,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: "cargo fuzz" name: "cargo fuzz"
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
@ -141,7 +141,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
name: "cargo test (wasm)" name: "cargo test (wasm)"
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@v3 - uses: actions/setup-node@v3
@ -160,7 +160,7 @@ jobs:
name: "test scripts" name: "test scripts"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup component add rustfmt run: rustup component add rustfmt
- uses: Swatinem/rust-cache@v2 - uses: Swatinem/rust-cache@v2
@ -182,7 +182,7 @@ jobs:
# Only runs on pull requests, since that is the only way we can find the base version for comparison. # Only runs on pull requests, since that is the only way we can find the base version for comparison.
if: github.event_name == 'pull_request' && needs.determine_changes.outputs.linter == 'true' if: github.event_name == 'pull_request' && needs.determine_changes.outputs.linter == 'true'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -227,7 +227,7 @@ jobs:
name: "cargo udeps" name: "cargo udeps"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install nightly Rust toolchain" - name: "Install nightly Rust toolchain"
# Only pinned to make caching work, update freely # Only pinned to make caching work, update freely
run: rustup toolchain install nightly-2023-06-08 run: rustup toolchain install nightly-2023-06-08
@ -241,7 +241,7 @@ jobs:
name: "python package" name: "python package"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -265,7 +265,7 @@ jobs:
name: "pre-commit" name: "pre-commit"
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -295,7 +295,7 @@ jobs:
env: env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }} MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
- name: "Add SSH key" - name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }} if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
@ -330,7 +330,7 @@ jobs:
needs: determine_changes needs: determine_changes
if: needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main' if: needs.determine_changes.outputs.formatter == 'true' || github.ref == 'refs/heads/main'
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show
- name: "Cache rust" - name: "Cache rust"
@ -346,7 +346,7 @@ jobs:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- name: "Checkout Branch" - name: "Checkout Branch"
uses: actions/checkout@v3 uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup show run: rustup show

View File

@ -2,6 +2,11 @@ name: mkdocs
on: on:
workflow_dispatch: workflow_dispatch:
inputs:
ref:
description: "The commit SHA, tag, or branch to publish. Uses the default branch if not specified."
default: ""
type: string
release: release:
types: [published] types: [published]
@ -12,7 +17,9 @@ jobs:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }} CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }} MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.ref }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
- name: "Add SSH key" - name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }} if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
@ -40,8 +47,9 @@ jobs:
run: mkdocs build --strict -f mkdocs.generated.yml run: mkdocs build --strict -f mkdocs.generated.yml
- name: "Deploy to Cloudflare Pages" - name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }} if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.1.0 uses: cloudflare/wrangler-action@v3.1.1
with: with:
apiToken: ${{ secrets.CF_API_TOKEN }} apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }} accountId: ${{ secrets.CF_ACCOUNT_ID }}
command: pages publish site --project-name=ruff-docs --branch ${GITHUB_HEAD_REF} --commit-hash ${GITHUB_SHA} # `github.head_ref` is only set during pull requests and for manual runs or tags we use `main` to deploy to production
command: pages deploy site --project-name=ruff-docs --branch ${{ github.head_ref || 'main' }} --commit-hash ${GITHUB_SHA}

View File

@ -19,7 +19,7 @@ jobs:
macos-x86_64: macos-x86_64:
runs-on: macos-latest runs-on: macos-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -42,7 +42,7 @@ jobs:
macos-universal: macos-universal:
runs-on: macos-latest runs-on: macos-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -68,7 +68,7 @@ jobs:
matrix: matrix:
target: [x64, x86] target: [x64, x86]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -96,7 +96,7 @@ jobs:
matrix: matrix:
target: [x86_64, i686] target: [x86_64, i686]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -123,7 +123,7 @@ jobs:
matrix: matrix:
target: [aarch64, armv7, s390x, ppc64le, ppc64] target: [aarch64, armv7, s390x, ppc64le, ppc64]
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -160,7 +160,7 @@ jobs:
- x86_64-unknown-linux-musl - x86_64-unknown-linux-musl
- i686-unknown-linux-musl - i686-unknown-linux-musl
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -196,7 +196,7 @@ jobs:
- target: armv7-unknown-linux-musleabihf - target: armv7-unknown-linux-musleabihf
arch: armv7 arch: armv7
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}

View File

@ -17,7 +17,7 @@ jobs:
env: env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }} CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
- name: "Install Rust toolchain" - name: "Install Rust toolchain"
run: rustup target add wasm32-unknown-unknown run: rustup target add wasm32-unknown-unknown
- uses: actions/setup-node@v3 - uses: actions/setup-node@v3
@ -40,7 +40,7 @@ jobs:
working-directory: playground working-directory: playground
- name: "Deploy to Cloudflare Pages" - name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }} if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@v3.1.0 uses: cloudflare/wrangler-action@v3.1.1
with: with:
apiToken: ${{ secrets.CF_API_TOKEN }} apiToken: ${{ secrets.CF_API_TOKEN }}
accountId: ${{ secrets.CF_ACCOUNT_ID }} accountId: ${{ secrets.CF_ACCOUNT_ID }}

View File

@ -7,12 +7,15 @@ on:
description: "The version to tag, without the leading 'v'. If omitted, will initiate a dry run (no uploads)." description: "The version to tag, without the leading 'v'. If omitted, will initiate a dry run (no uploads)."
type: string type: string
sha: sha:
description: "Optionally, the full sha of the commit to be released" description: "The full sha of the commit to be released. If omitted, the latest commit on the default branch will be used."
default: ""
type: string type: string
pull_request: pull_request:
paths: paths:
# When we change pyproject.toml, we want to ensure that the maturin builds still work # When we change pyproject.toml, we want to ensure that the maturin builds still work
- pyproject.toml - pyproject.toml
# And when we change this workflow itself...
- .github/workflows/release.yaml
concurrency: concurrency:
group: ${{ github.workflow }}-${{ github.ref }} group: ${{ github.workflow }}-${{ github.ref }}
@ -30,7 +33,9 @@ jobs:
sdist: sdist:
runs-on: ubuntu-latest runs-on: ubuntu-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -56,7 +61,9 @@ jobs:
macos-x86_64: macos-x86_64:
runs-on: macos-latest runs-on: macos-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -94,7 +101,9 @@ jobs:
macos-universal: macos-universal:
runs-on: macos-latest runs-on: macos-latest
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -140,7 +149,9 @@ jobs:
- target: aarch64-pc-windows-msvc - target: aarch64-pc-windows-msvc
arch: x64 arch: x64
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -186,7 +197,9 @@ jobs:
- x86_64-unknown-linux-gnu - x86_64-unknown-linux-gnu
- i686-unknown-linux-gnu - i686-unknown-linux-gnu
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -243,7 +256,9 @@ jobs:
arch: ppc64 arch: ppc64
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -296,7 +311,9 @@ jobs:
- x86_64-unknown-linux-musl - x86_64-unknown-linux-musl
- i686-unknown-linux-musl - i686-unknown-linux-musl
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -350,7 +367,9 @@ jobs:
arch: armv7 arch: armv7
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: ${{ env.PYTHON_VERSION }} python-version: ${{ env.PYTHON_VERSION }}
@ -398,7 +417,9 @@ jobs:
# If you don't set an input tag, it's a dry run (no uploads). # If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }} if: ${{ inputs.tag }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- name: Check tag consistency - name: Check tag consistency
run: | run: |
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g') version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
@ -410,6 +431,15 @@ jobs:
else else
echo "Releasing ${version}" echo "Releasing ${version}"
fi fi
- name: Check main branch
if: ${{ inputs.sha }}
run: |
# Fetch the main branch since a shallow checkout is used by default
git fetch origin main --unshallow
if ! git branch --contains ${{ inputs.sha }} | grep -E '(^|\s)main$'; then
echo "The specified sha is not on the main branch" >&2
exit 1
fi
- name: Check SHA consistency - name: Check SHA consistency
if: ${{ inputs.sha }} if: ${{ inputs.sha }}
run: | run: |
@ -464,7 +494,9 @@ jobs:
# For git tag # For git tag
contents: write contents: write
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with:
ref: ${{ inputs.sha }}
- name: git tag - name: git tag
run: | run: |
git config user.email "hey@astral.sh" git config user.email "hey@astral.sh"

View File

@ -23,8 +23,6 @@ repos:
- id: mdformat - id: mdformat
additional_dependencies: additional_dependencies:
- mdformat-mkdocs - mdformat-mkdocs
- mdformat-black
- black==23.1.0 # Must be the latest version of Black
- repo: https://github.com/igorshubovych/markdownlint-cli - repo: https://github.com/igorshubovych/markdownlint-cli
rev: v0.33.0 rev: v0.33.0

View File

@ -1,5 +1,20 @@
# Breaking Changes # Breaking Changes
## 0.0.288
### Remove support for emoji identifiers ([#7212](https://github.com/astral-sh/ruff/pull/7212))
Previously, Ruff supported non-standard-compliant emoji identifiers, e.g. `📦 = 1`.
We decided to remove this non-standard language extension, and Ruff now reports syntax errors for emoji identifiers in your code, the same as CPython.
### Improved GitLab fingerprints ([#7203](https://github.com/astral-sh/ruff/pull/7203))
GitLab uses fingerprints to identify new, existing, or fixed violations. Previously, Ruff included the violation's position in the fingerprint. Using the location has the downside that changing any code before the violation causes the fingerprint to change, resulting in GitLab reporting one fixed and one new violation even though it is a pre-existing violation.
Ruff now uses a more stable, location-agnostic fingerprint to minimize the chance that existing violations are incorrectly marked as fixed and re-reported as new violations.
Expect GitLab to report each pre-existing violation in your project as fixed and a new violation in your Ruff upgrade PR.
## 0.0.283 / 0.284 ## 0.0.283 / 0.284
### The target Python version now defaults to 3.8 instead of 3.10 ([#6397](https://github.com/astral-sh/ruff/pull/6397)) ### The target Python version now defaults to 3.8 instead of 3.10 ([#6397](https://github.com/astral-sh/ruff/pull/6397))

View File

@ -129,6 +129,7 @@ At time of writing, the repository includes the following crates:
intermediate representation. The backend for `ruff_python_formatter`. intermediate representation. The backend for `ruff_python_formatter`.
- `crates/ruff_index`: library crate inspired by `rustc_index`. - `crates/ruff_index`: library crate inspired by `rustc_index`.
- `crates/ruff_macros`: proc macro crate containing macros used by Ruff. - `crates/ruff_macros`: proc macro crate containing macros used by Ruff.
- `crates/ruff_notebook`: library crate for parsing and manipulating Jupyter notebooks.
- `crates/ruff_python_ast`: library crate containing Python-specific AST types and utilities. - `crates/ruff_python_ast`: library crate containing Python-specific AST types and utilities.
- `crates/ruff_python_codegen`: library crate containing utilities for generating Python source code. - `crates/ruff_python_codegen`: library crate containing utilities for generating Python source code.
- `crates/ruff_python_formatter`: library crate implementing the Python formatter. Emits an - `crates/ruff_python_formatter`: library crate implementing the Python formatter. Emits an

770
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@ -22,26 +22,27 @@ glob = { version = "0.3.1" }
globset = { version = "0.4.10" } globset = { version = "0.4.10" }
ignore = { version = "0.4.20" } ignore = { version = "0.4.20" }
insta = { version = "1.31.0", feature = ["filters", "glob"] } insta = { version = "1.31.0", feature = ["filters", "glob"] }
is-macro = { version = "0.2.2" } is-macro = { version = "0.3.0" }
itertools = { version = "0.10.5" } itertools = { version = "0.10.5" }
log = { version = "0.4.17" } log = { version = "0.4.17" }
memchr = "2.5.0" memchr = "2.6.3"
num-bigint = { version = "0.4.3" } num-bigint = { version = "0.4.3" }
num-traits = { version = "0.2.15" } num-traits = { version = "0.2.15" }
once_cell = { version = "1.17.1" } once_cell = { version = "1.17.1" }
path-absolutize = { version = "3.0.14" } path-absolutize = { version = "3.1.1" }
proc-macro2 = { version = "1.0.51" } proc-macro2 = { version = "1.0.51" }
quote = { version = "1.0.23" } quote = { version = "1.0.23" }
regex = { version = "1.7.1" } regex = { version = "1.7.1" }
rustc-hash = { version = "1.1.0" } rustc-hash = { version = "1.1.0" }
schemars = { version = "0.8.12" } schemars = { version = "0.8.12" }
serde = { version = "1.0.152", features = ["derive"] } serde = { version = "1.0.152", features = ["derive"] }
serde_json = { version = "1.0.93" } serde_json = { version = "1.0.106" }
shellexpand = { version = "3.0.0" } shellexpand = { version = "3.0.0" }
similar = { version = "2.2.1", features = ["inline"] } similar = { version = "2.2.1", features = ["inline"] }
smallvec = { version = "1.10.0" } smallvec = { version = "1.10.0" }
strum = { version = "0.24.1", features = ["strum_macros"] } static_assertions = "1.1.0"
strum_macros = { version = "0.24.3" } strum = { version = "0.25.0", features = ["strum_macros"] }
strum_macros = { version = "0.25.2" }
syn = { version = "2.0.15" } syn = { version = "2.0.15" }
test-case = { version = "3.0.0" } test-case = { version = "3.0.0" }
thiserror = { version = "1.0.43" } thiserror = { version = "1.0.43" }
@ -49,12 +50,13 @@ toml = { version = "0.7.2" }
tracing = "0.1.37" tracing = "0.1.37"
tracing-indicatif = "0.3.4" tracing-indicatif = "0.3.4"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] } tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
unicode-ident = "1.0.11"
unicode-width = "0.1.10" unicode-width = "0.1.10"
uuid = { version = "1.4.1", features = ["v4", "fast-rng", "macro-diagnostics", "js"] } uuid = { version = "1.4.1", features = ["v4", "fast-rng", "macro-diagnostics", "js"] }
wsl = { version = "0.1.0" } wsl = { version = "0.1.0" }
# v1.0.1 # v1.0.1
libcst = { git = "https://github.com/Instagram/LibCST.git", rev = "3cacca1a1029f05707e50703b49fe3dd860aa839", default-features = false } libcst = { version = "0.1.0", default-features = false }
[profile.release] [profile.release]
lto = "fat" lto = "fat"

View File

@ -140,7 +140,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
```yaml ```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit - repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version. # Ruff version.
rev: v0.0.286 rev: v0.0.289
hooks: hooks:
- id: ruff - id: ruff
``` ```
@ -398,6 +398,7 @@ Ruff is used by a number of major open-source projects and companies, including:
- [Pydantic](https://github.com/pydantic/pydantic) - [Pydantic](https://github.com/pydantic/pydantic)
- [Pylint](https://github.com/PyCQA/pylint) - [Pylint](https://github.com/PyCQA/pylint)
- [Reflex](https://github.com/reflex-dev/reflex) - [Reflex](https://github.com/reflex-dev/reflex)
- [Rippling](https://rippling.com)
- [Robyn](https://github.com/sansyrox/robyn) - [Robyn](https://github.com/sansyrox/robyn)
- Scale AI ([Launch SDK](https://github.com/scaleapi/launch-python-client)) - Scale AI ([Launch SDK](https://github.com/scaleapi/launch-python-client))
- Snowflake ([SnowCLI](https://github.com/Snowflake-Labs/snowcli)) - Snowflake ([SnowCLI](https://github.com/Snowflake-Labs/snowcli))

View File

@ -1,6 +1,6 @@
[package] [package]
name = "flake8-to-ruff" name = "flake8-to-ruff"
version = "0.0.286" version = "0.0.289"
description = """ description = """
Convert Flake8 configuration files to Ruff configuration files. Convert Flake8 configuration files to Ruff configuration files.
""" """

View File

@ -4,6 +4,7 @@ use std::str::FromStr;
use anyhow::anyhow; use anyhow::anyhow;
use ruff::registry::Linter; use ruff::registry::Linter;
use ruff::settings::types::PreviewMode;
use ruff::RuleSelector; use ruff::RuleSelector;
#[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)] #[derive(Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
@ -331,7 +332,7 @@ pub(crate) fn infer_plugins_from_codes(selectors: &HashSet<RuleSelector>) -> Vec
.filter(|plugin| { .filter(|plugin| {
for selector in selectors { for selector in selectors {
if selector if selector
.into_iter() .rules(PreviewMode::Disabled)
.any(|rule| Linter::from(plugin).rules().any(|r| r == rule)) .any(|rule| Linter::from(plugin).rules().any(|r| r == rule))
{ {
return true; return true;

View File

@ -1,6 +1,6 @@
[package] [package]
name = "ruff" name = "ruff"
version = "0.0.286" version = "0.0.289"
publish = false publish = false
authors = { workspace = true } authors = { workspace = true }
edition = { workspace = true } edition = { workspace = true }
@ -18,6 +18,7 @@ name = "ruff"
ruff_cache = { path = "../ruff_cache" } ruff_cache = { path = "../ruff_cache" }
ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] } ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
ruff_index = { path = "../ruff_index" } ruff_index = { path = "../ruff_index" }
ruff_notebook = { path = "../ruff_notebook" }
ruff_macros = { path = "../ruff_macros" } ruff_macros = { path = "../ruff_macros" }
ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] } ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
ruff_python_codegen = { path = "../ruff_python_codegen" } ruff_python_codegen = { path = "../ruff_python_codegen" }
@ -55,7 +56,7 @@ path-absolutize = { workspace = true, features = [
] } ] }
pathdiff = { version = "0.2.1" } pathdiff = { version = "0.2.1" }
pep440_rs = { version = "0.3.1", features = ["serde"] } pep440_rs = { version = "0.3.1", features = ["serde"] }
pyproject-toml = { version = "0.6.0" } pyproject-toml = { version = "0.7.0" }
quick-junit = { version = "0.3.2" } quick-junit = { version = "0.3.2" }
regex = { workspace = true } regex = { workspace = true }
result-like = { version = "0.4.6" } result-like = { version = "0.4.6" }
@ -64,17 +65,15 @@ schemars = { workspace = true, optional = true }
semver = { version = "1.0.16" } semver = { version = "1.0.16" }
serde = { workspace = true } serde = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }
serde_with = { version = "3.0.0" }
similar = { workspace = true } similar = { workspace = true }
smallvec = { workspace = true } smallvec = { workspace = true }
strum = { workspace = true } strum = { workspace = true }
strum_macros = { workspace = true } strum_macros = { workspace = true }
thiserror = { version = "1.0.43" } thiserror = { workspace = true }
toml = { workspace = true } toml = { workspace = true }
typed-arena = { version = "2.0.2" } typed-arena = { version = "2.0.2" }
unicode-width = { workspace = true } unicode-width = { workspace = true }
unicode_names2 = { version = "0.6.0", git = "https://github.com/youknowone/unicode_names2.git", rev = "4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde" } unicode_names2 = { version = "0.6.0", git = "https://github.com/youknowone/unicode_names2.git", rev = "4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde" }
uuid = { workspace = true, features = ["v4", "fast-rng", "macro-diagnostics", "js"] }
wsl = { version = "0.1.0" } wsl = { version = "0.1.0" }
[dev-dependencies] [dev-dependencies]

View File

@ -0,0 +1,5 @@
# Docstring followed by a newline
def foobar(foor, bar={}):
"""
"""

View File

@ -0,0 +1,6 @@
# Docstring followed by whitespace with no newline
# Regression test for https://github.com/astral-sh/ruff/issues/7155
def foobar(foor, bar={}):
"""
"""

View File

@ -0,0 +1,6 @@
# Docstring with no newline
def foobar(foor, bar={}):
"""
"""

View File

@ -308,3 +308,7 @@ def single_line_func_wrong(value: dict[str, str] = {
def single_line_func_wrong(value: dict[str, str] = {}) \ def single_line_func_wrong(value: dict[str, str] = {}) \
: \ : \
"""Docstring""" """Docstring"""
def single_line_func_wrong(value: dict[str, str] = {}):
"""Docstring without newline"""

View File

@ -1,7 +1,7 @@
""" """
Should emit: Should emit:
B009 - Line 19, 20, 21, 22, 23, 24 B009 - Lines 19-31
B010 - Line 40, 41, 42, 43, 44, 45 B010 - Lines 40-45
""" """
# Valid getattr usage # Valid getattr usage
@ -24,6 +24,16 @@ getattr(foo, r"abc123")
_ = lambda x: getattr(x, "bar") _ = lambda x: getattr(x, "bar")
if getattr(x, "bar"): if getattr(x, "bar"):
pass pass
getattr(1, "real")
getattr(1., "real")
getattr(1.0, "real")
getattr(1j, "real")
getattr(True, "real")
getattr(x := 1, "real")
getattr(x + y, "real")
getattr("foo"
"bar", "real")
# Valid setattr usage # Valid setattr usage
setattr(foo, bar, None) setattr(foo, bar, None)

View File

@ -1,3 +1,5 @@
retriable_exceptions = (FileExistsError, FileNotFoundError)
try: try:
pass pass
except (ValueError,): except (ValueError,):
@ -6,3 +8,7 @@ except AttributeError:
pass pass
except (ImportError, TypeError): except (ImportError, TypeError):
pass pass
except (*retriable_exceptions,):
pass
except(ValueError,):
pass

View File

@ -16,3 +16,6 @@ def f(x):
return x return x
print(f'Hello {dict((x,f(x)) for x in "abc")} World') print(f'Hello {dict((x,f(x)) for x in "abc")} World')
# Regression test for: https://github.com/astral-sh/ruff/issues/7086
dict((k,v)for k,v in d.iteritems() if k in only_args)

View File

@ -11,3 +11,6 @@ f"{dict([(s,f(s)) for s in 'ab'])}"
f'{dict([(s,s) for s in "ab"]) | dict([(s,s) for s in "ab"])}' f'{dict([(s,s) for s in "ab"]) | dict([(s,s) for s in "ab"])}'
f'{ dict([(s,s) for s in "ab"]) | dict([(s,s) for s in "ab"]) }' f'{ dict([(s,s) for s in "ab"]) | dict([(s,s) for s in "ab"]) }'
# Regression test for: https://github.com/astral-sh/ruff/issues/7087
saved.append(dict([(k, v)for k,v in list(unique_instance.__dict__.items()) if k in [f.name for f in unique_instance._meta.fields]]))

View File

@ -8,6 +8,11 @@ reversed(sorted(x, key=lambda e: e, reverse=True))
reversed(sorted(x, reverse=True, key=lambda e: e)) reversed(sorted(x, reverse=True, key=lambda e: e))
reversed(sorted(x, reverse=False)) reversed(sorted(x, reverse=False))
# Regression test for: https://github.com/astral-sh/ruff/issues/7289
reversed(sorted(i for i in range(42)))
reversed(sorted((i for i in range(42)), reverse=True))
def reversed(*args, **kwargs): def reversed(*args, **kwargs):
return None return None

View File

@ -17,3 +17,6 @@ d = {"a": 1, "b": 2, "c": 3}
{k.foo: k for k in y} {k.foo: k for k in y}
{k["foo"]: k for k in y} {k["foo"]: k for k in y}
{k: v if v else None for k, v in y} {k: v if v else None for k, v in y}
# Regression test for: https://github.com/astral-sh/ruff/issues/7196
any(len(symbol_table.get_by_type(symbol_type)) > 0 for symbol_type in[t for t in SymbolType])

View File

@ -5,11 +5,13 @@ map(lambda x: str(x), nums)
list(map(lambda x: x * 2, nums)) list(map(lambda x: x * 2, nums))
set(map(lambda x: x % 2 == 0, nums)) set(map(lambda x: x % 2 == 0, nums))
dict(map(lambda v: (v, v**2), nums)) dict(map(lambda v: (v, v**2), nums))
dict(map(lambda v: [v, v**2], nums))
map(lambda: "const", nums) map(lambda: "const", nums)
map(lambda _: 3.0, nums) map(lambda _: 3.0, nums)
_ = "".join(map(lambda x: x in nums and "1" or "0", range(123))) _ = "".join(map(lambda x: x in nums and "1" or "0", range(123)))
all(map(lambda v: isinstance(v, dict), nums)) all(map(lambda v: isinstance(v, dict), nums))
filter(func, map(lambda v: v, nums)) filter(func, map(lambda v: v, nums))
list(map(lambda x, y: x * y, nums))
# When inside f-string, then the fix should be surrounded by whitespace # When inside f-string, then the fix should be surrounded by whitespace
_ = f"{set(map(lambda x: x % 2 == 0, nums))}" _ = f"{set(map(lambda x: x % 2 == 0, nums))}"
@ -40,3 +42,8 @@ map(lambda **kwargs: len(kwargs), range(4))
# Ok because multiple arguments are allowed. # Ok because multiple arguments are allowed.
dict(map(lambda k, v: (k, v), keys, values)) dict(map(lambda k, v: (k, v), keys, values))
# Regression test for: https://github.com/astral-sh/ruff/issues/7121
map(lambda x: x, y if y else z)
map(lambda x: x, (y if y else z))
map(lambda x: x, (x, y, z))

View File

@ -3,16 +3,20 @@ def bar():
def foo(): def foo():
"""foo""" # OK """foo""" # OK, docstrings are handled by another rule
def buzz(): def buzz():
print("buzz") # OK, not in stub file print("buzz") # ERROR PYI010
def foo2(): def foo2():
123 # OK, not in a stub file 123 # ERROR PYI010
def bizz(): def bizz():
x = 123 # OK, not in a stub file x = 123 # ERROR PYI010
def foo3():
pass # OK, pass is handled by another rule

View File

@ -1,6 +1,6 @@
def bar(): ... # OK def bar(): ... # OK
def foo(): def foo():
"""foo""" # OK, strings are handled by another rule """foo""" # OK, docstrings are handled by another rule
def buzz(): def buzz():
print("buzz") # ERROR PYI010 print("buzz") # ERROR PYI010
@ -10,3 +10,6 @@ def foo2():
def bizz(): def bizz():
x = 123 # ERROR PYI010 x = 123 # ERROR PYI010
def foo3():
pass # OK, pass is handled by another rule

View File

@ -1,19 +1,27 @@
def bar(): # OK def bar():
... ... # OK
def oof(): # OK, docstrings are handled by another rule def bar():
pass # OK
def bar():
"""oof""" # OK
def oof(): # ERROR PYI048
"""oof""" """oof"""
print("foo") print("foo")
def foo(): # Ok not in Stub file def foo(): # ERROR PYI048
"""foo""" """foo"""
print("foo") print("foo")
print("foo") print("foo")
def buzz(): # Ok not in Stub file def buzz(): # ERROR PYI048
print("fizz") print("fizz")
print("buzz") print("buzz")
print("test") print("test")

View File

@ -1,20 +1,20 @@
def bar(): ... # OK
def bar(): def bar():
... # OK pass # OK
def bar():
"""oof""" # OK
def oof(): # OK, docstrings are handled by another rule def oof(): # ERROR PYI048
"""oof""" """oof"""
print("foo") print("foo")
def foo(): # ERROR PYI048
def foo(): # ERROR PYI048
"""foo""" """foo"""
print("foo") print("foo")
print("foo") print("foo")
def buzz(): # ERROR PYI048
def buzz(): # ERROR PYI048
print("fizz") print("fizz")
print("buzz") print("buzz")
print("test") print("test")

View File

@ -52,3 +52,21 @@ def test_multiline():
x = 1; \ x = 1; \
assert something and something_else assert something and something_else
# Regression test for: https://github.com/astral-sh/ruff/issues/7143
def test_parenthesized_not():
assert not (
self.find_graph_output(node.output[0])
or self.find_graph_input(node.input[0])
or self.find_graph_output(node.input[0])
)
assert (not (
self.find_graph_output(node.output[0])
or self.find_graph_input(node.input[0])
or self.find_graph_output(node.input[0])
))
assert (not self.find_graph_output(node.output[0]) or
self.find_graph_input(node.input[0]))

View File

@ -357,3 +357,9 @@ def foo():
def foo(): def foo():
a = 1 # Comment a = 1 # Comment
return a return a
# Regression test for: https://github.com/astral-sh/ruff/issues/7098
def mavko_debari(P_kbar):
D=0.4853881 + 3.6006116*P - 0.0117368*(P-1.3822)**2
return D

View File

@ -0,0 +1,10 @@
def foo(obj):
obj._meta # OK
def foo(obj):
obj._asdict # SLF001
def foo(obj):
obj._bar # SLF001

View File

@ -8,6 +8,7 @@ try:
except ValueError: except ValueError:
pass pass
# SIM105 # SIM105
try: try:
foo() foo()
@ -110,3 +111,20 @@ try:
print() print()
except "not an exception": except "not an exception":
pass pass
# Regression test for: https://github.com/astral-sh/ruff/issues/7123
def write_models(directory, Models):
try:
os.makedirs(model_dir);
except OSError:
pass;
try: os.makedirs(model_dir);
except OSError:
pass;
try: os.makedirs(model_dir);
except OSError:
pass; \
\
#

View File

@ -42,3 +42,17 @@ class Foo:
def __contains__(self, key: object) -> bool: def __contains__(self, key: object) -> bool:
return key in self.keys() # OK return key in self.keys() # OK
# Regression test for: https://github.com/astral-sh/ruff/issues/7124
key in obj.keys()and foo
(key in obj.keys())and foo
key in (obj.keys())and foo
# Regression test for: https://github.com/astral-sh/ruff/issues/7200
for key in (
self.experiment.surveys[0]
.stations[0]
.keys()
):
continue

View File

@ -13,3 +13,8 @@ def f():
return False return False
a = True if b else False a = True if b else False
# Regression test for: https://github.com/astral-sh/ruff/issues/7076
samesld = True if (psl.privatesuffix(urlparse(response.url).netloc) ==
psl.privatesuffix(src.netloc)) else False

View File

@ -152,3 +152,11 @@ if (a or [1] or True or [2]) == (a or [1]): # SIM222
if f(a or [1] or True or [2]): # SIM222 if f(a or [1] or True or [2]): # SIM222
pass pass
# Regression test for: https://github.com/astral-sh/ruff/issues/7099
def secondToTime(s0: int) -> (int, int, int) or str:
m, s = divmod(s0, 60)
def secondToTime(s0: int) -> ((int, int, int) or str):
m, s = divmod(s0, 60)

View File

@ -14,6 +14,8 @@ YODA >= age # SIM300
JediOrder.YODA == age # SIM300 JediOrder.YODA == age # SIM300
0 < (number - 100) # SIM300 0 < (number - 100) # SIM300
SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300 SomeClass().settings.SOME_CONSTANT_VALUE > (60 * 60) # SIM300
B<A[0][0]or B
B or(B)<A[0][0]
# OK # OK
compare == "yoda" compare == "yoda"

View File

@ -16,3 +16,8 @@ nok4 = "a".join([a, a, *a]) # Not OK (not a static length)
nok5 = "a".join([choice("flarp")]) # Not OK (not a simple call) nok5 = "a".join([choice("flarp")]) # Not OK (not a simple call)
nok6 = "a".join(x for x in "feefoofum") # Not OK (generator) nok6 = "a".join(x for x in "feefoofum") # Not OK (generator)
nok7 = "a".join([f"foo{8}", "bar"]) # Not OK (contains an f-string) nok7 = "a".join([f"foo{8}", "bar"]) # Not OK (contains an f-string)
# Regression test for: https://github.com/astral-sh/ruff/issues/7197
def create_file_public_url(url, filename):
return''.join([url, filename])

View File

@ -18,3 +18,8 @@ pdf = pd.DataFrame(
) )
_ = arr.astype(np.int) _ = arr.astype(np.int)
# Regression test for: https://github.com/astral-sh/ruff/issues/6952
from numpy import float
float(1)

View File

@ -1,15 +1,18 @@
import numpy as np def func():
import numpy as np
np.round_(np.random.rand(5, 5), 2) np.round_(np.random.rand(5, 5), 2)
np.product(np.random.rand(5, 5)) np.product(np.random.rand(5, 5))
np.cumproduct(np.random.rand(5, 5)) np.cumproduct(np.random.rand(5, 5))
np.sometrue(np.random.rand(5, 5)) np.sometrue(np.random.rand(5, 5))
np.alltrue(np.random.rand(5, 5)) np.alltrue(np.random.rand(5, 5))
from numpy import round_, product, cumproduct, sometrue, alltrue
round_(np.random.rand(5, 5), 2) def func():
product(np.random.rand(5, 5)) from numpy import round_, product, cumproduct, sometrue, alltrue
cumproduct(np.random.rand(5, 5))
sometrue(np.random.rand(5, 5)) round_(np.random.rand(5, 5), 2)
alltrue(np.random.rand(5, 5)) product(np.random.rand(5, 5))
cumproduct(np.random.rand(5, 5))
sometrue(np.random.rand(5, 5))
alltrue(np.random.rand(5, 5))

View File

@ -29,3 +29,5 @@ x.apply(lambda x: x.sort_values("a", inplace=True))
import torch import torch
torch.m.ReLU(inplace=True) # safe because this isn't a pandas call torch.m.ReLU(inplace=True) # safe because this isn't a pandas call
(x.drop(["a"], axis=1, inplace=True))

View File

@ -1,8 +1,6 @@
import collections import collections
from collections import namedtuple from collections import namedtuple
from typing import TypeVar from typing import TypeAlias, TypeVar, NewType, NamedTuple, TypedDict
from typing import NewType
from typing import NamedTuple, TypedDict
GLOBAL: str = "foo" GLOBAL: str = "foo"
@ -21,9 +19,11 @@ def assign():
T = TypeVar("T") T = TypeVar("T")
UserId = NewType("UserId", int) UserId = NewType("UserId", int)
Employee = NamedTuple('Employee', [('name', str), ('id', int)]) Employee = NamedTuple("Employee", [("name", str), ("id", int)])
Point2D = TypedDict('Point2D', {'in': int, 'x-y': int}) Point2D = TypedDict("Point2D", {"in": int, "x-y": int})
IntOrStr: TypeAlias = int | str
def aug_assign(rank, world_size): def aug_assign(rank, world_size):

View File

@ -18,11 +18,11 @@ def f():
result = [] result = []
for i in items: for i in items:
if i % 2: if i % 2:
result.append(i) # PERF401 result.append(i) # Ok
elif i % 2: elif i % 2:
result.append(i) # PERF401 result.append(i)
else: else:
result.append(i) # PERF401 result.append(i)
def f(): def f():
@ -60,3 +60,15 @@ def f():
for i in range(20): for i in range(20):
foo.fibonacci.append(sum(foo.fibonacci[-2:])) # OK foo.fibonacci.append(sum(foo.fibonacci[-2:])) # OK
print(foo.fibonacci) print(foo.fibonacci)
class Foo:
def append(self, x):
pass
def f():
items = [1, 2, 3, 4]
result = Foo()
for i in items:
result.append(i) # Ok

View File

@ -24,3 +24,22 @@ def f():
result = {} result = {}
for i in items: for i in items:
result[i].append(i * i) # OK result[i].append(i * i) # OK
class Foo:
def append(self, x):
pass
def f():
items = [1, 2, 3, 4]
result = Foo()
for i in items:
result.append(i) # OK
def f():
import sys
for path in ("foo", "bar"):
sys.path.append(path) # OK

View File

@ -138,3 +138,12 @@ def scope():
class TemperatureScales(Enum): class TemperatureScales(Enum):
CELSIUS = (lambda deg_c: deg_c) CELSIUS = (lambda deg_c: deg_c)
FAHRENHEIT = (lambda deg_c: deg_c * 9 / 5 + 32) FAHRENHEIT = (lambda deg_c: deg_c * 9 / 5 + 32)
# Regression test for: https://github.com/astral-sh/ruff/issues/7141
def scope():
# E731
f = lambda: (
i := 1,
)

View File

@ -639,3 +639,22 @@ def starts_with_space_then_this():
class SameLine: """This is a docstring on the same line""" class SameLine: """This is a docstring on the same line"""
def same_line(): """This is a docstring on the same line""" def same_line(): """This is a docstring on the same line"""
def single_line_docstring_with_an_escaped_backslash():
"\
"
class StatementOnSameLineAsDocstring:
"After this docstring there's another statement on the same line separated by a semicolon." ; priorities=1
def sort_services(self):
pass
class StatementOnSameLineAsDocstring:
"After this docstring there's another statement on the same line separated by a semicolon."; priorities=1
class CommentAfterDocstring:
"After this docstring there's a comment." # priorities=1
def sort_services(self):
pass

View File

@ -128,6 +128,19 @@ def f(x, *, y, z):
""" """
return x, y, z return x, y, z
def f(x):
"""Do something with valid description.
Args:
----
x: the value
Returns:
-------
the value
"""
return x
class Test: class Test:
def f(self, /, arg1: int) -> None: def f(self, /, arg1: int) -> None:

View File

@ -529,3 +529,16 @@ def replace_equals_with_dash2():
Parameters Parameters
=========== ===========
""" """
@expect(_D213)
def non_empty_blank_line_before_section(): # noqa: D416
"""Toggle the gizmo.
The function's description.
Returns
-------
A value of some sort.
"""

View File

@ -112,3 +112,11 @@ match *0, 1, *2:
import b1 import b1
import b2 import b2
# Regression test for: https://github.com/astral-sh/ruff/issues/7244
from datameta_client_lib.model_utils import ( # noqa: F401
noqa )
from datameta_client_lib.model_helpers import (
noqa )

View File

@ -9,3 +9,8 @@ hidden = {"a": "!"}
"%(a)s" % {'a': 1, u"b": "!"} # F504 ("b" not used) "%(a)s" % {'a': 1, u"b": "!"} # F504 ("b" not used)
'' % {'a''b' : ''} # F504 ("ab" not used) '' % {'a''b' : ''} # F504 ("ab" not used)
# https://github.com/astral-sh/ruff/issues/4899
"" % {
'test1': '', 'test2': '',
}

View File

@ -165,3 +165,9 @@ def f():
x = 1 x = 1
y = 2 y = 2
def f():
(x) = foo()
((x)) = foo()
(x) = (y.z) = foo()

View File

@ -1,3 +1,6 @@
from typing import override
class Apples: class Apples:
def _init_(self): # [bad-dunder-name] def _init_(self): # [bad-dunder-name]
pass pass
@ -21,6 +24,11 @@ class Apples:
# author likely meant to call the invert dunder method # author likely meant to call the invert dunder method
pass pass
@override
def _ignore__(self): # [bad-dunder-name]
# overridden dunder methods should be ignored
pass
def hello(self): def hello(self):
print("hello") print("hello")

View File

@ -36,3 +36,6 @@ tuples_list = [
min(min(tuples_list)) min(min(tuples_list))
max(max(tuples_list)) max(max(tuples_list))
# Starred argument should be copied as it is.
max(1, max(*a))

View File

@ -91,3 +91,20 @@ def f(x: Optional[int : float]) -> None:
def f(x: Optional[str, int : float]) -> None: def f(x: Optional[str, int : float]) -> None:
... ...
def f(x: Optional[int, float]) -> None:
...
# Regression test for: https://github.com/astral-sh/ruff/issues/7131
class ServiceRefOrValue:
service_specification: Optional[
list[ServiceSpecificationRef]
| list[ServiceSpecification]
] = None
# Regression test for: https://github.com/astral-sh/ruff/issues/7201
class ServiceRefOrValue:
service_specification: Optional[str]is not True = None

View File

@ -1,12 +1,10 @@
import subprocess import subprocess
import subprocess as somename
from subprocess import run from subprocess import run
from subprocess import run as anothername
# Errors
subprocess.run(["foo"], universal_newlines=True, check=True) subprocess.run(["foo"], universal_newlines=True, check=True)
somename.run(["foo"], universal_newlines=True) subprocess.run(["foo"], universal_newlines=True, text=True)
run(["foo"], universal_newlines=True, check=False) run(["foo"], universal_newlines=True, check=False)
anothername(["foo"], universal_newlines=True)
# OK
subprocess.run(["foo"], check=True) subprocess.run(["foo"], check=True)

View File

@ -35,8 +35,19 @@ if output:
encoding="utf-8", encoding="utf-8",
) )
output = subprocess.run(
["foo"], stdout=subprocess.PIPE, capture_output=True, stderr=subprocess.PIPE
)
# Examples that should NOT trigger the rule output = subprocess.run(
["foo"], stdout=subprocess.PIPE, capture_output=False, stderr=subprocess.PIPE
)
output = subprocess.run(
["foo"], capture_output=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
# OK
from foo import PIPE from foo import PIPE
subprocess.run(["foo"], stdout=PIPE, stderr=PIPE) subprocess.run(["foo"], stdout=PIPE, stderr=PIPE)
run(["foo"], stdout=None, stderr=PIPE) run(["foo"], stdout=None, stderr=PIPE)

View File

@ -96,3 +96,11 @@ try:
pass pass
except (OSError, KeyError): except (OSError, KeyError):
pass pass
# Regression test for: https://github.com/astral-sh/ruff/issues/7101
def get_owner_id_from_mac_address():
try:
mac_address = get_primary_mac_address()
except(IOError, OSError) as ex:
msg = 'Unable to query URL to get Owner ID: {u}\n{e}'.format(u=owner_id_url, e=ex)

View File

@ -72,3 +72,12 @@ def f():
for x, y in z(): for x, y in z():
yield x, y yield x, y
x = 1 x = 1
# Regression test for: https://github.com/astral-sh/ruff/issues/7103
def _serve_method(fn):
for h in (
TaggedText.from_file(args.input)
.markup(highlight=args.region)
):
yield h

View File

@ -1,4 +1,4 @@
# These should NOT change # OK
def f(): def f():
for x in z: for x in z:
yield yield

View File

@ -74,6 +74,8 @@ from typing import Collection
from typing import AsyncGenerator from typing import AsyncGenerator
from typing import Reversible from typing import Reversible
from typing import Generator from typing import Generator
from typing import Callable
from typing import cast
# OK # OK
from a import b from a import b

View File

@ -178,3 +178,9 @@ if True:
if True: if True:
if sys.version_info > (3, 0): \ if sys.version_info > (3, 0): \
expected_error = [] expected_error = []
if sys.version_info < (3,12):
print("py3")
if sys.version_info <= (3,12):
print("py3")

View File

@ -0,0 +1,21 @@
l = [1, 2, 3, 4, 5]
# Errors.
a = l[:]
b, c = 1, l[:]
d, e = l[:], 1
m = l[::]
l[:]
print(l[:])
# False negatives.
aa = a[:] # Type inference.
# OK.
t = (1, 2, 3, 4, 5)
f = t[:] # t.copy() is not supported.
g = l[1:3]
h = l[1:]
i = l[:3]
j = l[1:3:2]
k = l[::2]

View File

@ -12,3 +12,10 @@ sum([[1, 2, 3], [4, 5, 6]],
# OK # OK
sum([x, y]) sum([x, y])
sum([[1, 2, 3], [4, 5, 6]]) sum([[1, 2, 3], [4, 5, 6]])
# Regression test for: https://github.com/astral-sh/ruff/issues/7059
def func():
import functools, operator
sum([x, y], [])

View File

@ -3,14 +3,14 @@
use anyhow::{Context, Result}; use anyhow::{Context, Result};
use ruff_diagnostics::Edit; use ruff_diagnostics::Edit;
use ruff_python_ast::{self as ast, Arguments, ExceptHandler, Expr, Keyword, Stmt}; use ruff_python_ast::{self as ast, Arguments, ExceptHandler, Stmt};
use ruff_python_codegen::Stylist; use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer; use ruff_python_index::Indexer;
use ruff_python_trivia::{ use ruff_python_trivia::{
has_leading_content, is_python_whitespace, PythonWhitespace, SimpleTokenKind, SimpleTokenizer, has_leading_content, is_python_whitespace, PythonWhitespace, SimpleTokenKind, SimpleTokenizer,
}; };
use ruff_source_file::{Locator, NewlineWithTrailingNewline}; use ruff_source_file::{Locator, NewlineWithTrailingNewline};
use ruff_text_size::{Ranged, TextLen, TextSize}; use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use crate::autofix::codemods; use crate::autofix::codemods;
@ -92,10 +92,8 @@ pub(crate) fn remove_argument<T: Ranged>(
) -> Result<Edit> { ) -> Result<Edit> {
// Partition into arguments before and after the argument to remove. // Partition into arguments before and after the argument to remove.
let (before, after): (Vec<_>, Vec<_>) = arguments let (before, after): (Vec<_>, Vec<_>) = arguments
.args .arguments_source_order()
.iter() .map(|arg| arg.range())
.map(Expr::range)
.chain(arguments.keywords.iter().map(Keyword::range))
.filter(|range| argument.range() != *range) .filter(|range| argument.range() != *range)
.partition(|range| range.start() < argument.start()); .partition(|range| range.start() < argument.start());
@ -249,6 +247,44 @@ fn next_stmt_break(semicolon: TextSize, locator: &Locator) -> TextSize {
locator.line_end(start_location) locator.line_end(start_location)
} }
/// Add leading whitespace to a snippet, if it's immediately preceded an identifier or keyword.
pub(crate) fn pad_start(mut content: String, start: TextSize, locator: &Locator) -> String {
// Ex) When converting `except(ValueError,)` from a tuple to a single argument, we need to
// insert a space before the fix, to achieve `except ValueError`.
if locator
.up_to(start)
.chars()
.last()
.is_some_and(|char| char.is_ascii_alphabetic())
{
content.insert(0, ' ');
}
content
}
/// Add trailing whitespace to a snippet, if it's immediately followed by an identifier or keyword.
pub(crate) fn pad_end(mut content: String, end: TextSize, locator: &Locator) -> String {
if locator
.after(end)
.chars()
.next()
.is_some_and(|char| char.is_ascii_alphabetic())
{
content.push(' ');
}
content
}
/// Add leading or trailing whitespace to a snippet, if it's immediately preceded or followed by
/// an identifier or keyword.
pub(crate) fn pad(content: String, range: TextRange, locator: &Locator) -> String {
pad_start(
pad_end(content, range.end(), locator),
range.start(),
locator,
)
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use anyhow::Result; use anyhow::Result;

View File

@ -4,17 +4,15 @@ use std::collections::BTreeSet;
use ruff_text_size::{Ranged, TextLen, TextRange, TextSize}; use ruff_text_size::{Ranged, TextLen, TextRange, TextSize};
use rustc_hash::{FxHashMap, FxHashSet}; use rustc_hash::{FxHashMap, FxHashSet};
use ruff_diagnostics::{Diagnostic, Edit, Fix, IsolationLevel}; use ruff_diagnostics::{Diagnostic, Edit, Fix, IsolationLevel, SourceMap};
use ruff_source_file::Locator; use ruff_source_file::Locator;
use crate::autofix::source_map::SourceMap;
use crate::linter::FixTable; use crate::linter::FixTable;
use crate::registry::{AsRule, Rule}; use crate::registry::{AsRule, Rule};
pub(crate) mod codemods; pub(crate) mod codemods;
pub(crate) mod edits; pub(crate) mod edits;
pub(crate) mod snippet; pub(crate) mod snippet;
pub(crate) mod source_map;
pub(crate) struct FixResult { pub(crate) struct FixResult {
/// The resulting source code, after applying all fixes. /// The resulting source code, after applying all fixes.
@ -140,10 +138,9 @@ fn cmp_fix(rule1: Rule, rule2: Rule, fix1: &Fix, fix2: &Fix) -> std::cmp::Orderi
mod tests { mod tests {
use ruff_text_size::{Ranged, TextSize}; use ruff_text_size::{Ranged, TextSize};
use ruff_diagnostics::{Diagnostic, Edit, Fix}; use ruff_diagnostics::{Diagnostic, Edit, Fix, SourceMarker};
use ruff_source_file::Locator; use ruff_source_file::Locator;
use crate::autofix::source_map::SourceMarker;
use crate::autofix::{apply_fixes, FixResult}; use crate::autofix::{apply_fixes, FixResult};
use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile; use crate::rules::pycodestyle::rules::MissingNewlineAtEndOfFile;
@ -207,14 +204,8 @@ print("hello world")
assert_eq!( assert_eq!(
source_map.markers(), source_map.markers(),
&[ &[
SourceMarker { SourceMarker::new(10.into(), 10.into(),),
source: 10.into(), SourceMarker::new(10.into(), 21.into(),),
dest: 10.into(),
},
SourceMarker {
source: 10.into(),
dest: 21.into(),
},
] ]
); );
} }
@ -250,14 +241,8 @@ class A(Bar):
assert_eq!( assert_eq!(
source_map.markers(), source_map.markers(),
&[ &[
SourceMarker { SourceMarker::new(8.into(), 8.into(),),
source: 8.into(), SourceMarker::new(14.into(), 11.into(),),
dest: 8.into(),
},
SourceMarker {
source: 14.into(),
dest: 11.into(),
},
] ]
); );
} }
@ -289,14 +274,8 @@ class A:
assert_eq!( assert_eq!(
source_map.markers(), source_map.markers(),
&[ &[
SourceMarker { SourceMarker::new(7.into(), 7.into()),
source: 7.into(), SourceMarker::new(15.into(), 7.into()),
dest: 7.into()
},
SourceMarker {
source: 15.into(),
dest: 7.into()
}
] ]
); );
} }
@ -332,22 +311,10 @@ class A(object):
assert_eq!( assert_eq!(
source_map.markers(), source_map.markers(),
&[ &[
SourceMarker { SourceMarker::new(8.into(), 8.into()),
source: 8.into(), SourceMarker::new(16.into(), 8.into()),
dest: 8.into() SourceMarker::new(22.into(), 14.into(),),
}, SourceMarker::new(30.into(), 14.into(),),
SourceMarker {
source: 16.into(),
dest: 8.into()
},
SourceMarker {
source: 22.into(),
dest: 14.into(),
},
SourceMarker {
source: 30.into(),
dest: 14.into(),
}
] ]
); );
} }
@ -382,14 +349,8 @@ class A:
assert_eq!( assert_eq!(
source_map.markers(), source_map.markers(),
&[ &[
SourceMarker { SourceMarker::new(7.into(), 7.into(),),
source: 7.into(), SourceMarker::new(15.into(), 7.into(),),
dest: 7.into(),
},
SourceMarker {
source: 15.into(),
dest: 7.into(),
}
] ]
); );
} }

View File

@ -16,7 +16,7 @@ use crate::rules::{
flake8_future_annotations, flake8_gettext, flake8_implicit_str_concat, flake8_logging_format, flake8_future_annotations, flake8_gettext, flake8_implicit_str_concat, flake8_logging_format,
flake8_pie, flake8_print, flake8_pyi, flake8_pytest_style, flake8_self, flake8_simplify, flake8_pie, flake8_print, flake8_pyi, flake8_pytest_style, flake8_self, flake8_simplify,
flake8_tidy_imports, flake8_use_pathlib, flynt, numpy, pandas_vet, pep8_naming, pycodestyle, flake8_tidy_imports, flake8_use_pathlib, flynt, numpy, pandas_vet, pep8_naming, pycodestyle,
pyflakes, pygrep_hooks, pylint, pyupgrade, ruff, pyflakes, pygrep_hooks, pylint, pyupgrade, refurb, ruff,
}; };
use crate::settings::types::PythonVersion; use crate::settings::types::PythonVersion;
@ -113,10 +113,12 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
if checker.enabled(Rule::UnnecessaryIterableAllocationForFirstElement) { if checker.enabled(Rule::UnnecessaryIterableAllocationForFirstElement) {
ruff::rules::unnecessary_iterable_allocation_for_first_element(checker, subscript); ruff::rules::unnecessary_iterable_allocation_for_first_element(checker, subscript);
} }
if checker.enabled(Rule::InvalidIndexType) { if checker.enabled(Rule::InvalidIndexType) {
ruff::rules::invalid_index_type(checker, subscript); ruff::rules::invalid_index_type(checker, subscript);
} }
if checker.settings.rules.enabled(Rule::SliceCopy) {
refurb::rules::slice_copy(checker, subscript);
}
pandas_vet::rules::subscript(checker, value, expr); pandas_vet::rules::subscript(checker, value, expr);
} }
@ -1195,7 +1197,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
} }
Expr::Constant(ast::ExprConstant { Expr::Constant(ast::ExprConstant {
value: Constant::Int(_) | Constant::Float(_) | Constant::Complex { .. }, value: Constant::Int(_) | Constant::Float(_) | Constant::Complex { .. },
kind: _,
range: _, range: _,
}) => { }) => {
if checker.source_type.is_stub() && checker.enabled(Rule::NumericLiteralTooLong) { if checker.source_type.is_stub() && checker.enabled(Rule::NumericLiteralTooLong) {
@ -1204,7 +1205,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
} }
Expr::Constant(ast::ExprConstant { Expr::Constant(ast::ExprConstant {
value: Constant::Bytes(_), value: Constant::Bytes(_),
kind: _,
range: _, range: _,
}) => { }) => {
if checker.source_type.is_stub() && checker.enabled(Rule::StringOrBytesTooLong) { if checker.source_type.is_stub() && checker.enabled(Rule::StringOrBytesTooLong) {
@ -1213,7 +1213,6 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
} }
Expr::Constant(ast::ExprConstant { Expr::Constant(ast::ExprConstant {
value: Constant::Str(value), value: Constant::Str(value),
kind,
range: _, range: _,
}) => { }) => {
if checker.enabled(Rule::HardcodedBindAllInterfaces) { if checker.enabled(Rule::HardcodedBindAllInterfaces) {
@ -1227,7 +1226,7 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
flake8_bandit::rules::hardcoded_tmp_directory(checker, expr, value); flake8_bandit::rules::hardcoded_tmp_directory(checker, expr, value);
} }
if checker.enabled(Rule::UnicodeKindPrefix) { if checker.enabled(Rule::UnicodeKindPrefix) {
pyupgrade::rules::unicode_kind_prefix(checker, expr, kind.as_deref()); pyupgrade::rules::unicode_kind_prefix(checker, expr, value.unicode);
} }
if checker.source_type.is_stub() { if checker.source_type.is_stub() {
if checker.enabled(Rule::StringOrBytesTooLong) { if checker.enabled(Rule::StringOrBytesTooLong) {
@ -1253,14 +1252,10 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
range: _, range: _,
}) => { }) => {
if checker.enabled(Rule::IfExprWithTrueFalse) { if checker.enabled(Rule::IfExprWithTrueFalse) {
flake8_simplify::rules::explicit_true_false_in_ifexpr( flake8_simplify::rules::if_expr_with_true_false(checker, expr, test, body, orelse);
checker, expr, test, body, orelse,
);
} }
if checker.enabled(Rule::IfExprWithFalseTrue) { if checker.enabled(Rule::IfExprWithFalseTrue) {
flake8_simplify::rules::explicit_false_true_in_ifexpr( flake8_simplify::rules::if_expr_with_false_true(checker, expr, test, body, orelse);
checker, expr, test, body, orelse,
);
} }
if checker.enabled(Rule::IfExprWithTwistedArms) { if checker.enabled(Rule::IfExprWithTwistedArms) {
flake8_simplify::rules::twisted_arms_in_ifexpr(checker, expr, test, body, orelse); flake8_simplify::rules::twisted_arms_in_ifexpr(checker, expr, test, body, orelse);
@ -1358,8 +1353,8 @@ pub(crate) fn expression(expr: &Expr, checker: &mut Checker) {
if checker.enabled(Rule::ExprAndFalse) { if checker.enabled(Rule::ExprAndFalse) {
flake8_simplify::rules::expr_and_false(checker, expr); flake8_simplify::rules::expr_and_false(checker, expr);
} }
if checker.enabled(Rule::RepeatedEqualityComparisonTarget) { if checker.enabled(Rule::RepeatedEqualityComparison) {
pylint::rules::repeated_equality_comparison_target(checker, bool_op); pylint::rules::repeated_equality_comparison(checker, bool_op);
} }
} }
_ => {} _ => {}

View File

@ -528,11 +528,7 @@ where
&self.semantic.definitions, &self.semantic.definitions,
); );
self.semantic.push_definition(definition); self.semantic.push_definition(definition);
self.semantic.push_scope(ScopeKind::Function(function_def));
self.semantic.push_scope(match &stmt {
Stmt::FunctionDef(stmt) => ScopeKind::Function(stmt),
_ => unreachable!("Expected Stmt::FunctionDef"),
});
self.deferred.functions.push(self.semantic.snapshot()); self.deferred.functions.push(self.semantic.snapshot());
@ -1192,7 +1188,6 @@ where
} }
Expr::Constant(ast::ExprConstant { Expr::Constant(ast::ExprConstant {
value: Constant::Str(value), value: Constant::Str(value),
kind: _,
range: _, range: _,
}) => { }) => {
if self.semantic.in_type_definition() if self.semantic.in_type_definition()

View File

@ -85,7 +85,7 @@ pub(crate) fn check_imports(
stylist: &Stylist, stylist: &Stylist,
path: &Path, path: &Path,
package: Option<&Path>, package: Option<&Path>,
source_kind: Option<&SourceKind>, source_kind: &SourceKind,
source_type: PySourceType, source_type: PySourceType,
) -> (Vec<Diagnostic>, Option<ImportMap>) { ) -> (Vec<Diagnostic>, Option<ImportMap>) {
// Extract all import blocks from the AST. // Extract all import blocks from the AST.

View File

@ -9,6 +9,7 @@ use strum_macros::{AsRefStr, EnumIter};
use ruff_diagnostics::Violation; use ruff_diagnostics::Violation;
use crate::registry::{AsRule, Linter}; use crate::registry::{AsRule, Linter};
use crate::rule_selector::is_single_rule_selector;
use crate::rules; use crate::rules;
#[derive(PartialEq, Eq, PartialOrd, Ord)] #[derive(PartialEq, Eq, PartialOrd, Ord)]
@ -51,7 +52,10 @@ impl PartialEq<&str> for NoqaCode {
pub enum RuleGroup { pub enum RuleGroup {
/// The rule has not been assigned to any specific group. /// The rule has not been assigned to any specific group.
Unspecified, Unspecified,
/// The rule is still under development, and must be enabled explicitly. /// The rule is unstable, and preview mode must be enabled for usage.
Preview,
/// Legacy category for unstable rules, supports backwards compatible selection.
#[deprecated(note = "Use `RuleGroup::Preview` for new rules instead")]
Nursery, Nursery,
} }
@ -64,38 +68,71 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
Some(match (linter, code) { Some(match (linter, code) {
// pycodestyle errors // pycodestyle errors
(Pycodestyle, "E101") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MixedSpacesAndTabs), (Pycodestyle, "E101") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MixedSpacesAndTabs),
#[allow(deprecated)]
(Pycodestyle, "E111") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::IndentationWithInvalidMultiple), (Pycodestyle, "E111") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::IndentationWithInvalidMultiple),
#[allow(deprecated)]
(Pycodestyle, "E112") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoIndentedBlock), (Pycodestyle, "E112") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoIndentedBlock),
#[allow(deprecated)]
(Pycodestyle, "E113") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedIndentation), (Pycodestyle, "E113") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedIndentation),
#[allow(deprecated)]
(Pycodestyle, "E114") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::IndentationWithInvalidMultipleComment), (Pycodestyle, "E114") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::IndentationWithInvalidMultipleComment),
#[allow(deprecated)]
(Pycodestyle, "E115") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoIndentedBlockComment), (Pycodestyle, "E115") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoIndentedBlockComment),
#[allow(deprecated)]
(Pycodestyle, "E116") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedIndentationComment), (Pycodestyle, "E116") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedIndentationComment),
#[allow(deprecated)]
(Pycodestyle, "E117") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::OverIndented), (Pycodestyle, "E117") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::OverIndented),
#[allow(deprecated)]
(Pycodestyle, "E201") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceAfterOpenBracket), (Pycodestyle, "E201") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceAfterOpenBracket),
#[allow(deprecated)]
(Pycodestyle, "E202") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforeCloseBracket), (Pycodestyle, "E202") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforeCloseBracket),
#[allow(deprecated)]
(Pycodestyle, "E203") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforePunctuation), (Pycodestyle, "E203") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforePunctuation),
#[allow(deprecated)]
(Pycodestyle, "E211") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforeParameters), (Pycodestyle, "E211") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::WhitespaceBeforeParameters),
#[allow(deprecated)]
(Pycodestyle, "E221") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesBeforeOperator), (Pycodestyle, "E221") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesBeforeOperator),
#[allow(deprecated)]
(Pycodestyle, "E222") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterOperator), (Pycodestyle, "E222") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterOperator),
#[allow(deprecated)]
(Pycodestyle, "E223") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabBeforeOperator), (Pycodestyle, "E223") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabBeforeOperator),
#[allow(deprecated)]
(Pycodestyle, "E224") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterOperator), (Pycodestyle, "E224") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterOperator),
#[allow(deprecated)]
(Pycodestyle, "E225") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundOperator), (Pycodestyle, "E225") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundOperator),
#[allow(deprecated)]
(Pycodestyle, "E226") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundArithmeticOperator), (Pycodestyle, "E226") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundArithmeticOperator),
#[allow(deprecated)]
(Pycodestyle, "E227") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundBitwiseOrShiftOperator), (Pycodestyle, "E227") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundBitwiseOrShiftOperator),
#[allow(deprecated)]
(Pycodestyle, "E228") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundModuloOperator), (Pycodestyle, "E228") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundModuloOperator),
#[allow(deprecated)]
(Pycodestyle, "E231") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespace), (Pycodestyle, "E231") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespace),
#[allow(deprecated)]
(Pycodestyle, "E241") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterComma), (Pycodestyle, "E241") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterComma),
#[allow(deprecated)]
(Pycodestyle, "E242") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterComma), (Pycodestyle, "E242") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterComma),
#[allow(deprecated)]
(Pycodestyle, "E251") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedSpacesAroundKeywordParameterEquals), (Pycodestyle, "E251") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::UnexpectedSpacesAroundKeywordParameterEquals),
#[allow(deprecated)]
(Pycodestyle, "E252") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundParameterEquals), (Pycodestyle, "E252") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAroundParameterEquals),
#[allow(deprecated)]
(Pycodestyle, "E261") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TooFewSpacesBeforeInlineComment), (Pycodestyle, "E261") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TooFewSpacesBeforeInlineComment),
#[allow(deprecated)]
(Pycodestyle, "E262") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoSpaceAfterInlineComment), (Pycodestyle, "E262") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoSpaceAfterInlineComment),
#[allow(deprecated)]
(Pycodestyle, "E265") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoSpaceAfterBlockComment), (Pycodestyle, "E265") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::NoSpaceAfterBlockComment),
#[allow(deprecated)]
(Pycodestyle, "E266") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleLeadingHashesForBlockComment), (Pycodestyle, "E266") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleLeadingHashesForBlockComment),
#[allow(deprecated)]
(Pycodestyle, "E271") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterKeyword), (Pycodestyle, "E271") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesAfterKeyword),
#[allow(deprecated)]
(Pycodestyle, "E272") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesBeforeKeyword), (Pycodestyle, "E272") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MultipleSpacesBeforeKeyword),
#[allow(deprecated)]
(Pycodestyle, "E273") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterKeyword), (Pycodestyle, "E273") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabAfterKeyword),
#[allow(deprecated)]
(Pycodestyle, "E274") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabBeforeKeyword), (Pycodestyle, "E274") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::TabBeforeKeyword),
#[allow(deprecated)]
(Pycodestyle, "E275") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAfterKeyword), (Pycodestyle, "E275") => (RuleGroup::Nursery, rules::pycodestyle::rules::logical_lines::MissingWhitespaceAfterKeyword),
(Pycodestyle, "E401") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MultipleImportsOnOneLine), (Pycodestyle, "E401") => (RuleGroup::Unspecified, rules::pycodestyle::rules::MultipleImportsOnOneLine),
(Pycodestyle, "E402") => (RuleGroup::Unspecified, rules::pycodestyle::rules::ModuleImportNotAtTopOfFile), (Pycodestyle, "E402") => (RuleGroup::Unspecified, rules::pycodestyle::rules::ModuleImportNotAtTopOfFile),
@ -176,6 +213,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Pylint, "C0205") => (RuleGroup::Unspecified, rules::pylint::rules::SingleStringSlots), (Pylint, "C0205") => (RuleGroup::Unspecified, rules::pylint::rules::SingleStringSlots),
(Pylint, "C0208") => (RuleGroup::Unspecified, rules::pylint::rules::IterationOverSet), (Pylint, "C0208") => (RuleGroup::Unspecified, rules::pylint::rules::IterationOverSet),
(Pylint, "C0414") => (RuleGroup::Unspecified, rules::pylint::rules::UselessImportAlias), (Pylint, "C0414") => (RuleGroup::Unspecified, rules::pylint::rules::UselessImportAlias),
#[allow(deprecated)]
(Pylint, "C1901") => (RuleGroup::Nursery, rules::pylint::rules::CompareToEmptyString), (Pylint, "C1901") => (RuleGroup::Nursery, rules::pylint::rules::CompareToEmptyString),
(Pylint, "C3002") => (RuleGroup::Unspecified, rules::pylint::rules::UnnecessaryDirectLambdaCall), (Pylint, "C3002") => (RuleGroup::Unspecified, rules::pylint::rules::UnnecessaryDirectLambdaCall),
(Pylint, "E0100") => (RuleGroup::Unspecified, rules::pylint::rules::YieldInInit), (Pylint, "E0100") => (RuleGroup::Unspecified, rules::pylint::rules::YieldInInit),
@ -212,10 +250,11 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Pylint, "R0915") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyStatements), (Pylint, "R0915") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyStatements),
(Pylint, "R1701") => (RuleGroup::Unspecified, rules::pylint::rules::RepeatedIsinstanceCalls), (Pylint, "R1701") => (RuleGroup::Unspecified, rules::pylint::rules::RepeatedIsinstanceCalls),
(Pylint, "R1711") => (RuleGroup::Unspecified, rules::pylint::rules::UselessReturn), (Pylint, "R1711") => (RuleGroup::Unspecified, rules::pylint::rules::UselessReturn),
(Pylint, "R1714") => (RuleGroup::Unspecified, rules::pylint::rules::RepeatedEqualityComparisonTarget), (Pylint, "R1714") => (RuleGroup::Unspecified, rules::pylint::rules::RepeatedEqualityComparison),
(Pylint, "R1722") => (RuleGroup::Unspecified, rules::pylint::rules::SysExitAlias), (Pylint, "R1722") => (RuleGroup::Unspecified, rules::pylint::rules::SysExitAlias),
(Pylint, "R2004") => (RuleGroup::Unspecified, rules::pylint::rules::MagicValueComparison), (Pylint, "R2004") => (RuleGroup::Unspecified, rules::pylint::rules::MagicValueComparison),
(Pylint, "R5501") => (RuleGroup::Unspecified, rules::pylint::rules::CollapsibleElseIf), (Pylint, "R5501") => (RuleGroup::Unspecified, rules::pylint::rules::CollapsibleElseIf),
#[allow(deprecated)]
(Pylint, "R6301") => (RuleGroup::Nursery, rules::pylint::rules::NoSelfUse), (Pylint, "R6301") => (RuleGroup::Nursery, rules::pylint::rules::NoSelfUse),
(Pylint, "W0120") => (RuleGroup::Unspecified, rules::pylint::rules::UselessElseOnLoop), (Pylint, "W0120") => (RuleGroup::Unspecified, rules::pylint::rules::UselessElseOnLoop),
(Pylint, "W0127") => (RuleGroup::Unspecified, rules::pylint::rules::SelfAssigningVariable), (Pylint, "W0127") => (RuleGroup::Unspecified, rules::pylint::rules::SelfAssigningVariable),
@ -228,9 +267,11 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Pylint, "W1508") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidEnvvarDefault), (Pylint, "W1508") => (RuleGroup::Unspecified, rules::pylint::rules::InvalidEnvvarDefault),
(Pylint, "W1509") => (RuleGroup::Unspecified, rules::pylint::rules::SubprocessPopenPreexecFn), (Pylint, "W1509") => (RuleGroup::Unspecified, rules::pylint::rules::SubprocessPopenPreexecFn),
(Pylint, "W1510") => (RuleGroup::Unspecified, rules::pylint::rules::SubprocessRunWithoutCheck), (Pylint, "W1510") => (RuleGroup::Unspecified, rules::pylint::rules::SubprocessRunWithoutCheck),
#[allow(deprecated)]
(Pylint, "W1641") => (RuleGroup::Nursery, rules::pylint::rules::EqWithoutHash), (Pylint, "W1641") => (RuleGroup::Nursery, rules::pylint::rules::EqWithoutHash),
(Pylint, "R0904") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyPublicMethods), (Pylint, "R0904") => (RuleGroup::Unspecified, rules::pylint::rules::TooManyPublicMethods),
(Pylint, "W2901") => (RuleGroup::Unspecified, rules::pylint::rules::RedefinedLoopName), (Pylint, "W2901") => (RuleGroup::Unspecified, rules::pylint::rules::RedefinedLoopName),
#[allow(deprecated)]
(Pylint, "W3201") => (RuleGroup::Nursery, rules::pylint::rules::BadDunderMethodName), (Pylint, "W3201") => (RuleGroup::Nursery, rules::pylint::rules::BadDunderMethodName),
(Pylint, "W3301") => (RuleGroup::Unspecified, rules::pylint::rules::NestedMinMax), (Pylint, "W3301") => (RuleGroup::Unspecified, rules::pylint::rules::NestedMinMax),
@ -404,6 +445,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8Simplify, "910") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::DictGetWithNoneDefault), (Flake8Simplify, "910") => (RuleGroup::Unspecified, rules::flake8_simplify::rules::DictGetWithNoneDefault),
// flake8-copyright // flake8-copyright
#[allow(deprecated)]
(Flake8Copyright, "001") => (RuleGroup::Nursery, rules::flake8_copyright::rules::MissingCopyrightNotice), (Flake8Copyright, "001") => (RuleGroup::Nursery, rules::flake8_copyright::rules::MissingCopyrightNotice),
// pyupgrade // pyupgrade
@ -816,9 +858,11 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Ruff, "012") => (RuleGroup::Unspecified, rules::ruff::rules::MutableClassDefault), (Ruff, "012") => (RuleGroup::Unspecified, rules::ruff::rules::MutableClassDefault),
(Ruff, "013") => (RuleGroup::Unspecified, rules::ruff::rules::ImplicitOptional), (Ruff, "013") => (RuleGroup::Unspecified, rules::ruff::rules::ImplicitOptional),
#[cfg(feature = "unreachable-code")] // When removing this feature gate, also update rules_selector.rs #[cfg(feature = "unreachable-code")] // When removing this feature gate, also update rules_selector.rs
#[allow(deprecated)]
(Ruff, "014") => (RuleGroup::Nursery, rules::ruff::rules::UnreachableCode), (Ruff, "014") => (RuleGroup::Nursery, rules::ruff::rules::UnreachableCode),
(Ruff, "015") => (RuleGroup::Unspecified, rules::ruff::rules::UnnecessaryIterableAllocationForFirstElement), (Ruff, "015") => (RuleGroup::Unspecified, rules::ruff::rules::UnnecessaryIterableAllocationForFirstElement),
(Ruff, "016") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidIndexType), (Ruff, "016") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidIndexType),
#[allow(deprecated)]
(Ruff, "017") => (RuleGroup::Nursery, rules::ruff::rules::QuadraticListSummation), (Ruff, "017") => (RuleGroup::Nursery, rules::ruff::rules::QuadraticListSummation),
(Ruff, "100") => (RuleGroup::Unspecified, rules::ruff::rules::UnusedNOQA), (Ruff, "100") => (RuleGroup::Unspecified, rules::ruff::rules::UnusedNOQA),
(Ruff, "200") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidPyprojectToml), (Ruff, "200") => (RuleGroup::Unspecified, rules::ruff::rules::InvalidPyprojectToml),
@ -867,9 +911,13 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
(Flake8Slots, "002") => (RuleGroup::Unspecified, rules::flake8_slots::rules::NoSlotsInNamedtupleSubclass), (Flake8Slots, "002") => (RuleGroup::Unspecified, rules::flake8_slots::rules::NoSlotsInNamedtupleSubclass),
// refurb // refurb
#[allow(deprecated)]
(Refurb, "113") => (RuleGroup::Nursery, rules::refurb::rules::RepeatedAppend), (Refurb, "113") => (RuleGroup::Nursery, rules::refurb::rules::RepeatedAppend),
#[allow(deprecated)]
(Refurb, "131") => (RuleGroup::Nursery, rules::refurb::rules::DeleteFullSlice), (Refurb, "131") => (RuleGroup::Nursery, rules::refurb::rules::DeleteFullSlice),
#[allow(deprecated)]
(Refurb, "132") => (RuleGroup::Nursery, rules::refurb::rules::CheckAndRemoveFromSet), (Refurb, "132") => (RuleGroup::Nursery, rules::refurb::rules::CheckAndRemoveFromSet),
(Refurb, "145") => (RuleGroup::Preview, rules::refurb::rules::SliceCopy),
_ => return None, _ => return None,
}) })

View File

@ -1,4 +1,4 @@
use libcst_native::{Expression, NameOrAttribute}; use libcst_native::{Expression, NameOrAttribute, ParenthesizableWhitespace, SimpleWhitespace};
fn compose_call_path_inner<'a>(expr: &'a Expression, parts: &mut Vec<&'a str>) { fn compose_call_path_inner<'a>(expr: &'a Expression, parts: &mut Vec<&'a str>) {
match expr { match expr {
@ -36,3 +36,17 @@ pub(crate) fn compose_module_path(module: &NameOrAttribute) -> String {
} }
} }
} }
/// Return a [`ParenthesizableWhitespace`] containing a single space.
pub(crate) fn space() -> ParenthesizableWhitespace<'static> {
ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(" "))
}
/// Ensure that a [`ParenthesizableWhitespace`] contains at least one space.
pub(crate) fn or_space(whitespace: ParenthesizableWhitespace) -> ParenthesizableWhitespace {
if whitespace == ParenthesizableWhitespace::default() {
space()
} else {
whitespace
}
}

View File

@ -171,10 +171,8 @@ impl<'a> Importer<'a> {
at: TextSize, at: TextSize,
semantic: &SemanticModel, semantic: &SemanticModel,
) -> Result<(Edit, String), ResolutionError> { ) -> Result<(Edit, String), ResolutionError> {
match self.get_symbol(symbol, at, semantic) { self.get_symbol(symbol, at, semantic)?
Some(result) => result, .map_or_else(|| self.import_symbol(symbol, at, semantic), Ok)
None => self.import_symbol(symbol, at, semantic),
}
} }
/// Return an [`Edit`] to reference an existing symbol, if it's present in the given [`SemanticModel`]. /// Return an [`Edit`] to reference an existing symbol, if it's present in the given [`SemanticModel`].
@ -183,9 +181,13 @@ impl<'a> Importer<'a> {
symbol: &ImportRequest, symbol: &ImportRequest,
at: TextSize, at: TextSize,
semantic: &SemanticModel, semantic: &SemanticModel,
) -> Option<Result<(Edit, String), ResolutionError>> { ) -> Result<Option<(Edit, String)>, ResolutionError> {
// If the symbol is already available in the current scope, use it. // If the symbol is already available in the current scope, use it.
let imported_name = semantic.resolve_qualified_import_name(symbol.module, symbol.member)?; let Some(imported_name) =
semantic.resolve_qualified_import_name(symbol.module, symbol.member)
else {
return Ok(None);
};
// If the symbol source (i.e., the import statement) comes after the current location, // If the symbol source (i.e., the import statement) comes after the current location,
// abort. For example, we could be generating an edit within a function, and the import // abort. For example, we could be generating an edit within a function, and the import
@ -195,13 +197,13 @@ impl<'a> Importer<'a> {
// unclear whether should add an import statement at the start of the file, since it could // unclear whether should add an import statement at the start of the file, since it could
// be shadowed between the import and the current location. // be shadowed between the import and the current location.
if imported_name.start() > at { if imported_name.start() > at {
return Some(Err(ResolutionError::ImportAfterUsage)); return Err(ResolutionError::ImportAfterUsage);
} }
// If the symbol source (i.e., the import statement) is in a typing-only context, but we're // If the symbol source (i.e., the import statement) is in a typing-only context, but we're
// in a runtime context, abort. // in a runtime context, abort.
if imported_name.context().is_typing() && semantic.execution_context().is_runtime() { if imported_name.context().is_typing() && semantic.execution_context().is_runtime() {
return Some(Err(ResolutionError::IncompatibleContext)); return Err(ResolutionError::IncompatibleContext);
} }
// We also add a no-op edit to force conflicts with any other fixes that might try to // We also add a no-op edit to force conflicts with any other fixes that might try to
@ -224,7 +226,7 @@ impl<'a> Importer<'a> {
self.locator.slice(imported_name.range()).to_string(), self.locator.slice(imported_name.range()).to_string(),
imported_name.range(), imported_name.range(),
); );
Some(Ok((import_edit, imported_name.into_name()))) Ok(Some((import_edit, imported_name.into_name())))
} }
/// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make /// Generate an [`Edit`] to reference the given symbol. Returns the [`Edit`] necessary to make

View File

@ -6,7 +6,7 @@
//! [Ruff]: https://github.com/astral-sh/ruff //! [Ruff]: https://github.com/astral-sh/ruff
pub use rule_selector::RuleSelector; pub use rule_selector::RuleSelector;
pub use rules::pycodestyle::rules::IOError; pub use rules::pycodestyle::rules::{IOError, SyntaxError};
pub const VERSION: &str = env!("CARGO_PKG_VERSION"); pub const VERSION: &str = env!("CARGO_PKG_VERSION");
@ -20,7 +20,6 @@ mod doc_lines;
mod docstrings; mod docstrings;
pub mod fs; pub mod fs;
mod importer; mod importer;
pub mod jupyter;
mod lex; mod lex;
pub mod line_width; pub mod line_width;
pub mod linter; pub mod linter;

View File

@ -6,8 +6,6 @@ use anyhow::{anyhow, Result};
use colored::Colorize; use colored::Colorize;
use itertools::Itertools; use itertools::Itertools;
use log::error; use log::error;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{AsMode, ParseError};
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;
use ruff_diagnostics::Diagnostic; use ruff_diagnostics::Diagnostic;
@ -15,7 +13,8 @@ use ruff_python_ast::imports::ImportMap;
use ruff_python_ast::PySourceType; use ruff_python_ast::PySourceType;
use ruff_python_codegen::Stylist; use ruff_python_codegen::Stylist;
use ruff_python_index::Indexer; use ruff_python_index::Indexer;
use ruff_python_parser::lexer::LexResult;
use ruff_python_parser::{AsMode, ParseError};
use ruff_source_file::{Locator, SourceFileBuilder}; use ruff_source_file::{Locator, SourceFileBuilder};
use ruff_text_size::Ranged; use ruff_text_size::Ranged;
@ -82,7 +81,7 @@ pub fn check_path(
directives: &Directives, directives: &Directives,
settings: &Settings, settings: &Settings,
noqa: flags::Noqa, noqa: flags::Noqa,
source_kind: Option<&SourceKind>, source_kind: &SourceKind,
source_type: PySourceType, source_type: PySourceType,
) -> LinterResult<(Vec<Diagnostic>, Option<ImportMap>)> { ) -> LinterResult<(Vec<Diagnostic>, Option<ImportMap>)> {
// Aggregate all diagnostics. // Aggregate all diagnostics.
@ -271,17 +270,17 @@ const MAX_ITERATIONS: usize = 100;
pub fn add_noqa_to_path( pub fn add_noqa_to_path(
path: &Path, path: &Path,
package: Option<&Path>, package: Option<&Path>,
source_kind: &SourceKind,
source_type: PySourceType, source_type: PySourceType,
settings: &Settings, settings: &Settings,
) -> Result<usize> { ) -> Result<usize> {
// Read the file from disk. let contents = source_kind.source_code();
let contents = std::fs::read_to_string(path)?;
// Tokenize once. // Tokenize once.
let tokens: Vec<LexResult> = ruff_python_parser::tokenize(&contents, source_type.as_mode()); let tokens: Vec<LexResult> = ruff_python_parser::tokenize(contents, source_type.as_mode());
// Map row and column locations to byte slices (lazily). // Map row and column locations to byte slices (lazily).
let locator = Locator::new(&contents); let locator = Locator::new(contents);
// Detect the current code style (lazily). // Detect the current code style (lazily).
let stylist = Stylist::from_tokens(&tokens, &locator); let stylist = Stylist::from_tokens(&tokens, &locator);
@ -311,21 +310,20 @@ pub fn add_noqa_to_path(
&directives, &directives,
settings, settings,
flags::Noqa::Disabled, flags::Noqa::Disabled,
None, source_kind,
source_type, source_type,
); );
// Log any parse errors. // Log any parse errors.
if let Some(err) = error { if let Some(err) = error {
// TODO(dhruvmanila): This should use `SourceKind`, update when
// `--add-noqa` is supported for Jupyter notebooks.
error!( error!(
"{}", "{}",
DisplayParseError::new(err, locator.to_source_code(), None) DisplayParseError::new(err, locator.to_source_code(), source_kind)
); );
} }
// Add any missing `# noqa` pragmas. // Add any missing `# noqa` pragmas.
// TODO(dhruvmanila): Add support for Jupyter Notebooks
add_noqa( add_noqa(
path, path,
&diagnostics.0, &diagnostics.0,
@ -378,7 +376,7 @@ pub fn lint_only(
&directives, &directives,
settings, settings,
noqa, noqa,
Some(source_kind), source_kind,
source_type, source_type,
); );
@ -472,7 +470,7 @@ pub fn lint_fix<'a>(
&directives, &directives,
settings, settings,
noqa, noqa,
Some(source_kind), source_kind,
source_type, source_type,
); );
@ -609,3 +607,133 @@ This indicates a bug in `{}`. If you could open an issue at:
); );
} }
} }
#[cfg(test)]
mod tests {
use std::path::Path;
use anyhow::Result;
use test_case::test_case;
use ruff_notebook::{Notebook, NotebookError};
use crate::registry::Rule;
use crate::source_kind::SourceKind;
use crate::test::{test_contents, test_notebook_path, TestedNotebook};
use crate::{assert_messages, settings};
/// Construct a path to a Jupyter notebook in the `resources/test/fixtures/jupyter` directory.
fn notebook_path(path: impl AsRef<Path>) -> std::path::PathBuf {
Path::new("../ruff_notebook/resources/test/fixtures/jupyter").join(path)
}
#[test]
fn test_import_sorting() -> Result<(), NotebookError> {
let actual = notebook_path("isort.ipynb");
let expected = notebook_path("isort_expected.ipynb");
let TestedNotebook {
messages,
source_notebook,
..
} = test_notebook_path(
&actual,
expected,
&settings::Settings::for_rule(Rule::UnsortedImports),
)?;
assert_messages!(messages, actual, source_notebook);
Ok(())
}
#[test]
fn test_ipy_escape_command() -> Result<(), NotebookError> {
let actual = notebook_path("ipy_escape_command.ipynb");
let expected = notebook_path("ipy_escape_command_expected.ipynb");
let TestedNotebook {
messages,
source_notebook,
..
} = test_notebook_path(
&actual,
expected,
&settings::Settings::for_rule(Rule::UnusedImport),
)?;
assert_messages!(messages, actual, source_notebook);
Ok(())
}
#[test]
fn test_unused_variable() -> Result<(), NotebookError> {
let actual = notebook_path("unused_variable.ipynb");
let expected = notebook_path("unused_variable_expected.ipynb");
let TestedNotebook {
messages,
source_notebook,
..
} = test_notebook_path(
&actual,
expected,
&settings::Settings::for_rule(Rule::UnusedVariable),
)?;
assert_messages!(messages, actual, source_notebook);
Ok(())
}
#[test]
fn test_json_consistency() -> Result<()> {
let actual_path = notebook_path("before_fix.ipynb");
let expected_path = notebook_path("after_fix.ipynb");
let TestedNotebook {
linted_notebook: fixed_notebook,
..
} = test_notebook_path(
actual_path,
&expected_path,
&settings::Settings::for_rule(Rule::UnusedImport),
)?;
let mut writer = Vec::new();
fixed_notebook.write(&mut writer)?;
let actual = String::from_utf8(writer)?;
let expected = std::fs::read_to_string(expected_path)?;
assert_eq!(actual, expected);
Ok(())
}
#[test_case(Path::new("before_fix.ipynb"), true; "trailing_newline")]
#[test_case(Path::new("no_trailing_newline.ipynb"), false; "no_trailing_newline")]
fn test_trailing_newline(path: &Path, trailing_newline: bool) -> Result<()> {
let notebook = Notebook::from_path(&notebook_path(path))?;
assert_eq!(notebook.trailing_newline(), trailing_newline);
let mut writer = Vec::new();
notebook.write(&mut writer)?;
let string = String::from_utf8(writer)?;
assert_eq!(string.ends_with('\n'), trailing_newline);
Ok(())
}
// Version <4.5, don't emit cell ids
#[test_case(Path::new("no_cell_id.ipynb"), false; "no_cell_id")]
// Version 4.5, cell ids are missing and need to be added
#[test_case(Path::new("add_missing_cell_id.ipynb"), true; "add_missing_cell_id")]
fn test_cell_id(path: &Path, has_id: bool) -> Result<()> {
let source_notebook = Notebook::from_path(&notebook_path(path))?;
let source_kind = SourceKind::IpyNotebook(source_notebook);
let (_, transformed) = test_contents(
&source_kind,
path,
&settings::Settings::for_rule(Rule::UnusedImport),
);
let linted_notebook = transformed.into_owned().expect_ipy_notebook();
let mut writer = Vec::new();
linted_notebook.write(&mut writer)?;
let actual = String::from_utf8(writer)?;
if has_id {
assert!(actual.contains(r#""id": ""#));
} else {
assert!(!actual.contains(r#""id":"#));
}
Ok(())
}
}

View File

@ -12,8 +12,8 @@ use ruff_python_parser::{ParseError, ParseErrorType};
use ruff_source_file::{OneIndexed, SourceCode, SourceLocation}; use ruff_source_file::{OneIndexed, SourceCode, SourceLocation};
use crate::fs; use crate::fs;
use crate::jupyter::Notebook;
use crate::source_kind::SourceKind; use crate::source_kind::SourceKind;
use ruff_notebook::Notebook;
pub static WARNINGS: Lazy<Mutex<Vec<&'static str>>> = Lazy::new(Mutex::default); pub static WARNINGS: Lazy<Mutex<Vec<&'static str>>> = Lazy::new(Mutex::default);
@ -139,14 +139,14 @@ pub fn set_up_logging(level: &LogLevel) -> Result<()> {
pub struct DisplayParseError<'a> { pub struct DisplayParseError<'a> {
error: ParseError, error: ParseError,
source_code: SourceCode<'a, 'a>, source_code: SourceCode<'a, 'a>,
source_kind: Option<&'a SourceKind>, source_kind: &'a SourceKind,
} }
impl<'a> DisplayParseError<'a> { impl<'a> DisplayParseError<'a> {
pub fn new( pub fn new(
error: ParseError, error: ParseError,
source_code: SourceCode<'a, 'a>, source_code: SourceCode<'a, 'a>,
source_kind: Option<&'a SourceKind>, source_kind: &'a SourceKind,
) -> Self { ) -> Self {
Self { Self {
error, error,
@ -171,32 +171,29 @@ impl Display for DisplayParseError<'_> {
// If we're working on a Jupyter notebook, translate the positions // If we're working on a Jupyter notebook, translate the positions
// with respect to the cell and row in the cell. This is the same // with respect to the cell and row in the cell. This is the same
// format as the `TextEmitter`. // format as the `TextEmitter`.
let error_location = if let Some(jupyter_index) = self let error_location =
.source_kind if let Some(jupyter_index) = self.source_kind.as_ipy_notebook().map(Notebook::index) {
.and_then(SourceKind::notebook) write!(
.map(Notebook::index) f,
{ "cell {cell}{colon}",
write!( cell = jupyter_index
f, .cell(source_location.row.get())
"cell {cell}{colon}", .unwrap_or_default(),
cell = jupyter_index colon = ":".cyan(),
.cell(source_location.row.get()) )?;
.unwrap_or_default(),
colon = ":".cyan(),
)?;
SourceLocation { SourceLocation {
row: OneIndexed::new( row: OneIndexed::new(
jupyter_index jupyter_index
.cell_row(source_location.row.get()) .cell_row(source_location.row.get())
.unwrap_or(1) as usize, .unwrap_or(1) as usize,
) )
.unwrap(), .unwrap(),
column: source_location.column, column: source_location.column,
} }
} else { } else {
source_location source_location
}; };
write!( write!(
f, f,

View File

@ -1,4 +1,5 @@
use std::collections::hash_map::DefaultHasher; use std::collections::hash_map::DefaultHasher;
use std::collections::HashSet;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::io::Write; use std::io::Write;
@ -6,8 +7,6 @@ use serde::ser::SerializeSeq;
use serde::{Serialize, Serializer}; use serde::{Serialize, Serializer};
use serde_json::json; use serde_json::json;
use ruff_source_file::SourceLocation;
use crate::fs::{relativize_path, relativize_path_to}; use crate::fs::{relativize_path, relativize_path_to};
use crate::message::{Emitter, EmitterContext, Message}; use crate::message::{Emitter, EmitterContext, Message};
use crate::registry::AsRule; use crate::registry::AsRule;
@ -58,6 +57,7 @@ impl Serialize for SerializedMessages<'_> {
S: Serializer, S: Serializer,
{ {
let mut s = serializer.serialize_seq(Some(self.messages.len()))?; let mut s = serializer.serialize_seq(Some(self.messages.len()))?;
let mut fingerprints = HashSet::<u64>::with_capacity(self.messages.len());
for message in self.messages { for message in self.messages {
let start_location = message.compute_start_location(); let start_location = message.compute_start_location();
@ -82,10 +82,19 @@ impl Serialize for SerializedMessages<'_> {
|project_dir| relativize_path_to(message.filename(), project_dir), |project_dir| relativize_path_to(message.filename(), project_dir),
); );
let mut message_fingerprint = fingerprint(message, 0);
// Make sure that we do not get a fingerprint that is already in use
// by adding in the previously generated one.
while fingerprints.contains(&message_fingerprint) {
message_fingerprint = fingerprint(message, message_fingerprint);
}
fingerprints.insert(message_fingerprint);
let value = json!({ let value = json!({
"description": format!("({}) {}", message.kind.rule().noqa_code(), message.kind.body), "description": format!("({}) {}", message.kind.rule().noqa_code(), message.kind.body),
"severity": "major", "severity": "major",
"fingerprint": fingerprint(message, &start_location, &end_location), "fingerprint": format!("{:x}", message_fingerprint),
"location": { "location": {
"path": path, "path": path,
"lines": lines "lines": lines
@ -100,11 +109,7 @@ impl Serialize for SerializedMessages<'_> {
} }
/// Generate a unique fingerprint to identify a violation. /// Generate a unique fingerprint to identify a violation.
fn fingerprint( fn fingerprint(message: &Message, salt: u64) -> u64 {
message: &Message,
start_location: &SourceLocation,
end_location: &SourceLocation,
) -> String {
let Message { let Message {
kind, kind,
range: _, range: _,
@ -115,12 +120,11 @@ fn fingerprint(
let mut hasher = DefaultHasher::new(); let mut hasher = DefaultHasher::new();
kind.rule().hash(&mut hasher); salt.hash(&mut hasher);
start_location.hash(&mut hasher); kind.name.hash(&mut hasher);
end_location.hash(&mut hasher);
file.name().hash(&mut hasher); file.name().hash(&mut hasher);
format!("{:x}", hasher.finish()) hasher.finish()
} }
#[cfg(test)] #[cfg(test)]

View File

@ -4,10 +4,10 @@ use std::num::NonZeroUsize;
use colored::Colorize; use colored::Colorize;
use ruff_notebook::NotebookIndex;
use ruff_source_file::OneIndexed; use ruff_source_file::OneIndexed;
use crate::fs::relativize_path; use crate::fs::relativize_path;
use crate::jupyter::{Notebook, NotebookIndex};
use crate::message::diff::calculate_print_width; use crate::message::diff::calculate_print_width;
use crate::message::text::{MessageCodeFrame, RuleCodeAndBody}; use crate::message::text::{MessageCodeFrame, RuleCodeAndBody};
use crate::message::{ use crate::message::{
@ -65,7 +65,7 @@ impl Emitter for GroupedEmitter {
writer, writer,
"{}", "{}",
DisplayGroupedMessage { DisplayGroupedMessage {
jupyter_index: context.notebook(message.filename()).map(Notebook::index), notebook_index: context.notebook_index(message.filename()),
message, message,
show_fix_status: self.show_fix_status, show_fix_status: self.show_fix_status,
show_source: self.show_source, show_source: self.show_source,
@ -92,7 +92,7 @@ struct DisplayGroupedMessage<'a> {
show_source: bool, show_source: bool,
row_length: NonZeroUsize, row_length: NonZeroUsize,
column_length: NonZeroUsize, column_length: NonZeroUsize,
jupyter_index: Option<&'a NotebookIndex>, notebook_index: Option<&'a NotebookIndex>,
} }
impl Display for DisplayGroupedMessage<'_> { impl Display for DisplayGroupedMessage<'_> {
@ -110,7 +110,7 @@ impl Display for DisplayGroupedMessage<'_> {
)?; )?;
// Check if we're working on a jupyter notebook and translate positions with cell accordingly // Check if we're working on a jupyter notebook and translate positions with cell accordingly
let (row, col) = if let Some(jupyter_index) = self.jupyter_index { let (row, col) = if let Some(jupyter_index) = self.notebook_index {
write!( write!(
f, f,
"cell {cell}{sep}", "cell {cell}{sep}",
@ -150,7 +150,7 @@ impl Display for DisplayGroupedMessage<'_> {
"{}", "{}",
MessageCodeFrame { MessageCodeFrame {
message, message,
jupyter_index: self.jupyter_index notebook_index: self.notebook_index
} }
)?; )?;
} }

View File

@ -14,12 +14,11 @@ pub use json_lines::JsonLinesEmitter;
pub use junit::JunitEmitter; pub use junit::JunitEmitter;
pub use pylint::PylintEmitter; pub use pylint::PylintEmitter;
use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix}; use ruff_diagnostics::{Diagnostic, DiagnosticKind, Fix};
use ruff_notebook::NotebookIndex;
use ruff_source_file::{SourceFile, SourceLocation}; use ruff_source_file::{SourceFile, SourceLocation};
use ruff_text_size::{Ranged, TextRange, TextSize}; use ruff_text_size::{Ranged, TextRange, TextSize};
pub use text::TextEmitter; pub use text::TextEmitter;
use crate::jupyter::Notebook;
mod azure; mod azure;
mod diff; mod diff;
mod github; mod github;
@ -128,21 +127,21 @@ pub trait Emitter {
/// Context passed to [`Emitter`]. /// Context passed to [`Emitter`].
pub struct EmitterContext<'a> { pub struct EmitterContext<'a> {
notebooks: &'a FxHashMap<String, Notebook>, notebook_indexes: &'a FxHashMap<String, NotebookIndex>,
} }
impl<'a> EmitterContext<'a> { impl<'a> EmitterContext<'a> {
pub fn new(notebooks: &'a FxHashMap<String, Notebook>) -> Self { pub fn new(notebook_indexes: &'a FxHashMap<String, NotebookIndex>) -> Self {
Self { notebooks } Self { notebook_indexes }
} }
/// Tests if the file with `name` is a jupyter notebook. /// Tests if the file with `name` is a jupyter notebook.
pub fn is_notebook(&self, name: &str) -> bool { pub fn is_notebook(&self, name: &str) -> bool {
self.notebooks.contains_key(name) self.notebook_indexes.contains_key(name)
} }
pub fn notebook(&self, name: &str) -> Option<&Notebook> { pub fn notebook_index(&self, name: &str) -> Option<&NotebookIndex> {
self.notebooks.get(name) self.notebook_indexes.get(name)
} }
} }
@ -226,8 +225,8 @@ def fibonacci(n):
emitter: &mut dyn Emitter, emitter: &mut dyn Emitter,
messages: &[Message], messages: &[Message],
) -> String { ) -> String {
let source_kinds = FxHashMap::default(); let notebook_indexes = FxHashMap::default();
let context = EmitterContext::new(&source_kinds); let context = EmitterContext::new(&notebook_indexes);
let mut output: Vec<u8> = Vec::new(); let mut output: Vec<u8> = Vec::new();
emitter.emit(&mut output, messages, &context).unwrap(); emitter.emit(&mut output, messages, &context).unwrap();

View File

@ -7,11 +7,11 @@ use annotate_snippets::snippet::{Annotation, AnnotationType, Slice, Snippet, Sou
use bitflags::bitflags; use bitflags::bitflags;
use colored::Colorize; use colored::Colorize;
use ruff_notebook::NotebookIndex;
use ruff_source_file::{OneIndexed, SourceLocation}; use ruff_source_file::{OneIndexed, SourceLocation};
use ruff_text_size::{Ranged, TextRange, TextSize}; use ruff_text_size::{Ranged, TextRange, TextSize};
use crate::fs::relativize_path; use crate::fs::relativize_path;
use crate::jupyter::{Notebook, NotebookIndex};
use crate::line_width::{LineWidthBuilder, TabSize}; use crate::line_width::{LineWidthBuilder, TabSize};
use crate::message::diff::Diff; use crate::message::diff::Diff;
use crate::message::{Emitter, EmitterContext, Message}; use crate::message::{Emitter, EmitterContext, Message};
@ -71,14 +71,14 @@ impl Emitter for TextEmitter {
)?; )?;
let start_location = message.compute_start_location(); let start_location = message.compute_start_location();
let jupyter_index = context.notebook(message.filename()).map(Notebook::index); let notebook_index = context.notebook_index(message.filename());
// Check if we're working on a jupyter notebook and translate positions with cell accordingly // Check if we're working on a jupyter notebook and translate positions with cell accordingly
let diagnostic_location = if let Some(jupyter_index) = jupyter_index { let diagnostic_location = if let Some(notebook_index) = notebook_index {
write!( write!(
writer, writer,
"cell {cell}{sep}", "cell {cell}{sep}",
cell = jupyter_index cell = notebook_index
.cell(start_location.row.get()) .cell(start_location.row.get())
.unwrap_or_default(), .unwrap_or_default(),
sep = ":".cyan(), sep = ":".cyan(),
@ -86,7 +86,7 @@ impl Emitter for TextEmitter {
SourceLocation { SourceLocation {
row: OneIndexed::new( row: OneIndexed::new(
jupyter_index notebook_index
.cell_row(start_location.row.get()) .cell_row(start_location.row.get())
.unwrap_or(1) as usize, .unwrap_or(1) as usize,
) )
@ -115,7 +115,7 @@ impl Emitter for TextEmitter {
"{}", "{}",
MessageCodeFrame { MessageCodeFrame {
message, message,
jupyter_index notebook_index
} }
)?; )?;
} }
@ -161,7 +161,7 @@ impl Display for RuleCodeAndBody<'_> {
pub(super) struct MessageCodeFrame<'a> { pub(super) struct MessageCodeFrame<'a> {
pub(crate) message: &'a Message, pub(crate) message: &'a Message,
pub(crate) jupyter_index: Option<&'a NotebookIndex>, pub(crate) notebook_index: Option<&'a NotebookIndex>,
} }
impl Display for MessageCodeFrame<'_> { impl Display for MessageCodeFrame<'_> {
@ -186,14 +186,12 @@ impl Display for MessageCodeFrame<'_> {
let content_start_index = source_code.line_index(range.start()); let content_start_index = source_code.line_index(range.start());
let mut start_index = content_start_index.saturating_sub(2); let mut start_index = content_start_index.saturating_sub(2);
// If we're working on a jupyter notebook, skip the lines which are // If we're working with a Jupyter Notebook, skip the lines which are
// outside of the cell containing the diagnostic. // outside of the cell containing the diagnostic.
if let Some(jupyter_index) = self.jupyter_index { if let Some(index) = self.notebook_index {
let content_start_cell = jupyter_index let content_start_cell = index.cell(content_start_index.get()).unwrap_or_default();
.cell(content_start_index.get())
.unwrap_or_default();
while start_index < content_start_index { while start_index < content_start_index {
if jupyter_index.cell(start_index.get()).unwrap_or_default() == content_start_cell { if index.cell(start_index.get()).unwrap_or_default() == content_start_cell {
break; break;
} }
start_index = start_index.saturating_add(1); start_index = start_index.saturating_add(1);
@ -213,14 +211,12 @@ impl Display for MessageCodeFrame<'_> {
.saturating_add(2) .saturating_add(2)
.min(OneIndexed::from_zero_indexed(source_code.line_count())); .min(OneIndexed::from_zero_indexed(source_code.line_count()));
// If we're working on a jupyter notebook, skip the lines which are // If we're working with a Jupyter Notebook, skip the lines which are
// outside of the cell containing the diagnostic. // outside of the cell containing the diagnostic.
if let Some(jupyter_index) = self.jupyter_index { if let Some(index) = self.notebook_index {
let content_end_cell = jupyter_index let content_end_cell = index.cell(content_end_index.get()).unwrap_or_default();
.cell(content_end_index.get())
.unwrap_or_default();
while end_index > content_end_index { while end_index > content_end_index {
if jupyter_index.cell(end_index.get()).unwrap_or_default() == content_end_cell { if index.cell(end_index.get()).unwrap_or_default() == content_end_cell {
break; break;
} }
end_index = end_index.saturating_sub(1); end_index = end_index.saturating_sub(1);
@ -256,10 +252,10 @@ impl Display for MessageCodeFrame<'_> {
title: None, title: None,
slices: vec![Slice { slices: vec![Slice {
source: &source.text, source: &source.text,
line_start: self.jupyter_index.map_or_else( line_start: self.notebook_index.map_or_else(
|| start_index.get(), || start_index.get(),
|jupyter_index| { |notebook_index| {
jupyter_index notebook_index
.cell_row(start_index.get()) .cell_row(start_index.get())
.unwrap_or_default() as usize .unwrap_or_default() as usize
}, },

View File

@ -59,7 +59,7 @@ impl<'a> Directive<'a> {
if text[..comment_start] if text[..comment_start]
.chars() .chars()
.last() .last()
.is_some_and(|c| c != '#') .map_or(true, |c| c != '#')
{ {
continue; continue;
} }

View File

@ -9,12 +9,16 @@ use crate::codes::RuleCodePrefix;
use crate::codes::RuleIter; use crate::codes::RuleIter;
use crate::registry::{Linter, Rule, RuleNamespace}; use crate::registry::{Linter, Rule, RuleNamespace};
use crate::rule_redirects::get_redirect; use crate::rule_redirects::get_redirect;
use crate::settings::types::PreviewMode;
#[derive(Debug, Clone, PartialEq, Eq, Hash)] #[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum RuleSelector { pub enum RuleSelector {
/// Select all stable rules. /// Select all rules (includes rules in preview if enabled)
All, All,
/// Select all nursery rules. /// Category to select all rules in preview (includes legacy nursery rules)
Preview,
/// Legacy category to select all rules in the "nursery" which predated preview mode
#[deprecated(note = "Use `RuleSelector::Preview` for new rules instead")]
Nursery, Nursery,
/// Legacy category to select both the `mccabe` and `flake8-comprehensions` linters /// Legacy category to select both the `mccabe` and `flake8-comprehensions` linters
/// via a single selector. /// via a single selector.
@ -29,6 +33,11 @@ pub enum RuleSelector {
prefix: RuleCodePrefix, prefix: RuleCodePrefix,
redirected_from: Option<&'static str>, redirected_from: Option<&'static str>,
}, },
/// Select an individual rule with a given prefix.
Rule {
prefix: RuleCodePrefix,
redirected_from: Option<&'static str>,
},
} }
impl From<Linter> for RuleSelector { impl From<Linter> for RuleSelector {
@ -43,7 +52,9 @@ impl FromStr for RuleSelector {
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
match s { match s {
"ALL" => Ok(Self::All), "ALL" => Ok(Self::All),
#[allow(deprecated)]
"NURSERY" => Ok(Self::Nursery), "NURSERY" => Ok(Self::Nursery),
"PREVIEW" => Ok(Self::Preview),
"C" => Ok(Self::C), "C" => Ok(Self::C),
"T" => Ok(Self::T), "T" => Ok(Self::T),
_ => { _ => {
@ -59,16 +70,43 @@ impl FromStr for RuleSelector {
return Ok(Self::Linter(linter)); return Ok(Self::Linter(linter));
} }
Ok(Self::Prefix { // Does the selector select a single rule?
prefix: RuleCodePrefix::parse(&linter, code) let prefix = RuleCodePrefix::parse(&linter, code)
.map_err(|_| ParseError::Unknown(s.to_string()))?, .map_err(|_| ParseError::Unknown(s.to_string()))?;
redirected_from,
}) if is_single_rule_selector(&prefix) {
Ok(Self::Rule {
prefix,
redirected_from,
})
} else {
Ok(Self::Prefix {
prefix,
redirected_from,
})
}
} }
} }
} }
} }
/// Returns `true` if the [`RuleCodePrefix`] matches a single rule exactly
/// (e.g., `E225`, as opposed to `E2`).
pub(crate) fn is_single_rule_selector(prefix: &RuleCodePrefix) -> bool {
let mut rules = prefix.rules();
// The selector must match a single rule.
let Some(rule) = rules.next() else {
return false;
};
if rules.next().is_some() {
return false;
}
// The rule must match the selector exactly.
rule.noqa_code().suffix() == prefix.short_code()
}
#[derive(Debug, thiserror::Error)] #[derive(Debug, thiserror::Error)]
pub enum ParseError { pub enum ParseError {
#[error("Unknown rule selector: `{0}`")] #[error("Unknown rule selector: `{0}`")]
@ -81,10 +119,12 @@ impl RuleSelector {
pub fn prefix_and_code(&self) -> (&'static str, &'static str) { pub fn prefix_and_code(&self) -> (&'static str, &'static str) {
match self { match self {
RuleSelector::All => ("", "ALL"), RuleSelector::All => ("", "ALL"),
#[allow(deprecated)]
RuleSelector::Nursery => ("", "NURSERY"), RuleSelector::Nursery => ("", "NURSERY"),
RuleSelector::Preview => ("", "PREVIEW"),
RuleSelector::C => ("", "C"), RuleSelector::C => ("", "C"),
RuleSelector::T => ("", "T"), RuleSelector::T => ("", "T"),
RuleSelector::Prefix { prefix, .. } => { RuleSelector::Prefix { prefix, .. } | RuleSelector::Rule { prefix, .. } => {
(prefix.linter().common_prefix(), prefix.short_code()) (prefix.linter().common_prefix(), prefix.short_code())
} }
RuleSelector::Linter(l) => (l.common_prefix(), ""), RuleSelector::Linter(l) => (l.common_prefix(), ""),
@ -135,27 +175,19 @@ impl Visitor<'_> for SelectorVisitor {
} }
} }
impl From<RuleCodePrefix> for RuleSelector { impl RuleSelector {
fn from(prefix: RuleCodePrefix) -> Self { /// Return all matching rules, regardless of whether they're in preview.
Self::Prefix { pub fn all_rules(&self) -> impl Iterator<Item = Rule> + '_ {
prefix,
redirected_from: None,
}
}
}
impl IntoIterator for &RuleSelector {
type IntoIter = RuleSelectorIter;
type Item = Rule;
fn into_iter(self) -> Self::IntoIter {
match self { match self {
RuleSelector::All => { RuleSelector::All => RuleSelectorIter::All(Rule::iter()),
RuleSelectorIter::All(Rule::iter().filter(|rule| !rule.is_nursery()))
} #[allow(deprecated)]
RuleSelector::Nursery => { RuleSelector::Nursery => {
RuleSelectorIter::Nursery(Rule::iter().filter(Rule::is_nursery)) RuleSelectorIter::Nursery(Rule::iter().filter(Rule::is_nursery))
} }
RuleSelector::Preview => RuleSelectorIter::Nursery(
Rule::iter().filter(|rule| rule.is_preview() || rule.is_nursery()),
),
RuleSelector::C => RuleSelectorIter::Chain( RuleSelector::C => RuleSelectorIter::Chain(
Linter::Flake8Comprehensions Linter::Flake8Comprehensions
.rules() .rules()
@ -167,13 +199,28 @@ impl IntoIterator for &RuleSelector {
.chain(Linter::Flake8Print.rules()), .chain(Linter::Flake8Print.rules()),
), ),
RuleSelector::Linter(linter) => RuleSelectorIter::Vec(linter.rules()), RuleSelector::Linter(linter) => RuleSelectorIter::Vec(linter.rules()),
RuleSelector::Prefix { prefix, .. } => RuleSelectorIter::Vec(prefix.clone().rules()), RuleSelector::Prefix { prefix, .. } | RuleSelector::Rule { prefix, .. } => {
RuleSelectorIter::Vec(prefix.clone().rules())
}
} }
} }
/// Returns rules matching the selector, taking into account whether preview mode is enabled.
pub fn rules(&self, preview: PreviewMode) -> impl Iterator<Item = Rule> + '_ {
#[allow(deprecated)]
self.all_rules().filter(move |rule| {
// Always include rules that are not in preview or the nursery
!(rule.is_preview() || rule.is_nursery())
// Backwards compatibility allows selection of nursery rules by exact code or dedicated group
|| ((matches!(self, RuleSelector::Rule { .. }) || matches!(self, RuleSelector::Nursery { .. })) && rule.is_nursery())
// Enabling preview includes all preview or nursery rules
|| preview.is_enabled()
})
}
} }
pub enum RuleSelectorIter { pub enum RuleSelectorIter {
All(std::iter::Filter<RuleIter, fn(&Rule) -> bool>), All(RuleIter),
Nursery(std::iter::Filter<RuleIter, fn(&Rule) -> bool>), Nursery(std::iter::Filter<RuleIter, fn(&Rule) -> bool>),
Chain(std::iter::Chain<std::vec::IntoIter<Rule>, std::vec::IntoIter<Rule>>), Chain(std::iter::Chain<std::vec::IntoIter<Rule>, std::vec::IntoIter<Rule>>),
Vec(std::vec::IntoIter<Rule>), Vec(std::vec::IntoIter<Rule>),
@ -192,18 +239,6 @@ impl Iterator for RuleSelectorIter {
} }
} }
/// A const alternative to the `impl From<RuleCodePrefix> for RuleSelector`
/// to let us keep the fields of [`RuleSelector`] private.
// Note that Rust doesn't yet support `impl const From<RuleCodePrefix> for
// RuleSelector` (see https://github.com/rust-lang/rust/issues/67792).
// TODO(martin): Remove once RuleSelector is an enum with Linter & Rule variants
pub(crate) const fn prefix_to_selector(prefix: RuleCodePrefix) -> RuleSelector {
RuleSelector::Prefix {
prefix,
redirected_from: None,
}
}
#[cfg(feature = "schemars")] #[cfg(feature = "schemars")]
mod schema { mod schema {
use itertools::Itertools; use itertools::Itertools;
@ -266,18 +301,20 @@ impl RuleSelector {
pub fn specificity(&self) -> Specificity { pub fn specificity(&self) -> Specificity {
match self { match self {
RuleSelector::All => Specificity::All, RuleSelector::All => Specificity::All,
RuleSelector::Preview => Specificity::All,
#[allow(deprecated)]
RuleSelector::Nursery => Specificity::All, RuleSelector::Nursery => Specificity::All,
RuleSelector::T => Specificity::LinterGroup, RuleSelector::T => Specificity::LinterGroup,
RuleSelector::C => Specificity::LinterGroup, RuleSelector::C => Specificity::LinterGroup,
RuleSelector::Linter(..) => Specificity::Linter, RuleSelector::Linter(..) => Specificity::Linter,
RuleSelector::Rule { .. } => Specificity::Rule,
RuleSelector::Prefix { prefix, .. } => { RuleSelector::Prefix { prefix, .. } => {
let prefix: &'static str = prefix.short_code(); let prefix: &'static str = prefix.short_code();
match prefix.len() { match prefix.len() {
1 => Specificity::Code1Char, 1 => Specificity::Prefix1Char,
2 => Specificity::Code2Chars, 2 => Specificity::Prefix2Chars,
3 => Specificity::Code3Chars, 3 => Specificity::Prefix3Chars,
4 => Specificity::Code4Chars, 4 => Specificity::Prefix4Chars,
5 => Specificity::Code5Chars,
_ => panic!("RuleSelector::specificity doesn't yet support codes with so many characters"), _ => panic!("RuleSelector::specificity doesn't yet support codes with so many characters"),
} }
} }
@ -285,16 +322,24 @@ impl RuleSelector {
} }
} }
#[derive(EnumIter, PartialEq, Eq, PartialOrd, Ord, Copy, Clone)] #[derive(EnumIter, PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Debug)]
pub enum Specificity { pub enum Specificity {
/// The specificity when selecting all rules (e.g., `--select ALL`).
All, All,
/// The specificity when selecting a legacy linter group (e.g., `--select C` or `--select T`).
LinterGroup, LinterGroup,
/// The specificity when selecting a linter (e.g., `--select PLE` or `--select UP`).
Linter, Linter,
Code1Char, /// The specificity when selecting via a rule prefix with a one-character code (e.g., `--select PLE1`).
Code2Chars, Prefix1Char,
Code3Chars, /// The specificity when selecting via a rule prefix with a two-character code (e.g., `--select PLE12`).
Code4Chars, Prefix2Chars,
Code5Chars, /// The specificity when selecting via a rule prefix with a three-character code (e.g., `--select PLE123`).
Prefix3Chars,
/// The specificity when selecting via a rule prefix with a four-character code (e.g., `--select PLE1234`).
Prefix4Chars,
/// The specificity when selecting an individual rule (e.g., `--select PLE1205`).
Rule,
} }
#[cfg(feature = "clap")] #[cfg(feature = "clap")]

View File

@ -454,7 +454,6 @@ fn check_dynamically_typed<F>(
if let Expr::Constant(ast::ExprConstant { if let Expr::Constant(ast::ExprConstant {
range, range,
value: Constant::Str(string), value: Constant::Str(string),
..
}) = annotation }) = annotation
{ {
// Quoted annotations // Quoted annotations

View File

@ -23,6 +23,10 @@ use ruff_text_size::Ranged;
/// ///
/// Use instead: /// Use instead:
/// ```python /// ```python
/// if not x > 0:
/// raise ValueError("Expected positive value.")
///
/// # or even better:
/// if x <= 0: /// if x <= 0:
/// raise ValueError("Expected positive value.") /// raise ValueError("Expected positive value.")
/// ``` /// ```

View File

@ -33,6 +33,9 @@ mod tests {
#[test_case(Rule::JumpStatementInFinally, Path::new("B012.py"))] #[test_case(Rule::JumpStatementInFinally, Path::new("B012.py"))]
#[test_case(Rule::LoopVariableOverridesIterator, Path::new("B020.py"))] #[test_case(Rule::LoopVariableOverridesIterator, Path::new("B020.py"))]
#[test_case(Rule::MutableArgumentDefault, Path::new("B006_B008.py"))] #[test_case(Rule::MutableArgumentDefault, Path::new("B006_B008.py"))]
#[test_case(Rule::MutableArgumentDefault, Path::new("B006_1.py"))]
#[test_case(Rule::MutableArgumentDefault, Path::new("B006_2.py"))]
#[test_case(Rule::MutableArgumentDefault, Path::new("B006_3.py"))]
#[test_case(Rule::NoExplicitStacklevel, Path::new("B028.py"))] #[test_case(Rule::NoExplicitStacklevel, Path::new("B028.py"))]
#[test_case(Rule::RaiseLiteral, Path::new("B016.py"))] #[test_case(Rule::RaiseLiteral, Path::new("B016.py"))]
#[test_case(Rule::RaiseWithoutFromInsideExcept, Path::new("B904.py"))] #[test_case(Rule::RaiseWithoutFromInsideExcept, Path::new("B904.py"))]

View File

@ -1,7 +1,6 @@
use ruff_python_ast::{self as ast, Constant, Expr};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::{self as ast, Constant, Expr};
use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private}; use ruff_python_stdlib::identifiers::{is_identifier, is_mangled_private};
use ruff_text_size::Ranged; use ruff_text_size::Ranged;
@ -81,7 +80,17 @@ pub(crate) fn getattr_with_constant(
let mut diagnostic = Diagnostic::new(GetAttrWithConstant, expr.range()); let mut diagnostic = Diagnostic::new(GetAttrWithConstant, expr.range());
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
diagnostic.set_fix(Fix::suggested(Edit::range_replacement( diagnostic.set_fix(Fix::suggested(Edit::range_replacement(
format!("{}.{}", checker.locator().slice(obj), value), if matches!(
obj,
Expr::Name(_) | Expr::Attribute(_) | Expr::Subscript(_) | Expr::Call(_)
) {
format!("{}.{}", checker.locator().slice(obj), value)
} else {
// Defensively parenthesize any other expressions. For example, attribute accesses
// on `int` literals must be parenthesized, e.g., `getattr(1, "real")` becomes
// `(1).real`. The same is true for named expressions and others.
format!("({}).{}", checker.locator().slice(obj), value)
},
expr.range(), expr.range(),
))); )));
} }

View File

@ -175,8 +175,15 @@ fn move_initialization(
return None; return None;
} }
Edit::insertion(content, locator.line_start(statement.start())) Edit::insertion(content, locator.line_start(statement.start()))
} else if locator.full_line_end(statement.end()) == locator.text_len() {
// If the statement is at the end of the file, without a trailing newline, insert
// _after_ it with an extra newline.
Edit::insertion(
format!("{}{}", stylist.line_ending().as_str(), content),
locator.full_line_end(statement.end()),
)
} else { } else {
// If the docstring is the only statement, insert _before_ it. // If the docstring is the only statement, insert _after_ it.
Edit::insertion(content, locator.full_line_end(statement.end())) Edit::insertion(content, locator.full_line_end(statement.end()))
} }
} else { } else {

View File

@ -1,9 +1,10 @@
use ruff_python_ast::{self as ast, ExceptHandler, Expr};
use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix}; use ruff_diagnostics::{AlwaysAutofixableViolation, Diagnostic, Edit, Fix};
use ruff_macros::{derive_message_formats, violation}; use ruff_macros::{derive_message_formats, violation};
use ruff_python_ast::helpers::map_starred;
use ruff_python_ast::{self as ast, ExceptHandler, Expr};
use ruff_text_size::Ranged; use ruff_text_size::Ranged;
use crate::autofix::edits::pad;
use crate::checkers::ast::Checker; use crate::checkers::ast::Checker;
use crate::registry::AsRule; use crate::registry::AsRule;
@ -41,11 +42,7 @@ pub struct RedundantTupleInExceptionHandler {
impl AlwaysAutofixableViolation for RedundantTupleInExceptionHandler { impl AlwaysAutofixableViolation for RedundantTupleInExceptionHandler {
#[derive_message_formats] #[derive_message_formats]
fn message(&self) -> String { fn message(&self) -> String {
let RedundantTupleInExceptionHandler { name } = self; format!("A length-one tuple literal is redundant in exception handlers")
format!(
"A length-one tuple literal is redundant. Write `except {name}` instead of `except \
({name},)`."
)
} }
fn autofix_title(&self) -> String { fn autofix_title(&self) -> String {
@ -70,9 +67,10 @@ pub(crate) fn redundant_tuple_in_exception_handler(
let Expr::Tuple(ast::ExprTuple { elts, .. }) = type_.as_ref() else { let Expr::Tuple(ast::ExprTuple { elts, .. }) = type_.as_ref() else {
continue; continue;
}; };
let [elt] = &elts[..] else { let [elt] = elts.as_slice() else {
continue; continue;
}; };
let elt = map_starred(elt);
let mut diagnostic = Diagnostic::new( let mut diagnostic = Diagnostic::new(
RedundantTupleInExceptionHandler { RedundantTupleInExceptionHandler {
name: checker.generator().expr(elt), name: checker.generator().expr(elt),
@ -80,8 +78,19 @@ pub(crate) fn redundant_tuple_in_exception_handler(
type_.range(), type_.range(),
); );
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
// If there's no space between the `except` and the tuple, we need to insert a space,
// as in:
// ```python
// except(ValueError,):
// ```
// Otherwise, the output will be invalid syntax, since we're removing a set of
// parentheses.
diagnostic.set_fix(Fix::automatic(Edit::range_replacement( diagnostic.set_fix(Fix::automatic(Edit::range_replacement(
checker.generator().expr(elt), pad(
checker.generator().expr(elt),
type_.range(),
checker.locator(),
),
type_.range(), type_.range(),
))); )));
} }

View File

@ -0,0 +1,26 @@
---
source: crates/ruff/src/rules/flake8_bugbear/mod.rs
---
B006_1.py:3:22: B006 [*] Do not use mutable data structures for argument defaults
|
1 | # Docstring followed by a newline
2 |
3 | def foobar(foor, bar={}):
| ^^ B006
4 | """
5 | """
|
= help: Replace with `None`; initialize within function
Possible fix
1 1 | # Docstring followed by a newline
2 2 |
3 |-def foobar(foor, bar={}):
3 |+def foobar(foor, bar=None):
4 4 | """
5 5 | """
6 |+
7 |+ if bar is None:
8 |+ bar = {}

View File

@ -0,0 +1,27 @@
---
source: crates/ruff/src/rules/flake8_bugbear/mod.rs
---
B006_2.py:4:22: B006 [*] Do not use mutable data structures for argument defaults
|
2 | # Regression test for https://github.com/astral-sh/ruff/issues/7155
3 |
4 | def foobar(foor, bar={}):
| ^^ B006
5 | """
6 | """
|
= help: Replace with `None`; initialize within function
Possible fix
1 1 | # Docstring followed by whitespace with no newline
2 2 | # Regression test for https://github.com/astral-sh/ruff/issues/7155
3 3 |
4 |-def foobar(foor, bar={}):
4 |+def foobar(foor, bar=None):
5 5 | """
6 |- """
6 |+ """
7 |+ if bar is None:
8 |+ bar = {}

View File

@ -0,0 +1,25 @@
---
source: crates/ruff/src/rules/flake8_bugbear/mod.rs
---
B006_3.py:4:22: B006 [*] Do not use mutable data structures for argument defaults
|
4 | def foobar(foor, bar={}):
| ^^ B006
5 | """
6 | """
|
= help: Replace with `None`; initialize within function
Possible fix
1 1 | # Docstring with no newline
2 2 |
3 3 |
4 |-def foobar(foor, bar={}):
4 |+def foobar(foor, bar=None):
5 |+ """
5 6 | """
6 |- """
7 |+ if bar is None:
8 |+ bar = {}

View File

@ -476,4 +476,23 @@ B006_B008.py:308:52: B006 Do not use mutable data structures for argument defaul
| |
= help: Replace with `None`; initialize within function = help: Replace with `None`; initialize within function
B006_B008.py:313:52: B006 [*] Do not use mutable data structures for argument defaults
|
313 | def single_line_func_wrong(value: dict[str, str] = {}):
| ^^ B006
314 | """Docstring without newline"""
|
= help: Replace with `None`; initialize within function
Possible fix
310 310 | """Docstring"""
311 311 |
312 312 |
313 |-def single_line_func_wrong(value: dict[str, str] = {}):
314 |- """Docstring without newline"""
313 |+def single_line_func_wrong(value: dict[str, str] = None):
314 |+ """Docstring without newline"""
315 |+ if value is None:
316 |+ value = {}

View File

@ -124,7 +124,7 @@ B009_B010.py:24:15: B009 [*] Do not call `getattr` with a constant attribute val
24 |+_ = lambda x: x.bar 24 |+_ = lambda x: x.bar
25 25 | if getattr(x, "bar"): 25 25 | if getattr(x, "bar"):
26 26 | pass 26 26 | pass
27 27 | 27 27 | getattr(1, "real")
B009_B010.py:25:4: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access. B009_B010.py:25:4: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
| |
@ -133,6 +133,7 @@ B009_B010.py:25:4: B009 [*] Do not call `getattr` with a constant attribute valu
25 | if getattr(x, "bar"): 25 | if getattr(x, "bar"):
| ^^^^^^^^^^^^^^^^^ B009 | ^^^^^^^^^^^^^^^^^ B009
26 | pass 26 | pass
27 | getattr(1, "real")
| |
= help: Replace `getattr` with attribute access = help: Replace `getattr` with attribute access
@ -143,7 +144,176 @@ B009_B010.py:25:4: B009 [*] Do not call `getattr` with a constant attribute valu
25 |-if getattr(x, "bar"): 25 |-if getattr(x, "bar"):
25 |+if x.bar: 25 |+if x.bar:
26 26 | pass 26 26 | pass
27 27 | 27 27 | getattr(1, "real")
28 28 | # Valid setattr usage 28 28 | getattr(1., "real")
B009_B010.py:27:1: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
|
25 | if getattr(x, "bar"):
26 | pass
27 | getattr(1, "real")
| ^^^^^^^^^^^^^^^^^^ B009
28 | getattr(1., "real")
29 | getattr(1.0, "real")
|
= help: Replace `getattr` with attribute access
Suggested fix
24 24 | _ = lambda x: getattr(x, "bar")
25 25 | if getattr(x, "bar"):
26 26 | pass
27 |-getattr(1, "real")
27 |+(1).real
28 28 | getattr(1., "real")
29 29 | getattr(1.0, "real")
30 30 | getattr(1j, "real")
B009_B010.py:28:1: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
|
26 | pass
27 | getattr(1, "real")
28 | getattr(1., "real")
| ^^^^^^^^^^^^^^^^^^^ B009
29 | getattr(1.0, "real")
30 | getattr(1j, "real")
|
= help: Replace `getattr` with attribute access
Suggested fix
25 25 | if getattr(x, "bar"):
26 26 | pass
27 27 | getattr(1, "real")
28 |-getattr(1., "real")
28 |+(1.).real
29 29 | getattr(1.0, "real")
30 30 | getattr(1j, "real")
31 31 | getattr(True, "real")
B009_B010.py:29:1: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
|
27 | getattr(1, "real")
28 | getattr(1., "real")
29 | getattr(1.0, "real")
| ^^^^^^^^^^^^^^^^^^^^ B009
30 | getattr(1j, "real")
31 | getattr(True, "real")
|
= help: Replace `getattr` with attribute access
Suggested fix
26 26 | pass
27 27 | getattr(1, "real")
28 28 | getattr(1., "real")
29 |-getattr(1.0, "real")
29 |+(1.0).real
30 30 | getattr(1j, "real")
31 31 | getattr(True, "real")
32 32 | getattr(x := 1, "real")
B009_B010.py:30:1: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
|
28 | getattr(1., "real")
29 | getattr(1.0, "real")
30 | getattr(1j, "real")
| ^^^^^^^^^^^^^^^^^^^ B009
31 | getattr(True, "real")
32 | getattr(x := 1, "real")
|
= help: Replace `getattr` with attribute access
Suggested fix
27 27 | getattr(1, "real")
28 28 | getattr(1., "real")
29 29 | getattr(1.0, "real")
30 |-getattr(1j, "real")
30 |+(1j).real
31 31 | getattr(True, "real")
32 32 | getattr(x := 1, "real")
33 33 | getattr(x + y, "real")
B009_B010.py:31:1: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
|
29 | getattr(1.0, "real")
30 | getattr(1j, "real")
31 | getattr(True, "real")
| ^^^^^^^^^^^^^^^^^^^^^ B009
32 | getattr(x := 1, "real")
33 | getattr(x + y, "real")
|
= help: Replace `getattr` with attribute access
Suggested fix
28 28 | getattr(1., "real")
29 29 | getattr(1.0, "real")
30 30 | getattr(1j, "real")
31 |-getattr(True, "real")
31 |+(True).real
32 32 | getattr(x := 1, "real")
33 33 | getattr(x + y, "real")
34 34 | getattr("foo"
B009_B010.py:32:1: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
|
30 | getattr(1j, "real")
31 | getattr(True, "real")
32 | getattr(x := 1, "real")
| ^^^^^^^^^^^^^^^^^^^^^^^ B009
33 | getattr(x + y, "real")
34 | getattr("foo"
|
= help: Replace `getattr` with attribute access
Suggested fix
29 29 | getattr(1.0, "real")
30 30 | getattr(1j, "real")
31 31 | getattr(True, "real")
32 |-getattr(x := 1, "real")
32 |+(x := 1).real
33 33 | getattr(x + y, "real")
34 34 | getattr("foo"
35 35 | "bar", "real")
B009_B010.py:33:1: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
|
31 | getattr(True, "real")
32 | getattr(x := 1, "real")
33 | getattr(x + y, "real")
| ^^^^^^^^^^^^^^^^^^^^^^ B009
34 | getattr("foo"
35 | "bar", "real")
|
= help: Replace `getattr` with attribute access
Suggested fix
30 30 | getattr(1j, "real")
31 31 | getattr(True, "real")
32 32 | getattr(x := 1, "real")
33 |-getattr(x + y, "real")
33 |+(x + y).real
34 34 | getattr("foo"
35 35 | "bar", "real")
36 36 |
B009_B010.py:34:1: B009 [*] Do not call `getattr` with a constant attribute value. It is not any safer than normal property access.
|
32 | getattr(x := 1, "real")
33 | getattr(x + y, "real")
34 | / getattr("foo"
35 | | "bar", "real")
| |______________________^ B009
|
= help: Replace `getattr` with attribute access
Suggested fix
31 31 | getattr(True, "real")
32 32 | getattr(x := 1, "real")
33 33 | getattr(x + y, "real")
34 |-getattr("foo"
35 |- "bar", "real")
34 |+("foo"
35 |+ "bar").real
36 36 |
37 37 |
38 38 | # Valid setattr usage

View File

@ -1,120 +1,120 @@
--- ---
source: crates/ruff/src/rules/flake8_bugbear/mod.rs source: crates/ruff/src/rules/flake8_bugbear/mod.rs
--- ---
B009_B010.py:40:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access. B009_B010.py:50:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access.
| |
39 | # Invalid usage 49 | # Invalid usage
40 | setattr(foo, "bar", None) 50 | setattr(foo, "bar", None)
| ^^^^^^^^^^^^^^^^^^^^^^^^^ B010 | ^^^^^^^^^^^^^^^^^^^^^^^^^ B010
41 | setattr(foo, "_123abc", None) 51 | setattr(foo, "_123abc", None)
42 | setattr(foo, "__123abc__", None) 52 | setattr(foo, "__123abc__", None)
| |
= help: Replace `setattr` with assignment = help: Replace `setattr` with assignment
Suggested fix Suggested fix
37 37 | pass 47 47 | pass
38 38 | 48 48 |
39 39 | # Invalid usage 49 49 | # Invalid usage
40 |-setattr(foo, "bar", None) 50 |-setattr(foo, "bar", None)
40 |+foo.bar = None 50 |+foo.bar = None
41 41 | setattr(foo, "_123abc", None) 51 51 | setattr(foo, "_123abc", None)
42 42 | setattr(foo, "__123abc__", None) 52 52 | setattr(foo, "__123abc__", None)
43 43 | setattr(foo, "abc123", None) 53 53 | setattr(foo, "abc123", None)
B009_B010.py:41:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access. B009_B010.py:51:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access.
| |
39 | # Invalid usage 49 | # Invalid usage
40 | setattr(foo, "bar", None) 50 | setattr(foo, "bar", None)
41 | setattr(foo, "_123abc", None) 51 | setattr(foo, "_123abc", None)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010
42 | setattr(foo, "__123abc__", None) 52 | setattr(foo, "__123abc__", None)
43 | setattr(foo, "abc123", None) 53 | setattr(foo, "abc123", None)
| |
= help: Replace `setattr` with assignment = help: Replace `setattr` with assignment
Suggested fix Suggested fix
38 38 | 48 48 |
39 39 | # Invalid usage 49 49 | # Invalid usage
40 40 | setattr(foo, "bar", None) 50 50 | setattr(foo, "bar", None)
41 |-setattr(foo, "_123abc", None) 51 |-setattr(foo, "_123abc", None)
41 |+foo._123abc = None 51 |+foo._123abc = None
42 42 | setattr(foo, "__123abc__", None) 52 52 | setattr(foo, "__123abc__", None)
43 43 | setattr(foo, "abc123", None) 53 53 | setattr(foo, "abc123", None)
44 44 | setattr(foo, r"abc123", None) 54 54 | setattr(foo, r"abc123", None)
B009_B010.py:42:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access. B009_B010.py:52:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access.
| |
40 | setattr(foo, "bar", None) 50 | setattr(foo, "bar", None)
41 | setattr(foo, "_123abc", None) 51 | setattr(foo, "_123abc", None)
42 | setattr(foo, "__123abc__", None) 52 | setattr(foo, "__123abc__", None)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010
43 | setattr(foo, "abc123", None) 53 | setattr(foo, "abc123", None)
44 | setattr(foo, r"abc123", None) 54 | setattr(foo, r"abc123", None)
| |
= help: Replace `setattr` with assignment = help: Replace `setattr` with assignment
Suggested fix Suggested fix
39 39 | # Invalid usage 49 49 | # Invalid usage
40 40 | setattr(foo, "bar", None) 50 50 | setattr(foo, "bar", None)
41 41 | setattr(foo, "_123abc", None) 51 51 | setattr(foo, "_123abc", None)
42 |-setattr(foo, "__123abc__", None) 52 |-setattr(foo, "__123abc__", None)
42 |+foo.__123abc__ = None 52 |+foo.__123abc__ = None
43 43 | setattr(foo, "abc123", None) 53 53 | setattr(foo, "abc123", None)
44 44 | setattr(foo, r"abc123", None) 54 54 | setattr(foo, r"abc123", None)
45 45 | setattr(foo.bar, r"baz", None) 55 55 | setattr(foo.bar, r"baz", None)
B009_B010.py:43:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access. B009_B010.py:53:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access.
| |
41 | setattr(foo, "_123abc", None) 51 | setattr(foo, "_123abc", None)
42 | setattr(foo, "__123abc__", None) 52 | setattr(foo, "__123abc__", None)
43 | setattr(foo, "abc123", None) 53 | setattr(foo, "abc123", None)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010
44 | setattr(foo, r"abc123", None) 54 | setattr(foo, r"abc123", None)
45 | setattr(foo.bar, r"baz", None) 55 | setattr(foo.bar, r"baz", None)
| |
= help: Replace `setattr` with assignment = help: Replace `setattr` with assignment
Suggested fix Suggested fix
40 40 | setattr(foo, "bar", None) 50 50 | setattr(foo, "bar", None)
41 41 | setattr(foo, "_123abc", None) 51 51 | setattr(foo, "_123abc", None)
42 42 | setattr(foo, "__123abc__", None) 52 52 | setattr(foo, "__123abc__", None)
43 |-setattr(foo, "abc123", None) 53 |-setattr(foo, "abc123", None)
43 |+foo.abc123 = None 53 |+foo.abc123 = None
44 44 | setattr(foo, r"abc123", None) 54 54 | setattr(foo, r"abc123", None)
45 45 | setattr(foo.bar, r"baz", None) 55 55 | setattr(foo.bar, r"baz", None)
B009_B010.py:44:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access. B009_B010.py:54:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access.
| |
42 | setattr(foo, "__123abc__", None) 52 | setattr(foo, "__123abc__", None)
43 | setattr(foo, "abc123", None) 53 | setattr(foo, "abc123", None)
44 | setattr(foo, r"abc123", None) 54 | setattr(foo, r"abc123", None)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010
45 | setattr(foo.bar, r"baz", None) 55 | setattr(foo.bar, r"baz", None)
| |
= help: Replace `setattr` with assignment = help: Replace `setattr` with assignment
Suggested fix Suggested fix
41 41 | setattr(foo, "_123abc", None) 51 51 | setattr(foo, "_123abc", None)
42 42 | setattr(foo, "__123abc__", None) 52 52 | setattr(foo, "__123abc__", None)
43 43 | setattr(foo, "abc123", None) 53 53 | setattr(foo, "abc123", None)
44 |-setattr(foo, r"abc123", None) 54 |-setattr(foo, r"abc123", None)
44 |+foo.abc123 = None 54 |+foo.abc123 = None
45 45 | setattr(foo.bar, r"baz", None) 55 55 | setattr(foo.bar, r"baz", None)
B009_B010.py:45:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access. B009_B010.py:55:1: B010 [*] Do not call `setattr` with a constant attribute value. It is not any safer than normal property access.
| |
43 | setattr(foo, "abc123", None) 53 | setattr(foo, "abc123", None)
44 | setattr(foo, r"abc123", None) 54 | setattr(foo, r"abc123", None)
45 | setattr(foo.bar, r"baz", None) 55 | setattr(foo.bar, r"baz", None)
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ B010
| |
= help: Replace `setattr` with assignment = help: Replace `setattr` with assignment
Suggested fix Suggested fix
42 42 | setattr(foo, "__123abc__", None) 52 52 | setattr(foo, "__123abc__", None)
43 43 | setattr(foo, "abc123", None) 53 53 | setattr(foo, "abc123", None)
44 44 | setattr(foo, r"abc123", None) 54 54 | setattr(foo, r"abc123", None)
45 |-setattr(foo.bar, r"baz", None) 55 |-setattr(foo.bar, r"baz", None)
45 |+foo.bar.baz = None 55 |+foo.bar.baz = None

View File

@ -1,24 +1,64 @@
--- ---
source: crates/ruff/src/rules/flake8_bugbear/mod.rs source: crates/ruff/src/rules/flake8_bugbear/mod.rs
--- ---
B013.py:3:8: B013 [*] A length-one tuple literal is redundant. Write `except ValueError` instead of `except (ValueError,)`. B013.py:5:8: B013 [*] A length-one tuple literal is redundant in exception handlers
| |
1 | try: 3 | try:
2 | pass
3 | except (ValueError,):
| ^^^^^^^^^^^^^ B013
4 | pass 4 | pass
5 | except AttributeError: 5 | except (ValueError,):
| ^^^^^^^^^^^^^ B013
6 | pass
7 | except AttributeError:
| |
= help: Replace with `except ValueError` = help: Replace with `except ValueError`
Fix Fix
1 1 | try: 2 2 |
2 2 | pass 3 3 | try:
3 |-except (ValueError,):
3 |+except ValueError:
4 4 | pass 4 4 | pass
5 5 | except AttributeError: 5 |-except (ValueError,):
5 |+except ValueError:
6 6 | pass 6 6 | pass
7 7 | except AttributeError:
8 8 | pass
B013.py:11:8: B013 [*] A length-one tuple literal is redundant in exception handlers
|
9 | except (ImportError, TypeError):
10 | pass
11 | except (*retriable_exceptions,):
| ^^^^^^^^^^^^^^^^^^^^^^^^ B013
12 | pass
13 | except(ValueError,):
|
= help: Replace with `except retriable_exceptions`
Fix
8 8 | pass
9 9 | except (ImportError, TypeError):
10 10 | pass
11 |-except (*retriable_exceptions,):
11 |+except retriable_exceptions:
12 12 | pass
13 13 | except(ValueError,):
14 14 | pass
B013.py:13:7: B013 [*] A length-one tuple literal is redundant in exception handlers
|
11 | except (*retriable_exceptions,):
12 | pass
13 | except(ValueError,):
| ^^^^^^^^^^^^^ B013
14 | pass
|
= help: Replace with `except ValueError`
Fix
10 10 | pass
11 11 | except (*retriable_exceptions,):
12 12 | pass
13 |-except(ValueError,):
13 |+except ValueError:
14 14 | pass

View File

@ -3,8 +3,8 @@ use itertools::Itertools;
use libcst_native::{ use libcst_native::{
Arg, AssignEqual, AssignTargetExpression, Call, Comment, CompFor, Dict, DictComp, DictElement, Arg, AssignEqual, AssignTargetExpression, Call, Comment, CompFor, Dict, DictComp, DictElement,
Element, EmptyLine, Expression, GeneratorExp, LeftCurlyBrace, LeftParen, LeftSquareBracket, Element, EmptyLine, Expression, GeneratorExp, LeftCurlyBrace, LeftParen, LeftSquareBracket,
List, ListComp, Name, ParenthesizableWhitespace, ParenthesizedWhitespace, RightCurlyBrace, List, ListComp, Name, ParenthesizableWhitespace, ParenthesizedNode, ParenthesizedWhitespace,
RightParen, RightSquareBracket, Set, SetComp, SimpleString, SimpleWhitespace, RightCurlyBrace, RightParen, RightSquareBracket, Set, SetComp, SimpleString, SimpleWhitespace,
TrailingWhitespace, Tuple, TrailingWhitespace, Tuple,
}; };
use ruff_python_ast::Expr; use ruff_python_ast::Expr;
@ -12,9 +12,12 @@ use ruff_text_size::{Ranged, TextRange};
use ruff_diagnostics::{Edit, Fix}; use ruff_diagnostics::{Edit, Fix};
use ruff_python_codegen::Stylist; use ruff_python_codegen::Stylist;
use ruff_python_semantic::SemanticModel;
use ruff_source_file::Locator; use ruff_source_file::Locator;
use crate::autofix::codemods::CodegenStylist; use crate::autofix::codemods::CodegenStylist;
use crate::autofix::edits::pad;
use crate::cst::helpers::space;
use crate::rules::flake8_comprehensions::rules::ObjectType; use crate::rules::flake8_comprehensions::rules::ObjectType;
use crate::{ use crate::{
checkers::ast::Checker, checkers::ast::Checker,
@ -26,9 +29,9 @@ use crate::{
/// (C400) Convert `list(x for x in y)` to `[x for x in y]`. /// (C400) Convert `list(x for x in y)` to `[x for x in y]`.
pub(crate) fn fix_unnecessary_generator_list( pub(crate) fn fix_unnecessary_generator_list(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Edit> { ) -> Result<Edit> {
// Expr(Call(GeneratorExp)))) -> Expr(ListComp))) // Expr(Call(GeneratorExp)))) -> Expr(ListComp)))
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
@ -58,7 +61,7 @@ pub(crate) fn fix_unnecessary_generator_list(
} }
/// (C401) Convert `set(x for x in y)` to `{x for x in y}`. /// (C401) Convert `set(x for x in y)` to `{x for x in y}`.
pub(crate) fn fix_unnecessary_generator_set(checker: &Checker, expr: &Expr) -> Result<Edit> { pub(crate) fn fix_unnecessary_generator_set(expr: &Expr, checker: &Checker) -> Result<Edit> {
let locator = checker.locator(); let locator = checker.locator();
let stylist = checker.stylist(); let stylist = checker.stylist();
@ -86,14 +89,14 @@ pub(crate) fn fix_unnecessary_generator_set(checker: &Checker, expr: &Expr) -> R
let content = tree.codegen_stylist(stylist); let content = tree.codegen_stylist(stylist);
Ok(Edit::range_replacement( Ok(Edit::range_replacement(
pad_expression(content, expr.range(), checker), pad_expression(content, expr.range(), checker.locator(), checker.semantic()),
expr.range(), expr.range(),
)) ))
} }
/// (C402) Convert `dict((x, x) for x in range(3))` to `{x: x for x in /// (C402) Convert `dict((x, x) for x in range(3))` to `{x: x for x in
/// range(3)}`. /// range(3)}`.
pub(crate) fn fix_unnecessary_generator_dict(checker: &Checker, expr: &Expr) -> Result<Edit> { pub(crate) fn fix_unnecessary_generator_dict(expr: &Expr, checker: &Checker) -> Result<Edit> {
let locator = checker.locator(); let locator = checker.locator();
let stylist = checker.stylist(); let stylist = checker.stylist();
@ -110,10 +113,20 @@ pub(crate) fn fix_unnecessary_generator_dict(checker: &Checker, expr: &Expr) ->
bail!("Expected tuple to contain two elements"); bail!("Expected tuple to contain two elements");
}; };
// Insert whitespace before the `for`, since we're removing parentheses, as in:
// ```python
// dict((x, x)for x in range(3))
// ```
let mut for_in = generator_exp.for_in.clone();
if for_in.whitespace_before == ParenthesizableWhitespace::default() {
for_in.whitespace_before =
ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(" "));
}
tree = Expression::DictComp(Box::new(DictComp { tree = Expression::DictComp(Box::new(DictComp {
key: Box::new(key.clone()), key: Box::new(key.clone()),
value: Box::new(value.clone()), value: Box::new(value.clone()),
for_in: generator_exp.for_in.clone(), for_in,
lbrace: LeftCurlyBrace { lbrace: LeftCurlyBrace {
whitespace_after: call.whitespace_before_args.clone(), whitespace_after: call.whitespace_before_args.clone(),
}, },
@ -123,19 +136,24 @@ pub(crate) fn fix_unnecessary_generator_dict(checker: &Checker, expr: &Expr) ->
lpar: vec![], lpar: vec![],
rpar: vec![], rpar: vec![],
whitespace_before_colon: ParenthesizableWhitespace::default(), whitespace_before_colon: ParenthesizableWhitespace::default(),
whitespace_after_colon: ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(" ")), whitespace_after_colon: space(),
})); }));
Ok(Edit::range_replacement( Ok(Edit::range_replacement(
pad_expression(tree.codegen_stylist(stylist), expr.range(), checker), pad_expression(
tree.codegen_stylist(stylist),
expr.range(),
checker.locator(),
checker.semantic(),
),
expr.range(), expr.range(),
)) ))
} }
/// (C403) Convert `set([x for x in y])` to `{x for x in y}`. /// (C403) Convert `set([x for x in y])` to `{x for x in y}`.
pub(crate) fn fix_unnecessary_list_comprehension_set( pub(crate) fn fix_unnecessary_list_comprehension_set(
checker: &Checker,
expr: &Expr, expr: &Expr,
checker: &Checker,
) -> Result<Edit> { ) -> Result<Edit> {
let locator = checker.locator(); let locator = checker.locator();
let stylist = checker.stylist(); let stylist = checker.stylist();
@ -162,7 +180,12 @@ pub(crate) fn fix_unnecessary_list_comprehension_set(
})); }));
Ok(Edit::range_replacement( Ok(Edit::range_replacement(
pad_expression(tree.codegen_stylist(stylist), expr.range(), checker), pad_expression(
tree.codegen_stylist(stylist),
expr.range(),
checker.locator(),
checker.semantic(),
),
expr.range(), expr.range(),
)) ))
} }
@ -170,8 +193,8 @@ pub(crate) fn fix_unnecessary_list_comprehension_set(
/// (C404) Convert `dict([(i, i) for i in range(3)])` to `{i: i for i in /// (C404) Convert `dict([(i, i) for i in range(3)])` to `{i: i for i in
/// range(3)}`. /// range(3)}`.
pub(crate) fn fix_unnecessary_list_comprehension_dict( pub(crate) fn fix_unnecessary_list_comprehension_dict(
checker: &Checker,
expr: &Expr, expr: &Expr,
checker: &Checker,
) -> Result<Edit> { ) -> Result<Edit> {
let locator = checker.locator(); let locator = checker.locator();
let stylist = checker.stylist(); let stylist = checker.stylist();
@ -190,12 +213,22 @@ pub(crate) fn fix_unnecessary_list_comprehension_dict(
bail!("Expected tuple with two elements"); bail!("Expected tuple with two elements");
}; };
// Insert whitespace before the `for`, since we're removing parentheses, as in:
// ```python
// dict((x, x)for x in range(3))
// ```
let mut for_in = list_comp.for_in.clone();
if for_in.whitespace_before == ParenthesizableWhitespace::default() {
for_in.whitespace_before =
ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(" "));
}
tree = Expression::DictComp(Box::new(DictComp { tree = Expression::DictComp(Box::new(DictComp {
key: Box::new(key.clone()), key: Box::new(key.clone()),
value: Box::new(value.clone()), value: Box::new(value.clone()),
for_in: list_comp.for_in.clone(), for_in,
whitespace_before_colon: ParenthesizableWhitespace::default(), whitespace_before_colon: ParenthesizableWhitespace::default(),
whitespace_after_colon: ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(" ")), whitespace_after_colon: space(),
lbrace: LeftCurlyBrace { lbrace: LeftCurlyBrace {
whitespace_after: call.whitespace_before_args.clone(), whitespace_after: call.whitespace_before_args.clone(),
}, },
@ -207,7 +240,12 @@ pub(crate) fn fix_unnecessary_list_comprehension_dict(
})); }));
Ok(Edit::range_replacement( Ok(Edit::range_replacement(
pad_expression(tree.codegen_stylist(stylist), expr.range(), checker), pad_expression(
tree.codegen_stylist(stylist),
expr.range(),
checker.locator(),
checker.semantic(),
),
expr.range(), expr.range(),
)) ))
} }
@ -256,7 +294,7 @@ fn drop_trailing_comma<'a>(
} }
/// (C405) Convert `set((1, 2))` to `{1, 2}`. /// (C405) Convert `set((1, 2))` to `{1, 2}`.
pub(crate) fn fix_unnecessary_literal_set(checker: &Checker, expr: &Expr) -> Result<Edit> { pub(crate) fn fix_unnecessary_literal_set(expr: &Expr, checker: &Checker) -> Result<Edit> {
let locator = checker.locator(); let locator = checker.locator();
let stylist = checker.stylist(); let stylist = checker.stylist();
@ -291,13 +329,18 @@ pub(crate) fn fix_unnecessary_literal_set(checker: &Checker, expr: &Expr) -> Res
} }
Ok(Edit::range_replacement( Ok(Edit::range_replacement(
pad_expression(tree.codegen_stylist(stylist), expr.range(), checker), pad_expression(
tree.codegen_stylist(stylist),
expr.range(),
checker.locator(),
checker.semantic(),
),
expr.range(), expr.range(),
)) ))
} }
/// (C406) Convert `dict([(1, 2)])` to `{1: 2}`. /// (C406) Convert `dict([(1, 2)])` to `{1: 2}`.
pub(crate) fn fix_unnecessary_literal_dict(checker: &Checker, expr: &Expr) -> Result<Edit> { pub(crate) fn fix_unnecessary_literal_dict(expr: &Expr, checker: &Checker) -> Result<Edit> {
let locator = checker.locator(); let locator = checker.locator();
let stylist = checker.stylist(); let stylist = checker.stylist();
@ -354,13 +397,18 @@ pub(crate) fn fix_unnecessary_literal_dict(checker: &Checker, expr: &Expr) -> Re
})); }));
Ok(Edit::range_replacement( Ok(Edit::range_replacement(
pad_expression(tree.codegen_stylist(stylist), expr.range(), checker), pad_expression(
tree.codegen_stylist(stylist),
expr.range(),
checker.locator(),
checker.semantic(),
),
expr.range(), expr.range(),
)) ))
} }
/// (C408) /// (C408)
pub(crate) fn fix_unnecessary_collection_call(checker: &Checker, expr: &Expr) -> Result<Edit> { pub(crate) fn fix_unnecessary_collection_call(expr: &Expr, checker: &Checker) -> Result<Edit> {
enum Collection { enum Collection {
Tuple, Tuple,
List, List,
@ -470,7 +518,12 @@ pub(crate) fn fix_unnecessary_collection_call(checker: &Checker, expr: &Expr) ->
Ok(Edit::range_replacement( Ok(Edit::range_replacement(
if matches!(collection, Collection::Dict) { if matches!(collection, Collection::Dict) {
pad_expression(tree.codegen_stylist(stylist), expr.range(), checker) pad_expression(
tree.codegen_stylist(stylist),
expr.range(),
checker.locator(),
checker.semantic(),
)
} else { } else {
tree.codegen_stylist(stylist) tree.codegen_stylist(stylist)
}, },
@ -490,19 +543,24 @@ pub(crate) fn fix_unnecessary_collection_call(checker: &Checker, expr: &Expr) ->
/// However, this is a syntax error under the f-string grammar. As such, /// However, this is a syntax error under the f-string grammar. As such,
/// this method will pad the start and end of an expression as needed to /// this method will pad the start and end of an expression as needed to
/// avoid producing invalid syntax. /// avoid producing invalid syntax.
fn pad_expression(content: String, range: TextRange, checker: &Checker) -> String { fn pad_expression(
if !checker.semantic().in_f_string() { content: String,
range: TextRange,
locator: &Locator,
semantic: &SemanticModel,
) -> String {
if !semantic.in_f_string() {
return content; return content;
} }
// If the expression is immediately preceded by an opening brace, then // If the expression is immediately preceded by an opening brace, then
// we need to add a space before the expression. // we need to add a space before the expression.
let prefix = checker.locator().up_to(range.start()); let prefix = locator.up_to(range.start());
let left_pad = matches!(prefix.chars().next_back(), Some('{')); let left_pad = matches!(prefix.chars().next_back(), Some('{'));
// If the expression is immediately preceded by an opening brace, then // If the expression is immediately preceded by an opening brace, then
// we need to add a space before the expression. // we need to add a space before the expression.
let suffix = checker.locator().after(range.end()); let suffix = locator.after(range.end());
let right_pad = matches!(suffix.chars().next(), Some('}')); let right_pad = matches!(suffix.chars().next(), Some('}'));
if left_pad && right_pad { if left_pad && right_pad {
@ -518,9 +576,9 @@ fn pad_expression(content: String, range: TextRange, checker: &Checker) -> Strin
/// (C409) Convert `tuple([1, 2])` to `tuple(1, 2)` /// (C409) Convert `tuple([1, 2])` to `tuple(1, 2)`
pub(crate) fn fix_unnecessary_literal_within_tuple_call( pub(crate) fn fix_unnecessary_literal_within_tuple_call(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
let mut tree = match_expression(module_text)?; let mut tree = match_expression(module_text)?;
@ -568,9 +626,9 @@ pub(crate) fn fix_unnecessary_literal_within_tuple_call(
/// (C410) Convert `list([1, 2])` to `[1, 2]` /// (C410) Convert `list([1, 2])` to `[1, 2]`
pub(crate) fn fix_unnecessary_literal_within_list_call( pub(crate) fn fix_unnecessary_literal_within_list_call(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
let mut tree = match_expression(module_text)?; let mut tree = match_expression(module_text)?;
@ -620,9 +678,9 @@ pub(crate) fn fix_unnecessary_literal_within_list_call(
/// (C411) Convert `list([i * i for i in x])` to `[i * i for i in x]`. /// (C411) Convert `list([i * i for i in x])` to `[i * i for i in x]`.
pub(crate) fn fix_unnecessary_list_call( pub(crate) fn fix_unnecessary_list_call(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Edit> { ) -> Result<Edit> {
// Expr(Call(List|Tuple)))) -> Expr(List|Tuple))) // Expr(Call(List|Tuple)))) -> Expr(List|Tuple)))
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
@ -642,9 +700,9 @@ pub(crate) fn fix_unnecessary_list_call(
/// (C413) Convert `reversed(sorted([2, 3, 1]))` to `sorted([2, 3, 1], /// (C413) Convert `reversed(sorted([2, 3, 1]))` to `sorted([2, 3, 1],
/// reverse=True)`. /// reverse=True)`.
pub(crate) fn fix_unnecessary_call_around_sorted( pub(crate) fn fix_unnecessary_call_around_sorted(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
let mut tree = match_expression(module_text)?; let mut tree = match_expression(module_text)?;
@ -660,7 +718,7 @@ pub(crate) fn fix_unnecessary_call_around_sorted(
if outer_name.value == "list" { if outer_name.value == "list" {
tree = Expression::Call(Box::new((*inner_call).clone())); tree = Expression::Call(Box::new((*inner_call).clone()));
} else { } else {
// If the `reverse` argument is used // If the `reverse` argument is used...
let args = if inner_call.args.iter().any(|arg| { let args = if inner_call.args.iter().any(|arg| {
matches!( matches!(
arg.keyword, arg.keyword,
@ -712,6 +770,28 @@ pub(crate) fn fix_unnecessary_call_around_sorted(
.collect_vec() .collect_vec()
} else { } else {
let mut args = inner_call.args.clone(); let mut args = inner_call.args.clone();
// If necessary, parenthesize a generator expression, as a generator expression must
// be parenthesized if it's not a solitary argument. For example, given:
// ```python
// reversed(sorted(i for i in range(42)))
// ```
// Rewrite as:
// ```python
// sorted((i for i in range(42)), reverse=True)
// ```
if let [arg] = args.as_mut_slice() {
if matches!(arg.value, Expression::GeneratorExp(_)) {
if arg.value.lpar().is_empty() && arg.value.rpar().is_empty() {
arg.value = arg
.value
.clone()
.with_parens(LeftParen::default(), RightParen::default());
}
}
}
// Add the `reverse=True` argument.
args.push(Arg { args.push(Arg {
value: Expression::Name(Box::new(Name { value: Expression::Name(Box::new(Name {
value: "True", value: "True",
@ -754,9 +834,9 @@ pub(crate) fn fix_unnecessary_call_around_sorted(
/// (C414) Convert `sorted(list(foo))` to `sorted(foo)` /// (C414) Convert `sorted(list(foo))` to `sorted(foo)`
pub(crate) fn fix_unnecessary_double_cast_or_process( pub(crate) fn fix_unnecessary_double_cast_or_process(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
let mut tree = match_expression(module_text)?; let mut tree = match_expression(module_text)?;
@ -785,9 +865,9 @@ pub(crate) fn fix_unnecessary_double_cast_or_process(
/// (C416) Convert `[i for i in x]` to `list(x)`. /// (C416) Convert `[i for i in x]` to `list(x)`.
pub(crate) fn fix_unnecessary_comprehension( pub(crate) fn fix_unnecessary_comprehension(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
let mut tree = match_expression(module_text)?; let mut tree = match_expression(module_text)?;
@ -865,161 +945,172 @@ pub(crate) fn fix_unnecessary_comprehension(
} }
Ok(Edit::range_replacement( Ok(Edit::range_replacement(
tree.codegen_stylist(stylist), pad(tree.codegen_stylist(stylist), expr.range(), locator),
expr.range(), expr.range(),
)) ))
} }
/// (C417) Convert `map(lambda x: x * 2, bar)` to `(x * 2 for x in bar)`. /// (C417) Convert `map(lambda x: x * 2, bar)` to `(x * 2 for x in bar)`.
pub(crate) fn fix_unnecessary_map( pub(crate) fn fix_unnecessary_map(
locator: &Locator,
stylist: &Stylist,
expr: &Expr, expr: &Expr,
parent: Option<&Expr>, parent: Option<&Expr>,
object_type: ObjectType, object_type: ObjectType,
locator: &Locator,
stylist: &Stylist,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
let mut tree = match_expression(module_text)?; let mut tree = match_expression(module_text)?;
let call = match_call_mut(&mut tree)?; let call = match_call_mut(&mut tree)?;
let arg = match_arg(call)?;
let (args, lambda_func) = match &arg.value { let (lambda, iter) = match call.args.as_slice() {
Expression::Call(outer_call) => { [call] => {
let inner_lambda = outer_call.args.first().unwrap().value.clone(); let call = match_call(&call.value)?;
match &inner_lambda { let [lambda, iter] = call.args.as_slice() else {
Expression::Lambda(..) => (outer_call.args.clone(), inner_lambda), bail!("Expected two arguments");
_ => { };
bail!("Expected a lambda function") let lambda = match_lambda(&lambda.value)?;
} let iter = &iter.value;
} (lambda, iter)
} }
Expression::Lambda(..) => (call.args.clone(), arg.value.clone()), [lambda, iter] => {
_ => { let lambda = match_lambda(&lambda.value)?;
bail!("Expected a lambda or call") let iter = &iter.value;
(lambda, iter)
} }
_ => bail!("Expected a call or lambda"),
}; };
let func_body = match_lambda(&lambda_func)?; // Format the lambda target.
let target = match lambda.params.params.as_slice() {
// Ex) `lambda: x`
[] => AssignTargetExpression::Name(Box::new(Name {
value: "_",
lpar: vec![],
rpar: vec![],
})),
// Ex) `lambda x: y`
[param] => AssignTargetExpression::Name(Box::new(param.name.clone())),
// Ex) `lambda x, y: z`
params => AssignTargetExpression::Tuple(Box::new(Tuple {
elements: params
.iter()
.map(|param| Element::Simple {
value: Expression::Name(Box::new(param.name.clone())),
comma: None,
})
.collect(),
lpar: vec![],
rpar: vec![],
})),
};
if args.len() == 2 { // Parenthesize the iterator, if necessary, as in:
if func_body.params.params.iter().any(|f| f.default.is_some()) { // ```python
bail!("Currently not supporting default values"); // map(lambda x: x, y if y else z)
// ```
let iter = iter.clone();
let iter = if iter.lpar().is_empty()
&& iter.rpar().is_empty()
&& matches!(iter, Expression::IfExp(_) | Expression::Lambda(_))
{
iter.with_parens(LeftParen::default(), RightParen::default())
} else {
iter
};
let compfor = Box::new(CompFor {
target,
iter,
ifs: vec![],
inner_for_in: None,
asynchronous: None,
whitespace_before: space(),
whitespace_after_for: space(),
whitespace_before_in: space(),
whitespace_after_in: space(),
});
match object_type {
ObjectType::Generator => {
tree = Expression::GeneratorExp(Box::new(GeneratorExp {
elt: lambda.body.clone(),
for_in: compfor,
lpar: vec![LeftParen::default()],
rpar: vec![RightParen::default()],
}));
} }
ObjectType::List => {
let mut args_str = func_body tree = Expression::ListComp(Box::new(ListComp {
.params elt: lambda.body.clone(),
.params for_in: compfor,
.iter() lbracket: LeftSquareBracket::default(),
.map(|f| f.name.value) rbracket: RightSquareBracket::default(),
.join(", ");
if args_str.is_empty() {
args_str = "_".to_string();
}
let compfor = Box::new(CompFor {
target: AssignTargetExpression::Name(Box::new(Name {
value: args_str.as_str(),
lpar: vec![], lpar: vec![],
rpar: vec![], rpar: vec![],
})), }));
iter: args.last().unwrap().value.clone(),
ifs: vec![],
inner_for_in: None,
asynchronous: None,
whitespace_before: ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(" ")),
whitespace_after_for: ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(
" ",
)),
whitespace_before_in: ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(
" ",
)),
whitespace_after_in: ParenthesizableWhitespace::SimpleWhitespace(SimpleWhitespace(" ")),
});
match object_type {
ObjectType::Generator => {
tree = Expression::GeneratorExp(Box::new(GeneratorExp {
elt: func_body.body.clone(),
for_in: compfor,
lpar: vec![LeftParen::default()],
rpar: vec![RightParen::default()],
}));
}
ObjectType::List => {
tree = Expression::ListComp(Box::new(ListComp {
elt: func_body.body.clone(),
for_in: compfor,
lbracket: LeftSquareBracket::default(),
rbracket: RightSquareBracket::default(),
lpar: vec![],
rpar: vec![],
}));
}
ObjectType::Set => {
tree = Expression::SetComp(Box::new(SetComp {
elt: func_body.body.clone(),
for_in: compfor,
lpar: vec![],
rpar: vec![],
lbrace: LeftCurlyBrace::default(),
rbrace: RightCurlyBrace::default(),
}));
}
ObjectType::Dict => {
let (key, value) = if let Expression::Tuple(tuple) = func_body.body.as_ref() {
if tuple.elements.len() != 2 {
bail!("Expected two elements")
}
let Some(Element::Simple { value: key, .. }) = &tuple.elements.get(0) else {
bail!("Expected tuple to contain a key as the first element");
};
let Some(Element::Simple { value, .. }) = &tuple.elements.get(1) else {
bail!("Expected tuple to contain a key as the second element");
};
(key, value)
} else {
bail!("Expected tuple for dict comprehension")
};
tree = Expression::DictComp(Box::new(DictComp {
for_in: compfor,
lpar: vec![],
rpar: vec![],
key: Box::new(key.clone()),
value: Box::new(value.clone()),
lbrace: LeftCurlyBrace::default(),
rbrace: RightCurlyBrace::default(),
whitespace_before_colon: ParenthesizableWhitespace::default(),
whitespace_after_colon: ParenthesizableWhitespace::SimpleWhitespace(
SimpleWhitespace(" "),
),
}));
}
} }
ObjectType::Set => {
let mut content = tree.codegen_stylist(stylist); tree = Expression::SetComp(Box::new(SetComp {
elt: lambda.body.clone(),
// If the expression is embedded in an f-string, surround it with spaces to avoid for_in: compfor,
// syntax errors. lpar: vec![],
if matches!(object_type, ObjectType::Set | ObjectType::Dict) { rpar: vec![],
if parent.is_some_and(Expr::is_formatted_value_expr) { lbrace: LeftCurlyBrace::default(),
content = format!(" {content} "); rbrace: RightCurlyBrace::default(),
} }));
} }
ObjectType::Dict => {
let elements = match lambda.body.as_ref() {
Expression::Tuple(tuple) => &tuple.elements,
Expression::List(list) => &list.elements,
_ => {
bail!("Expected tuple or list for dictionary comprehension")
}
};
let [key, value] = elements.as_slice() else {
bail!("Expected container to include two elements");
};
let Element::Simple { value: key, .. } = key else {
bail!("Expected container to use a key as the first element");
};
let Element::Simple { value, .. } = value else {
bail!("Expected container to use a value as the second element");
};
Ok(Edit::range_replacement(content, expr.range())) tree = Expression::DictComp(Box::new(DictComp {
} else { for_in: compfor,
bail!("Should have two arguments"); lpar: vec![],
rpar: vec![],
key: Box::new(key.clone()),
value: Box::new(value.clone()),
lbrace: LeftCurlyBrace::default(),
rbrace: RightCurlyBrace::default(),
whitespace_before_colon: ParenthesizableWhitespace::default(),
whitespace_after_colon: ParenthesizableWhitespace::SimpleWhitespace(
SimpleWhitespace(" "),
),
}));
}
} }
let mut content = tree.codegen_stylist(stylist);
// If the expression is embedded in an f-string, surround it with spaces to avoid
// syntax errors.
if matches!(object_type, ObjectType::Set | ObjectType::Dict) {
if parent.is_some_and(Expr::is_formatted_value_expr) {
content = format!(" {content} ");
}
}
Ok(Edit::range_replacement(content, expr.range()))
} }
/// (C418) Convert `dict({"a": 1})` to `{"a": 1}` /// (C418) Convert `dict({"a": 1})` to `{"a": 1}`
pub(crate) fn fix_unnecessary_literal_within_dict_call( pub(crate) fn fix_unnecessary_literal_within_dict_call(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Edit> { ) -> Result<Edit> {
let module_text = locator.slice(expr); let module_text = locator.slice(expr);
let mut tree = match_expression(module_text)?; let mut tree = match_expression(module_text)?;
@ -1036,9 +1127,9 @@ pub(crate) fn fix_unnecessary_literal_within_dict_call(
/// (C419) Convert `[i for i in a]` into `i for i in a` /// (C419) Convert `[i for i in a]` into `i for i in a`
pub(crate) fn fix_unnecessary_comprehension_any_all( pub(crate) fn fix_unnecessary_comprehension_any_all(
expr: &Expr,
locator: &Locator, locator: &Locator,
stylist: &Stylist, stylist: &Stylist,
expr: &Expr,
) -> Result<Fix> { ) -> Result<Fix> {
// Expr(ListComp) -> Expr(GeneratorExp) // Expr(ListComp) -> Expr(GeneratorExp)
let module_text = locator.slice(expr); let module_text = locator.slice(expr);

View File

@ -85,9 +85,9 @@ pub(crate) fn unnecessary_call_around_sorted(
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
diagnostic.try_set_fix(|| { diagnostic.try_set_fix(|| {
let edit = fixes::fix_unnecessary_call_around_sorted( let edit = fixes::fix_unnecessary_call_around_sorted(
expr,
checker.locator(), checker.locator(),
checker.stylist(), checker.stylist(),
expr,
)?; )?;
if outer.id == "reversed" { if outer.id == "reversed" {
Ok(Fix::suggested(edit)) Ok(Fix::suggested(edit))

View File

@ -88,7 +88,7 @@ pub(crate) fn unnecessary_collection_call(
); );
if checker.patch(diagnostic.kind.rule()) { if checker.patch(diagnostic.kind.rule()) {
diagnostic.try_set_fix(|| { diagnostic.try_set_fix(|| {
fixes::fix_unnecessary_collection_call(checker, expr).map(Fix::suggested) fixes::fix_unnecessary_collection_call(expr, checker).map(Fix::suggested)
}); });
} }
checker.diagnostics.push(diagnostic); checker.diagnostics.push(diagnostic);

Some files were not shown because too many files have changed in this diff Show More