Merge branch 'main' into dhruv/unused-imports

This commit is contained in:
Dhruv Manilawala 2023-07-13 20:45:18 +05:30
commit 4110065a18
No known key found for this signature in database
GPG Key ID: 9601911DE8797432
1573 changed files with 76319 additions and 28661 deletions

1
.gitattributes vendored
View File

@ -4,3 +4,4 @@ crates/ruff/resources/test/fixtures/isort/line_ending_crlf.py text eol=crlf
crates/ruff/resources/test/fixtures/pycodestyle/W605_1.py text eol=crlf
ruff.schema.json linguist-generated=true text=auto eol=lf
*.md.snap linguist-language=Markdown

9
.github/CODEOWNERS vendored Normal file
View File

@ -0,0 +1,9 @@
# GitHub code owners file. For more info: https://help.github.com/articles/about-codeowners/
#
# - Comment lines begin with `#` character.
# - Each line is a file pattern followed by one or more owners.
# - The '*' pattern is global owners.
# - Order is important. The last matching pattern has the most precedence.
# Jupyter
/crates/ruff/src/jupyter/ @dhruvmanila

View File

@ -16,7 +16,7 @@ env:
CARGO_TERM_COLOR: always
RUSTUP_MAX_RETRIES: 10
PACKAGE_NAME: ruff
PYTHON_VERSION: "3.7" # to build abi3 wheels
PYTHON_VERSION: "3.11" # to build abi3 wheels
jobs:
cargo-fmt:
@ -31,17 +31,6 @@ jobs:
cargo-clippy:
name: "cargo clippy"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: |
rustup component add clippy
- uses: Swatinem/rust-cache@v2
- run: cargo clippy --workspace --all-targets --all-features -- -D warnings
cargo-clippy-wasm:
name: "cargo clippy (wasm)"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
@ -49,7 +38,10 @@ jobs:
rustup component add clippy
rustup target add wasm32-unknown-unknown
- uses: Swatinem/rust-cache@v2
- run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features -- -D warnings
- name: "Clippy"
run: cargo clippy --workspace --all-targets --all-features -- -D warnings
- name: "Clippy (wasm)"
run: cargo clippy -p ruff_wasm --target wasm32-unknown-unknown --all-features -- -D warnings
cargo-test:
strategy:
@ -62,20 +54,21 @@ jobs:
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- run: cargo install cargo-insta
# cargo insta 1.30.0 fails for some reason (https://github.com/mitsuhiko/insta/issues/392)
- run: cargo install cargo-insta@=1.29.0
- run: pip install black[d]==23.1.0
- name: "Run tests (Ubuntu)"
if: ${{ matrix.os == 'ubuntu-latest' }}
run: |
cargo insta test --all --all-features --delete-unreferenced-snapshots
git diff --exit-code
run: cargo insta test --all --all-features --unreferenced reject
- name: "Run tests (Windows)"
if: ${{ matrix.os == 'windows-latest' }}
shell: bash
run: |
cargo insta test --all --all-features
git diff --exit-code
# We can't reject unreferenced snapshots on windows because flake8_executable can't run on windows
run: cargo insta test --all --all-features
- run: cargo test --package ruff_cli --test black_compatibility_test -- --ignored
# TODO: Skipped as it's currently broken. The resources were moved from the
# ruff_cli crate to the ruff crate, but this test was not updated.
if: false
# Check for broken links in the documentation.
- run: cargo doc --all --no-deps
env:
@ -149,7 +142,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.11"
python-version: ${{ env.PYTHON_VERSION }}
- uses: actions/download-artifact@v3
name: Download Ruff binary
@ -217,11 +210,10 @@ jobs:
- name: "Build wheels"
uses: PyO3/maturin-action@v1
with:
manylinux: auto
args: --out dist
- name: "Test wheel"
run: |
pip install dist/${{ env.PACKAGE_NAME }}-*.whl --force-reinstall
pip install --force-reinstall --find-links dist ${{ env.PACKAGE_NAME }}
ruff --help
python -m ruff --help
- name: "Remove wheels from cache"
@ -234,7 +226,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
with:
python-version: "3.11"
python-version: ${{ env.PYTHON_VERSION }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
@ -258,13 +250,24 @@ jobs:
docs:
name: "mkdocs"
runs-on: ubuntu-latest
env:
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Update README File"
run: python scripts/transform_readme.py --target mkdocs
@ -272,5 +275,23 @@ jobs:
run: python scripts/generate_mkdocs.py
- name: "Check docs formatting"
run: python scripts/check_docs_formatted.py
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
run: mkdocs build --strict
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.generated.yml
check-formatter-stability:
name: "Check formatter stability"
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: "Install Rust toolchain"
run: rustup show
- name: "Cache rust"
uses: Swatinem/rust-cache@v2
- name: "Clone CPython 3.10"
run: git clone --branch 3.10 --depth 1 https://github.com/python/cpython.git crates/ruff/resources/test/cpython
- name: "Check stability"
run: cargo run --bin ruff_dev -- format-dev --stability-check crates/ruff/resources/test/cpython

View File

@ -10,20 +10,34 @@ jobs:
runs-on: ubuntu-latest
env:
CF_API_TOKEN_EXISTS: ${{ secrets.CF_API_TOKEN != '' }}
MKDOCS_INSIDERS_SSH_KEY_EXISTS: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY != '' }}
steps:
- uses: actions/checkout@v3
- uses: actions/setup-python@v4
- name: "Add SSH key"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
uses: webfactory/ssh-agent@v0.8.0
with:
ssh-private-key: ${{ secrets.MKDOCS_INSIDERS_SSH_KEY }}
- name: "Install Rust toolchain"
run: rustup show
- uses: Swatinem/rust-cache@v2
- name: "Install Insiders dependencies"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: pip install -r docs/requirements-insiders.txt
- name: "Install dependencies"
run: |
pip install -r docs/requirements.txt
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: pip install -r docs/requirements.txt
- name: "Copy README File"
run: |
python scripts/transform_readme.py --target mkdocs
python scripts/generate_mkdocs.py
mkdocs build --strict
- name: "Build Insiders docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS == 'true' }}
run: mkdocs build --strict -f mkdocs.insiders.yml
- name: "Build docs"
if: ${{ env.MKDOCS_INSIDERS_SSH_KEY_EXISTS != 'true' }}
run: mkdocs build --strict -f mkdocs.generated.yml
- name: "Deploy to Cloudflare Pages"
if: ${{ env.CF_API_TOKEN_EXISTS == 'true' }}
uses: cloudflare/wrangler-action@2.0.0

View File

@ -9,7 +9,7 @@ concurrency:
env:
PACKAGE_NAME: flake8-to-ruff
CRATE_NAME: flake8_to_ruff
PYTHON_VERSION: "3.7" # to build abi3 wheels
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always

View File

@ -2,8 +2,17 @@ name: "[ruff] Release"
on:
workflow_dispatch:
release:
types: [ published ]
inputs:
tag:
description: "The version to tag, without the leading 'v'. If omitted, will initiate a dry run (no uploads)."
type: string
sha:
description: "Optionally, the full sha of the commit to be released"
type: string
pull_request:
paths:
# When we change pyproject.toml, we want to ensure that the maturin builds still work
- pyproject.toml
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
@ -11,7 +20,7 @@ concurrency:
env:
PACKAGE_NAME: ruff
PYTHON_VERSION: "3.7" # to build abi3 wheels
PYTHON_VERSION: "3.11"
CARGO_INCREMENTAL: 0
CARGO_NET_RETRY: 10
CARGO_TERM_COLOR: always
@ -383,8 +392,39 @@ jobs:
*.tar.gz
*.sha256
release:
name: Release
validate-tag:
name: Validate tag
runs-on: ubuntu-latest
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
steps:
- uses: actions/checkout@v3
- name: Check tag consistency
run: |
version=$(grep "version = " pyproject.toml | sed -e 's/version = "\(.*\)"/\1/g')
if [ "${{ inputs.tag }}" != "${version}" ]; then
echo "The input tag does not match the version from pyproject.toml:" >&2
echo "${{ inputs.tag }}" >&2
echo "${version}" >&2
exit 1
else
echo "Releasing ${version}"
fi
- name: Check SHA consistency
if: ${{ inputs.sha }}
run: |
git_sha=$(git rev-parse HEAD)
if [ "${{ inputs.sha }}" != "${git_sha}" ]; then
echo "The specified sha does not match the git checkout" >&2
echo "${{ inputs.sha }}" >&2
echo "${git_sha}" >&2
exit 1
else
echo "Releasing ${git_sha}"
fi
upload-release:
name: Upload to PyPI
runs-on: ubuntu-latest
needs:
- macos-universal
@ -394,25 +434,56 @@ jobs:
- linux-cross
- musllinux
- musllinux-cross
if: "startsWith(github.ref, 'refs/tags/')"
- validate-tag
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
environment:
name: release
permissions:
# For pypi trusted publishing
id-token: write
# For GitHub release publishing
contents: write
steps:
- uses: actions/download-artifact@v3
with:
name: wheels
path: wheels
- name: "Publish to PyPi"
- name: Publish to PyPi
uses: pypa/gh-action-pypi-publish@release/v1
with:
skip-existing: true
packages-dir: wheels
verbose: true
tag-release:
name: Tag release
runs-on: ubuntu-latest
needs: upload-release
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
permissions:
# For git tag
contents: write
steps:
- uses: actions/checkout@v3
- name: git tag
run: |
git config user.email "hey@astral.sh"
git config user.name "Ruff Release CI"
git tag -m "v${{ inputs.tag }}" "v${{ inputs.tag }}"
# If there is a duplicate tag, this will fail. The publish to PyPI action will have been a no-op (due to
# skip-existing), so we make a non-destructive exit here
git push --tags
publish-release:
name: Publish to GitHub
runs-on: ubuntu-latest
needs: tag-release
# If you don't set an input tag, it's a dry run (no uploads).
if: ${{ inputs.tag }}
permissions:
# For GitHub release publishing
contents: write
steps:
- uses: actions/download-artifact@v3
with:
name: binaries
@ -420,14 +491,16 @@ jobs:
- name: "Publish to GitHub"
uses: softprops/action-gh-release@v1
with:
draft: true
files: binaries/*
tag_name: v${{ inputs.tag }}
# After the release has been published, we update downstream repositories
# This is separate because if this fails the release is still fine, we just need to do some manual workflow triggers
update-dependents:
name: Release
name: Update dependents
runs-on: ubuntu-latest
needs: release
needs: publish-release
steps:
- name: "Update pre-commit mirror"
uses: actions/github-script@v6

18
.gitignore vendored
View File

@ -1,11 +1,25 @@
# Benchmarking cpython (CONTRIBUTING.md)
crates/ruff/resources/test/cpython
mkdocs.yml
.overrides
# generate_mkdocs.py
mkdocs.generated.yml
# check_ecosystem.py
ruff-old
github_search*.jsonl
# update_schemastore.py
schemastore
# `maturin develop` and ecosystem_all_check.sh
.venv*
# Formatter debugging (crates/ruff_python_formatter/README.md)
scratch.py
# Created by `perf` (CONTRIBUTING.md)
perf.data
perf.data.old
# Created by `flamegraph` (CONTRIBUTING.md)
flamegraph.svg
# Additional target directories that don't invalidate the main compile cache when changing linker settings,
# e.g. `CARGO_TARGET_DIR=target-maturin maturin build --release --strip` or
# `CARGO_TARGET_DIR=target-llvm-lines RUSTFLAGS="-Csymbol-mangling-version=v0" cargo llvm-lines -p ruff --lib`
/target*
###
# Rust.gitignore

View File

@ -3,8 +3,12 @@ fail_fast: true
exclude: |
(?x)^(
crates/ruff/resources/.*|
crates/ruff/src/rules/.*/snapshots/.*|
crates/ruff_cli/resources/.*|
crates/ruff_python_formatter/resources/.*|
crates/ruff_python_formatter/src/snapshots/.*
crates/ruff_python_formatter/tests/snapshots/.*|
crates/ruff_python_resolver/resources/.*|
crates/ruff_python_resolver/tests/snapshots/.*
)$
repos:
@ -38,14 +42,10 @@ repos:
entry: cargo fmt --
language: system
types: [ rust ]
- id: clippy
name: clippy
entry: cargo clippy --workspace --all-targets --all-features -- -D warnings
language: system
pass_filenames: false
pass_filenames: false # This makes it a lot faster
- id: ruff
name: ruff
entry: cargo run -p ruff_cli -- check --no-cache --force-exclude --fix --exit-non-zero-on-fix
entry: cargo run --bin ruff -- check --no-cache --force-exclude --fix --exit-non-zero-on-fix
language: system
types_or: [ python, pyi ]
require_serial: true
@ -54,12 +54,6 @@ repos:
crates/ruff/resources/.*|
crates/ruff_python_formatter/resources/.*
)$
- id: dev-generate-all
name: dev-generate-all
entry: cargo dev generate-all
language: system
pass_filenames: false
exclude: target
# Black
- repo: https://github.com/psf/black
@ -68,4 +62,4 @@ repos:
- id: black
ci:
skip: [cargo-fmt, clippy, dev-generate-all]
skip: [ cargo-fmt, dev-generate-all ]

View File

@ -1,5 +1,67 @@
# Breaking Changes
## 0.0.277
### `.ipynb_checkpoints`, `.pyenv`, `.pytest_cache`, and `.vscode` are now excluded by default ([#5513](https://github.com/astral-sh/ruff/pull/5513))
Ruff maintains a list of default exclusions, which now consists of the following patterns:
- `.bzr`
- `.direnv`
- `.eggs`
- `.git`
- `.git-rewrite`
- `.hg`
- `.ipynb_checkpoints`
- `.mypy_cache`
- `.nox`
- `.pants.d`
- `.pyenv`
- `.pytest_cache`
- `.pytype`
- `.ruff_cache`
- `.svn`
- `.tox`
- `.venv`
- `.vscode`
- `__pypackages__`
- `_build`
- `buck-out`
- `build`
- `dist`
- `node_modules`
- `venv`
Previously, the `.ipynb_checkpoints`, `.pyenv`, `.pytest_cache`, and `.vscode` directories were not
excluded by default. This change brings Ruff's default exclusions in line with other tools like
Black.
## 0.0.276
### The `keep-runtime-typing` setting has been reinstated ([#5470](https://github.com/astral-sh/ruff/pull/5470))
The `keep-runtime-typing` setting has been reinstated with revised semantics. This setting was
removed in [#4427](https://github.com/astral-sh/ruff/pull/4427), as it was equivalent to ignoring
the `UP006` and `UP007` rules via Ruff's standard `ignore` mechanism.
Taking `UP006` (rewrite `List[int]` to `list[int]`) as an example, the setting now behaves as
follows:
- On Python 3.7 and Python 3.8, setting `keep-runtime-typing = true` will cause Ruff to ignore
`UP006` violations, even if `from __future__ import annotations` is present in the file.
While such annotations are valid in Python 3.7 and Python 3.8 when combined with
`from __future__ import annotations`, they aren't supported by libraries like Pydantic and
FastAPI, which rely on runtime type checking.
- On Python 3.9 and above, the setting has no effect, as `list[int]` is a valid type annotation,
and libraries like Pydantic and FastAPI support it without issue.
In short: `keep-runtime-typing` can be used to ensure that Ruff doesn't introduce type annotations
that aren't supported at runtime by the current Python version, and thus aren't usable with
libraries like Pydantic and FastAPI.
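To make the Python 3.7/3.8 case concrete, here is a minimal sketch (not taken from the changelog;
it assumes Pydantic is installed and that the setting lives under `[tool.ruff.pyupgrade]` in
`pyproject.toml`):

```python
# Hypothetical illustration of the UP006 trade-off on Python 3.8.
from __future__ import annotations

from typing import List

from pydantic import BaseModel  # assumed installed; resolves annotations at runtime


class Model(BaseModel):
    # UP006 would rewrite this to `items: list[int]`. The rewritten annotation
    # parses on 3.8 thanks to the `__future__` import, but when Pydantic
    # evaluates it at runtime, `list[int]` raises
    # "TypeError: 'type' object is not subscriptable" on Python 3.8.
    # Setting `keep-runtime-typing = true` (assumed: under
    # `[tool.ruff.pyupgrade]`) keeps the `typing.List` form instead.
    items: List[int]


print(Model(items=[1, 2, 3]))
```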
Note that this is not a breaking change, but is included here to complement the previous removal
of `keep-runtime-typing`.
## 0.0.268
### The `keep-runtime-typing` setting has been removed ([#4427](https://github.com/astral-sh/ruff/pull/4427))

View File

@ -12,7 +12,7 @@ Welcome! We're happy to have you here. Thank you in advance for your contributio
- [Example: Adding a new configuration option](#example-adding-a-new-configuration-option)
- [MkDocs](#mkdocs)
- [Release Process](#release-process)
- [Benchmarks](#benchmarks)
- [Benchmarks](#benchmarking-and-profiling)
## The Basics
@ -21,9 +21,11 @@ Ruff welcomes contributions in the form of Pull Requests.
For small changes (e.g., bug fixes), feel free to submit a PR.
For larger changes (e.g., new lint rules, new functionality, new configuration options), consider
creating an [**issue**](https://github.com/astral-sh/ruff/issues) outlining your proposed
change. You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5) to discuss your idea with
the community.
creating an [**issue**](https://github.com/astral-sh/ruff/issues) outlining your proposed change.
You can also join us on [**Discord**](https://discord.gg/c9MhzV8aU5) to discuss your idea with the
community. We have labeled [beginner-friendly tasks in the issue tracker](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3A%22good+first+issue%22)
as well as [bugs](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Abug) and
[improvements that are ready for contributions](https://github.com/astral-sh/ruff/issues?q=is%3Aissue+is%3Aopen+label%3Aaccepted).
If you're looking for a place to start, we recommend implementing a new lint rule (see:
[_Adding a new lint rule_](#example-adding-a-new-lint-rule), which will allow you to learn from and
@ -31,8 +33,10 @@ pattern-match against the examples in the existing codebase. Many lint rules are
existing Python plugins, which can be used as a reference implementation.
As a concrete example: consider taking on one of the rules from the [`flake8-pyi`](https://github.com/astral-sh/ruff/issues/848)
plugin, and looking to the originating [Python source](https://github.com/PyCQA/flake8-pyi)
for guidance.
plugin, and looking to the originating [Python source](https://github.com/PyCQA/flake8-pyi) for
guidance.
If you have suggestions on how we might improve the contributing documentation, [let us know](https://github.com/astral-sh/ruff/discussions/5693)!
### Prerequisites
@ -45,6 +49,12 @@ You'll also need [Insta](https://insta.rs/docs/) to update snapshot tests:
cargo install cargo-insta
```
and pre-commit to run some validation checks:
```shell
pipx install pre-commit # or `pip install pre-commit` if you have a virtualenv
```
### Development
After cloning the repository, run Ruff locally with:
@ -57,9 +67,9 @@ Prior to opening a pull request, ensure that your code has been auto-formatted,
and that it passes both the lint and test validation checks:
```shell
cargo fmt # Auto-formatting...
cargo clippy --fix --workspace --all-targets --all-features # Linting...
cargo test # Testing...
cargo clippy --workspace --all-targets --all-features -- -D warnings # Rust linting
RUFF_UPDATE_SCHEMA=1 cargo test # Rust testing and updating ruff.schema.json
pre-commit run --all-files --show-diff-on-failure # Rust and Python formatting, Markdown and Python linting, etc.
```
These checks will run on GitHub Actions when you open your Pull Request, but running them locally
@ -72,13 +82,6 @@ after running `cargo test` like so:
cargo insta review
```
If you have `pre-commit` [installed](https://pre-commit.com/#installation) then you can use it to
assist with formatting and linting. The following command will run the `pre-commit` hooks:
```shell
pre-commit run --all-files
```
Your Pull Request will be reviewed by a maintainer, which may involve a few rounds of iteration
prior to merging.
@ -93,64 +96,89 @@ The vast majority of the code, including all lint rules, lives in the `ruff` cra
At time of writing, the repository includes the following crates:
- `crates/ruff`: library crate containing all lint rules and the core logic for running them.
- `crates/ruff_benchmark`: binary crate for running micro-benchmarks.
- `crates/ruff_cache`: library crate for caching lint results.
- `crates/ruff_cli`: binary crate containing Ruff's command-line interface.
- `crates/ruff_dev`: binary crate containing utilities used in the development of Ruff itself (e.g.,
`cargo dev generate-all`).
- `crates/ruff_diagnostics`: library crate for the lint diagnostics APIs.
- `crates/ruff_formatter`: library crate for generic code formatting logic based on an intermediate
representation.
- `crates/ruff_index`: library crate inspired by `rustc_index`.
- `crates/ruff_macros`: library crate containing macros used by Ruff.
- `crates/ruff_python`: library crate implementing Python-specific functionality (e.g., lists of
standard library modules by version).
- `crates/flake8_to_ruff`: binary crate for generating Ruff configuration from Flake8 configuration.
- `crates/ruff_python_ast`: library crate containing Python-specific AST types and utilities.
- `crates/ruff_python_formatter`: library crate containing Python-specific code formatting logic.
- `crates/ruff_python_semantic`: library crate containing Python-specific semantic analysis logic,
including Ruff's semantic model.
- `crates/ruff_python_stdlib`: library crate containing Python-specific standard library data.
- `crates/ruff_python_whitespace`: library crate containing Python-specific whitespace analysis
logic.
- `crates/ruff_rustpython`: library crate containing `RustPython`-specific utilities.
- `crates/ruff_testing_macros`: library crate containing macros used for testing Ruff.
- `crates/ruff_textwrap`: library crate to indent and dedent Python source code.
- `crates/ruff_wasm`: library crate for exposing Ruff as a WebAssembly module.
### Example: Adding a new lint rule
At a high level, the steps involved in adding a new lint rule are as follows:
1. Determine a name for the new rule as per our [rule naming convention](#rule-naming-convention).
1. Determine a name for the new rule as per our [rule naming convention](#rule-naming-convention)
(e.g., `AssertFalse`, as in, "allow `assert False`").
1. Create a file for your rule (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs`).
1. Create a file for your rule (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/assert_false.rs`).
1. In that file, define a violation struct. You can grep for `#[violation]` to see examples.
1. In that file, define a violation struct (e.g., `pub struct AssertFalse`). You can grep for
`#[violation]` to see examples.
1. Map the violation struct to a rule code in `crates/ruff/src/codes.rs` (e.g., `E402`).
1. In that file, define a function that adds the violation to the diagnostic list as appropriate
(e.g., `pub(crate) fn assert_false`) based on whatever inputs are required for the rule (e.g.,
an `ast::StmtAssert` node).
1. Define the logic for triggering the violation in `crates/ruff/src/checkers/ast/mod.rs` (for
AST-based checks), `crates/ruff/src/checkers/tokens.rs` (for token-based checks),
`crates/ruff/src/checkers/lines.rs` (for text-based checks), or
`crates/ruff/src/checkers/filesystem.rs` (for filesystem-based checks).
1. Map the violation struct to a rule code in `crates/ruff/src/codes.rs` (e.g., `B011`).
1. Add proper [testing](#rule-testing-fixtures-and-snapshots) for your rule.
1. Update the generated files (documentation and generated code).
To define the violation, start by creating a dedicated file for your rule under the appropriate
rule linter (e.g., `crates/ruff/src/rules/flake8_bugbear/rules/abstract_base_class.rs`). That file should
contain a struct defined via `#[violation]`, along with a function that creates the violation
based on any required inputs.
To trigger the violation, you'll likely want to augment the logic in `crates/ruff/src/checkers/ast.rs`,
which defines the Python AST visitor, responsible for iterating over the abstract syntax tree and
collecting diagnostics as it goes.
To trigger the violation, you'll likely want to augment the logic in `crates/ruff/src/checkers/ast.rs`
to call your new function at the appropriate time and with the appropriate inputs. The `Checker`
defined therein is a Python AST visitor, which iterates over the AST, building up a semantic model,
and calling out to lint rule analyzer functions as it goes.
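Putting the pieces together, a rough sketch of such a rule file might look like the following.
This is illustrative only: the module paths, the `Violation` trait, and the `checker.diagnostics`
field are assumptions based on the description above, not copied from the codebase; grep for
`#[violation]` and `Diagnostic::new` for the authoritative patterns.

```rust
// Sketch of crates/ruff/src/rules/flake8_bugbear/rules/assert_false.rs (illustrative).
use ruff_diagnostics::{Diagnostic, Violation};
use ruff_macros::{derive_message_formats, violation};
use rustpython_parser::ast;

use crate::checkers::ast::Checker;

/// B011: flags `assert False`, which `python -O` strips out entirely.
#[violation]
pub struct AssertFalse;

impl Violation for AssertFalse {
    #[derive_message_formats]
    fn message(&self) -> String {
        format!("Do not `assert False`; raise `AssertionError` instead")
    }
}

/// Called from the AST checker (`crates/ruff/src/checkers/ast/mod.rs`) when it
/// visits an `assert` statement.
pub(crate) fn assert_false(checker: &mut Checker, stmt: &ast::StmtAssert) {
    // Only flag `assert False`, i.e. a constant `False` test expression.
    if let ast::Expr::Constant(ast::ExprConstant {
        value: ast::Constant::Bool(false),
        ..
    }) = stmt.test.as_ref()
    {
        checker
            .diagnostics
            .push(Diagnostic::new(AssertFalse, stmt.range));
    }
}
```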
If you need to inspect the AST, you can run `cargo dev print-ast` with a Python file. Grep
for the `Check::new` invocations to understand how other, similar rules are implemented.
for the `Diagnostic::new` invocations to understand how other, similar rules are implemented.
Once you're satisfied with your code, add tests for your rule. See [rule testing](#rule-testing-fixtures-and-snapshots)
for more details.
Finally, regenerate the documentation and generated code with `cargo dev generate-all`.
Finally, regenerate the documentation and other generated assets (like our JSON Schema) with:
`cargo dev generate-all`.
#### Rule naming convention
The rule name should make sense when read as "allow _rule-name_" or "allow _rule-name_ items".
Like Clippy, Ruff's rule names should make grammatical and logical sense when read as "allow
${rule}" or "allow ${rule} items", as in the context of suppression comments.
This implies that rule names:
For example, `AssertFalse` fits this convention: it flags `assert False` statements, and so a
suppression comment would be framed as "allow `assert False`".
- should state the bad thing being checked for
As such, rule names should...
- should not contain instructions on what you should use instead
(these belong in the rule documentation and the `autofix_title` for rules that have autofix)
- Highlight the pattern that is being linted against, rather than the preferred alternative.
For example, `AssertFalse` guards against `assert False` statements.
When re-implementing rules from other linters, this convention is given more importance than
- _Not_ contain instructions on how to fix the violation, which instead belong in the rule
documentation and the `autofix_title`.
- _Not_ contain a redundant prefix, like `Disallow` or `Banned`, which are already implied by the
convention.
When re-implementing rules from other linters, we prioritize adhering to this convention over
preserving the original rule name.
#### Rule testing: fixtures and snapshots
@ -232,7 +260,11 @@ To preview any changes to the documentation locally:
1. Run the development server with:
```shell
mkdocs serve
# For contributors.
mkdocs serve -f mkdocs.generated.yml
# For members of the Astral org, which has access to MkDocs Insiders via sponsorship.
mkdocs serve -f mkdocs.insiders.yml
```
The documentation should then be available locally at
@ -247,6 +279,28 @@ them to [PyPI](https://pypi.org/project/ruff/).
Ruff follows the [semver](https://semver.org/) versioning standard. However, as pre-1.0 software,
even patch releases may contain [non-backwards-compatible changes](https://semver.org/#spec-item-4).
### Creating a new release
1. Update the version with `rg 0.0.269 --files-with-matches | xargs sed -i 's/0.0.269/0.0.270/g'`
1. Update `BREAKING_CHANGES.md`
1. Create a PR with the version and `BREAKING_CHANGES.md` updated
1. Merge the PR
1. Run the release workflow with the version number (without the leading `v`) as input. Make sure
main has your merged PR as the last commit
1. The release workflow will do the following:
1. Build all the assets. If this fails (even though we tested in step 4), we haven't tagged or
uploaded anything, so you can restart after pushing a fix
1. Upload to PyPI
1. Create and push the git tag (from pyproject.toml). We create the git tag only here
because we can't change it ([#4468](https://github.com/charliermarsh/ruff/issues/4468)), so
we want to make sure everything up to and including publishing to PyPI worked
1. Attach artifacts to a draft GitHub release
1. Trigger downstream repositories. This can fail without causing fallout; it is possible (if
inconvenient) to trigger the downstream jobs manually
1. Create release notes in the GitHub UI and promote the release from draft to published (<https://github.com/charliermarsh/ruff/releases/new>)
1. If needed, [update the schemastore](https://github.com/charliermarsh/ruff/blob/main/scripts/update_schemastore.py)
1. If needed, update ruff-lsp and ruff-vscode
## Ecosystem CI
GitHub Actions will run your changes against a number of real-world projects from GitHub and
@ -261,7 +315,15 @@ downloading the [`known-github-tomls.json`](https://github.com/akx/ruff-usage-ag
as `github_search.jsonl` and following the instructions in [scripts/Dockerfile.ecosystem](https://github.com/astral-sh/ruff/blob/main/scripts/Dockerfile.ecosystem).
Note that this check will take a while to run.
## Benchmarks
## Benchmarking and Profiling
We have several ways of benchmarking and profiling Ruff:
- Our main performance benchmark comparing Ruff with other tools on the CPython codebase
- Microbenchmarks which run the linter or the formatter on individual files. These run on pull requests.
- Profiling the linter on either the microbenchmarks or entire projects
### CPython Benchmark
First, clone [CPython](https://github.com/python/cpython). It's a large and diverse Python codebase,
which makes it a good target for benchmarking.
@ -273,22 +335,18 @@ git clone --branch 3.10 https://github.com/python/cpython.git crates/ruff/resour
To benchmark the release build:
```shell
cargo build --release && hyperfine --ignore-failure --warmup 10 \
"./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache" \
"./target/release/ruff ./crates/ruff/resources/test/cpython/"
cargo build --release && hyperfine --warmup 10 \
"./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache -e" \
"./target/release/ruff ./crates/ruff/resources/test/cpython/ -e"
Benchmark 1: ./target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache
Time (mean ± σ): 293.8 ms ± 3.2 ms [User: 2384.6 ms, System: 90.3 ms]
Range (min … max): 289.9 ms … 301.6 ms 10 runs
Warning: Ignoring non-zero exit code.
Benchmark 2: ./target/release/ruff ./crates/ruff/resources/test/cpython/
Time (mean ± σ): 48.0 ms ± 3.1 ms [User: 65.2 ms, System: 124.7 ms]
Range (min … max): 45.0 ms … 66.7 ms 62 runs
Warning: Ignoring non-zero exit code.
Summary
'./target/release/ruff ./crates/ruff/resources/test/cpython/' ran
6.12 ± 0.41 times faster than './target/release/ruff ./crates/ruff/resources/test/cpython/ --no-cache'
@ -340,9 +398,9 @@ Summary
159.43 ± 2.48 times faster than 'pycodestyle crates/ruff/resources/test/cpython'
```
You can run `poetry install` from `./scripts` to create a working environment for the above. All
reported benchmarks were computed using the versions specified by `./scripts/pyproject.toml`
on Python 3.11.
You can run `poetry install` from `./scripts/benchmarks` to create a working environment for the
above. All reported benchmarks were computed using the versions specified by
`./scripts/benchmarks/pyproject.toml` on Python 3.11.
To benchmark Pylint, remove the following files from the CPython repository:
@ -383,3 +441,247 @@ Benchmark 1: find . -type f -name "*.py" | xargs -P 0 pyupgrade --py311-plus
Time (mean ± σ): 30.119 s ± 0.195 s [User: 28.638 s, System: 0.390 s]
Range (min … max): 29.813 s … 30.356 s 10 runs
```
### Microbenchmarks
The `ruff_benchmark` crate benchmarks the linter and the formatter on individual files.
You can run the benchmarks with
```shell
cargo benchmark
```
#### Benchmark-driven Development
Ruff uses [Criterion.rs](https://bheisler.github.io/criterion.rs/book/) for benchmarks. You can use
`--save-baseline=<name>` to store an initial baseline benchmark (e.g. on `main`) and then use
`--baseline=<name>` to compare against that baseline. Criterion will print a message telling you
if the benchmark improved/regressed compared to that baseline.
```shell
# Run once on your "baseline" code
cargo benchmark --save-baseline=main
# Then iterate with
cargo benchmark --baseline=main
```
#### PR Summary
You can use `--save-baseline` and `critcmp` to get a pretty comparison between two recordings.
This is useful to illustrate the improvements of a PR.
```shell
# On main
cargo benchmark --save-baseline=main
# After applying your changes
cargo benchmark --save-baseline=pr
critcmp main pr
```
You must install [`critcmp`](https://github.com/BurntSushi/critcmp) for the comparison.
```bash
cargo install critcmp
```
#### Tips
- Use `cargo benchmark <filter>` to only run specific benchmarks. For example: `cargo benchmark linter/pydantic`
to only run the pydantic tests.
- Use `cargo benchmark --quiet` for cleaner output (it omits the statistical details)
- Use `cargo benchmark --quick` to get faster results (more prone to noise)
### Profiling Projects
You can either use the microbenchmarks from above or a project directory for benchmarking. There
are a lot of profiling tools out there;
[The Rust Performance Book](https://nnethercote.github.io/perf-book/profiling.html) lists some
examples.
#### Linux
Install `perf`, build `ruff_benchmark` with the `release-debug` profile, and then run it with `perf`:
```shell
cargo bench -p ruff_benchmark --no-run --profile=release-debug && perf record --call-graph dwarf -F 9999 cargo bench -p ruff_benchmark --profile=release-debug -- --profile-time=1
```
You can also use the `ruff_dev` launcher to run `ruff check` multiple times on a repository to
gather enough samples for a good flamegraph (adjust the sample rate, `999`, and the number of
checks, `30`, to your liking):
```shell
cargo build --bin ruff_dev --profile=release-debug
perf record -g -F 999 target/release-debug/ruff_dev repeat --repeat 30 --exit-zero --no-cache path/to/cpython > /dev/null
```
Then convert the recorded profile:
```shell
perf script -F +pid > /tmp/test.perf
```
You can now view the converted file with the [Firefox Profiler](https://profiler.firefox.com/); a
more in-depth guide is available [here](https://profiler.firefox.com/docs/#/./guide-perf-profiling).
An alternative is to convert the perf data to `flamegraph.svg` using
[flamegraph](https://github.com/flamegraph-rs/flamegraph) (`cargo install flamegraph`):
```shell
flamegraph --perfdata perf.data
```
#### Mac
Install [`cargo-instruments`](https://crates.io/crates/cargo-instruments):
```shell
cargo install cargo-instruments
```
Then run the profiler with:
```shell
cargo instruments -t time --bench linter --profile release-debug -p ruff_benchmark -- --profile-time=1
```
- `-t`: Specifies what to profile. Useful options are `time` to profile the wall time and `alloc`
to profile allocations.
- You may want to pass an additional filter to run a single test file.
Otherwise, follow the instructions from the Linux section.
## `cargo dev`
`cargo dev` is a shortcut for `cargo run --package ruff_dev --bin ruff_dev`. You can run some useful
utils with it:
- `cargo dev print-ast <file>`: Print the AST of a Python file using the
[RustPython parser](https://github.com/astral-sh/RustPython-Parser/tree/main/parser) that is
mainly used in Ruff. For `if True: pass # comment`, you can see the syntax tree, the byte offsets
for the start and end of each node, and also how the `:` token, the comment, and whitespace are not
represented anymore:
```text
[
If(
StmtIf {
range: 0..13,
test: Constant(
ExprConstant {
range: 3..7,
value: Bool(
true,
),
kind: None,
},
),
body: [
Pass(
StmtPass {
range: 9..13,
},
),
],
orelse: [],
},
),
]
```
- `cargo dev print-tokens <file>`: Print the tokens that the AST is built upon. Again for
`if True: pass # comment`:
```text
0 If 2
3 True 7
7 Colon 8
9 Pass 13
14 Comment(
"# comment",
) 23
23 Newline 24
```
- `cargo dev print-cst <file>`: Print the CST of a Python file using
[LibCST](https://github.com/Instagram/LibCST), which is used in addition to the RustPython parser
in Ruff. E.g., for `if True: pass # comment`, everything including the whitespace is represented:
```text
Module {
body: [
Compound(
If(
If {
test: Name(
Name {
value: "True",
lpar: [],
rpar: [],
},
),
body: SimpleStatementSuite(
SimpleStatementSuite {
body: [
Pass(
Pass {
semicolon: None,
},
),
],
leading_whitespace: SimpleWhitespace(
" ",
),
trailing_whitespace: TrailingWhitespace {
whitespace: SimpleWhitespace(
" ",
),
comment: Some(
Comment(
"# comment",
),
),
newline: Newline(
None,
Real,
),
},
},
),
orelse: None,
leading_lines: [],
whitespace_before_test: SimpleWhitespace(
" ",
),
whitespace_after_test: SimpleWhitespace(
"",
),
is_elif: false,
},
),
),
],
header: [],
footer: [],
default_indent: " ",
default_newline: "\n",
has_trailing_newline: true,
encoding: "utf-8",
}
```
- `cargo dev generate-all`: Update `ruff.schema.json`, `docs/configuration.md` and `docs/rules`.
You can also set `RUFF_UPDATE_SCHEMA=1` to update `ruff.schema.json` during `cargo test`.
- `cargo dev generate-cli-help`, `cargo dev generate-docs` and `cargo dev generate-json-schema`:
Update just `docs/configuration.md`, `docs/rules` and `ruff.schema.json` respectively.
- `cargo dev generate-options`: Generate a markdown-compatible table of all `pyproject.toml`
options. Used for <https://beta.ruff.rs/docs/settings/>
- `cargo dev generate-rules-table`: Generate a markdown-compatible table of all rules. Used for <https://beta.ruff.rs/docs/rules/>
- `cargo dev round-trip <python file or jupyter notebook>`: Read a Python file or Jupyter Notebook,
parse it, serialize the parsed representation and write it back. Used to check how good our
representation is so that fixes don't rewrite irrelevant parts of a file.
- `cargo dev format-dev`: See `crates/ruff_python_formatter/README.md`

722
Cargo.lock generated

File diff suppressed because it is too large

View File

@ -5,10 +5,11 @@ resolver = "2"
[workspace.package]
edition = "2021"
rust-version = "1.70"
homepage = "https://beta.ruff.rs/docs/"
documentation = "https://beta.ruff.rs/docs/"
homepage = "https://beta.ruff.rs/docs"
documentation = "https://beta.ruff.rs/docs"
repository = "https://github.com/astral-sh/ruff"
authors = ["Charlie Marsh <charlie.r.marsh@gmail.com>"]
license = "MIT"
[workspace.dependencies]
anyhow = { version = "1.0.69" }
@ -20,10 +21,9 @@ filetime = { version = "0.2.20" }
glob = { version = "0.3.1" }
globset = { version = "0.4.10" }
ignore = { version = "0.4.20" }
insta = { version = "1.28.0" }
insta = { version = "1.30.0" }
is-macro = { version = "0.2.2" }
itertools = { version = "0.10.5" }
libcst = { git = "https://github.com/charliermarsh/LibCST", rev = "80e4c1399f95e5beb532fdd1e209ad2dbb470438" }
log = { version = "0.4.17" }
memchr = "2.5.0"
nohash-hasher = { version = "0.2.0" }
@ -35,25 +35,35 @@ proc-macro2 = { version = "1.0.51" }
quote = { version = "1.0.23" }
regex = { version = "1.7.1" }
rustc-hash = { version = "1.1.0" }
ruff_text_size = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "0dc8fdf52d146698c5bcf0b842fddc9e398ad8db" }
rustpython-ast = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "0dc8fdf52d146698c5bcf0b842fddc9e398ad8db", default-features = false, features = ["all-nodes-with-ranges"]}
rustpython-format = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "0dc8fdf52d146698c5bcf0b842fddc9e398ad8db" }
rustpython-literal = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "0dc8fdf52d146698c5bcf0b842fddc9e398ad8db" }
rustpython-parser = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "0dc8fdf52d146698c5bcf0b842fddc9e398ad8db", default-features = false, features = ["full-lexer", "all-nodes-with-ranges"] }
schemars = { version = "0.8.12" }
serde = { version = "1.0.152", features = ["derive"] }
serde_json = { version = "1.0.93", features = ["preserve_order"] }
serde_json = { version = "1.0.93" }
shellexpand = { version = "3.0.0" }
similar = { version = "2.2.1" }
similar = { version = "2.2.1", features = ["inline"] }
smallvec = { version = "1.10.0" }
strum = { version = "0.24.1", features = ["strum_macros"] }
strum_macros = { version = "0.24.3" }
syn = { version = "2.0.15" }
test-case = { version = "3.0.0" }
thiserror = { version = "1.0.43" }
toml = { version = "0.7.2" }
wsl = { version = "0.1.0" }
# v1.0.1
libcst = { git = "https://github.com/Instagram/LibCST.git", rev = "3cacca1a1029f05707e50703b49fe3dd860aa839", default-features = false }
# Please tag the RustPython version every time you update its revision here and in fuzz/Cargo.toml
# Tagging the version ensures that older ruff versions continue to build from source even when we rebase our RustPython fork.
# Current tag: v0.0.7
ruff_text_size = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0" }
rustpython-ast = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0" , default-features = false, features = ["num-bigint"]}
rustpython-format = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0", default-features = false, features = ["num-bigint"] }
rustpython-literal = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0", default-features = false }
rustpython-parser = { git = "https://github.com/astral-sh/RustPython-Parser.git", rev = "c174bbf1f29527edd43d432326327f16f47ab9e0" , default-features = false, features = ["full-lexer", "num-bigint"] }
[profile.release]
lto = "fat"
codegen-units = 1
[profile.dev.package.insta]
opt-level = 3

51
LICENSE
View File

@ -1199,6 +1199,57 @@ are:
- flake8-django, licensed under the GPL license.
- perflint, licensed as follows:
"""
MIT License
Copyright (c) 2022 Anthony Shaw
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
- Pyright, licensed as follows:
"""
MIT License
Pyright - A static type checker for the Python language
Copyright (c) Microsoft Corporation. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE
"""
- rust-analyzer/text-size, licensed under the MIT license:
"""
Permission is hereby granted, free of charge, to any

View File

@ -14,9 +14,9 @@ An extremely fast Python linter, written in Rust.
<p align="center">
<picture align="center">
<source media="(prefers-color-scheme: dark)" srcset="https://user-images.githubusercontent.com/1309177/212613422-7faaf278-706b-4294-ad92-236ffcab3430.svg">
<source media="(prefers-color-scheme: light)" srcset="https://user-images.githubusercontent.com/1309177/212613257-5f4bca12-6d6b-4c79-9bac-51a4c6d08928.svg">
<img alt="Shows a bar chart with benchmark results." src="https://user-images.githubusercontent.com/1309177/212613257-5f4bca12-6d6b-4c79-9bac-51a4c6d08928.svg">
<source media="(prefers-color-scheme: dark)" srcset="https://user-images.githubusercontent.com/1309177/232603514-c95e9b0f-6b31-43de-9a80-9e844173fd6a.svg">
<source media="(prefers-color-scheme: light)" srcset="https://user-images.githubusercontent.com/1309177/232603516-4fb4892d-585c-4b20-b810-3db9161831e4.svg">
<img alt="Shows a bar chart with benchmark results." src="https://user-images.githubusercontent.com/1309177/232603516-4fb4892d-585c-4b20-b810-3db9161831e4.svg">
</picture>
</p>
@ -34,7 +34,8 @@ An extremely fast Python linter, written in Rust.
- ⚖️ [Near-parity](https://beta.ruff.rs/docs/faq/#how-does-ruff-compare-to-flake8) with the
built-in Flake8 rule set
- 🔌 Native re-implementations of dozens of Flake8 plugins, like flake8-bugbear
- ⌨️ First-party editor integrations for [VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://github.com/astral-sh/ruff-lsp)
- ⌨️ First-party [editor integrations](https://beta.ruff.rs/docs/editor-integrations/) for
[VS Code](https://github.com/astral-sh/ruff-vscode) and [more](https://github.com/astral-sh/ruff-lsp)
- 🌎 Monorepo-friendly, with [hierarchical and cascading configuration](https://beta.ruff.rs/docs/configuration/#pyprojecttoml-discovery)
Ruff aims to be orders of magnitude faster than alternative tools while integrating more
@ -139,7 +140,7 @@ Ruff can also be used as a [pre-commit](https://pre-commit.com) hook:
```yaml
- repo: https://github.com/astral-sh/ruff-pre-commit
# Ruff version.
rev: v0.0.272
rev: v0.0.278
hooks:
- id: ruff
```
@ -261,6 +262,7 @@ quality tools, including:
- [flake8-builtins](https://pypi.org/project/flake8-builtins/)
- [flake8-commas](https://pypi.org/project/flake8-commas/)
- [flake8-comprehensions](https://pypi.org/project/flake8-comprehensions/)
- [flake8-copyright](https://pypi.org/project/flake8-copyright/)
- [flake8-datetimez](https://pypi.org/project/flake8-datetimez/)
- [flake8-debugger](https://pypi.org/project/flake8-debugger/)
- [flake8-django](https://pypi.org/project/flake8-django/)
@ -329,9 +331,11 @@ We're grateful to the maintainers of these tools for their work, and for all
the value they've provided to the Python community.
Ruff's autoformatter is built on a fork of Rome's [`rome_formatter`](https://github.com/rome/tools/tree/main/crates/rome_formatter),
and again draws on both the APIs and implementation details of [Rome](https://github.com/rome/tools),
and again draws on both API and implementation details from [Rome](https://github.com/rome/tools),
[Prettier](https://github.com/prettier/prettier), and [Black](https://github.com/psf/black).
Ruff's import resolver is based on the import resolution algorithm from [Pyright](https://github.com/microsoft/pyright).
Ruff is also influenced by a number of tools outside the Python ecosystem, like
[Clippy](https://github.com/rust-lang/rust-clippy) and [ESLint](https://github.com/eslint/eslint).
@ -344,6 +348,7 @@ Ruff is released under the MIT license.
Ruff is used by a number of major open-source projects and companies, including:
- Amazon ([AWS SAM](https://github.com/aws/serverless-application-model))
- Anthropic ([Python SDK](https://github.com/anthropics/anthropic-sdk-python))
- [Apache Airflow](https://github.com/apache/airflow)
- AstraZeneca ([Magnus](https://github.com/AstraZeneca/magnus-core))
- Benchling ([Refac](https://github.com/benchling/refac))
@ -353,26 +358,30 @@ Ruff is used by a number of major open-source projects and companies, including:
- [DVC](https://github.com/iterative/dvc)
- [Dagger](https://github.com/dagger/dagger)
- [Dagster](https://github.com/dagster-io/dagster)
- Databricks ([MLflow](https://github.com/mlflow/mlflow))
- [FastAPI](https://github.com/tiangolo/fastapi)
- [Gradio](https://github.com/gradio-app/gradio)
- [Great Expectations](https://github.com/great-expectations/great_expectations)
- [HTTPX](https://github.com/encode/httpx)
- Hugging Face ([Transformers](https://github.com/huggingface/transformers),
[Datasets](https://github.com/huggingface/datasets),
[Diffusers](https://github.com/huggingface/diffusers))
- [Hatch](https://github.com/pypa/hatch)
- [Home Assistant](https://github.com/home-assistant/core)
- ING Bank ([popmon](https://github.com/ing-bank/popmon), [probatus](https://github.com/ing-bank/probatus))
- [Ibis](https://github.com/ibis-project/ibis)
- [Jupyter](https://github.com/jupyter-server/jupyter_server)
- [LangChain](https://github.com/hwchase17/langchain)
- [LlamaIndex](https://github.com/jerryjliu/llama_index)
- Matrix ([Synapse](https://github.com/matrix-org/synapse))
- Meltano ([Meltano CLI](https://github.com/meltano/meltano), [Singer SDK](https://github.com/meltano/sdk))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [MegaLinter](https://github.com/oxsecurity/megalinter)
- Meltano ([Meltano CLI](https://github.com/meltano/meltano), [Singer SDK](https://github.com/meltano/sdk))
- Microsoft ([Semantic Kernel](https://github.com/microsoft/semantic-kernel),
[ONNX Runtime](https://github.com/microsoft/onnxruntime),
[LightGBM](https://github.com/microsoft/LightGBM))
- Modern Treasury ([Python SDK](https://github.com/Modern-Treasury/modern-treasury-python-sdk))
- Mozilla ([Firefox](https://github.com/mozilla/gecko-dev))
- [Mypy](https://github.com/python/mypy)
- Netflix ([Dispatch](https://github.com/Netflix/dispatch))
- [Neon](https://github.com/neondatabase/neon)
- [ONNX](https://github.com/onnx/onnx)
@ -408,6 +417,7 @@ Ruff is used by a number of major open-source projects and companies, including:
- [featuretools](https://github.com/alteryx/featuretools)
- [meson-python](https://github.com/mesonbuild/meson-python)
- [nox](https://github.com/wntrblm/nox)
- [pip](https://github.com/pypa/pip)
### Show Your Support

View File

@ -2,9 +2,9 @@
extend-exclude = ["resources", "snapshots"]
[default.extend-words]
trivias = "trivias"
hel = "hel"
whos = "whos"
spawnve = "spawnve"
ned = "ned"
poit = "poit"
BA = "BA" # acronym for "Bad Allowed", used in testing.

BIN
assets/png/Astral.png Normal file

Binary file not shown.


View File

@ -1,8 +1,16 @@
[package]
name = "flake8-to-ruff"
version = "0.0.272"
version = "0.0.278"
description = """
Convert Flake8 configuration files to Ruff configuration files.
"""
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
[dependencies]
ruff = { path = "../ruff", default-features = false }

View File

@ -1,14 +1,15 @@
[package]
name = "ruff"
version = "0.0.272"
authors.workspace = true
edition.workspace = true
rust-version.workspace = true
documentation.workspace = true
homepage.workspace = true
repository.workspace = true
version = "0.0.278"
publish = false
authors = { workspace = true }
edition = { workspace = true }
rust-version = { workspace = true }
homepage = { workspace = true }
documentation = { workspace = true }
repository = { workspace = true }
license = { workspace = true }
readme = "README.md"
license = "MIT"
[lib]
name = "ruff"
@ -16,6 +17,7 @@ name = "ruff"
[dependencies]
ruff_cache = { path = "../ruff_cache" }
ruff_diagnostics = { path = "../ruff_diagnostics", features = ["serde"] }
ruff_index = { path = "../ruff_index" }
ruff_macros = { path = "../ruff_macros" }
ruff_python_whitespace = { path = "../ruff_python_whitespace" }
ruff_python_ast = { path = "../ruff_python_ast", features = ["serde"] }
@ -41,6 +43,7 @@ is-macro = { workspace = true }
itertools = { workspace = true }
libcst = { workspace = true }
log = { workspace = true }
memchr = { workspace = true }
natord = { version = "1.0.9" }
nohash-hasher = { workspace = true }
num-bigint = { workspace = true }
@ -64,16 +67,18 @@ schemars = { workspace = true, optional = true }
semver = { version = "1.0.16" }
serde = { workspace = true }
serde_json = { workspace = true }
similar = { workspace = true, features = ["inline"] }
serde_with = { version = "3.0.0" }
similar = { workspace = true }
shellexpand = { workspace = true }
smallvec = { workspace = true }
strum = { workspace = true }
strum_macros = { workspace = true }
thiserror = { version = "1.0.38" }
thiserror = { version = "1.0.43" }
toml = { workspace = true }
typed-arena = { version = "2.0.2" }
unicode-width = { version = "0.1.10" }
unicode_names2 = { version = "0.6.0", git = "https://github.com/youknowone/unicode_names2.git", rev = "4ce16aa85cbcdd9cc830410f1a72ef9a235f2fde" }
wsl = { version = "0.1.0" }
[dev-dependencies]
insta = { workspace = true }
@ -85,4 +90,5 @@ colored = { workspace = true, features = ["no-color"] }
[features]
default = []
schemars = ["dep:schemars"]
jupyter_notebook = []
# Enables the UnreachableCode rule
unreachable-code = []

View File

@ -0,0 +1,11 @@
def func():
assert True
def func():
assert False
def func():
assert True, "oops"
def func():
assert False, "oops"

View File

@ -0,0 +1,41 @@
def func():
async for i in range(5):
print(i)
def func():
async for i in range(20):
print(i)
else:
return 0
def func():
async for i in range(10):
if i == 5:
return 1
return 0
def func():
async for i in range(111):
if i == 5:
return 1
else:
return 0
return 2
def func():
async for i in range(12):
continue
def func():
async for i in range(1110):
if True:
continue
def func():
async for i in range(13):
break
def func():
async for i in range(1110):
if True:
break

View File

@ -0,0 +1,41 @@
def func():
for i in range(5):
print(i)
def func():
for i in range(20):
print(i)
else:
return 0
def func():
for i in range(10):
if i == 5:
return 1
return 0
def func():
for i in range(111):
if i == 5:
return 1
else:
return 0
return 2
def func():
for i in range(12):
continue
def func():
for i in range(1110):
if True:
continue
def func():
for i in range(13):
break
def func():
for i in range(1110):
if True:
break

View File

@ -0,0 +1,108 @@
def func():
if False:
return 0
return 1
def func():
if True:
return 1
return 0
def func():
if False:
return 0
else:
return 1
def func():
if True:
return 1
else:
return 0
def func():
if False:
return 0
else:
return 1
return "unreachable"
def func():
if True:
return 1
else:
return 0
return "unreachable"
def func():
if True:
if True:
return 1
return 2
else:
return 3
return "unreachable2"
def func():
if False:
return 0
def func():
if True:
return 1
def func():
if True:
return 1
elif False:
return 2
else:
return 0
def func():
if False:
return 1
elif True:
return 2
else:
return 0
def func():
if True:
if False:
return 0
elif True:
return 1
else:
return 2
return 3
elif True:
return 4
else:
return 5
return 6
def func():
if False:
return "unreached"
elif False:
return "also unreached"
return "reached"
# Test case found in the Bokeh repository that triggers a false positive.
def func(self, obj: BytesRep) -> bytes:
data = obj["data"]
if isinstance(data, str):
return base64.b64decode(data)
elif isinstance(data, Buffer):
buffer = data
else:
id = data["id"]
if id in self._buffers:
buffer = self._buffers[id]
else:
self.error(f"can't resolve buffer '{id}'")
return buffer.data

View File

@ -0,0 +1,131 @@
def func(status):
match status:
case _:
return 0
return "unreachable"
def func(status):
match status:
case 1:
return 1
return 0
def func(status):
match status:
case 1:
return 1
case _:
return 0
def func(status):
match status:
case 1 | 2 | 3:
return 5
return 6
def func(status):
match status:
case 1 | 2 | 3:
return 5
case _:
return 10
return 0
def func(status):
match status:
case 0:
return 0
case 1:
return 1
case 1:
return "1 again"
case _:
return 3
def func(status):
i = 0
match status, i:
case _, _:
return 0
def func(status):
i = 0
match status, i:
case _, 0:
return 0
case _, 2:
return 0
def func(point):
match point:
case (0, 0):
print("Origin")
case _:
raise ValueError("oops")
def func(point):
match point:
case (0, 0):
print("Origin")
case (0, y):
print(f"Y={y}")
case (x, 0):
print(f"X={x}")
case (x, y):
print(f"X={x}, Y={y}")
case _:
raise ValueError("Not a point")
def where_is(point):
class Point:
x: int
y: int
match point:
case Point(x=0, y=0):
print("Origin")
case Point(x=0, y=y):
print(f"Y={y}")
case Point(x=x, y=0):
print(f"X={x}")
case Point():
print("Somewhere else")
case _:
print("Not a point")
def func(points):
match points:
case []:
print("No points")
case [Point(0, 0)]:
print("The origin")
case [Point(x, y)]:
print(f"Single point {x}, {y}")
case [Point(0, y1), Point(0, y2)]:
print(f"Two on the Y axis at {y1}, {y2}")
case _:
print("Something else")
def func(point):
match point:
case Point(x, y) if x == y:
print(f"Y=X at {x}")
case Point(x, y):
print(f"Not on the diagonal")
def func():
from enum import Enum
class Color(Enum):
RED = 'red'
GREEN = 'green'
BLUE = 'blue'
color = Color(input("Enter your choice of 'red', 'blue' or 'green': "))
match color:
case Color.RED:
print("I see red!")
case Color.GREEN:
print("Grass is green")
case Color.BLUE:
print("I'm feeling the blues :(")

View File

@ -0,0 +1,5 @@
def func():
raise Exception
def func():
raise "a glass!"

View File

@ -0,0 +1,23 @@
def func():
pass
def func():
pass
def func():
return
def func():
return 1
def func():
return 1
return "unreachable"
def func():
i = 0
def func():
i = 0
i += 2
return i

View File

@ -0,0 +1,41 @@
def func():
try:
...
except Exception:
...
except OtherException as e:
...
else:
...
finally:
...
def func():
try:
...
except Exception:
...
def func():
try:
...
except Exception:
...
except OtherException as e:
...
def func():
try:
...
except Exception:
...
except OtherException as e:
...
else:
...
def func():
try:
...
finally:
...

View File

@ -0,0 +1,121 @@
def func():
while False:
return "unreachable"
return 1
def func():
while False:
return "unreachable"
else:
return 1
def func():
while False:
return "unreachable"
else:
return 1
return "also unreachable"
def func():
while True:
return 1
return "unreachable"
def func():
while True:
return 1
else:
return "unreachable"
def func():
while True:
return 1
else:
return "unreachable"
return "also unreachable"
def func():
i = 0
while False:
i += 1
return i
def func():
i = 0
while True:
i += 1
return i
def func():
while True:
pass
return 1
def func():
i = 0
while True:
if True:
print("ok")
i += 1
return i
def func():
i = 0
while True:
if False:
print("ok")
i += 1
return i
def func():
while True:
if True:
return 1
return 0
def func():
while True:
continue
def func():
while False:
continue
def func():
while True:
break
def func():
while False:
break
def func():
while True:
if True:
continue
def func():
while True:
if True:
break
'''
TODO: because `try` statements aren't handled, this triggers a false positive:
the last statement is reachable, but the rule thinks it isn't (it doesn't
see/process the `break` statement). A minimal sketch follows this block.
# Test case found in the Bokeh repository that triggers a false positive.
def bokeh2(self, host: str = DEFAULT_HOST, port: int = DEFAULT_PORT) -> None:
self.stop_serving = False
while True:
try:
self.server = HTTPServer((host, port), HtmlOnlyHandler)
self.host = host
self.port = port
break
except OSError:
log.debug(f"port {port} is in use, trying to next one")
port += 1
self.thread = threading.Thread(target=self._run_web_server)
'''
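A minimal sketch of the shape the TODO above describes, assuming the rule behaves as stated there (hypothetical, not part of the fixture): the `break` sits inside a `try`, so the rule would assume the `while True` loop never exits and flag the statement after it.
def func():
    while True:
        try:
            break  # not seen by the rule, per the TODO above
        except Exception:
            pass
    return 1  # reachable at runtime, but would be reported as unreachable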

View File

@ -1,4 +1,4 @@
from typing import Any, Type
from typing import Annotated, Any, Optional, Type, Union
from typing_extensions import override
# Error
@ -95,27 +95,27 @@ class Foo:
def foo(self: "Foo", a: int, *params: str, **options: Any) -> int:
pass
# ANN401
# OK
@override
def foo(self: "Foo", a: Any, *params: str, **options: str) -> int:
pass
# ANN401
# OK
@override
def foo(self: "Foo", a: int, *params: str, **options: str) -> Any:
pass
# ANN401
# OK
@override
def foo(self: "Foo", a: int, *params: Any, **options: Any) -> int:
pass
# ANN401
# OK
@override
def foo(self: "Foo", a: int, *params: Any, **options: str) -> int:
pass
# ANN401
# OK
@override
def foo(self: "Foo", a: int, *params: str, **options: Any) -> int:
pass
@ -137,3 +137,17 @@ class Foo:
# OK
def f(*args: *tuple[int]) -> None: ...
def f(a: object) -> None: ...
def f(a: str | bytes) -> None: ...
def f(a: Union[str, bytes]) -> None: ...
def f(a: Optional[str]) -> None: ...
def f(a: Annotated[str, ...]) -> None: ...
def f(a: "Union[str, bytes]") -> None: ...
# ANN401
def f(a: Any | int) -> None: ...
def f(a: int | Any) -> None: ...
def f(a: Union[str, bytes, Any]) -> None: ...
def f(a: Optional[Any]) -> None: ...
def f(a: Annotated[Any, ...]) -> None: ...
def f(a: "Union[str, bytes, Any]") -> None: ...

View File

@ -0,0 +1,12 @@
import os
print(eval("1+1")) # S307
print(eval("os.getcwd()")) # S307
class Class(object):
def eval(self):
print("hi")
def foo(self):
self.eval() # OK

View File

@ -23,6 +23,10 @@ class Foobar(unittest.TestCase):
with self.assertRaises(Exception):
raise Exception("Evil I say!")
def also_evil_raises(self) -> None:
with self.assertRaises(BaseException):
raise Exception("Evil I say!")
def context_manager_raises(self) -> None:
with self.assertRaises(Exception) as ex:
raise Exception("Context manager is good")
@ -41,6 +45,9 @@ def test_pytest_raises():
with pytest.raises(Exception):
raise ValueError("Hello")
with pytest.raises(Exception), pytest.raises(ValueError):
raise ValueError("Hello")
with pytest.raises(Exception, "hello"):
raise ValueError("This is fine")

View File

@ -0,0 +1,27 @@
import re
from re import sub
# B034
re.sub("a", "b", "aaa", re.IGNORECASE)
re.sub("a", "b", "aaa", 5)
re.sub("a", "b", "aaa", 5, re.IGNORECASE)
re.subn("a", "b", "aaa", re.IGNORECASE)
re.subn("a", "b", "aaa", 5)
re.subn("a", "b", "aaa", 5, re.IGNORECASE)
re.split(" ", "a a a a", re.I)
re.split(" ", "a a a a", 2)
re.split(" ", "a a a a", 2, re.I)
sub("a", "b", "aaa", re.IGNORECASE)
# OK
re.sub("a", "b", "aaa")
re.sub("a", "b", "aaa", flags=re.IGNORECASE)
re.sub("a", "b", "aaa", count=5)
re.sub("a", "b", "aaa", count=5, flags=re.IGNORECASE)
re.subn("a", "b", "aaa")
re.subn("a", "b", "aaa", flags=re.IGNORECASE)
re.subn("a", "b", "aaa", count=5)
re.subn("a", "b", "aaa", count=5, flags=re.IGNORECASE)
re.split(" ", "a a a a", flags=re.I)
re.split(" ", "a a a a", maxsplit=2)
re.split(" ", "a a a a", maxsplit=2, flags=re.I)

View File

@ -1,6 +1,6 @@
class MyClass:
ImportError = 4
id = 5
id: int
dir = "/"
def __init__(self):
@ -10,3 +10,10 @@ class MyClass:
def str(self):
pass
from typing import TypedDict
class MyClass(TypedDict):
id: int

View File

@ -25,10 +25,15 @@ map(lambda x=2, y=1: x + y, nums, nums)
set(map(lambda x, y: x, nums, nums))
def myfunc(arg1: int, arg2: int = 4):
def func(arg1: int, arg2: int = 4):
return 2 * arg1 + arg2
list(map(myfunc, nums))
# Non-error: `func` is not a lambda.
list(map(func, nums))
[x for x in nums]
# False positive: need to preserve the late-binding of `x` in the inner lambda.
map(lambda x: lambda: x, range(4))
# Error: the `x` is overridden by the inner lambda.
map(lambda x: lambda x: x, range(4))
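An illustrative aside on the late-binding comment above (not part of the fixture): each call of the outer lambda creates a new scope, so every inner lambda captures its own `x`, whereas a naive comprehension rewrite would share a single loop variable.
fns = list(map(lambda x: lambda: x, range(4)))
print([f() for f in fns])  # [0, 1, 2, 3]
fns = [lambda: x for x in range(4)]
print([f() for f in fns])  # [3, 3, 3, 3] -- the binding is lost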

View File

@ -19,3 +19,6 @@ from datetime import datetime
# no args unqualified
datetime(2000, 1, 1, 0, 0, 0)
# uses `astimezone` method
datetime(2000, 1, 1, 0, 0, 0).astimezone()

View File

@ -7,3 +7,6 @@ from datetime import datetime
# unqualified
datetime.today()
# uses `astimezone` method
datetime.today().astimezone()

View File

@ -7,3 +7,6 @@ from datetime import datetime
# unqualified
datetime.utcnow()
# uses `astimezone` method
datetime.utcnow().astimezone()

View File

@ -7,3 +7,6 @@ from datetime import datetime
# unqualified
datetime.utcfromtimestamp(1234)
# uses `astimezone` method
datetime.utcfromtimestamp(1234).astimezone()

View File

@ -16,3 +16,6 @@ from datetime import datetime
# no args unqualified
datetime.now()
# uses `astimezone` method
datetime.now().astimezone()

View File

@ -16,3 +16,6 @@ from datetime import datetime
# no args unqualified
datetime.fromtimestamp(1234)
# uses `astimezone` method
datetime.fromtimestamp(1234).astimezone()
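An aside on the datetime fixtures above (a sketch, not fixture content): one common way to satisfy these rules is to pass an explicit timezone up front rather than calling `.astimezone()` afterwards.
from datetime import datetime, timezone
datetime.now(tz=timezone.utc)                  # instead of datetime.now() / datetime.utcnow()
datetime.fromtimestamp(1234, tz=timezone.utc)  # instead of datetime.utcfromtimestamp(1234)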

View File

@ -111,3 +111,19 @@ class PerfectlyFine(models.Model):
@property
def random_property(self):
return "%s" % self
class MultipleConsecutiveFields(models.Model):
"""Model that contains multiple out-of-order field definitions in a row."""
class Meta:
verbose_name = "test"
first_name = models.CharField(max_length=32)
last_name = models.CharField(max_length=32)
def get_absolute_url(self):
pass
middle_name = models.CharField(max_length=32)

View File

@ -34,3 +34,19 @@ _ = (
b"abc"
b"def"
)
_ = """a""" """b"""
_ = """a
b""" """c
d"""
_ = f"""a""" f"""b"""
_ = f"a" "b"
_ = """a""" "b"
_ = 'a' "b"
_ = rf"a" rf"b"

View File

@ -5,15 +5,18 @@ import matplotlib.pyplot # unconventional
import numpy # unconventional
import pandas # unconventional
import seaborn # unconventional
import tkinter # unconventional
import altair as altr # unconventional
import matplotlib.pyplot as plot # unconventional
import numpy as nmp # unconventional
import pandas as pdas # unconventional
import seaborn as sbrn # unconventional
import tkinter as tkr # unconventional
import altair as alt # conventional
import matplotlib.pyplot as plt # conventional
import numpy as np # conventional
import pandas as pd # conventional
import seaborn as sns # conventional
import tkinter as tk # conventional

View File

@ -0,0 +1,6 @@
import sys
if sys.version == 'Python 2.7.10': ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info
if 'linux' == sys.platform: ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info
if hasattr(sys, 'maxint'): ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info
if sys.maxsize == 42: ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info

View File

@ -0,0 +1,6 @@
import sys
if sys.version == 'Python 2.7.10': ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info
if 'linux' == sys.platform: ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info
if hasattr(sys, 'maxint'): ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info
if sys.maxsize == 42: ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info

View File

@ -0,0 +1,31 @@
import sys
if sys.version_info[0] == 2: ...
if sys.version_info[0] == True: ... # Y003 Unrecognized sys.version_info check # E712 comparison to True should be 'if cond is True:' or 'if cond:'
if sys.version_info[0.0] == 2: ... # Y003 Unrecognized sys.version_info check
if sys.version_info[False] == 2: ... # Y003 Unrecognized sys.version_info check
if sys.version_info[0j] == 2: ... # Y003 Unrecognized sys.version_info check
if sys.version_info[0] == (2, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[0] == '2': ... # Y003 Unrecognized sys.version_info check
if sys.version_info[1:] >= (7, 11): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[::-1] < (11, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:3] >= (2, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:True] >= (2, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:1] == (2,): ...
if sys.version_info[:1] == (True,): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:1] == (2, 7): ... # Y005 Version comparison must be against a length-1 tuple
if sys.version_info[:2] == (2, 7): ...
if sys.version_info[:2] == (2,): ... # Y005 Version comparison must be against a length-2 tuple
if sys.version_info[:2] == "lol": ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:2.0] >= (3, 9): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:2j] >= (3, 9): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:, :] >= (2, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info < [3, 0]: ... # Y003 Unrecognized sys.version_info check
if sys.version_info < ('3', '0'): ... # Y003 Unrecognized sys.version_info check
if sys.version_info >= (3, 4, 3): ... # Y004 Version comparison must use only major and minor version
if sys.version_info == (3, 4): ... # Y006 Use only < and >= for version comparisons
if sys.version_info > (3, 0): ... # Y006 Use only < and >= for version comparisons
if sys.version_info <= (3, 0): ... # Y006 Use only < and >= for version comparisons
if sys.version_info < (3, 5): ...
if sys.version_info >= (3, 5): ...
if (2, 7) <= sys.version_info < (3, 5): ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info

View File

@ -0,0 +1,31 @@
import sys
if sys.version_info[0] == 2: ...
if sys.version_info[0] == True: ... # Y003 Unrecognized sys.version_info check # E712 comparison to True should be 'if cond is True:' or 'if cond:'
if sys.version_info[0.0] == 2: ... # Y003 Unrecognized sys.version_info check
if sys.version_info[False] == 2: ... # Y003 Unrecognized sys.version_info check
if sys.version_info[0j] == 2: ... # Y003 Unrecognized sys.version_info check
if sys.version_info[0] == (2, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[0] == '2': ... # Y003 Unrecognized sys.version_info check
if sys.version_info[1:] >= (7, 11): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[::-1] < (11, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:3] >= (2, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:True] >= (2, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:1] == (2,): ...
if sys.version_info[:1] == (True,): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:1] == (2, 7): ... # Y005 Version comparison must be against a length-1 tuple
if sys.version_info[:2] == (2, 7): ...
if sys.version_info[:2] == (2,): ... # Y005 Version comparison must be against a length-2 tuple
if sys.version_info[:2] == "lol": ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:2.0] >= (3, 9): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:2j] >= (3, 9): ... # Y003 Unrecognized sys.version_info check
if sys.version_info[:, :] >= (2, 7): ... # Y003 Unrecognized sys.version_info check
if sys.version_info < [3, 0]: ... # Y003 Unrecognized sys.version_info check
if sys.version_info < ('3', '0'): ... # Y003 Unrecognized sys.version_info check
if sys.version_info >= (3, 4, 3): ... # Y004 Version comparison must use only major and minor version
if sys.version_info == (3, 4): ... # Y006 Use only < and >= for version comparisons
if sys.version_info > (3, 0): ... # Y006 Use only < and >= for version comparisons
if sys.version_info <= (3, 0): ... # Y006 Use only < and >= for version comparisons
if sys.version_info < (3, 5): ...
if sys.version_info >= (3, 5): ...
if (2, 7) <= sys.version_info < (3, 5): ... # Y002 If test must be a simple comparison against sys.platform or sys.version_info

View File

@ -0,0 +1,15 @@
import sys
from sys import version_info
if sys.version_info >= (3, 4, 3): ... # PYI004
if sys.version_info < (3, 4, 3): ... # PYI004
if sys.version_info == (3, 4, 3): ... # PYI004
if sys.version_info != (3, 4, 3): ... # PYI004
if sys.version_info[0] == 2: ...
if version_info[0] == 2: ...
if sys.version_info < (3, 5): ...
if version_info >= (3, 5): ...
if sys.version_info[:2] == (2, 7): ...
if sys.version_info[:1] == (2,): ...
if sys.platform == 'linux': ...

View File

@ -0,0 +1,15 @@
import sys
from sys import version_info
if sys.version_info >= (3, 4, 3): ... # PYI004
if sys.version_info < (3, 4, 3): ... # PYI004
if sys.version_info == (3, 4, 3): ... # PYI004
if sys.version_info != (3, 4, 3): ... # PYI004
if sys.version_info[0] == 2: ...
if version_info[0] == 2: ...
if sys.version_info < (3, 5): ...
if version_info >= (3, 5): ...
if sys.version_info[:2] == (2, 7): ...
if sys.version_info[:1] == (2,): ...
if sys.platform == 'linux': ...

View File

@ -0,0 +1,14 @@
import sys
from sys import platform, version_info
if sys.version_info[:1] == (2, 7): ... # Y005
if sys.version_info[:2] == (2,): ... # Y005
if sys.version_info[0] == 2: ...
if version_info[0] == 2: ...
if sys.version_info < (3, 5): ...
if version_info >= (3, 5): ...
if sys.version_info[:2] == (2, 7): ...
if sys.version_info[:1] == (2,): ...
if platform == 'linux': ...

View File

@ -0,0 +1,14 @@
import sys
from sys import platform, version_info
if sys.version_info[:1] == (2, 7): ... # Y005
if sys.version_info[:2] == (2,): ... # Y005
if sys.version_info[0] == 2: ...
if version_info[0] == 2: ...
if sys.version_info < (3, 5): ...
if version_info >= (3, 5): ...
if sys.version_info[:2] == (2, 7): ...
if sys.version_info[:1] == (2,): ...
if platform == 'linux': ...

View File

@ -91,3 +91,4 @@ field27 = list[str]
field28 = builtins.str
field29 = str
field30 = str | bytes | None
field31: typing.Final = field30

View File

@ -98,3 +98,4 @@ field27 = list[str]
field28 = builtins.str
field29 = str
field30 = str | bytes | None
field31: typing.Final = field30

View File

@ -1,3 +1,5 @@
import typing
# Shouldn't affect non-union field types.
field1: str
@ -30,3 +32,42 @@ field10: (str | int) | str # PYI016: Duplicate union member `str`
# Should emit for nested unions.
field11: dict[int | int, str]
# Should emit for unions with more than two cases
field12: int | int | int # Error
field13: int | int | int | int # Error
# Should emit for unions with more than two cases, even if not directly adjacent
field14: int | int | str | int # Error
# Should emit for duplicate literal types; also covered by PYI030
field15: typing.Literal[1] | typing.Literal[1] # Error
# Shouldn't emit if in new parent type
field16: int | dict[int, str] # OK
# Shouldn't emit if not in a union parent
field17: dict[int, int] # OK
# Should emit in cases with newlines
field18: typing.Union[
set[
int # foo
],
set[
int # bar
],
] # Error, newline and comment will not be emitted in message
# Should emit in cases with `typing.Union` instead of `|`
field19: typing.Union[int, int] # Error
# Should emit in cases with nested `typing.Union`
field20: typing.Union[int, typing.Union[int, str]] # Error
# Should emit in cases with mixed `typing.Union` and `|`
field21: typing.Union[int, int | str] # Error
# Should emit only once in cases with multiple nested `typing.Union`
field22: typing.Union[int, typing.Union[int, typing.Union[int, int]]] # Error

View File

@ -1,19 +1,19 @@
def f():
from collections.abc import Set as AbstractSet # Ok
from collections.abc import Set # Ok
def f():
from collections.abc import Container, Sized, Set as AbstractSet, ValuesView # Ok
from collections.abc import (
Container,
Sized,
Set, # Ok
ValuesView
)
def f():
from collections.abc import Set # PYI025
from collections.abc import (
Container,
Sized,
Set as AbstractSet, # Ok
ValuesView
)
def f():
from collections.abc import Container, Sized, Set, ValuesView # PYI025
GLOBAL: Set[int] = set()
class Class:
member: Set[int]

View File

@ -1,19 +1,50 @@
def f():
from collections.abc import Set as AbstractSet # Ok
def f():
from collections.abc import Container, Sized, Set as AbstractSet, ValuesView # Ok
def f():
from collections.abc import Set # PYI025
def f():
from collections.abc import Container, Sized, Set, ValuesView # PYI025
from collections.abc import (
Container,
Sized,
Set, # PYI025
ValuesView
)
def f():
"""Test: local symbol renaming."""
if True:
from collections.abc import Set
else:
Set = 1
from collections.abc import (
Container,
Sized,
Set as AbstractSet,
ValuesView # Ok
)
x: Set = set()
x: Set
del Set
def f():
print(Set)
def Set():
pass
print(Set)
from collections.abc import Set
def f():
"""Test: global symbol renaming."""
global Set
Set = 1
print(Set)
def f():
"""Test: nonlocal symbol renaming."""
from collections.abc import Set
def g():
nonlocal Set
Set = 1
print(Set)

View File

@ -0,0 +1,24 @@
from typing import Literal
# Shouldn't emit for any cases in the non-stub file for compatibility with flake8-pyi.
# Note that this rule could be applied here in the future.
field1: Literal[1] # OK
field2: Literal[1] | Literal[2] # OK
def func1(arg1: Literal[1] | Literal[2]): # OK
print(arg1)
def func2() -> Literal[1] | Literal[2]: # OK
return "my Literal[1]ing"
field3: Literal[1] | Literal[2] | str # OK
field4: str | Literal[1] | Literal[2] # OK
field5: Literal[1] | str | Literal[2] # OK
field6: Literal[1] | bool | Literal[2] | str # OK
field7 = Literal[1] | Literal[2] # OK
field8: Literal[1] | (Literal[2] | str) # OK
field9: Literal[1] | (Literal[2] | str) # OK
field10: (Literal[1] | str) | Literal[2] # OK
field11: dict[Literal[1] | Literal[2], str] # OK

View File

@ -0,0 +1,86 @@
import typing
import typing_extensions
from typing import Literal
# Shouldn't affect non-union field types.
field1: Literal[1] # OK
# Should emit for duplicate field types.
field2: Literal[1] | Literal[2] # Error
# Should emit for union types in arguments.
def func1(arg1: Literal[1] | Literal[2]): # Error
print(arg1)
# Should emit for unions in return types.
def func2() -> Literal[1] | Literal[2]: # Error
return "my Literal[1]ing"
# Should emit in longer unions, even if not directly adjacent.
field3: Literal[1] | Literal[2] | str # Error
field4: str | Literal[1] | Literal[2] # Error
field5: Literal[1] | str | Literal[2] # Error
field6: Literal[1] | bool | Literal[2] | str # Error
# Should emit for non-type unions.
field7 = Literal[1] | Literal[2] # Error
# Should emit for parenthesized unions.
field8: Literal[1] | (Literal[2] | str) # Error
# Should handle user parentheses when fixing.
field9: Literal[1] | (Literal[2] | str) # Error
field10: (Literal[1] | str) | Literal[2] # Error
# Should emit for union in generic parent type.
field11: dict[Literal[1] | Literal[2], str] # Error
# Should emit for unions with more than two cases
field12: Literal[1] | Literal[2] | Literal[3] # Error
field13: Literal[1] | Literal[2] | Literal[3] | Literal[4] # Error
# Should emit for unions with more than two cases, even if not directly adjacent
field14: Literal[1] | Literal[2] | str | Literal[3] # Error
# Should emit for unions with mixed literal internal types
field15: Literal[1] | Literal["foo"] | Literal[True] # Error
# Shouldn't emit for duplicate field types with same value; covered by Y016
field16: Literal[1] | Literal[1] # OK
# Shouldn't emit if in new parent type
field17: Literal[1] | dict[Literal[2], str] # OK
# Shouldn't emit if not in a union parent
field18: dict[Literal[1], Literal[2]] # OK
# Should respect name of literal type used
field19: typing.Literal[1] | typing.Literal[2] # Error
# Should emit in cases with newlines
field20: typing.Union[
Literal[
1 # test
],
Literal[2],
] # Error, newline and comment will not be emitted in message
# Should handle multiple unions with multiple members
field21: Literal[1, 2] | Literal[3, 4] # Error
# Should emit in cases with `typing.Union` instead of `|`
field22: typing.Union[Literal[1], Literal[2]] # Error
# Should emit in cases with `typing_extensions.Literal`
field23: typing_extensions.Literal[1] | typing_extensions.Literal[2] # Error
# Should emit in cases with nested `typing.Union`
field24: typing.Union[Literal[1], typing.Union[Literal[2], str]] # Error
# Should emit in cases with mixed `typing.Union` and `|`
field25: typing.Union[Literal[1], Literal[2] | str] # Error
# Should emit only once in cases with multiple nested `typing.Union`
field24: typing.Union[Literal[1], typing.Union[Literal[2], typing.Union[Literal[3], Literal[4]]]] # Error

View File

@ -0,0 +1,75 @@
import builtins
import types
import typing
from collections.abc import Awaitable
from types import TracebackType
from typing import Any, Type
import _typeshed
import typing_extensions
from _typeshed import Unused
class GoodOne:
def __exit__(self, *args: object) -> None: ...
async def __aexit__(self, *args) -> str: ...
class GoodTwo:
def __exit__(self, typ: type[builtins.BaseException] | None, *args: builtins.object) -> bool | None: ...
async def __aexit__(self, /, typ: Type[BaseException] | None, *args: object, **kwargs) -> bool: ...
class GoodThree:
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: object) -> None: ...
async def __aexit__(self, typ: typing_extensions.Type[BaseException] | None, __exc: BaseException | None, *args: object) -> None: ...
class GoodFour:
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, *args: list[None]) -> None: ...
class GoodFive:
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: int, **kwargs: str) -> None: ...
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> Awaitable[None]: ...
class GoodSix:
def __exit__(self, typ: object, exc: builtins.object, tb: object) -> None: ...
async def __aexit__(self, typ: object, exc: object, tb: builtins.object) -> None: ...
class GoodSeven:
def __exit__(self, *args: Unused) -> bool: ...
async def __aexit__(self, typ: Type[BaseException] | None, *args: _typeshed.Unused) -> Awaitable[None]: ...
class GoodEight:
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: _typeshed.Unused) -> bool: ...
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
class GoodNine:
def __exit__(self, __typ: typing.Union[typing.Type[BaseException] , None], exc: typing.Union[BaseException , None], *args: _typeshed.Unused) -> bool: ...
async def __aexit__(self, typ: typing.Union[typing.Type[BaseException], None], exc: typing.Union[BaseException , None], tb: typing.Union[TracebackType , None], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
class GoodTen:
def __exit__(self, __typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], *args: _typeshed.Unused) -> bool: ...
async def __aexit__(self, typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], tb: typing.Optional[TracebackType], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
class BadOne:
def __exit__(self, *args: Any) -> None: ... # PYI036: Bad star-args annotation
async def __aexit__(self) -> None: ... # PYI036: Missing args
class BadTwo:
def __exit__(self, typ, exc, tb, weird_extra_arg) -> None: ... # PYI036: Extra arg must have default
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ...# PYI036: Extra arg must have default
class BadThree:
def __exit__(self, typ: type[BaseException], exc: BaseException | None, tb: TracebackType | None) -> None: ... # PYI036: First arg has bad annotation
async def __aexit__(self, __typ: type[BaseException] | None, __exc: BaseException, __tb: TracebackType) -> bool | None: ... # PYI036: Second arg has bad annotation
class BadFour:
def __exit__(self, typ: typing.Optional[type[BaseException]], exc: typing.Union[BaseException, None], tb: TracebackType) -> None: ... # PYI036: Third arg has bad annotation
async def __aexit__(self, __typ: type[BaseException] | None, __exc: BaseException | None, __tb: typing.Union[TracebackType, None, int]) -> bool | None: ... # PYI036: Third arg has bad annotation
class BadFive:
def __exit__(self, typ: BaseException | None, *args: list[str]) -> bool: ... # PYI036: Bad star-args annotation
async def __aexit__(self, /, typ: type[BaseException] | None, *args: Any) -> Awaitable[None]: ... # PYI036: Bad star-args annotation
class BadSix:
def __exit__(self, typ, exc, tb, weird_extra_arg, extra_arg2 = None) -> None: ... # PYI036: Extra arg must have default
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ... # PYI036: kwargs must have default

View File

@ -0,0 +1,75 @@
import builtins
import types
import typing
from collections.abc import Awaitable
from types import TracebackType
from typing import Any, Type
import _typeshed
import typing_extensions
from _typeshed import Unused
class GoodOne:
def __exit__(self, *args: object) -> None: ...
async def __aexit__(self, *args) -> str: ...
class GoodTwo:
def __exit__(self, typ: type[builtins.BaseException] | None, *args: builtins.object) -> bool | None: ...
async def __aexit__(self, /, typ: Type[BaseException] | None, *args: object, **kwargs) -> bool: ...
class GoodThree:
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: object) -> None: ...
async def __aexit__(self, typ: typing_extensions.Type[BaseException] | None, __exc: BaseException | None, *args: object) -> None: ...
class GoodFour:
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> None: ...
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: types.TracebackType | None, *args: list[None]) -> None: ...
class GoodFive:
def __exit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: int, **kwargs: str) -> None: ...
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None) -> Awaitable[None]: ...
class GoodSix:
def __exit__(self, typ: object, exc: builtins.object, tb: object) -> None: ...
async def __aexit__(self, typ: object, exc: object, tb: builtins.object) -> None: ...
class GoodSeven:
def __exit__(self, *args: Unused) -> bool: ...
async def __aexit__(self, typ: Type[BaseException] | None, *args: _typeshed.Unused) -> Awaitable[None]: ...
class GoodEight:
def __exit__(self, __typ: typing.Type[BaseException] | None, exc: BaseException | None, *args: _typeshed.Unused) -> bool: ...
async def __aexit__(self, typ: type[BaseException] | None, exc: BaseException | None, tb: TracebackType | None, weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
class GoodNine:
def __exit__(self, __typ: typing.Union[typing.Type[BaseException] , None], exc: typing.Union[BaseException , None], *args: _typeshed.Unused) -> bool: ...
async def __aexit__(self, typ: typing.Union[typing.Type[BaseException], None], exc: typing.Union[BaseException , None], tb: typing.Union[TracebackType , None], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
class GoodTen:
def __exit__(self, __typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], *args: _typeshed.Unused) -> bool: ...
async def __aexit__(self, typ: typing.Optional[typing.Type[BaseException]], exc: typing.Optional[BaseException], tb: typing.Optional[TracebackType], weird_extra_arg: int = ..., *args: Unused, **kwargs: Unused) -> Awaitable[None]: ...
class BadOne:
def __exit__(self, *args: Any) -> None: ... # PYI036: Bad star-args annotation
async def __aexit__(self) -> None: ... # PYI036: Missing args
class BadTwo:
def __exit__(self, typ, exc, tb, weird_extra_arg) -> None: ... # PYI036: Extra arg must have default
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg1, weird_extra_arg2) -> None: ...# PYI036: kwargs must have default
class BadThree:
def __exit__(self, typ: type[BaseException], exc: BaseException | None, tb: TracebackType | None) -> None: ... # PYI036: First arg has bad annotation
async def __aexit__(self, __typ: type[BaseException] | None, __exc: BaseException, __tb: TracebackType) -> bool | None: ... # PYI036: Second arg has bad annotation
class BadFour:
def __exit__(self, typ: typing.Optional[type[BaseException]], exc: typing.Union[BaseException, None], tb: TracebackType) -> None: ... # PYI036: Third arg has bad annotation
async def __aexit__(self, __typ: type[BaseException] | None, __exc: BaseException | None, __tb: typing.Union[TracebackType, None, int]) -> bool | None: ... # PYI036: Third arg has bad annotation
class BadFive:
def __exit__(self, typ: BaseException | None, *args: list[str]) -> bool: ... # PYI036: Bad star-args annotation
async def __aexit__(self, /, typ: type[BaseException] | None, *args: Any) -> Awaitable[None]: ... # PYI036: Bad star-args annotation
class BadSix:
def __exit__(self, typ, exc, tb, weird_extra_arg, extra_arg2 = None) -> None: ... # PYI036: Extra arg must have default
async def __aexit__(self, typ, exc, tb, *, weird_extra_arg) -> None: ... # PYI036: kwargs must have default

View File

@ -0,0 +1,7 @@
# Bad import.
from __future__ import annotations # Not PYI044 (not a stubfile).
# Good imports.
from __future__ import Something
import sys
from socket import AF_INET

View File

@ -0,0 +1,7 @@
# Bad import.
from __future__ import annotations # PYI044.
# Good imports.
from __future__ import Something
import sys
from socket import AF_INET

View File

@ -36,3 +36,11 @@ bar: str = "51 character stringgggggggggggggggggggggggggggggggg"
baz: bytes = b"50 character byte stringgggggggggggggggggggggggggg"
qux: bytes = b"51 character byte stringggggggggggggggggggggggggggg\xff"
class Demo:
"""Docstrings are excluded from this rule. Some padding."""
def func() -> None:
"""Docstrings are excluded from this rule. Some padding."""

View File

@ -28,3 +28,9 @@ bar: str = "51 character stringgggggggggggggggggggggggggggggggg" # Error: PYI05
baz: bytes = b"50 character byte stringgggggggggggggggggggggggggg" # OK
qux: bytes = b"51 character byte stringggggggggggggggggggggggggggg\xff" # Error: PYI053
class Demo:
"""Docstrings are excluded from this rule. Some padding.""" # OK
def func() -> None:
"""Docstrings are excluded from this rule. Some padding.""" # OK

View File

@ -1,17 +1,25 @@
import pytest
def test_xxx():
pytest.fail("this is a failure") # Test OK arg
# OK
def f():
pytest.fail("this is a failure")
def test_xxx():
pytest.fail(msg="this is a failure") # Test OK kwarg
def f():
pytest.fail(msg="this is a failure")
def test_xxx(): # Error
def f():
pytest.fail(reason="this is a failure")
# Errors
def f():
pytest.fail()
pytest.fail("")
pytest.fail(f"")
pytest.fail(msg="")
pytest.fail(msg=f"")
pytest.fail(reason="")
pytest.fail(reason=f"")

View File

@ -21,6 +21,13 @@ def test_error():
assert something and something_else == """error
message
"""
assert (
something
and something_else
== """error
message
"""
)
# recursive case
assert not (a or not (b or c))
@ -31,14 +38,6 @@ def test_error():
assert not (something or something_else and something_third), "with message"
# detected, but no autofix for mixed conditions (e.g. `a or b and c`)
assert not (something or something_else and something_third)
# detected, but no autofix for parenthesized conditions
assert (
something
and something_else
== """error
message
"""
)
assert something # OK

View File

@ -29,6 +29,26 @@ raise TypeError(
# Hello, world!
)
# OK
raise AssertionError
# OK
raise AttributeError("test message")
def return_error():
return ValueError("Something")
# OK
raise return_error()
class Class:
@staticmethod
def error():
return ValueError("Something")
# OK
raise Class.error()

View File

@ -171,3 +171,17 @@ def f():
if x.isdigit():
return True
return False
async def f():
# OK
for x in iterable:
if await check(x):
return True
return False
async def f():
# SIM110
for x in iterable:
if check(x):
return True
return False
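An aside on the SIM110 case above (a sketch of the usual rewrite, not fixture content): the flagged loop collapses to `any()`, while the awaited variant cannot be expressed that way, which is why it is marked OK.
def f():
    # equivalent of the SIM110-flagged loop above, using the fixture's own names
    return any(check(x) for x in iterable)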

View File

@ -33,17 +33,17 @@ with A() as a:
print("hello")
a()
# OK
# OK, can't merge async with and with.
async with A() as a:
with B() as b:
print("hello")
# OK
# OK, can't merge async with and with.
with A() as a:
async with B() as b:
print("hello")
# OK
# SIM117
async with A() as a:
async with B() as b:
print("hello")
@ -100,3 +100,24 @@ with A("01ß9💣28901ß9💣28901ß9💣289") as a:
with A("01ß9💣28901ß9💣28901ß9💣2890") as a:
with B("01ß9💣28901ß9💣28901ß9💣289") as b:
print("hello")
# From issue #3025.
async def main():
async with A() as a: # SIM117.
async with B() as b:
print("async-inside!")
return 0
# OK. Can't merge across different kinds of with statements.
with a as a2:
async with b as b2:
with c as c2:
async with d as d2:
f(a2, b2, c2, d2)
# OK. Can't merge across different kinds of with statements.
async with b as b2:
with c as c2:
async with d as d2:
f(b2, c2, d2)

View File

@ -1,6 +1,12 @@
# T002 - accepted
# TODO (evanrittenhouse): this has an author
# TODO(evanrittenhouse): this also has an author
# TODO(evanrittenhouse): this has an author
# TODO (evanrittenhouse) and more: this has an author
# TODO(evanrittenhouse) and more: this has an author
# TODO@mayrholu: this has an author
# TODO @mayrholu: this has an author
# TODO@mayrholu and more: this has an author
# TODO @mayrholu and more: this has an author
# T002 - errors
# TODO: this has no author
# FIXME: neither does this

View File

@ -164,3 +164,11 @@ def f():
)
x: DataFrame = 2
def f():
global Member
from module import Member
x: Member = 1

View File

@ -0,0 +1,9 @@
import A
import B
import b
import C
import d
import E
import f
from g import a, B, c
from h import A, b, C

View File

@ -26,3 +26,9 @@ def f():
import os # isort:skip
import collections
import abc
def f():
import sys; import os # isort:skip
import sys; import os # isort:skip # isort:skip
import sys; import os

View File

@ -19,3 +19,13 @@ if True:
import D
import B
import e
import f
# isort: split
# isort: split
import d
import c

View File

@ -0,0 +1,37 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "1",
"metadata": {},
"outputs": [],
"source": [
"import math\n",
"\n",
"math.pi"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python (ruff)",
"language": "python",
"name": "ruff"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,38 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "1",
"metadata": {},
"outputs": [],
"source": [
"import math\n",
"import os\n",
"\n",
"math.pi"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python (ruff)",
"language": "python",
"name": "ruff"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,8 @@
{
"execution_count": null,
"cell_type": "code",
"id": "1",
"metadata": {},
"outputs": [],
"source": ["def foo():\n", " pass\n", "\n", "%timeit foo()"]
}

View File

@ -0,0 +1,6 @@
{
"cell_type": "markdown",
"id": "1",
"metadata": {},
"source": ["This is a markdown cell\n", "Some more content"]
}

View File

@ -0,0 +1,8 @@
{
"execution_count": null,
"cell_type": "code",
"id": "1",
"metadata": {},
"outputs": [],
"source": ["def foo():\n", " pass"]
}

View File

@ -0,0 +1,8 @@
{
"execution_count": null,
"cell_type": "code",
"id": "1",
"metadata": {},
"outputs": [],
"source": "%timeit print('hello world')"
}

View File

@ -0,0 +1,51 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "0c7535f6-43cb-423f-bfe1-d263b8f55da0",
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"import random\n",
"import math"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c066fa1a-5682-47af-8c17-5afec3cf4ad0",
"metadata": {},
"outputs": [],
"source": [
"from typing import Any\n",
"import collections\n",
"# Newline should be added here\n",
"def foo():\n",
" pass"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python (ruff)",
"language": "python",
"name": "ruff"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,53 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "663ba955-baca-4f34-9ebb-840d2573ae3f",
"metadata": {},
"outputs": [],
"source": [
"import math\n",
"import random\n",
"from pathlib import Path"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d0adfe23-8aea-47e9-bf67-d856cfcb96ea",
"metadata": {},
"outputs": [],
"source": [
"import collections\n",
"from typing import Any\n",
"\n",
"\n",
"# Newline should be added here\n",
"def foo():\n",
" pass"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python (ruff)",
"language": "python",
"name": "ruff"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -0,0 +1,38 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "4cec6161-f594-446c-ab65-37395bbb3127",
"metadata": {},
"outputs": [],
"source": [
"import math\n",
"import os\n",
"\n",
"_ = math.pi"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python (ruff)",
"language": "python",
"name": "ruff"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

View File

@ -3,6 +3,16 @@
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"ExecuteTime": {
"end_time": "2023-03-08T23:01:09.782916Z",
"start_time": "2023-03-08T23:01:09.705831Z"
},
"collapsed": false,
"jupyter": {
"outputs_hidden": false
}
},
"outputs": [
{
"name": "stdout",
@ -19,32 +29,26 @@
" print(f\"cell one: {y}\")\n",
"\n",
"unused_variable()"
],
"metadata": {
"collapsed": false,
"ExecuteTime": {
"start_time": "2023-03-08T23:01:09.705831Z",
"end_time": "2023-03-08T23:01:09.782916Z"
}
}
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's do another mistake"
],
"metadata": {
"collapsed": false
}
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": true,
"ExecuteTime": {
"start_time": "2023-03-08T23:01:09.733809Z",
"end_time": "2023-03-08T23:01:09.915760Z"
"end_time": "2023-03-08T23:01:09.915760Z",
"start_time": "2023-03-08T23:01:09.733809Z"
},
"collapsed": true,
"jupyter": {
"outputs_hidden": true
}
},
"outputs": [
@ -62,27 +66,66 @@
"\n",
"mutable_argument()\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Let's create an empty cell"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Multi-line empty cell!"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"after empty cells\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"display_name": "Python (ruff)",
"language": "python",
"name": "python3"
"name": "ruff"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 2
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython2",
"version": "2.7.6"
"pygments_lexer": "ipython3",
"version": "3.11.3"
}
},
"nbformat": 4,
"nbformat_minor": 0
"nbformat_minor": 4
}

View File

@ -1,5 +1,6 @@
# Do this (new version)
from numpy.random import default_rng
rng = default_rng()
vals = rng.standard_normal(10)
more_vals = rng.standard_normal(10)
@ -7,11 +8,13 @@ numbers = rng.integers(high, size=5)
# instead of this (legacy version)
from numpy import random
vals = random.standard_normal(10)
more_vals = random.standard_normal(10)
numbers = random.integers(high, size=5)
import numpy
numpy.random.seed()
numpy.random.get_state()
numpy.random.set_state()
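An aside on the legacy module-level calls above (a sketch assuming the NumPy Generator API, not fixture content): seeding moves to construction, and state handling maps to the bit generator's `state` attribute.
import numpy as np
rng = np.random.default_rng(42)   # rather than numpy.random.seed(42)
state = rng.bit_generator.state   # rather than numpy.random.get_state()
rng.bit_generator.state = state   # rather than numpy.random.set_state(state)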

View File

@ -0,0 +1,15 @@
import numpy as np
np.round_(np.random.rand(5, 5), 2)
np.product(np.random.rand(5, 5))
np.cumproduct(np.random.rand(5, 5))
np.sometrue(np.random.rand(5, 5))
np.alltrue(np.random.rand(5, 5))
from numpy import round_, product, cumproduct, sometrue, alltrue
round_(np.random.rand(5, 5), 2)
product(np.random.rand(5, 5))
cumproduct(np.random.rand(5, 5))
sometrue(np.random.rand(5, 5))
alltrue(np.random.rand(5, 5))
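An aside on the deprecated aliases exercised above (a sketch, not fixture content), showing their non-deprecated spellings.
import numpy as np
np.round(np.random.rand(5, 5), 2)  # np.round_     -> np.round
np.prod(np.random.rand(5, 5))      # np.product    -> np.prod
np.cumprod(np.random.rand(5, 5))   # np.cumproduct -> np.cumprod
np.any(np.random.rand(5, 5))       # np.sometrue   -> np.any
np.all(np.random.rand(5, 5))       # np.alltrue    -> np.all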

View File

@ -4,7 +4,9 @@ x = pd.DataFrame()
x.drop(["a"], axis=1, inplace=True)
x.drop(["a"], axis=1, inplace=True)
x.y.drop(["a"], axis=1, inplace=True)
x["y"].drop(["a"], axis=1, inplace=True)
x.drop(
inplace=True,
@ -23,6 +25,7 @@ x.drop(["a"], axis=1, **kwargs, inplace=True)
x.drop(["a"], axis=1, inplace=True, **kwargs)
f(x.drop(["a"], axis=1, inplace=True))
x.apply(lambda x: x.sort_values('a', inplace=True))
x.apply(lambda x: x.sort_values("a", inplace=True))
import torch
torch.m.ReLU(inplace=True) # safe because this isn't a pandas call

View File

@ -1,4 +1,4 @@
from abc import ABCMeta
import abc
import pydantic
@ -19,6 +19,10 @@ class Class:
def class_method(cls):
pass
@abc.abstractclassmethod
def abstract_class_method(cls):
pass
@staticmethod
def static_method(x):
return x
@ -41,7 +45,7 @@ class Class:
...
class MetaClass(ABCMeta):
class MetaClass(abc.ABCMeta):
def bad_method(self):
pass

View File

@ -0,0 +1,11 @@
class badAllowed:
pass
class stillBad:
pass
class BAD_ALLOWED:
pass
class STILL_BAD:
pass

View File

@ -0,0 +1,14 @@
import unittest
def badAllowed():
pass
def stillBad():
pass
class Test(unittest.TestCase):
def badAllowed(self):
return super().tearDown()
def stillBad(self):
return super().tearDown()

View File

@ -0,0 +1,12 @@
def func(_, a, badAllowed):
return _, a, badAllowed
def func(_, a, stillBad):
return _, a, stillBad
class Class:
def method(self, _, a, badAllowed):
return _, a, badAllowed
def method(self, _, a, stillBad):
return _, a, stillBad

View File

@ -0,0 +1,22 @@
from abc import ABCMeta
class Class:
def __init_subclass__(self, default_name, **kwargs):
...
@classmethod
def badAllowed(self, x, /, other):
...
@classmethod
def stillBad(self, x, /, other):
...
class MetaClass(ABCMeta):
def badAllowed(self):
pass
def stillBad(self):
pass

View File

@ -0,0 +1,59 @@
import abc
import pydantic
class Class:
def badAllowed(this):
pass
def stillBad(this):
pass
if False:
def badAllowed(this):
pass
def stillBad(this):
pass
@pydantic.validator
def badAllowed(cls, my_field: str) -> str:
pass
@pydantic.validator
def stillBad(cls, my_field: str) -> str:
pass
@pydantic.validator("my_field")
def badAllowed(cls, my_field: str) -> str:
pass
@pydantic.validator("my_field")
def stillBad(cls, my_field: str) -> str:
pass
@classmethod
def badAllowed(cls):
pass
@classmethod
def stillBad(cls):
pass
@abc.abstractclassmethod
def badAllowed(cls):
pass
@abc.abstractclassmethod
def stillBad(cls):
pass
class PosOnlyClass:
def badAllowed(this, blah, /, self, something: str):
pass
def stillBad(this, blah, /, self, something: str):
pass

View File

@ -0,0 +1,6 @@
def assign():
badAllowed = 0
stillBad = 0
BAD_ALLOWED = 0
STILL_BAD = 0

View File

@ -0,0 +1,13 @@
def __badAllowed__():
pass
def __stillBad__():
pass
def nested():
def __badAllowed__():
pass
def __stillBad__():
pass

View File

@ -0,0 +1,5 @@
import mod.BAD_ALLOWED as badAllowed
import mod.STILL_BAD as stillBad
from mod import BAD_ALLOWED as badAllowed
from mod import STILL_BAD as stillBad

View File

@ -0,0 +1,5 @@
import mod.badallowed as badAllowed
import mod.stillbad as stillBad
from mod import badallowed as BadAllowed
from mod import stillbad as StillBad

View File

@ -0,0 +1,8 @@
import mod.BadAllowed as badallowed
import mod.stillBad as stillbad
from mod import BadAllowed as badallowed
from mod import StillBad as stillbad
from mod import BadAllowed as bad_allowed
from mod import StillBad as still_bad

View File

@ -0,0 +1,8 @@
import mod.BadAllowed as BADALLOWED
import mod.StillBad as STILLBAD
from mod import BadAllowed as BADALLOWED
from mod import StillBad as STILLBAD
from mod import BadAllowed as BAD_ALLOWED
from mod import StillBad as STILL_BAD

View File

@ -0,0 +1,19 @@
class C:
badAllowed = 0
stillBad = 0
_badAllowed = 0
_stillBad = 0
bad_Allowed = 0
still_Bad = 0
class D(TypedDict):
badAllowed: bool
stillBad: bool
_badAllowed: list
_stillBad: list
bad_Allowed: set
still_Bad: set

View File

@ -0,0 +1,8 @@
badAllowed = 0
stillBad = 0
_badAllowed = 0
_stillBad = 0
bad_Allowed = 0
still_Bad = 0

Some files were not shown because too many files have changed in this diff.