[ruff] Detect duplicate entries in __all__ (RUF068) (#22114)

Hello,

This PR adds a new rule and its fix, `RUF068`,
`DuplicateEntryInDunderAll`. The code was picked around the rule codes
already claimed by in-flight work in
[#20585](https://github.com/astral-sh/ruff/pull/20585) and
[#21079](https://github.com/astral-sh/ruff/pull/21079#issuecomment-3493839453).

The rule's job is to prevent users from accidentally adding duplicate
entries to `__all__`, which can result from copy-paste mistakes, for
example.

It handles the following forms:

```python
__all__: list[str] = ["a", "a"]
__all__: typing.Any = ("a", "a")
__all__.extend(["a", "a"])
__all__ += ["a", "a"]
```

However, it does not track the accumulated contents of `__all__` across
statements, so the following snippet is a false negative (a sketch of
this per-statement scope follows the snippet):
```python
class A: ...

__all__ = ["A"]
__all__.extend(["A"])
```
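
To make the limitation concrete, here is a minimal Python sketch of the per-statement check (my illustration, not the actual Rust implementation). Each assignment or `extend` call is examined in isolation, so duplicates that only exist across statements are never seen.

```python
# Minimal sketch of the per-statement check (illustration only; the real rule
# walks AST nodes in Rust). Only string literals within a single statement's
# list/tuple are compared against each other.
def duplicates_in_statement(elements: list[object]) -> list[str]:
    seen: set[str] = set()
    duplicates: list[str] = []
    for element in elements:
        if not isinstance(element, str):
            continue  # non-literal entries such as bare names are ignored
        if element in seen:
            duplicates.append(element)
        seen.add(element)
    return duplicates


# Each statement is checked in isolation, so the snippet above is not flagged:
print(duplicates_in_statement(["A"]))  # [] for `__all__ = ["A"]`
print(duplicates_in_statement(["A"]))  # [] for `__all__.extend(["A"])`
```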

## Violation Example

```console
RUF068 [*] `__all__` contains duplicate entries
 --> RUF068.py:1:17
  |
1 | __all__ = ["A", "A", "B"]
  |                 ^^^
  |
help: Remove duplicate entries from `__all__`
  - __all__ = ["A", "A", "B"]
1 + __all__ = ["A", "B"]
```

## Ecosystem Report

The `ruff-ecosystem` results contain seven violations across four
projects, all of which appear to be true positives; one instance looks
like an actual bug.

This [code
snippet](90d855985b/stubs/reportlab/reportlab/lib/rltempfile.pyi (L4))
from `reportlab` contains the same entry twice instead of exporting both
functions.

```python
def get_rl_tempdir(*subdirs: str) -> str: ...
def get_rl_tempfile(fn: str | None = None) -> str: ...

__all__ = ("get_rl_tempdir", "get_rl_tempdir")
```
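
Presumably the intended tuple (my reading of the stub, not confirmed upstream) exports both names:

```python
__all__ = ("get_rl_tempdir", "get_rl_tempfile")
```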

Closes [#21945](https://github.com/astral-sh/ruff/issues/21945)

---------

Co-authored-by: Brent Westbrook <brentrwestbrook@gmail.com>

@@ -0,0 +1,41 @@ (new test fixture `RUF068.py`)
```python
import typing


class A: ...


class B: ...


# Good
__all__ = "A" + "B"
__all__: list[str] = ["A", "B"]
__all__: typing.Any = ("A", "B")
__all__ = ["A", "B"]
__all__ = [A, "B", "B"]
__all__ += ["A", "B"]
__all__.extend(["A", "B"])

# Bad
__all__: list[str] = ["A", "B", "A"]
__all__: typing.Any = ("A", "B", "B")
__all__ = ["A", "B", "A"]
__all__ = ["A", "A", "B", "B"]
__all__ = [
    "A",
    "A",
    "B",
    "B"
]
__all__ += ["B", "B"]
__all__.extend(["B", "B"])

# Bad, unsafe
__all__ = [
    "A",
    "A",
    "B",
    # Comment
    "B", # 2
    # 3
]
```

```diff
@@ -1245,6 +1245,9 @@ pub(crate) fn expression(expr: &Expr, checker: &Checker) {
     if checker.is_rule_enabled(Rule::UnsortedDunderAll) {
         ruff::rules::sort_dunder_all_extend_call(checker, call);
     }
+    if checker.is_rule_enabled(Rule::DuplicateEntryInDunderAll) {
+        ruff::rules::duplicate_entry_in_dunder_all_extend_call(checker, call);
+    }
     if checker.is_rule_enabled(Rule::DefaultFactoryKwarg) {
         ruff::rules::default_factory_kwarg(checker, call);
     }
```

```diff
@@ -966,6 +966,9 @@ pub(crate) fn statement(stmt: &Stmt, checker: &mut Checker) {
     if checker.is_rule_enabled(Rule::UnsortedDunderAll) {
         ruff::rules::sort_dunder_all_aug_assign(checker, aug_assign);
     }
+    if checker.is_rule_enabled(Rule::DuplicateEntryInDunderAll) {
+        ruff::rules::duplicate_entry_in_dunder_all_aug_assign(checker, aug_assign);
+    }
 }
 Stmt::If(
     if_ @ ast::StmtIf {
@@ -1434,6 +1437,9 @@ pub(crate) fn statement(stmt: &Stmt, checker: &mut Checker) {
     if checker.is_rule_enabled(Rule::UnsortedDunderAll) {
         ruff::rules::sort_dunder_all_assign(checker, assign);
     }
+    if checker.is_rule_enabled(Rule::DuplicateEntryInDunderAll) {
+        ruff::rules::duplicate_entry_in_dunder_all_assign(checker, assign);
+    }
     if checker.source_type.is_stub() {
         if checker.any_rule_enabled(&[
             Rule::UnprefixedTypeParam,
@@ -1525,6 +1531,9 @@ pub(crate) fn statement(stmt: &Stmt, checker: &mut Checker) {
     if checker.is_rule_enabled(Rule::UnsortedDunderAll) {
         ruff::rules::sort_dunder_all_ann_assign(checker, assign_stmt);
     }
+    if checker.is_rule_enabled(Rule::DuplicateEntryInDunderAll) {
+        ruff::rules::duplicate_entry_in_dunder_all_ann_assign(checker, assign_stmt);
+    }
     if checker.source_type.is_stub() {
         if let Some(value) = value {
             if checker.is_rule_enabled(Rule::AssignmentDefaultInStub) {
```

```diff
@@ -1061,6 +1061,7 @@ pub fn code_to_rule(linter: Linter, code: &str) -> Option<(RuleGroup, Rule)> {
         (Ruff, "065") => rules::ruff::rules::LoggingEagerConversion,
         (Ruff, "066") => rules::ruff::rules::PropertyWithoutReturn,
         (Ruff, "067") => rules::ruff::rules::NonEmptyInitModule,
+        (Ruff, "068") => rules::ruff::rules::DuplicateEntryInDunderAll,
         (Ruff, "100") => rules::ruff::rules::UnusedNOQA,
         (Ruff, "101") => rules::ruff::rules::RedirectedNOQA,
```

```diff
@@ -284,6 +284,46 @@ pub(crate) fn add_argument(argument: &str, arguments: &Arguments, tokens: &Token
     }
 }
 
+/// Remove the member at the given index from a sequence of expressions.
+pub(crate) fn remove_member(elts: &[ast::Expr], index: usize, source: &str) -> Result<Edit> {
+    if index < elts.len() - 1 {
+        // Case 1: the expression is _not_ the last node, so delete from the start of the
+        // expression to the end of the subsequent comma.
+        // Ex) Delete `"a"` in `{"a", "b", "c"}`.
+        let mut tokenizer = SimpleTokenizer::starts_at(elts[index].end(), source);
+
+        // Find the trailing comma.
+        tokenizer
+            .find(|token| token.kind == SimpleTokenKind::Comma)
+            .context("Unable to find trailing comma")?;
+
+        // Find the next non-whitespace token.
+        let next = tokenizer
+            .find(|token| {
+                token.kind != SimpleTokenKind::Whitespace && token.kind != SimpleTokenKind::Newline
+            })
+            .context("Unable to find next token")?;
+
+        Ok(Edit::deletion(elts[index].start(), next.start()))
+    } else if index > 0 {
+        // Case 2: the expression is the last node, but not the _only_ node, so delete from the
+        // start of the previous comma to the end of the expression.
+        // Ex) Delete `"c"` in `{"a", "b", "c"}`.
+        let mut tokenizer = SimpleTokenizer::starts_at(elts[index - 1].end(), source);
+
+        // Find the trailing comma.
+        let comma = tokenizer
+            .find(|token| token.kind == SimpleTokenKind::Comma)
+            .context("Unable to find trailing comma")?;
+
+        Ok(Edit::deletion(comma.start(), elts[index].end()))
+    } else {
+        // Case 3: expression is the only node, so delete it.
+        // Ex) Delete `"a"` in `{"a"}`.
+        Ok(Edit::range_deletion(elts[index].range()))
+    }
+}
+
 /// Generic function to add a (regular) parameter to a function definition.
 pub(crate) fn add_parameter(parameter: &str, parameters: &Parameters, source: &str) -> Edit {
     if let Some(last) = parameters.args.iter().rfind(|arg| arg.default.is_none()) {
```
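
To illustrate the three cases, here is a rough Python analogue (illustration only, assuming each element's `(start, end)` offsets are already known; the real implementation tokenizes the source to locate the commas):

```python
# Rough Python analogue of `remove_member` (illustration only).
# `spans` holds each element's (start, end) offsets into `source`.
def remove_member(source: str, spans: list[tuple[int, int]], index: int) -> str:
    start, end = spans[index]
    if index < len(spans) - 1:
        # Case 1: not the last element -- delete through to the start of the
        # next element, taking the trailing comma and whitespace with it.
        next_start, _ = spans[index + 1]
        return source[:start] + source[next_start:]
    elif index > 0:
        # Case 2: last element, but not the only one -- delete from the
        # preceding comma to the end of the element.
        _, prev_end = spans[index - 1]
        comma = source.index(",", prev_end)
        return source[:comma] + source[end:]
    else:
        # Case 3: only element -- delete just the element itself.
        return source[:start] + source[end:]


src = '{"a", "b", "c"}'
spans = [(1, 4), (6, 9), (11, 14)]
print(remove_member(src, spans, 0))  # {"b", "c"}
print(remove_member(src, spans, 2))  # {"a", "b"}
```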

```diff
@@ -1,15 +1,15 @@
-use anyhow::{Context, Result};
+use ruff_diagnostics::Fix;
 use rustc_hash::FxHashMap;
 
 use ruff_macros::{ViolationMetadata, derive_message_formats};
 use ruff_python_ast as ast;
 use ruff_python_ast::Expr;
 use ruff_python_ast::comparable::HashableExpr;
-use ruff_python_trivia::{SimpleTokenKind, SimpleTokenizer};
 use ruff_text_size::Ranged;
 
 use crate::checkers::ast::Checker;
-use crate::{Edit, Fix, FixAvailability, Violation};
+use crate::fix::edits;
+use crate::{FixAvailability, Violation};
 
 /// ## What it does
 /// Checks for set literals that contain duplicate items.
@@ -70,49 +70,10 @@ pub(crate) fn duplicate_value(checker: &Checker, set: &ast::ExprSet) {
                 );
                 diagnostic.try_set_fix(|| {
-                    remove_member(set, index, checker.locator().contents()).map(Fix::safe_edit)
+                    edits::remove_member(&set.elts, index, checker.locator().contents())
+                        .map(Fix::safe_edit)
                 });
             }
         }
     }
 }
-
-/// Remove the member at the given index from the [`ast::ExprSet`].
-fn remove_member(set: &ast::ExprSet, index: usize, source: &str) -> Result<Edit> {
-    if index < set.len() - 1 {
-        // Case 1: the expression is _not_ the last node, so delete from the start of the
-        // expression to the end of the subsequent comma.
-        // Ex) Delete `"a"` in `{"a", "b", "c"}`.
-        let mut tokenizer = SimpleTokenizer::starts_at(set.elts[index].end(), source);
-
-        // Find the trailing comma.
-        tokenizer
-            .find(|token| token.kind == SimpleTokenKind::Comma)
-            .context("Unable to find trailing comma")?;
-
-        // Find the next non-whitespace token.
-        let next = tokenizer
-            .find(|token| {
-                token.kind != SimpleTokenKind::Whitespace && token.kind != SimpleTokenKind::Newline
-            })
-            .context("Unable to find next token")?;
-
-        Ok(Edit::deletion(set.elts[index].start(), next.start()))
-    } else if index > 0 {
-        // Case 2: the expression is the last node, but not the _only_ node, so delete from the
-        // start of the previous comma to the end of the expression.
-        // Ex) Delete `"c"` in `{"a", "b", "c"}`.
-        let mut tokenizer = SimpleTokenizer::starts_at(set.elts[index - 1].end(), source);
-
-        // Find the trailing comma.
-        let comma = tokenizer
-            .find(|token| token.kind == SimpleTokenKind::Comma)
-            .context("Unable to find trailing comma")?;
-
-        Ok(Edit::deletion(comma.start(), set.elts[index].end()))
-    } else {
-        // Case 3: expression is the only node, so delete it.
-        // Ex) Delete `"a"` in `{"a"}`.
-        Ok(Edit::range_deletion(set.elts[index].range()))
-    }
-}
```

```diff
@@ -117,6 +117,7 @@ mod tests {
     #[test_case(Rule::LoggingEagerConversion, Path::new("RUF065_0.py"))]
     #[test_case(Rule::LoggingEagerConversion, Path::new("RUF065_1.py"))]
     #[test_case(Rule::PropertyWithoutReturn, Path::new("RUF066.py"))]
+    #[test_case(Rule::DuplicateEntryInDunderAll, Path::new("RUF068.py"))]
     #[test_case(Rule::RedirectedNOQA, Path::new("RUF101_0.py"))]
     #[test_case(Rule::RedirectedNOQA, Path::new("RUF101_1.py"))]
     #[test_case(Rule::InvalidRuleCode, Path::new("RUF102.py"))]
```

@@ -0,0 +1,180 @@
```rust
use rustc_hash::{FxBuildHasher, FxHashMap};

use ruff_diagnostics::{Applicability, Fix};
use ruff_macros::{ViolationMetadata, derive_message_formats};
use ruff_python_ast as ast;
use ruff_text_size::Ranged;

use crate::checkers::ast::Checker;
use crate::fix::edits;
use crate::{FixAvailability, Violation};

/// ## What it does
/// Detects duplicate elements in `__all__` definitions.
///
/// ## Why is this bad?
/// Duplicate elements in `__all__` serve no purpose and can indicate copy-paste errors or
/// incomplete refactoring.
///
/// ## Example
/// ```python
/// __all__ = [
///     "DatabaseConnection",
///     "Product",
///     "User",
///     "DatabaseConnection",  # Duplicate
/// ]
/// ```
///
/// Use instead:
/// ```python
/// __all__ = [
///     "DatabaseConnection",
///     "Product",
///     "User",
/// ]
/// ```
///
/// ## Fix Safety
/// This rule's fix is marked as unsafe if the replacement would remove comments attached to the
/// original expression, potentially losing important context or documentation.
///
/// For example:
/// ```python
/// __all__ = [
///     "PublicAPI",
///     # TODO: Remove this in v2.0
///     "PublicAPI",  # Deprecated alias
/// ]
/// ```
#[derive(ViolationMetadata)]
#[violation_metadata(preview_since = "0.14.14")]
pub(crate) struct DuplicateEntryInDunderAll;

impl Violation for DuplicateEntryInDunderAll {
    const FIX_AVAILABILITY: FixAvailability = FixAvailability::Sometimes;

    #[derive_message_formats]
    fn message(&self) -> String {
        "`__all__` contains duplicate entries".to_string()
    }

    fn fix_title(&self) -> Option<String> {
        Some("Remove duplicate entries from `__all__`".to_string())
    }
}

/// Apply RUF068 to `StmtAssign` AST node. For example: `__all__ = ["a", "b", "a"]`.
pub(crate) fn duplicate_entry_in_dunder_all_assign(
    checker: &Checker,
    ast::StmtAssign { value, targets, .. }: &ast::StmtAssign,
) {
    if let [expr] = targets.as_slice() {
        duplicate_entry_in_dunder_all(checker, expr, value);
    }
}

/// Apply RUF068 to `StmtAugAssign` AST node. For example: `__all__ += ["a", "b", "a"]`.
pub(crate) fn duplicate_entry_in_dunder_all_aug_assign(
    checker: &Checker,
    node: &ast::StmtAugAssign,
) {
    if node.op.is_add() {
        duplicate_entry_in_dunder_all(checker, &node.target, &node.value);
    }
}

/// Apply RUF068 to `__all__.extend()`.
pub(crate) fn duplicate_entry_in_dunder_all_extend_call(
    checker: &Checker,
    ast::ExprCall {
        func,
        arguments: ast::Arguments { args, keywords, .. },
        ..
    }: &ast::ExprCall,
) {
    let ([value_passed], []) = (&**args, &**keywords) else {
        return;
    };

    let ast::Expr::Attribute(ast::ExprAttribute { value, attr, .. }) = &**func else {
        return;
    };

    if attr == "extend" {
        duplicate_entry_in_dunder_all(checker, value, value_passed);
    }
}

/// Apply RUF068 to a `StmtAnnAssign` AST node.
/// For example: `__all__: list[str] = ["a", "b", "a"]`.
pub(crate) fn duplicate_entry_in_dunder_all_ann_assign(
    checker: &Checker,
    node: &ast::StmtAnnAssign,
) {
    if let Some(value) = &node.value {
        duplicate_entry_in_dunder_all(checker, &node.target, value);
    }
}

/// RUF068
/// This routine checks whether `__all__` contains duplicated entries, and emits
/// a violation if it does.
fn duplicate_entry_in_dunder_all(checker: &Checker, target: &ast::Expr, value: &ast::Expr) {
    let ast::Expr::Name(ast::ExprName { id, .. }) = target else {
        return;
    };

    if id != "__all__" {
        return;
    }

    // We're only interested in `__all__` in the global scope
    if !checker.semantic().current_scope().kind.is_module() {
        return;
    }

    let elts = match value {
        ast::Expr::List(ast::ExprList { elts, .. }) => elts,
        ast::Expr::Tuple(ast::ExprTuple { elts, .. }) => elts,
        _ => return,
    };

    // It's impossible to have duplicates if there is one or no element
    if elts.len() <= 1 {
        return;
    }

    let mut deduplicated_elts = FxHashMap::with_capacity_and_hasher(elts.len(), FxBuildHasher);
    let source = checker.locator().contents();

    for (index, expr) in elts.iter().enumerate() {
        let Some(string_value) = expr.as_string_literal_expr() else {
            // In the example below we're ignoring `foo`:
            // __all__ = [foo, "bar", "bar"]
            continue;
        };

        let name = string_value.value.to_str();

        if let Some(previous_expr) = deduplicated_elts.insert(name, expr) {
            let mut diagnostic = checker.report_diagnostic(DuplicateEntryInDunderAll, expr.range());
            diagnostic.secondary_annotation(
                format_args!("previous occurrence of `{name}` here"),
                previous_expr,
            );
            diagnostic.set_primary_message(format_args!("`{name}` duplicated here"));
            diagnostic.try_set_fix(|| {
                edits::remove_member(elts, index, source).map(|edit| {
                    let applicability = if checker.comment_ranges().intersects(edit.range()) {
                        Applicability::Unsafe
                    } else {
                        Applicability::Safe
                    };
                    Fix::applicable_edit(edit, applicability)
                })
            });
        }
    }
}
```
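
The core of the routine is the `FxHashMap::insert` call: inserting a name that is already present returns the previous expression, which becomes the "previous occurrence" secondary annotation. Below is a minimal Python sketch of that detection pass and of the applicability decision (my paraphrase, not the shipped code):

```python
# Sketch of the detection pass: the dict maps each string value to the index
# of its most recent occurrence, mirroring `FxHashMap::insert`, which returns
# the previous value when the key is already present.
def find_duplicates(elts: list[object]) -> list[tuple[int, int]]:
    seen: dict[str, int] = {}
    duplicates: list[tuple[int, int]] = []
    for index, elt in enumerate(elts):
        if not isinstance(elt, str):
            continue  # mirrors skipping non-string-literal elements
        previous = seen.get(elt)
        seen[elt] = index
        if previous is not None:
            duplicates.append((previous, index))  # (previous occurrence, duplicate)
    return duplicates


assert find_duplicates(["A", "A", "B"]) == [(0, 1)]
assert find_duplicates(["A", "A", "A"]) == [(0, 1), (1, 2)]


# Sketch of the applicability decision: the fix is downgraded to unsafe when
# the deleted range overlaps any comment, since that comment would be lost.
def applicability(edit: tuple[int, int], comments: list[tuple[int, int]]) -> str:
    start, end = edit
    return "unsafe" if any(s < end and start < e for s, e in comments) else "safe"
```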

```diff
@@ -8,6 +8,7 @@ pub(crate) use collection_literal_concatenation::*;
 pub(crate) use dataclass_enum::*;
 pub(crate) use decimal_from_float_literal::*;
 pub(crate) use default_factory_kwarg::*;
+pub(crate) use duplicate_entry_in_dunder_all::*;
 pub(crate) use explicit_f_string_type_conversion::*;
 pub(crate) use falsy_dict_get_fallback::*;
 pub(crate) use function_call_in_dataclass_default::*;
@@ -76,6 +77,7 @@ mod confusables;
 mod dataclass_enum;
 mod decimal_from_float_literal;
 mod default_factory_kwarg;
+mod duplicate_entry_in_dunder_all;
 mod explicit_f_string_type_conversion;
 mod falsy_dict_get_fallback;
 mod function_call_in_dataclass_default;
```

@@ -0,0 +1,263 @@
```
---
source: crates/ruff_linter/src/rules/ruff/mod.rs
---
RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:15:15
   |
13 | __all__: typing.Any = ("A", "B")
14 | __all__ = ["A", "B"]
15 | __all__ = [A, "B", "B"]
   |            ---  ^^^ `B` duplicated here
   |            |
   |            previous occurrence of `B` here
16 | __all__ += ["A", "B"]
17 | __all__.extend(["A", "B"])
   |
help: Remove duplicate entries from `__all__`
12 | __all__: list[str] = ["A", "B"]
13 | __all__: typing.Any = ("A", "B")
14 | __all__ = ["A", "B"]
   - __all__ = [A, "B", "B"]
15 + __all__ = [A, "B"]
16 | __all__ += ["A", "B"]
17 | __all__.extend(["A", "B"])
18 |

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:20:23
   |
19 | # Bad
20 | __all__: list[str] = ["A", "B", "A"]
   |                      ---       ^^^ `A` duplicated here
   |                      |
   |                      previous occurrence of `A` here
21 | __all__: typing.Any = ("A", "B", "B")
22 | __all__ = ["A", "B", "A"]
   |
help: Remove duplicate entries from `__all__`
17 | __all__.extend(["A", "B"])
18 |
19 | # Bad
   - __all__: list[str] = ["A", "B", "A"]
20 + __all__: list[str] = ["A", "B"]
21 | __all__: typing.Any = ("A", "B", "B")
22 | __all__ = ["A", "B", "A"]
23 | __all__ = ["A", "A", "B", "B"]

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:21:29
   |
19 | # Bad
20 | __all__: list[str] = ["A", "B", "A"]
21 | __all__: typing.Any = ("A", "B", "B")
   |                            ---  ^^^ `B` duplicated here
   |                            |
   |                            previous occurrence of `B` here
22 | __all__ = ["A", "B", "A"]
23 | __all__ = ["A", "A", "B", "B"]
   |
help: Remove duplicate entries from `__all__`
18 |
19 | # Bad
20 | __all__: list[str] = ["A", "B", "A"]
   - __all__: typing.Any = ("A", "B", "B")
21 + __all__: typing.Any = ("A", "B")
22 | __all__ = ["A", "B", "A"]
23 | __all__ = ["A", "A", "B", "B"]
24 | __all__ = [

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:22:12
   |
20 | __all__: list[str] = ["A", "B", "A"]
21 | __all__: typing.Any = ("A", "B", "B")
22 | __all__ = ["A", "B", "A"]
   |            ---       ^^^ `A` duplicated here
   |            |
   |            previous occurrence of `A` here
23 | __all__ = ["A", "A", "B", "B"]
24 | __all__ = [
   |
help: Remove duplicate entries from `__all__`
19 | # Bad
20 | __all__: list[str] = ["A", "B", "A"]
21 | __all__: typing.Any = ("A", "B", "B")
   - __all__ = ["A", "B", "A"]
22 + __all__ = ["A", "B"]
23 | __all__ = ["A", "A", "B", "B"]
24 | __all__ = [
25 |     "A",

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:23:12
   |
21 | __all__: typing.Any = ("A", "B", "B")
22 | __all__ = ["A", "B", "A"]
23 | __all__ = ["A", "A", "B", "B"]
   |            ---  ^^^ `A` duplicated here
   |            |
   |            previous occurrence of `A` here
24 | __all__ = [
25 |     "A",
   |
help: Remove duplicate entries from `__all__`
20 | __all__: list[str] = ["A", "B", "A"]
21 | __all__: typing.Any = ("A", "B", "B")
22 | __all__ = ["A", "B", "A"]
   - __all__ = ["A", "A", "B", "B"]
23 + __all__ = ["A", "B", "B"]
24 | __all__ = [
25 |     "A",
26 |     "A",

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:23:22
   |
21 | __all__: typing.Any = ("A", "B", "B")
22 | __all__ = ["A", "B", "A"]
23 | __all__ = ["A", "A", "B", "B"]
   |                      ---  ^^^ `B` duplicated here
   |                      |
   |                      previous occurrence of `B` here
24 | __all__ = [
25 |     "A",
   |
help: Remove duplicate entries from `__all__`
20 | __all__: list[str] = ["A", "B", "A"]
21 | __all__: typing.Any = ("A", "B", "B")
22 | __all__ = ["A", "B", "A"]
   - __all__ = ["A", "A", "B", "B"]
23 + __all__ = ["A", "A", "B"]
24 | __all__ = [
25 |     "A",
26 |     "A",

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:25:5
   |
23 | __all__ = ["A", "A", "B", "B"]
24 | __all__ = [
25 |     "A",
   |     --- previous occurrence of `A` here
26 |     "A",
   |     ^^^ `A` duplicated here
27 |     "B",
28 |     "B"
   |
help: Remove duplicate entries from `__all__`
23 | __all__ = ["A", "A", "B", "B"]
24 | __all__ = [
25 |     "A",
   -     "A",
26 |     "B",
27 |     "B"
28 | ]

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:27:5
   |
25 |     "A",
26 |     "A",
27 |     "B",
   |     --- previous occurrence of `B` here
28 |     "B"
   |     ^^^ `B` duplicated here
29 | ]
30 | __all__ += ["B", "B"]
   |
help: Remove duplicate entries from `__all__`
24 | __all__ = [
25 |     "A",
26 |     "A",
   -     "B",
27 |     "B"
28 | ]
29 | __all__ += ["B", "B"]

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:30:13
   |
28 |     "B"
29 | ]
30 | __all__ += ["B", "B"]
   |             ---  ^^^ `B` duplicated here
   |             |
   |             previous occurrence of `B` here
31 | __all__.extend(["B", "B"])
   |
help: Remove duplicate entries from `__all__`
27 |     "B",
28 |     "B"
29 | ]
   - __all__ += ["B", "B"]
30 + __all__ += ["B"]
31 | __all__.extend(["B", "B"])
32 |
33 | # Bad, unsafe

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:31:17
   |
29 | ]
30 | __all__ += ["B", "B"]
31 | __all__.extend(["B", "B"])
   |                 ---  ^^^ `B` duplicated here
   |                 |
   |                 previous occurrence of `B` here
32 |
33 | # Bad, unsafe
   |
help: Remove duplicate entries from `__all__`
28 |     "B"
29 | ]
30 | __all__ += ["B", "B"]
   - __all__.extend(["B", "B"])
31 + __all__.extend(["B"])
32 |
33 | # Bad, unsafe
34 | __all__ = [

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:35:5
   |
33 | # Bad, unsafe
34 | __all__ = [
35 |     "A",
   |     --- previous occurrence of `A` here
36 |     "A",
   |     ^^^ `A` duplicated here
37 |     "B",
38 |     # Comment
   |
help: Remove duplicate entries from `__all__`
33 | # Bad, unsafe
34 | __all__ = [
35 |     "A",
   -     "A",
36 |     "B",
37 |     # Comment
38 |     "B", # 2

RUF068 [*] `__all__` contains duplicate entries
  --> RUF068.py:37:5
   |
35 |     "A",
36 |     "A",
37 |     "B",
   |     --- previous occurrence of `B` here
38 |     # Comment
39 |     "B", # 2
   |     ^^^ `B` duplicated here
40 |     # 3
41 | ]
   |
help: Remove duplicate entries from `__all__`
34 | __all__ = [
35 |     "A",
36 |     "A",
   -     "B",
   -     # Comment
37 |     "B", # 2
38 |     # 3
39 | ]
note: This is an unsafe fix and may change runtime behavior
```

ruff.schema.json (generated):

```diff
@@ -4088,6 +4088,7 @@
         "RUF065",
         "RUF066",
         "RUF067",
+        "RUF068",
         "RUF1",
         "RUF10",
         "RUF100",
```