mirror of https://github.com/astral-sh/ruff
Use `tokenize` for linter benchmark (#11417)
## Summary This PR updates the linter benchmark to use the `tokenize` function instead of the lexer. The linter expects the token list to be up to and including the first error, which is what the `ruff_python_parser::tokenize` function returns. This was not a problem before because the benchmarks only use valid Python code.
This commit is contained in:
parent
aceb182db6
commit
50f14d017e
|
|
@ -10,7 +10,7 @@ use ruff_linter::settings::{flags, LinterSettings};
|
|||
use ruff_linter::source_kind::SourceKind;
|
||||
use ruff_linter::{registry::Rule, RuleSelector};
|
||||
use ruff_python_ast::PySourceType;
|
||||
use ruff_python_parser::{lexer, parse_program_tokens, Mode};
|
||||
use ruff_python_parser::{parse_program_tokens, tokenize, Mode};
|
||||
|
||||
#[cfg(target_os = "windows")]
|
||||
#[global_allocator]
|
||||
|
|
@ -55,7 +55,7 @@ fn benchmark_linter(mut group: BenchmarkGroup, settings: &LinterSettings) {
|
|||
&case,
|
||||
|b, case| {
|
||||
// Tokenize the source.
|
||||
let tokens: Vec<_> = lexer::lex(case.code(), Mode::Module).collect();
|
||||
let tokens = tokenize(case.code(), Mode::Module);
|
||||
|
||||
// Parse the source.
|
||||
let ast = parse_program_tokens(tokens.clone(), case.code(), false).unwrap();
|
||||
|
|
|
|||
Loading…
Reference in New Issue