[ty] Implement streaming for workspace diagnostics (#19657)

This commit is contained in:
Micha Reiser 2025-08-04 11:34:29 +02:00 committed by GitHub
parent b95d22c08e
commit 808c94d509
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 1123 additions and 228 deletions

View File

@ -22,12 +22,13 @@ use colored::Colorize;
use crossbeam::channel as crossbeam_channel;
use rayon::ThreadPoolBuilder;
use ruff_db::diagnostic::{Diagnostic, DisplayDiagnosticConfig, Severity};
use ruff_db::files::File;
use ruff_db::max_parallelism;
use ruff_db::system::{OsSystem, SystemPath, SystemPathBuf};
use salsa::Database;
use ty_project::metadata::options::ProjectOptionsOverrides;
use ty_project::watch::ProjectWatcher;
use ty_project::{Db, watch};
use ty_project::{CollectReporter, Db, watch};
use ty_project::{ProjectDatabase, ProjectMetadata};
use ty_server::run_server;
@ -268,10 +269,13 @@ impl MainLoop {
// Spawn a new task that checks the project. This needs to be done in a separate thread
// to prevent blocking the main loop here.
rayon::spawn(move || {
let reporter = IndicatifReporter::from(self.printer);
let mut reporter = IndicatifReporter::from(self.printer);
let bar = reporter.bar.clone();
match salsa::Cancelled::catch(|| {
let mut reporter = reporter.clone();
db.check_with_reporter(&mut reporter)
db.check_with_reporter(&mut reporter);
reporter.bar.finish();
reporter.collector.into_sorted(&db)
}) {
Ok(result) => {
// Send the result back to the main loop for printing.
@ -280,7 +284,7 @@ impl MainLoop {
.unwrap();
}
Err(cancelled) => {
reporter.bar.finish_and_clear();
bar.finish_and_clear();
tracing::debug!("Check has been cancelled: {cancelled:?}");
}
}
@ -390,8 +394,9 @@ impl MainLoop {
}
/// A progress reporter for `ty check`.
#[derive(Clone)]
struct IndicatifReporter {
collector: CollectReporter,
/// A reporter that is ready, containing a progress bar to report to.
///
/// Initialization of the bar is deferred to [`ty_project::ProgressReporter::set_files`] so we
@ -406,6 +411,7 @@ impl From<Printer> for IndicatifReporter {
fn from(printer: Printer) -> Self {
Self {
bar: indicatif::ProgressBar::hidden(),
collector: CollectReporter::default(),
printer,
}
}
@ -413,6 +419,8 @@ impl From<Printer> for IndicatifReporter {
impl ty_project::ProgressReporter for IndicatifReporter {
fn set_files(&mut self, files: usize) {
self.collector.set_files(files);
self.bar.set_length(files as u64);
self.bar.set_message("Checking");
self.bar.set_style(
@ -425,9 +433,14 @@ impl ty_project::ProgressReporter for IndicatifReporter {
self.bar.set_draw_target(self.printer.progress_target());
}
fn report_file(&self, _file: &ruff_db::files::File) {
fn report_checked_file(&self, db: &dyn Db, file: File, diagnostics: &[Diagnostic]) {
self.collector.report_checked_file(db, file, diagnostics);
self.bar.inc(1);
}
fn report_diagnostics(&mut self, db: &dyn Db, diagnostics: Vec<Diagnostic>) {
self.collector.report_diagnostics(db, diagnostics);
}
}
#[derive(Debug)]

View File

@ -863,10 +863,18 @@ fn overrides_unknown_rules() -> anyhow::Result<()> {
),
])?;
assert_cmd_snapshot!(case.command(), @r#"
assert_cmd_snapshot!(case.command(), @r###"
success: false
exit_code: 1
----- stdout -----
error[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> main.py:2:5
|
2 | y = 4 / 0
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
warning[unknown-rule]: Unknown lint rule `division-by-zer`
--> pyproject.toml:10:1
|
@ -876,14 +884,6 @@ fn overrides_unknown_rules() -> anyhow::Result<()> {
| ^^^^^^^^^^^^^^^
|
error[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> main.py:2:5
|
2 | y = 4 / 0
| ^^^^^
|
info: rule `division-by-zero` was selected in the configuration file
warning[division-by-zero]: Cannot divide object of type `Literal[4]` by zero
--> tests/test_main.py:2:5
|
@ -896,7 +896,7 @@ fn overrides_unknown_rules() -> anyhow::Result<()> {
----- stderr -----
WARN ty is pre-release software and not ready for production use. Expect to encounter bugs, missing features, and fatal errors.
"#);
"###);
Ok(())
}

View File

@ -5,7 +5,7 @@ use std::{cmp, fmt};
pub use self::changes::ChangeResult;
use crate::metadata::settings::file_settings;
use crate::{DEFAULT_LINT_REGISTRY, DummyReporter};
use crate::{CollectReporter, DEFAULT_LINT_REGISTRY};
use crate::{ProgressReporter, Project, ProjectMetadata};
use ruff_db::Db as SourceDb;
use ruff_db::diagnostic::Diagnostic;
@ -86,7 +86,9 @@ impl ProjectDatabase {
///
/// [`set_check_mode`]: ProjectDatabase::set_check_mode
pub fn check(&self) -> Vec<Diagnostic> {
self.project().check(self, &mut DummyReporter)
let mut collector = CollectReporter::default();
self.project().check(self, &mut collector);
collector.into_sorted(self)
}
/// Checks the files in the project and its dependencies, using the given reporter.
@ -94,8 +96,8 @@ impl ProjectDatabase {
/// Use [`set_check_mode`] to update the check mode.
///
/// [`set_check_mode`]: ProjectDatabase::set_check_mode
pub fn check_with_reporter(&self, reporter: &mut dyn ProgressReporter) -> Vec<Diagnostic> {
self.project().check(self, reporter)
pub fn check_with_reporter(&self, reporter: &mut dyn ProgressReporter) {
self.project().check(self, reporter);
}
#[tracing::instrument(level = "debug", skip(self))]

View File

@ -127,17 +127,46 @@ pub trait ProgressReporter: Send + Sync {
/// Initialize the reporter with the number of files.
fn set_files(&mut self, files: usize);
/// Report the completion of a given file.
fn report_file(&self, file: &File);
/// Report the completion of checking a given file along with its diagnostics.
fn report_checked_file(&self, db: &dyn Db, file: File, diagnostics: &[Diagnostic]);
/// Reports settings or IO related diagnostics. The diagnostics
/// can belong to different files or no file at all.
/// But it's never a file for which [`Self::report_checked_file`] gets called.
fn report_diagnostics(&mut self, db: &dyn Db, diagnostics: Vec<Diagnostic>);
}
/// A no-op implementation of [`ProgressReporter`].
/// Reporter that collects all diagnostics into a `Vec`.
#[derive(Default)]
pub struct DummyReporter;
pub struct CollectReporter(std::sync::Mutex<Vec<Diagnostic>>);
impl ProgressReporter for DummyReporter {
impl CollectReporter {
    /// Consumes the reporter and returns all diagnostics collected so far,
    /// sorted by their rendering sort key so output order is deterministic.
    ///
    /// # Panics
    /// Panics if the internal mutex was poisoned by a panicking checker thread.
    pub fn into_sorted(self, db: &dyn Db) -> Vec<Diagnostic> {
        let mut diagnostics = self.0.into_inner().unwrap();
        diagnostics.sort_by(|left, right| {
            left.rendering_sort_key(db)
                .cmp(&right.rendering_sort_key(db))
        });
        diagnostics
    }
}
impl ProgressReporter for CollectReporter {
fn set_files(&mut self, _files: usize) {}
fn report_file(&self, _file: &File) {}
fn report_checked_file(&self, _db: &dyn Db, _file: File, diagnostics: &[Diagnostic]) {
if diagnostics.is_empty() {
return;
}
self.0
.lock()
.unwrap()
.extend(diagnostics.iter().map(Clone::clone));
}
fn report_diagnostics(&mut self, _db: &dyn Db, diagnostics: Vec<Diagnostic>) {
self.0.get_mut().unwrap().extend(diagnostics);
}
}
#[salsa::tracked]
@ -225,11 +254,7 @@ impl Project {
}
/// Checks the project and its dependencies according to the project's check mode.
pub(crate) fn check(
self,
db: &ProjectDatabase,
reporter: &mut dyn ProgressReporter,
) -> Vec<Diagnostic> {
pub(crate) fn check(self, db: &ProjectDatabase, reporter: &mut dyn ProgressReporter) {
let project_span = tracing::debug_span!("Project::check");
let _span = project_span.enter();
@ -239,12 +264,11 @@ impl Project {
name = self.name(db)
);
let mut diagnostics: Vec<Diagnostic> = Vec::new();
diagnostics.extend(
self.settings_diagnostics(db)
let mut diagnostics: Vec<Diagnostic> = self
.settings_diagnostics(db)
.iter()
.map(OptionDiagnostic::to_diagnostic),
);
.map(OptionDiagnostic::to_diagnostic)
.collect();
let files = ProjectFiles::new(db, self);
reporter.set_files(files.len());
@ -256,19 +280,19 @@ impl Project {
.map(IOErrorDiagnostic::to_diagnostic),
);
reporter.report_diagnostics(db, diagnostics);
let open_files = self.open_files(db);
let check_start = ruff_db::Instant::now();
let file_diagnostics = std::sync::Mutex::new(vec![]);
{
let db = db.clone();
let file_diagnostics = &file_diagnostics;
let project_span = &project_span;
let reporter = &reporter;
rayon::scope(move |scope| {
for file in &files {
let db = db.clone();
let reporter = &*reporter;
scope.spawn(move |_| {
let check_file_span =
tracing::debug_span!(parent: project_span, "check_file", ?file);
@ -276,10 +300,7 @@ impl Project {
match check_file_impl(&db, file) {
Ok(diagnostics) => {
file_diagnostics
.lock()
.unwrap()
.extend(diagnostics.iter().map(Clone::clone));
reporter.report_checked_file(&db, file, diagnostics);
// This is outside `check_file_impl` to avoid that opening or closing
// a file invalidates the `check_file_impl` query of every file!
@ -295,28 +316,22 @@ impl Project {
}
}
Err(io_error) => {
file_diagnostics.lock().unwrap().push(io_error.clone());
reporter.report_checked_file(
&db,
file,
std::slice::from_ref(io_error),
);
}
}
reporter.report_file(&file);
});
}
});
}
};
tracing::debug!(
"Checking all files took {:.3}s",
check_start.elapsed().as_secs_f64(),
);
let mut file_diagnostics = file_diagnostics.into_inner().unwrap();
file_diagnostics.sort_by(|left, right| {
left.rendering_sort_key(db)
.cmp(&right.rendering_sort_key(db))
});
diagnostics.extend(file_diagnostics);
diagnostics
}
pub(crate) fn check_file(self, db: &dyn Db, file: File) -> Vec<Diagnostic> {

View File

@ -5,7 +5,7 @@ use lsp_server::Connection;
use ruff_db::system::{OsSystem, SystemPathBuf};
pub use crate::logging::{LogLevel, init_logging};
pub use crate::server::Server;
pub use crate::server::{PartialWorkspaceProgress, PartialWorkspaceProgressParams, Server};
pub use crate::session::{ClientOptions, DiagnosticMode};
pub use document::{NotebookDocument, PositionEncoding, TextDocument};
pub(crate) use session::{DocumentQuery, Session};
@ -47,7 +47,7 @@ pub fn run_server() -> anyhow::Result<()> {
// This is to complement the `LSPSystem` if the document is not available in the index.
let fallback_system = Arc::new(OsSystem::new(cwd));
let server_result = Server::new(worker_threads, connection, fallback_system, true)
let server_result = Server::new(worker_threads, connection, fallback_system, false)
.context("Failed to start server")?
.run();

View File

@ -29,10 +29,10 @@ pub(crate) use main_loop::{
Action, ConnectionSender, Event, MainLoopReceiver, MainLoopSender, SendRequest,
};
pub(crate) type Result<T> = std::result::Result<T, api::Error>;
pub use api::{PartialWorkspaceProgress, PartialWorkspaceProgressParams};
pub struct Server {
connection: Connection,
client_capabilities: ClientCapabilities,
worker_threads: NonZeroUsize,
main_loop_receiver: MainLoopReceiver,
main_loop_sender: MainLoopSender,
@ -44,7 +44,7 @@ impl Server {
worker_threads: NonZeroUsize,
connection: Connection,
native_system: Arc<dyn System + 'static + Send + Sync + RefUnwindSafe>,
initialize_logging: bool,
in_test: bool,
) -> crate::Result<Self> {
let (id, init_value) = connection.initialize_start()?;
let init_params: InitializeParams = serde_json::from_value(init_value)?;
@ -81,7 +81,7 @@ impl Server {
let (main_loop_sender, main_loop_receiver) = crossbeam::channel::bounded(32);
let client = Client::new(main_loop_sender.clone(), connection.sender.clone());
if initialize_logging {
if !in_test {
crate::logging::init_logging(
global_options.tracing.log_level.unwrap_or_default(),
global_options.tracing.log_file.as_deref(),
@ -160,8 +160,8 @@ impl Server {
global_options,
workspaces,
native_system,
in_test,
)?,
client_capabilities,
})
}

View File

@ -19,6 +19,7 @@ use self::traits::{NotificationHandler, RequestHandler};
use super::{Result, schedule::BackgroundSchedule};
use crate::session::client::Client;
pub(crate) use diagnostics::publish_settings_diagnostics;
pub use requests::{PartialWorkspaceProgress, PartialWorkspaceProgressParams};
use ruff_db::panic::PanicError;
/// Processes a request from the client to the server.

View File

@ -33,3 +33,5 @@ pub(super) use shutdown::ShutdownHandler;
pub(super) use signature_help::SignatureHelpRequestHandler;
pub(super) use workspace_diagnostic::WorkspaceDiagnosticRequestHandler;
pub(super) use workspace_symbols::WorkspaceSymbolRequestHandler;
pub use workspace_diagnostic::{PartialWorkspaceProgress, PartialWorkspaceProgressParams};

View File

@ -1,15 +1,4 @@
use lsp_types::request::WorkspaceDiagnosticRequest;
use lsp_types::{
FullDocumentDiagnosticReport, UnchangedDocumentDiagnosticReport, Url,
WorkspaceDiagnosticParams, WorkspaceDiagnosticReport, WorkspaceDiagnosticReportResult,
WorkspaceDocumentDiagnosticReport, WorkspaceFullDocumentDiagnosticReport,
WorkspaceUnchangedDocumentDiagnosticReport,
};
use ruff_db::files::File;
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use ty_project::ProgressReporter;
use crate::PositionEncoding;
use crate::server::Result;
use crate::server::api::diagnostics::{Diagnostics, to_lsp_diagnostic};
use crate::server::api::traits::{
@ -18,7 +7,23 @@ use crate::server::api::traits::{
use crate::server::lazy_work_done_progress::LazyWorkDoneProgress;
use crate::session::SessionSnapshot;
use crate::session::client::Client;
use crate::session::index::Index;
use crate::system::file_to_url;
use lsp_types::request::WorkspaceDiagnosticRequest;
use lsp_types::{
FullDocumentDiagnosticReport, PreviousResultId, ProgressToken,
UnchangedDocumentDiagnosticReport, Url, WorkspaceDiagnosticParams, WorkspaceDiagnosticReport,
WorkspaceDiagnosticReportPartialResult, WorkspaceDiagnosticReportResult,
WorkspaceDocumentDiagnosticReport, WorkspaceFullDocumentDiagnosticReport,
WorkspaceUnchangedDocumentDiagnosticReport, notification::Notification,
};
use ruff_db::diagnostic::Diagnostic;
use ruff_db::files::File;
use serde::{Deserialize, Serialize};
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::time::Instant;
use ty_project::{Db, ProgressReporter};
pub(crate) struct WorkspaceDiagnosticRequestHandler;
@ -41,12 +46,12 @@ impl BackgroundRequestHandler for WorkspaceDiagnosticRequestHandler {
));
}
// Create a map of previous result IDs for efficient lookup
let mut previous_results: BTreeMap<_, _> = params
.previous_result_ids
.into_iter()
.map(|prev| (prev.uri, prev.value))
.collect();
let writer = ResponseWriter::new(
params.partial_result_params.partial_result_token,
params.previous_result_ids,
&snapshot,
client,
);
// Use the work done progress token from the client request, if provided
// Note: neither VS Code nor Zed currently support this,
@ -58,103 +63,13 @@ impl BackgroundRequestHandler for WorkspaceDiagnosticRequestHandler {
"Checking",
snapshot.resolved_client_capabilities(),
);
// Collect all diagnostics from all projects with their database references
let mut items = Vec::new();
let mut reporter = WorkspaceDiagnosticsProgressReporter::new(work_done_progress, writer);
for db in snapshot.projects() {
let diagnostics = db.check_with_reporter(
&mut WorkspaceDiagnosticsProgressReporter::new(work_done_progress.clone()),
);
// Group diagnostics by URL
let mut diagnostics_by_url: BTreeMap<Url, Vec<_>> = BTreeMap::default();
for diagnostic in diagnostics {
if let Some(span) = diagnostic.primary_span() {
let file = span.expect_ty_file();
let Some(url) = file_to_url(db, file) else {
tracing::debug!("Failed to convert file to URL at {}", file.path(db));
continue;
};
diagnostics_by_url.entry(url).or_default().push(diagnostic);
}
db.check_with_reporter(&mut reporter);
}
items.reserve(diagnostics_by_url.len());
// Convert to workspace diagnostic report format
for (url, file_diagnostics) in diagnostics_by_url {
let version = index
.key_from_url(url.clone())
.ok()
.and_then(|key| index.make_document_ref(key).ok())
.map(|doc| i64::from(doc.version()));
let result_id = Diagnostics::result_id_from_hash(&file_diagnostics);
// Check if this file's diagnostics have changed since the previous request
if let Some(previous_result_id) = previous_results.remove(&url) {
if previous_result_id == result_id {
// Diagnostics haven't changed, return unchanged report
items.push(WorkspaceDocumentDiagnosticReport::Unchanged(
WorkspaceUnchangedDocumentDiagnosticReport {
uri: url,
version,
unchanged_document_diagnostic_report:
UnchangedDocumentDiagnosticReport { result_id },
},
));
continue;
}
}
// Convert diagnostics to LSP format
let lsp_diagnostics = file_diagnostics
.into_iter()
.map(|diagnostic| {
to_lsp_diagnostic(db, &diagnostic, snapshot.position_encoding())
})
.collect::<Vec<_>>();
// Diagnostics have changed or this is the first request, return full report
items.push(WorkspaceDocumentDiagnosticReport::Full(
WorkspaceFullDocumentDiagnosticReport {
uri: url,
version,
full_document_diagnostic_report: FullDocumentDiagnosticReport {
result_id: Some(result_id),
items: lsp_diagnostics,
},
},
));
}
}
// Handle files that had diagnostics in previous request but no longer have any
// Any remaining entries in previous_results are files that were fixed
for (previous_url, _previous_result_id) in previous_results {
// This file had diagnostics before but doesn't now, so we need to report it as having no diagnostics
let version = index
.key_from_url(previous_url.clone())
.ok()
.and_then(|key| index.make_document_ref(key).ok())
.map(|doc| i64::from(doc.version()));
items.push(WorkspaceDocumentDiagnosticReport::Full(
WorkspaceFullDocumentDiagnosticReport {
uri: previous_url,
version,
full_document_diagnostic_report: FullDocumentDiagnosticReport {
result_id: None, // No result ID needed for empty diagnostics
items: vec![], // No diagnostics
},
},
));
}
Ok(WorkspaceDiagnosticReportResult::Report(
WorkspaceDiagnosticReport { items },
))
Ok(reporter.into_final_report())
}
}
@ -171,21 +86,32 @@ impl RetriableRequestHandler for WorkspaceDiagnosticRequestHandler {
}
}
struct WorkspaceDiagnosticsProgressReporter {
/// ty progress reporter that streams the diagnostics to the client
/// and sends progress reports (checking X/Y files).
///
/// Diagnostics are only streamed if the client sends a partial result token.
struct WorkspaceDiagnosticsProgressReporter<'a> {
total_files: usize,
checked_files: AtomicUsize,
work_done: LazyWorkDoneProgress,
response: std::sync::Mutex<ResponseWriter<'a>>,
}
impl WorkspaceDiagnosticsProgressReporter {
fn new(work_done: LazyWorkDoneProgress) -> Self {
impl<'a> WorkspaceDiagnosticsProgressReporter<'a> {
fn new(work_done: LazyWorkDoneProgress, response: ResponseWriter<'a>) -> Self {
Self {
total_files: 0,
checked_files: AtomicUsize::new(0),
work_done,
response: std::sync::Mutex::new(response),
}
}
fn into_final_report(self) -> WorkspaceDiagnosticReportResult {
let writer = self.response.into_inner().unwrap();
writer.into_final_report()
}
fn report_progress(&self) {
let checked = self.checked_files.load(Ordering::Relaxed);
let total = self.total_files;
@ -207,18 +133,339 @@ impl WorkspaceDiagnosticsProgressReporter {
}
}
impl ProgressReporter for WorkspaceDiagnosticsProgressReporter {
impl ProgressReporter for WorkspaceDiagnosticsProgressReporter<'_> {
fn set_files(&mut self, files: usize) {
self.total_files += files;
self.report_progress();
}
fn report_file(&self, _file: &File) {
fn report_checked_file(&self, db: &dyn Db, file: File, diagnostics: &[Diagnostic]) {
let checked = self.checked_files.fetch_add(1, Ordering::Relaxed) + 1;
if checked % 10 == 0 || checked == self.total_files {
// Report progress every 10 files or when all files are checked
if checked % 100 == 0 || checked == self.total_files {
// Report progress every 100 files or when all files are checked
self.report_progress();
}
let mut response = self.response.lock().unwrap();
// Don't report empty diagnostics. We clear previous diagnostics in `into_response`
// which also handles the case where a file no longer has diagnostics because
// it's no longer part of the project.
if !diagnostics.is_empty() {
response.write_diagnostics_for_file(db, file, diagnostics);
}
response.maybe_flush();
}
fn report_diagnostics(&mut self, db: &dyn Db, diagnostics: Vec<Diagnostic>) {
let mut by_file: BTreeMap<File, Vec<Diagnostic>> = BTreeMap::new();
for diagnostic in diagnostics {
if let Some(file) = diagnostic.primary_span().map(|span| span.expect_ty_file()) {
by_file.entry(file).or_default().push(diagnostic);
} else {
tracing::debug!(
"Ignoring diagnostic without a file: {diagnostic}",
diagnostic = diagnostic.primary_message()
);
}
}
let response = self.response.get_mut().unwrap();
for (file, diagnostics) in by_file {
response.write_diagnostics_for_file(db, file, &diagnostics);
}
response.maybe_flush();
}
}
/// Accumulates per-file workspace diagnostic reports and delivers them to the
/// client, either streamed (partial results) or as one bulk response.
#[derive(Debug)]
struct ResponseWriter<'a> {
    /// Streaming or bulk delivery, chosen from the client's partial result token.
    mode: ReportingMode,
    /// Open-document index, used to look up document versions for report entries.
    index: &'a Index,
    position_encoding: PositionEncoding,
    /// Result IDs from the client's previous request, keyed by URL.
    /// Entries are removed as files are re-reported; leftovers are files
    /// that no longer have diagnostics.
    previous_result_ids: BTreeMap<Url, String>,
}
impl<'a> ResponseWriter<'a> {
    /// Creates a writer for one `workspace/diagnostic` request.
    ///
    /// Streaming mode is used only when the client provided a
    /// `partial_result_token`; otherwise all reports are collected and
    /// returned in bulk from [`Self::into_final_report`].
    fn new(
        partial_result_token: Option<ProgressToken>,
        previous_result_ids: Vec<PreviousResultId>,
        snapshot: &'a SessionSnapshot,
        client: &Client,
    ) -> Self {
        let index = snapshot.index();
        let position_encoding = snapshot.position_encoding();
        let mode = if let Some(token) = partial_result_token {
            ReportingMode::Streaming(Streaming {
                first: true,
                client: client.clone(),
                token,
                is_test: snapshot.in_test(),
                last_flush: Instant::now(),
                batched: Vec::new(),
                unchanged: Vec::with_capacity(previous_result_ids.len()),
            })
        } else {
            ReportingMode::Bulk(Vec::new())
        };
        // Re-key the previous results by URL for O(log n) lookup/removal.
        let previous_result_ids = previous_result_ids
            .into_iter()
            .map(|prev| (prev.uri, prev.value))
            .collect();
        Self {
            mode,
            index,
            position_encoding,
            previous_result_ids,
        }
    }

    /// Converts one file's diagnostics into a document report and queues it.
    ///
    /// Emits an `Unchanged` report when the hash-based result id matches the
    /// client's previous result id for this URL; otherwise converts the
    /// diagnostics to LSP format and emits a `Full` report.
    fn write_diagnostics_for_file(&mut self, db: &dyn Db, file: File, diagnostics: &[Diagnostic]) {
        let Some(url) = file_to_url(db, file) else {
            tracing::debug!("Failed to convert file to URL at {}", file.path(db));
            return;
        };
        // Document version is only known for files the client has open.
        let version = self
            .index
            .key_from_url(url.clone())
            .ok()
            .and_then(|key| self.index.make_document_ref(key).ok())
            .map(|doc| i64::from(doc.version()));

        let result_id = Diagnostics::result_id_from_hash(diagnostics);

        let is_unchanged = self
            .previous_result_ids
            .remove(&url)
            .is_some_and(|previous_result_id| previous_result_id == result_id);

        let report = if is_unchanged {
            WorkspaceDocumentDiagnosticReport::Unchanged(
                WorkspaceUnchangedDocumentDiagnosticReport {
                    uri: url,
                    version,
                    unchanged_document_diagnostic_report: UnchangedDocumentDiagnosticReport {
                        result_id,
                    },
                },
            )
        } else {
            let lsp_diagnostics = diagnostics
                .iter()
                .map(|diagnostic| to_lsp_diagnostic(db, diagnostic, self.position_encoding))
                .collect::<Vec<_>>();

            WorkspaceDocumentDiagnosticReport::Full(WorkspaceFullDocumentDiagnosticReport {
                uri: url,
                version,
                full_document_diagnostic_report: FullDocumentDiagnosticReport {
                    result_id: Some(result_id),
                    items: lsp_diagnostics,
                },
            })
        };

        self.write_report(report);
    }

    /// Queues a single document report in the current mode's buffer.
    fn write_report(&mut self, report: WorkspaceDocumentDiagnosticReport) {
        match &mut self.mode {
            ReportingMode::Streaming(streaming) => {
                streaming.write_report(report);
            }
            ReportingMode::Bulk(all) => {
                all.push(report);
            }
        }
    }

    /// Flush any pending reports if streaming diagnostics.
    ///
    /// Note: The flush is throttled when streaming.
    fn maybe_flush(&mut self) {
        match &mut self.mode {
            ReportingMode::Streaming(streaming) => streaming.maybe_flush(),
            ReportingMode::Bulk(_) => {}
        }
    }

    /// Creates the final response after all files have been processed.
    ///
    /// The result can be a partial or full report depending on whether the server's streaming
    /// diagnostics and if it already sent some diagnostics.
    fn into_final_report(mut self) -> WorkspaceDiagnosticReportResult {
        let mut items = Vec::new();

        // Handle files that had diagnostics in previous request but no longer have any
        // Any remaining entries in previous_results are files that were fixed
        for previous_url in self.previous_result_ids.into_keys() {
            // This file had diagnostics before but doesn't now, so we need to report it as having no diagnostics
            let version = self
                .index
                .key_from_url(previous_url.clone())
                .ok()
                .and_then(|key| self.index.make_document_ref(key).ok())
                .map(|doc| i64::from(doc.version()));

            items.push(WorkspaceDocumentDiagnosticReport::Full(
                WorkspaceFullDocumentDiagnosticReport {
                    uri: previous_url,
                    version,
                    full_document_diagnostic_report: FullDocumentDiagnosticReport {
                        result_id: None, // No result ID needed for empty diagnostics
                        items: vec![],   // No diagnostics
                    },
                },
            ));
        }

        match &mut self.mode {
            ReportingMode::Streaming(streaming) => {
                // Drain anything not yet flushed; it goes into the final (partial) result.
                items.extend(
                    std::mem::take(&mut streaming.batched)
                        .into_iter()
                        .map(WorkspaceDocumentDiagnosticReport::Full),
                );
                items.extend(
                    std::mem::take(&mut streaming.unchanged)
                        .into_iter()
                        .map(WorkspaceDocumentDiagnosticReport::Unchanged),
                );
            }
            ReportingMode::Bulk(all) => {
                all.extend(items);
                items = std::mem::take(all);
            }
        }

        self.mode.create_result(items)
    }
}
/// How workspace diagnostic reports are delivered to the client.
#[derive(Debug)]
enum ReportingMode {
    /// Streams the diagnostics to the client as they are computed (file by file).
    /// Requires that the client provides a partial result token.
    Streaming(Streaming),
    /// For clients that don't support streaming diagnostics. Collects all workspace
    /// diagnostics and sends them in the `workspace/diagnostic` response.
    Bulk(Vec<WorkspaceDocumentDiagnosticReport>),
}
impl ReportingMode {
    /// Wraps `items` into the LSP result type appropriate for this mode.
    ///
    /// Streaming delegates to [`Streaming::create_result`], which picks a full
    /// report for the first send and a partial result for every later one.
    /// Bulk mode always produces a single full workspace report.
    fn create_result(
        &mut self,
        items: Vec<WorkspaceDocumentDiagnosticReport>,
    ) -> WorkspaceDiagnosticReportResult {
        if let ReportingMode::Streaming(streaming) = self {
            streaming.create_result(items)
        } else {
            WorkspaceDiagnosticReportResult::Report(WorkspaceDiagnosticReport { items })
        }
    }
}
#[derive(Debug)]
struct Streaming {
    /// Whether the next send is the first one. The LSP spec requires the first
    /// streamed literal to be a full `WorkspaceDiagnosticReport`.
    first: bool,
    client: Client,
    /// The partial result token.
    token: ProgressToken,
    /// Throttles the flush reports to not happen more than once every ~50ms.
    last_flush: Instant,
    /// Whether the client is a test server; tests flush by batch size
    /// (two items) instead of elapsed time, for determinism.
    is_test: bool,
    /// The reports for files with changed diagnostics.
    /// The implementation uses batching to avoid too many
    /// requests for large projects (can slow down the entire
    /// analysis).
    batched: Vec<WorkspaceFullDocumentDiagnosticReport>,
    /// All the unchanged reports. Don't stream them,
    /// since nothing has changed.
    unchanged: Vec<WorkspaceUnchangedDocumentDiagnosticReport>,
}
impl Streaming {
    /// Queues a report: changed files go into the streamed batch,
    /// unchanged files are held back until the final response.
    fn write_report(&mut self, report: WorkspaceDocumentDiagnosticReport) {
        match report {
            WorkspaceDocumentDiagnosticReport::Full(full) => {
                self.batched.push(full);
            }
            WorkspaceDocumentDiagnosticReport::Unchanged(unchanged) => {
                self.unchanged.push(unchanged);
            }
        }
    }

    /// Sends the batched full reports to the client as a `$/progress`
    /// notification, subject to throttling; no-op if the batch is empty.
    fn maybe_flush(&mut self) {
        if self.batched.is_empty() {
            return;
        }

        // Flush every ~50ms or whenever we have two items and this is a test run.
        let should_flush = if self.is_test {
            self.batched.len() >= 2
        } else {
            self.last_flush.elapsed().as_millis() >= 50
        };

        if !should_flush {
            return;
        }

        let items = self
            .batched
            .drain(..)
            .map(WorkspaceDocumentDiagnosticReport::Full)
            .collect();
        let report = self.create_result(items);

        self.client
            .send_notification::<PartialWorkspaceProgress>(PartialWorkspaceProgressParams {
                token: self.token.clone(),
                value: report,
            });
        self.last_flush = Instant::now();
    }

    /// Wraps `items` in a full report on the first call and a partial
    /// result on every subsequent call.
    fn create_result(
        &mut self,
        items: Vec<WorkspaceDocumentDiagnosticReport>,
    ) -> WorkspaceDiagnosticReportResult {
        // As per the LSP spec:
        // > partial result: The first literal send need to be a WorkspaceDiagnosticReport followed
        // > by `n` WorkspaceDiagnosticReportPartialResult literals defined as follows:
        if self.first {
            self.first = false;
            WorkspaceDiagnosticReportResult::Report(WorkspaceDiagnosticReport { items })
        } else {
            WorkspaceDiagnosticReportResult::Partial(WorkspaceDiagnosticReportPartialResult {
                items,
            })
        }
    }
}
/// The `$/progress` notification for partial workspace diagnostics.
///
/// This type is missing in `lsp_types`. That's why we define it here.
pub struct PartialWorkspaceProgress;

impl Notification for PartialWorkspaceProgress {
    type Params = PartialWorkspaceProgressParams;
    const METHOD: &'static str = "$/progress";
}

/// Payload of the [`PartialWorkspaceProgress`] notification.
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone)]
pub struct PartialWorkspaceProgressParams {
    /// The partial result token from the client's `workspace/diagnostic` request.
    pub token: ProgressToken,
    /// The streamed report payload (full on the first send, partial afterwards).
    pub value: WorkspaceDiagnosticReportResult,
}

View File

@ -227,11 +227,9 @@ impl Server {
);
let fs_watcher = self
.client_capabilities
.workspace
.as_ref()
.and_then(|workspace| workspace.did_change_watched_files?.dynamic_registration)
.unwrap_or_default();
.session
.client_capabilities()
.supports_did_change_watched_files_dynamic_registration();
if fs_watcher {
let registration = lsp_types::Registration {

View File

@ -77,6 +77,9 @@ pub(crate) struct Session {
/// Has the client requested the server to shutdown.
shutdown_requested: bool,
/// Is the connected client a `TestServer` instance.
in_test: bool,
deferred_messages: VecDeque<Message>,
}
@ -113,6 +116,7 @@ impl Session {
global_options: GlobalOptions,
workspace_folders: Vec<(Url, ClientOptions)>,
native_system: Arc<dyn System + 'static + Send + Sync + RefUnwindSafe>,
in_test: bool,
) -> crate::Result<Self> {
let index = Arc::new(Index::new(global_options.into_settings()));
@ -132,6 +136,7 @@ impl Session {
resolved_client_capabilities: ResolvedClientCapabilities::new(client_capabilities),
request_queue: RequestQueue::new(),
shutdown_requested: false,
in_test,
})
}
@ -458,6 +463,7 @@ impl Session {
.collect(),
index: self.index.clone().unwrap(),
position_encoding: self.position_encoding,
in_test: self.in_test,
resolved_client_capabilities: self.resolved_client_capabilities,
}
}
@ -649,6 +655,7 @@ pub(crate) struct SessionSnapshot {
index: Arc<Index>,
position_encoding: PositionEncoding,
resolved_client_capabilities: ResolvedClientCapabilities,
in_test: bool,
/// IMPORTANT: It's important that the databases come last, or at least,
/// after any `Arc` that we try to extract or mutate in-place using `Arc::into_inner`
@ -678,6 +685,10 @@ impl SessionSnapshot {
pub(crate) fn resolved_client_capabilities(&self) -> ResolvedClientCapabilities {
self.resolved_client_capabilities
}
pub(crate) const fn in_test(&self) -> bool {
self.in_test
}
}
#[derive(Debug, Default)]

View File

@ -18,6 +18,7 @@ bitflags::bitflags! {
const SIGNATURE_ACTIVE_PARAMETER_SUPPORT = 1 << 9;
const HIERARCHICAL_DOCUMENT_SYMBOL_SUPPORT = 1 << 10;
const WORK_DONE_PROGRESS = 1 << 11;
const DID_CHANGE_WATCHED_FILES_DYNAMIC_REGISTRATION= 1 << 12;
}
}
@ -82,6 +83,11 @@ impl ResolvedClientCapabilities {
self.contains(Self::WORK_DONE_PROGRESS)
}
/// Returns `true` if the client supports dynamic registration for watched files changes.
pub(crate) const fn supports_did_change_watched_files_dynamic_registration(self) -> bool {
self.contains(Self::DID_CHANGE_WATCHED_FILES_DYNAMIC_REGISTRATION)
}
pub(super) fn new(client_capabilities: &ClientCapabilities) -> Self {
let mut flags = Self::empty();
@ -206,6 +212,15 @@ impl ResolvedClientCapabilities {
flags |= Self::WORK_DONE_PROGRESS;
}
if client_capabilities
.workspace
.as_ref()
.and_then(|workspace| workspace.did_change_watched_files?.dynamic_registration)
.unwrap_or_default()
{
flags |= Self::DID_CHANGE_WATCHED_FILES_DYNAMIC_REGISTRATION;
}
flags
}
}

View File

@ -101,17 +101,16 @@ impl Client {
where
N: lsp_types::notification::Notification,
{
let method = N::METHOD.to_string();
if let Err(err) =
self.client_sender
.send(lsp_server::Message::Notification(Notification::new(
method, params,
N::METHOD.to_string(),
params,
)))
{
tracing::error!(
"Failed to send notification `{}` because the client sender is closed: {err}",
N::METHOD
"Failed to send notification `{method}` because the client sender is closed: {err}",
method = N::METHOD,
);
}
}

View File

@ -174,7 +174,7 @@ impl TestServer {
let worker_threads = NonZeroUsize::new(1).unwrap();
let test_system = Arc::new(TestSystem::new(os_system));
match Server::new(worker_threads, server_connection, test_system, false) {
match Server::new(worker_threads, server_connection, test_system, true) {
Ok(server) => {
if let Err(err) = server.run() {
panic!("Server stopped with error: {err:?}");
@ -491,9 +491,12 @@ impl TestServer {
fn handle_message(&mut self, message: Message) -> Result<(), TestServerError> {
match message {
Message::Request(request) => {
tracing::debug!("Received server request {}", &request.method);
self.requests.push_back(request);
}
Message::Response(response) => match self.responses.entry(response.id.clone()) {
Message::Response(response) => {
tracing::debug!("Received server response for request {}", &response.id);
match self.responses.entry(response.id.clone()) {
Entry::Occupied(existing) => {
return Err(TestServerError::DuplicateResponse(
response.id,
@ -503,8 +506,10 @@ impl TestServer {
Entry::Vacant(entry) => {
entry.insert(response);
}
},
}
}
Message::Notification(notification) => {
tracing::debug!("Received notification {}", &notification.method);
self.notifications.push_back(notification);
}
}
@ -756,6 +761,9 @@ impl TestServerBuilder {
configuration: Some(true),
..Default::default()
}),
experimental: Some(json!({
"ty_test_server": true
})),
..Default::default()
};

View File

@ -1,9 +1,12 @@
use anyhow::Result;
use insta::assert_debug_snapshot;
use lsp_types::request::WorkspaceDiagnosticRequest;
use lsp_types::{
PreviousResultId, WorkspaceDiagnosticReportResult, WorkspaceDocumentDiagnosticReport,
PartialResultParams, PreviousResultId, Url, WorkDoneProgressParams, WorkspaceDiagnosticParams,
WorkspaceDiagnosticReportResult, WorkspaceDocumentDiagnosticReport,
};
use ruff_db::system::SystemPath;
use ty_server::{ClientOptions, DiagnosticMode};
use ty_server::{ClientOptions, DiagnosticMode, PartialWorkspaceProgress};
use crate::{TestServer, TestServerBuilder};
@ -233,7 +236,8 @@ def foo() -> str:
server.open_text_document(file_a, &file_a_content, 1);
// First request with no previous result IDs
let first_response = server.workspace_diagnostic_request(None)?;
let mut first_response = server.workspace_diagnostic_request(None)?;
sort_workspace_diagnostic_response(&mut first_response);
insta::assert_debug_snapshot!("workspace_diagnostic_initial_state", first_response);
@ -320,7 +324,8 @@ def foo() -> str:
// - File C: Full report with empty diagnostics (diagnostic was removed)
// - File D: Full report (diagnostic content changed)
// - File E: Full report (the range changes)
let second_response = server.workspace_diagnostic_request(Some(previous_result_ids))?;
let mut second_response = server.workspace_diagnostic_request(Some(previous_result_ids))?;
sort_workspace_diagnostic_response(&mut second_response);
// Consume all progress notifications sent during the second workspace diagnostics
consume_all_progress_notifications(&mut server)?;
@ -364,3 +369,275 @@ fn consume_all_progress_notifications(server: &mut TestServer) -> Result<()> {
Ok(())
}
/// Tests that the server sends partial results for workspace diagnostics
/// if a client sets the `partial_result_token` in the request.
///
/// Note: In production, the server throttles the partial results to one every 50ms. However,
/// this behavior makes testing very hard. That's why the server, in tests, sends a partial response
/// as soon as it batched at least 2 diagnostics together.
#[test]
fn workspace_diagnostic_streaming() -> Result<()> {
    const NUM_FILES: usize = 5;

    let _filter = filter_result_id();
    let workspace_root = SystemPath::new("src");

    // Every file contains the same type error, so each file contributes exactly one
    // diagnostic. With the in-test batch size of 2, the server streams two partial
    // results of 2 items each and delivers the remaining 1 item in the response to
    // the request itself.
    let error_content = "\
def foo() -> str:
    return 42  # Type error: expected str, got int
";

    let global_options = ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace);
    let mut builder = TestServerBuilder::new()?
        // Reuse the same options for the workspace as for the global scope,
        // consistent with `workspace_diagnostic_streaming_with_caching`.
        .with_workspace(workspace_root, global_options.clone())?
        .with_initialization_options(global_options);

    for i in 0..NUM_FILES {
        let file_path_string = format!("src/file_{i:03}.py");
        let file_path = SystemPath::new(&file_path_string);
        builder = builder.with_file(file_path, error_content)?;
    }

    let mut server = builder
        .enable_pull_diagnostics(true)
        .build()?
        .wait_until_workspaces_are_initialized()?;

    let partial_token = lsp_types::ProgressToken::String("streaming-diagnostics".to_string());
    let request_id = server.send_request::<WorkspaceDiagnosticRequest>(WorkspaceDiagnosticParams {
        identifier: None,
        previous_result_ids: Vec::new(),
        work_done_progress_params: WorkDoneProgressParams {
            work_done_token: None,
        },
        partial_result_params: PartialResultParams {
            partial_result_token: Some(partial_token.clone()),
        },
    });

    let mut received_results = 0usize;

    // First, read the response of the workspace diagnostic request.
    // Note: This response comes after the progress notifications but it simplifies the test to read it first.
    let final_response = server.await_response::<WorkspaceDiagnosticReportResult>(request_id)?;

    // Process the final report.
    // This should always be a partial report. However, the type definition in the LSP specification
    // is broken in the sense that both `Report` and `Partial` have the exact same shape
    // and deserializing a previously serialized `Partial` result will yield a `Report` type.
    let response_items = match final_response {
        WorkspaceDiagnosticReportResult::Report(report) => report.items,
        WorkspaceDiagnosticReportResult::Partial(partial) => partial.items,
    };

    // The last batch should contain 1 item because the server sends a partial result with
    // 2 items each.
    assert_eq!(response_items.len(), 1);
    received_results += response_items.len();

    // Collect any partial results sent via progress notifications
    while let Ok(params) = server.await_notification::<PartialWorkspaceProgress>() {
        if params.token == partial_token {
            let streamed_items = match params.value {
                // Ideally we'd assert that only the first response is a full report
                // However, the type definition in the LSP specification is broken
                // in the sense that both `Report` and `Partial` have the exact same structure
                // but it also doesn't use a tag to tell them apart...
                // That means, a client can never tell if it's a full report or a partial report
                WorkspaceDiagnosticReportResult::Report(report) => report.items,
                WorkspaceDiagnosticReportResult::Partial(partial) => partial.items,
            };

            // All streamed batches should contain 2 items (test behavior).
            assert_eq!(streamed_items.len(), 2);
            received_results += streamed_items.len();

            if received_results == NUM_FILES {
                break;
            }
        }
    }

    assert_eq!(received_results, NUM_FILES);

    Ok(())
}
/// Tests that the server's diagnostic streaming (partial results) work correctly
/// with result ids.
#[test]
fn workspace_diagnostic_streaming_with_caching() -> Result<()> {
    const NUM_FILES: usize = 7;

    let _filter = filter_result_id();
    let workspace_root = SystemPath::new("src");

    // `changed_content` still contains a diagnostic (`true` is an unresolved
    // reference in Python), so edited files produce a *changed* full report
    // rather than an empty one.
    let error_content = "def foo() -> str:\n    return 42  # Error";
    let changed_content = "def foo() -> str:\n    return true  # Error";

    let global_options = ClientOptions::default().with_diagnostic_mode(DiagnosticMode::Workspace);
    let mut builder = TestServerBuilder::new()?
        .with_workspace(workspace_root, global_options.clone())?
        .with_initialization_options(global_options);

    // All files have errors initially.
    for i in 0..NUM_FILES {
        let file_path_string = format!("src/error_{i}.py");
        let file_path = SystemPath::new(&file_path_string);
        builder = builder.with_file(file_path, error_content)?;
    }

    let mut server = builder
        .enable_pull_diagnostics(true)
        .build()?
        .wait_until_workspaces_are_initialized()?;

    // Open the first three files so they can be edited below.
    let edited_files = ["src/error_0.py", "src/error_1.py", "src/error_2.py"];
    for path in edited_files {
        server.open_text_document(SystemPath::new(path), &error_content, 1);
    }

    // First request to get result IDs (non-streaming for simplicity)
    let first_response = server.workspace_diagnostic_request(None)?;

    // Consume progress notifications from first request
    consume_all_progress_notifications(&mut server)?;

    let result_ids = match first_response {
        WorkspaceDiagnosticReportResult::Report(report) => report
            .items
            .into_iter()
            .filter_map(|item| {
                if let WorkspaceDocumentDiagnosticReport::Full(full) = item {
                    full.full_document_diagnostic_report
                        .result_id
                        .map(|id| PreviousResultId {
                            uri: full.uri,
                            value: id,
                        })
                } else {
                    panic!("Expected Full report in initial response");
                }
            })
            .collect::<Vec<_>>(),
        WorkspaceDiagnosticReportResult::Partial(_) => {
            panic!("Request without a partial response token should not use streaming")
        }
    };

    assert_eq!(result_ids.len(), NUM_FILES);

    // Change the diagnostic in the first three files so their result ids become
    // stale and the server must send fresh full reports for them.
    for path in edited_files {
        server.change_text_document(
            SystemPath::new(path),
            vec![lsp_types::TextDocumentContentChangeEvent {
                range: None,
                range_length: None,
                text: changed_content.to_string(),
            }],
            2,
        );
    }

    // Second request with caching - use streaming to test the caching behavior
    let partial_token = lsp_types::ProgressToken::String("streaming-diagnostics".to_string());
    let request2_id =
        server.send_request::<WorkspaceDiagnosticRequest>(WorkspaceDiagnosticParams {
            identifier: None,
            previous_result_ids: result_ids,
            work_done_progress_params: WorkDoneProgressParams {
                work_done_token: None,
            },
            partial_result_params: PartialResultParams {
                partial_result_token: Some(partial_token.clone()),
            },
        });

    let final_response2 = server.await_response::<WorkspaceDiagnosticReportResult>(request2_id)?;

    let mut all_items = Vec::new();

    // The final response should contain one changed file and all unchanged files
    let items = match final_response2 {
        WorkspaceDiagnosticReportResult::Report(report) => report.items,
        WorkspaceDiagnosticReportResult::Partial(partial) => partial.items,
    };

    assert_eq!(items.len(), NUM_FILES - 3 + 1); // 4 unchanged reports plus 1 full report for a changed file
    all_items.extend(items);

    // Collect any partial results sent via progress notifications
    while let Ok(params) = server.await_notification::<PartialWorkspaceProgress>() {
        if params.token == partial_token {
            let streamed_items = match params.value {
                // Ideally we'd assert that only the first response is a full report
                // However, the type definition in the LSP specification is broken
                // in the sense that both `Report` and `Partial` have the exact same structure
                // but it also doesn't use a tag to tell them apart...
                // That means, a client can never tell if it's a full report or a partial report
                WorkspaceDiagnosticReportResult::Report(report) => report.items,
                WorkspaceDiagnosticReportResult::Partial(partial) => partial.items,
            };

            // All streamed batches should contain 2 items.
            assert_eq!(streamed_items.len(), 2);
            all_items.extend(streamed_items);

            if all_items.len() == NUM_FILES {
                break;
            }
        }
    }

    sort_workspace_report_items(&mut all_items);
    assert_debug_snapshot!(all_items);

    Ok(())
}
/// Sorts the items of a workspace diagnostic response in place so that
/// snapshot comparisons don't depend on the order in which files were checked.
fn sort_workspace_diagnostic_response(response: &mut WorkspaceDiagnosticReportResult) {
    match response {
        WorkspaceDiagnosticReportResult::Report(report) => {
            sort_workspace_report_items(&mut report.items);
        }
        WorkspaceDiagnosticReportResult::Partial(partial) => {
            sort_workspace_report_items(&mut partial.items);
        }
    }
}
/// Sorts workspace diagnostic report items by their document URI, giving the
/// tests a deterministic ordering regardless of check order.
fn sort_workspace_report_items(items: &mut [WorkspaceDocumentDiagnosticReport]) {
    // Key accessor: both report variants carry the document URI.
    fn report_uri(report: &WorkspaceDocumentDiagnosticReport) -> &Url {
        match report {
            WorkspaceDocumentDiagnosticReport::Unchanged(unchanged) => &unchanged.uri,
            WorkspaceDocumentDiagnosticReport::Full(full) => &full.uri,
        }
    }

    items.sort_unstable_by(|left, right| report_uri(left).cmp(report_uri(right)));
}

View File

@ -107,6 +107,28 @@ Report(
},
},
),
Full(
WorkspaceFullDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/fixed_error.py",
query: None,
fragment: None,
},
version: Some(
2,
),
full_document_diagnostic_report: FullDocumentDiagnosticReport {
result_id: None,
items: [],
},
},
),
Full(
WorkspaceFullDocumentDiagnosticReport {
uri: Url {
@ -332,28 +354,6 @@ Report(
},
},
),
Full(
WorkspaceFullDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/fixed_error.py",
query: None,
fragment: None,
},
version: Some(
2,
),
full_document_diagnostic_report: FullDocumentDiagnosticReport {
result_id: None,
items: [],
},
},
),
],
},
)

View File

@ -0,0 +1,307 @@
---
source: crates/ty_server/tests/e2e/pull_diagnostics.rs
expression: all_items
---
[
Full(
WorkspaceFullDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/error_0.py",
query: None,
fragment: None,
},
version: Some(
2,
),
full_document_diagnostic_report: FullDocumentDiagnosticReport {
result_id: Some(
"[RESULT_ID]",
),
items: [
Diagnostic {
range: Range {
start: Position {
line: 1,
character: 11,
},
end: Position {
line: 1,
character: 15,
},
},
severity: Some(
Error,
),
code: Some(
String(
"unresolved-reference",
),
),
code_description: Some(
CodeDescription {
href: Url {
scheme: "https",
cannot_be_a_base: false,
username: "",
password: None,
host: Some(
Domain(
"ty.dev",
),
),
port: None,
path: "/rules",
query: None,
fragment: Some(
"unresolved-reference",
),
},
},
),
source: Some(
"ty",
),
message: "Name `true` used when not defined",
related_information: Some(
[],
),
tags: None,
data: None,
},
],
},
},
),
Full(
WorkspaceFullDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/error_1.py",
query: None,
fragment: None,
},
version: Some(
2,
),
full_document_diagnostic_report: FullDocumentDiagnosticReport {
result_id: Some(
"[RESULT_ID]",
),
items: [
Diagnostic {
range: Range {
start: Position {
line: 1,
character: 11,
},
end: Position {
line: 1,
character: 15,
},
},
severity: Some(
Error,
),
code: Some(
String(
"unresolved-reference",
),
),
code_description: Some(
CodeDescription {
href: Url {
scheme: "https",
cannot_be_a_base: false,
username: "",
password: None,
host: Some(
Domain(
"ty.dev",
),
),
port: None,
path: "/rules",
query: None,
fragment: Some(
"unresolved-reference",
),
},
},
),
source: Some(
"ty",
),
message: "Name `true` used when not defined",
related_information: Some(
[],
),
tags: None,
data: None,
},
],
},
},
),
Full(
WorkspaceFullDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/error_2.py",
query: None,
fragment: None,
},
version: Some(
2,
),
full_document_diagnostic_report: FullDocumentDiagnosticReport {
result_id: Some(
"[RESULT_ID]",
),
items: [
Diagnostic {
range: Range {
start: Position {
line: 1,
character: 11,
},
end: Position {
line: 1,
character: 15,
},
},
severity: Some(
Error,
),
code: Some(
String(
"unresolved-reference",
),
),
code_description: Some(
CodeDescription {
href: Url {
scheme: "https",
cannot_be_a_base: false,
username: "",
password: None,
host: Some(
Domain(
"ty.dev",
),
),
port: None,
path: "/rules",
query: None,
fragment: Some(
"unresolved-reference",
),
},
},
),
source: Some(
"ty",
),
message: "Name `true` used when not defined",
related_information: Some(
[],
),
tags: None,
data: None,
},
],
},
},
),
Unchanged(
WorkspaceUnchangedDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/error_3.py",
query: None,
fragment: None,
},
version: None,
unchanged_document_diagnostic_report: UnchangedDocumentDiagnosticReport {
result_id: "[RESULT_ID]",
},
},
),
Unchanged(
WorkspaceUnchangedDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/error_4.py",
query: None,
fragment: None,
},
version: None,
unchanged_document_diagnostic_report: UnchangedDocumentDiagnosticReport {
result_id: "[RESULT_ID]",
},
},
),
Unchanged(
WorkspaceUnchangedDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/error_5.py",
query: None,
fragment: None,
},
version: None,
unchanged_document_diagnostic_report: UnchangedDocumentDiagnosticReport {
result_id: "[RESULT_ID]",
},
},
),
Unchanged(
WorkspaceUnchangedDocumentDiagnosticReport {
uri: Url {
scheme: "file",
cannot_be_a_base: false,
username: "",
password: None,
host: None,
port: None,
path: "<temp_dir>/src/error_6.py",
query: None,
fragment: None,
},
version: None,
unchanged_document_diagnostic_report: UnchangedDocumentDiagnosticReport {
result_id: "[RESULT_ID]",
},
},
),
]