Guard against concurrent cache writes on Windows (#11007)

## Summary

On Windows, we see a lot of issues with atomic replacement and concurrent
filesystem access in the cache. There are a variety of failure modes, but
they generally involve trying to persist a file to a path at which the file
already exists, trying to replace or remove a file while another process is
reading it, and so on.

This PR adds locks to all of the relevant database paths. We already used
these advisory locks when building source distributions; now we also acquire
them when unzipping wheels, storing metadata, and so on.
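
For background, these are plain advisory file locks: each cache location gets a sibling `.lock` file, and a writer takes an exclusive lock on it before touching the entry. Here's a minimal sketch of the pattern, using the `fs2` crate rather than uv's internal `LockedFile` type (the path and lock-file name are illustrative):

```rust
use std::fs::OpenOptions;
use std::path::Path;

use fs2::FileExt;

fn write_cache_entry(cache_dir: &Path) -> std::io::Result<()> {
    std::fs::create_dir_all(cache_dir)?;

    // Open (or create) a `.lock` file alongside the cache entry.
    let lock = OpenOptions::new()
        .create(true)
        .write(true)
        .open(cache_dir.join(".lock"))?;

    // Block until we hold an exclusive advisory lock. A second process
    // contending for the same entry waits here instead of racing the write.
    lock.lock_exclusive()?;

    // ... persist files into `cache_dir` ...

    Ok(()) // the lock is released when `lock` is dropped
}
```

Because the lock is advisory, it only serializes cooperating processes (i.e., other uv invocations that take the same lock); it doesn't protect against arbitrary external writers.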

Closes #11002.

## Test Plan

I ran the following script:

```powershell
# Define the cache directory path
$cacheDir = "C:\Users\crmar\workspace\uv\cache"

# Clear the cache directory if it exists
if (Test-Path $cacheDir) {
    Remove-Item -Recurse -Force $cacheDir
}

# Create the cache directory again
New-Item -ItemType Directory -Force -Path $cacheDir

# Define the command to run with --cache-dir flag
$command = {
    param ($venvPath)

    # Create a virtual environment at the specified path
    uv venv $venvPath

    # Run the pip install command with --cache-dir flag
    C:\Users\crmar\workspace\uv\target\profiling\uv.exe pip install flask==1.0.4 --no-binary flask --cache-dir C:\Users\crmar\workspace\uv\cache -v --python $venvPath
}

# Define the paths for the different virtual environments
$venv1 = "C:\Users\crmar\workspace\uv\venv1"
$venv2 = "C:\Users\crmar\workspace\uv\venv2"
$venv3 = "C:\Users\crmar\workspace\uv\venv3"
$venv4 = "C:\Users\crmar\workspace\uv\venv4"
$venv5 = "C:\Users\crmar\workspace\uv\venv5"

# Start the command in parallel five times using Start-Job, each with a different venv
$job1 = Start-Job -ScriptBlock $command -ArgumentList $venv1
$job2 = Start-Job -ScriptBlock $command -ArgumentList $venv2
$job3 = Start-Job -ScriptBlock $command -ArgumentList $venv3
$job4 = Start-Job -ScriptBlock $command -ArgumentList $venv4
$job5 = Start-Job -ScriptBlock $command -ArgumentList $venv5

# Wait for all jobs to complete
$jobs = @($job1, $job2, $job3, $job4, $job5)
$jobs | ForEach-Object { Wait-Job $_ }

# Retrieve the results (optional)
$jobs | ForEach-Object { Receive-Job -Job $_ }

# Clean up the jobs
$jobs | ForEach-Object { Remove-Job -Job $_ }
```

And ensured that it succeeded in five straight invocations (whereas on
`main`, it consistently failed with a variety of different traces).
## Changes

```diff
@@ -12,7 +12,7 @@ use tracing::debug;
 pub use archive::ArchiveId;
 
 use uv_cache_info::Timestamp;
 use uv_distribution_types::InstalledDist;
-use uv_fs::{cachedir, directories};
+use uv_fs::{cachedir, directories, LockedFile};
 use uv_normalize::PackageName;
 use uv_pypi_types::ResolutionMetadata;
@@ -74,6 +74,12 @@ impl CacheEntry {
     pub fn with_file(&self, file: impl AsRef<Path>) -> Self {
         Self(self.dir().join(file))
     }
+
+    /// Acquire the [`CacheEntry`] as an exclusive lock.
+    pub async fn lock(&self) -> Result<LockedFile, io::Error> {
+        fs_err::create_dir_all(self.dir())?;
+        LockedFile::acquire(self.path(), self.path().display()).await
+    }
 }
 
 impl AsRef<Path> for CacheEntry {
@@ -97,6 +103,12 @@ impl CacheShard {
     pub fn shard(&self, dir: impl AsRef<Path>) -> Self {
         Self(self.0.join(dir.as_ref()))
     }
+
+    /// Acquire the cache entry as an exclusive lock.
+    pub async fn lock(&self) -> Result<LockedFile, io::Error> {
+        fs_err::create_dir_all(self.as_ref())?;
+        LockedFile::acquire(self.join(".lock"), self.display()).await
+    }
 }
 
 impl AsRef<Path> for CacheShard {
```
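The `lock()` helpers return a guard that holds the lock until it's dropped, so binding the result to `_lock` scopes the critical section to the enclosing function. To make the RAII behavior concrete, here's a self-contained stand-in (a hypothetical `Guard` type, assuming `tokio` and `fs2` as dependencies; it mirrors, but is not, uv's `LockedFile`):

```rust
use std::fs::File;
use std::io;
use std::path::PathBuf;

use fs2::FileExt;

/// Holds an exclusive advisory lock until dropped (hypothetical stand-in).
struct Guard(File);

impl Drop for Guard {
    fn drop(&mut self) {
        // Best-effort release; the OS also drops the lock when the file
        // handle is closed.
        let _ = fs2::FileExt::unlock(&self.0);
    }
}

/// Acquire the lock on a blocking thread so the async executor isn't
/// stalled while we wait, mirroring the `async fn lock()` methods above.
async fn acquire(path: PathBuf) -> io::Result<Guard> {
    tokio::task::spawn_blocking(move || {
        let file = File::create(&path)?;
        file.lock_exclusive()?; // blocks until the lock is granted
        Ok(Guard(file))
    })
    .await
    .expect("lock task panicked")
}
```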

```diff
@@ -336,6 +336,13 @@ impl RegistryClient {
             Connectivity::Offline => CacheControl::AllowStale,
         };
 
+        // Acquire an advisory lock, to guard against concurrent writes.
+        #[cfg(windows)]
+        let _lock = {
+            let lock_entry = cache_entry.with_file(format!("{package_name}.lock"));
+            lock_entry.lock().await.map_err(ErrorKind::CacheWrite)?
+        };
+
         let result = if matches!(index, IndexUrl::Path(_)) {
             self.fetch_local_index(package_name, &url).await
         } else {
@@ -614,6 +621,13 @@ impl RegistryClient {
             Connectivity::Offline => CacheControl::AllowStale,
         };
 
+        // Acquire an advisory lock, to guard against concurrent writes.
+        #[cfg(windows)]
+        let _lock = {
+            let lock_entry = cache_entry.with_file(format!("{}.lock", filename.stem()));
+            lock_entry.lock().await.map_err(ErrorKind::CacheWrite)?
+        };
+
         let response_callback = |response: Response| async {
             let bytes = response
                 .bytes()
@@ -677,6 +691,13 @@ impl RegistryClient {
             Connectivity::Offline => CacheControl::AllowStale,
         };
 
+        // Acquire an advisory lock, to guard against concurrent writes.
+        #[cfg(windows)]
+        let _lock = {
+            let lock_entry = cache_entry.with_file(format!("{}.lock", filename.stem()));
+            lock_entry.lock().await.map_err(ErrorKind::CacheWrite)?
+        };
+
         // Attempt to fetch via a range request.
         if index.map_or(true, |index| capabilities.supports_range_requests(index)) {
             let req = self
```

```diff
@@ -356,6 +356,19 @@ impl<'a, Context: BuildContext> DistributionDatabase<'a, Context> {
             .boxed_local()
             .await?;
 
+        // Acquire the advisory lock.
+        #[cfg(windows)]
+        let _lock = {
+            let lock_entry = CacheEntry::new(
+                built_wheel.target.parent().unwrap(),
+                format!(
+                    "{}.lock",
+                    built_wheel.target.file_name().unwrap().to_str().unwrap()
+                ),
+            );
+            lock_entry.lock().await.map_err(Error::CacheWrite)?
+        };
+
         // If the wheel was unzipped previously, respect it. Source distributions are
         // cached under a unique revision ID, so unzipped directories are never stale.
         match built_wheel.target.canonicalize() {
@@ -515,6 +528,13 @@ impl<'a, Context: BuildContext> DistributionDatabase<'a, Context> {
         dist: &BuiltDist,
         hashes: HashPolicy<'_>,
     ) -> Result<Archive, Error> {
+        // Acquire an advisory lock, to guard against concurrent writes.
+        #[cfg(windows)]
+        let _lock = {
+            let lock_entry = wheel_entry.with_file(format!("{}.lock", filename.stem()));
+            lock_entry.lock().await.map_err(Error::CacheWrite)?
+        };
+
         // Create an entry for the HTTP cache.
         let http_entry = wheel_entry.with_file(format!("{}.http", filename.stem()));
 
@@ -640,6 +660,13 @@ impl<'a, Context: BuildContext> DistributionDatabase<'a, Context> {
         dist: &BuiltDist,
         hashes: HashPolicy<'_>,
     ) -> Result<Archive, Error> {
+        // Acquire an advisory lock, to guard against concurrent writes.
+        #[cfg(windows)]
+        let _lock = {
+            let lock_entry = wheel_entry.with_file(format!("{}.lock", filename.stem()));
+            lock_entry.lock().await.map_err(Error::CacheWrite)?
+        };
+
         // Create an entry for the HTTP cache.
         let http_entry = wheel_entry.with_file(format!("{}.http", filename.stem()));
 
@@ -796,6 +823,12 @@ impl<'a, Context: BuildContext> DistributionDatabase<'a, Context> {
         dist: &BuiltDist,
         hashes: HashPolicy<'_>,
     ) -> Result<LocalWheel, Error> {
+        #[cfg(windows)]
+        let _lock = {
+            let lock_entry = wheel_entry.with_file(format!("{}.lock", filename.stem()));
+            lock_entry.lock().await.map_err(Error::CacheWrite)?
+        };
+
         // Determine the last-modified time of the wheel.
         let modified = Timestamp::from_path(path).map_err(Error::CacheRead)?;
 
@@ -890,10 +923,11 @@ impl<'a, Context: BuildContext> DistributionDatabase<'a, Context> {
         let temp_dir = tokio::task::spawn_blocking({
             let path = path.to_owned();
             let root = self.build_context.cache().root().to_path_buf();
-            move || -> Result<TempDir, uv_extract::Error> {
+            move || -> Result<TempDir, Error> {
                 // Unzip the wheel into a temporary directory.
-                let temp_dir = tempfile::tempdir_in(root)?;
-                uv_extract::unzip(fs_err::File::open(path)?, temp_dir.path())?;
+                let temp_dir = tempfile::tempdir_in(root).map_err(Error::CacheWrite)?;
+                let reader = fs_err::File::open(path).map_err(Error::CacheWrite)?;
+                uv_extract::unzip(reader, temp_dir.path())?;
                 Ok(temp_dir)
             }
         })
```

```diff
@@ -39,7 +39,7 @@ use uv_distribution_types::{
     PathSourceUrl, SourceDist, SourceUrl,
 };
 use uv_extract::hash::Hasher;
-use uv_fs::{rename_with_retry, write_atomic, LockedFile};
+use uv_fs::{rename_with_retry, write_atomic};
 use uv_git::{GitHubRepository, GitOid};
 use uv_metadata::read_archive_metadata;
 use uv_normalize::PackageName;
@@ -392,7 +392,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         hashes: HashPolicy<'_>,
         client: &ManagedClient<'_>,
     ) -> Result<BuiltWheelMetadata, Error> {
-        let _lock = lock_shard(cache_shard).await?;
+        let _lock = cache_shard.lock().await.map_err(Error::CacheWrite)?;
 
         // Fetch the revision for the source distribution.
         let revision = self
@@ -505,7 +505,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         hashes: HashPolicy<'_>,
         client: &ManagedClient<'_>,
     ) -> Result<ArchiveMetadata, Error> {
-        let _lock = lock_shard(cache_shard).await?;
+        let _lock = cache_shard.lock().await.map_err(Error::CacheWrite)?;
 
         // Fetch the revision for the source distribution.
         let revision = self
@@ -753,7 +753,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         tags: &Tags,
         hashes: HashPolicy<'_>,
     ) -> Result<BuiltWheelMetadata, Error> {
-        let _lock = lock_shard(cache_shard).await?;
+        let _lock = cache_shard.lock().await.map_err(Error::CacheWrite)?;
 
         // Fetch the revision for the source distribution.
         let LocalRevisionPointer {
@@ -847,7 +847,7 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         cache_shard: &CacheShard,
         hashes: HashPolicy<'_>,
     ) -> Result<ArchiveMetadata, Error> {
-        let _lock = lock_shard(cache_shard).await?;
+        let _lock = cache_shard.lock().await.map_err(Error::CacheWrite)?;
 
         // Fetch the revision for the source distribution.
         let LocalRevisionPointer { revision, .. } = self
@@ -1058,7 +1058,8 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
             },
         );
 
-        let _lock = lock_shard(&cache_shard).await?;
+        // Acquire the advisory lock.
+        let _lock = cache_shard.lock().await.map_err(Error::CacheWrite)?;
 
         // Fetch the revision for the source distribution.
         let LocalRevisionPointer {
@@ -1168,7 +1169,8 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
             },
         );
 
-        let _lock = lock_shard(&cache_shard).await?;
+        // Acquire the advisory lock.
+        let _lock = cache_shard.lock().await.map_err(Error::CacheWrite)?;
 
         // Fetch the revision for the source distribution.
         let LocalRevisionPointer { revision, .. } = self
@@ -1430,7 +1432,8 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         );
         let metadata_entry = cache_shard.entry(METADATA);
 
-        let _lock = lock_shard(&cache_shard).await?;
+        // Acquire the advisory lock.
+        let _lock = cache_shard.lock().await.map_err(Error::CacheWrite)?;
 
         // If there are build settings, we need to scope to a cache shard.
         let config_settings = self.build_context.config_settings();
@@ -1581,7 +1584,8 @@ impl<'a, T: BuildContext> SourceDistributionBuilder<'a, T> {
         );
         let metadata_entry = cache_shard.entry(METADATA);
 
-        let _lock = lock_shard(&cache_shard).await?;
+        // Acquire the advisory lock.
+        let _lock = cache_shard.lock().await.map_err(Error::CacheWrite)?;
 
         let path = if let Some(subdirectory) = resource.subdirectory {
             Cow::Owned(fetch.path().join(subdirectory))
@@ -2882,16 +2886,3 @@ fn read_wheel_metadata(
         .map_err(|err| Error::WheelMetadata(wheel.to_path_buf(), Box::new(err)))?;
     Ok(ResolutionMetadata::parse_metadata(&dist_info)?)
 }
-
-/// Apply an advisory lock to a [`CacheShard`] to prevent concurrent builds.
-async fn lock_shard(cache_shard: &CacheShard) -> Result<LockedFile, Error> {
-    let root = cache_shard.as_ref();
-
-    fs_err::create_dir_all(root).map_err(Error::CacheWrite)?;
-
-    let lock = LockedFile::acquire(root.join(".lock"), root.display())
-        .await
-        .map_err(Error::CacheWrite)?;
-
-    Ok(lock)
-}
```

```diff
@@ -45,8 +45,11 @@ pub async fn read_to_string_transcode(path: impl AsRef<Path>) -> std::io::Resul
 
 /// Create a symlink at `dst` pointing to `src`, replacing any existing symlink.
 ///
-/// On Windows, this uses the `junction` crate to create a junction point.
-/// Note because junctions are used, the source must be a directory.
+/// On Windows, this uses the `junction` crate to create a junction point. The
+/// operation is _not_ atomic, as we first delete the junction, then create a
+/// junction at the same path.
+///
+/// Note that because junctions are used, the source must be a directory.
 #[cfg(windows)]
 pub fn replace_symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> std::io::Result<()> {
     // If the source is a file, we can't create a junction
@@ -79,6 +82,10 @@ pub fn replace_symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> std::io:
 }
 
 /// Create a symlink at `dst` pointing to `src`, replacing any existing symlink if necessary.
+///
+/// On Unix, this method creates a temporary file, then moves it into place.
+///
+/// TODO(charlie): Consider using the `rust-atomicwrites` crate.
 #[cfg(unix)]
 pub fn replace_symlink(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> std::io::Result<()> {
     // Attempt to create the symlink directly.
```
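The updated Unix doc comment describes the classic create-then-rename trick: `rename(2)` atomically replaces its destination, so readers never observe a moment where the link is missing. A standalone sketch of that approach (the temporary-name scheme is illustrative; per the context line above, uv's actual function first attempts a direct symlink):

```rust
use std::os::unix::fs::symlink;
use std::path::Path;

/// Replace the symlink at `dst` so that it points to `src`, without a
/// window in which `dst` is absent. (Sketch only.)
fn replace_symlink(src: &Path, dst: &Path) -> std::io::Result<()> {
    // Create the new link at a temporary sibling path...
    let tmp = dst.with_extension("tmp"); // illustrative temp name
    let _ = std::fs::remove_file(&tmp);
    symlink(src, &tmp)?;
    // ...then atomically move it over the old link: on POSIX, rename()
    // replaces the destination in a single step.
    std::fs::rename(&tmp, dst)
}
```

No such trick exists for junctions on Windows, hence the delete-then-recreate window called out in the updated comment, and hence the advisory locks added elsewhere in this PR.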

```diff
@@ -63,11 +63,13 @@ fn clean_package_pypi() -> Result<()> {
         .filters()
         .into_iter()
         .chain([
-            // The cache entry does not have a stable key, so we filter it out
+            // The cache entry does not have a stable key, so we filter it out.
             (
                 r"\[CACHE_DIR\](\\|\/)(.+)(\\|\/).*",
                 "[CACHE_DIR]/$2/[ENTRY]",
             ),
+            // The file count varies by operating system, so we filter it out.
+            ("Removed \\d+ files?", "Removed [N] files"),
         ])
         .collect();
 
@@ -79,7 +81,7 @@ fn clean_package_pypi() -> Result<()> {
     ----- stderr -----
     DEBUG uv [VERSION] ([COMMIT] DATE)
     DEBUG Removing dangling cache entry: [CACHE_DIR]/archive-v0/[ENTRY]
-    Removed 12 files ([SIZE])
+    Removed [N] files ([SIZE])
     "###);
 
     // Assert that the `.rkyv` file is removed for `iniconfig`.
@@ -136,11 +138,13 @@ fn clean_package_index() -> Result<()> {
         .filters()
         .into_iter()
         .chain([
-            // The cache entry does not have a stable key, so we filter it out
+            // The cache entry does not have a stable key, so we filter it out.
            (
                 r"\[CACHE_DIR\](\\|\/)(.+)(\\|\/).*",
                 "[CACHE_DIR]/$2/[ENTRY]",
             ),
+            // The file count varies by operating system, so we filter it out.
+            ("Removed \\d+ files?", "Removed [N] files"),
         ])
         .collect();
 
@@ -152,7 +156,7 @@ fn clean_package_index() -> Result<()> {
     ----- stderr -----
     DEBUG uv [VERSION] ([COMMIT] DATE)
     DEBUG Removing dangling cache entry: [CACHE_DIR]/archive-v0/[ENTRY]
-    Removed 12 files ([SIZE])
+    Removed [N] files ([SIZE])
     "###);
 
     // Assert that the `.rkyv` file is removed for `iniconfig`.
```
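The new filter is just a regex rewrite applied to command output before the snapshot comparison, normalizing the OS-dependent file count. Conceptually (a standalone illustration using the `regex` crate, not the actual `uv_snapshot!` machinery):

```rust
use regex::Regex;

fn main() {
    // The same pattern the tests register above.
    let re = Regex::new(r"Removed \d+ files?").unwrap();
    assert_eq!(
        re.replace_all("Removed 12 files (1.2MiB)", "Removed [N] files"),
        "Removed [N] files (1.2MiB)"
    );
    // `files?` also matches the singular form.
    assert_eq!(
        re.replace_all("Removed 1 file (4KiB)", "Removed [N] files"),
        "Removed [N] files (4KiB)"
    );
}
```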

```diff
@@ -1418,14 +1418,18 @@ fn install_url_source_dist_cached() -> Result<()> {
     // Clear the cache, then re-run the installation in a new virtual environment.
     context.reset_venv();
 
-    uv_snapshot!(context.clean()
-        .arg("source_distribution"), @r###"
+    let filters = std::iter::once(("Removed \\d+ files?", "Removed [N] files"))
+        .chain(context.filters())
+        .collect::<Vec<_>>();
+    uv_snapshot!(
+        filters,
+        context.clean().arg("source_distribution"), @r###"
     success: true
     exit_code: 0
 
     ----- stdout -----
 
     ----- stderr -----
-    Removed 19 files ([SIZE])
+    Removed [N] files ([SIZE])
     "###
     );
@@ -1600,19 +1604,9 @@ fn install_registry_source_dist_cached() -> Result<()> {
     // Clear the cache, then re-run the installation in a new virtual environment.
     context.reset_venv();
 
-    let filters: Vec<(&str, &str)> = if cfg!(windows) {
-        // On Windows, the number of files removed is different.
-        [("Removed 13 files", "Removed 14 files")]
-            .into_iter()
-            .chain(context.filters())
-            .collect()
-    } else {
-        // For some Linux distributions, like Gentoo, the number of files removed is different.
-        [("Removed 12 files", "Removed 14 files")]
-            .into_iter()
-            .chain(context.filters())
-            .collect()
-    };
+    let filters = std::iter::once(("Removed \\d+ files?", "Removed [N] files"))
+        .chain(context.filters())
+        .collect::<Vec<_>>();
 
     uv_snapshot!(filters, context.clean()
         .arg("source_distribution"), @r###"
@@ -1620,7 +1614,7 @@ fn install_registry_source_dist_cached() -> Result<()> {
     ----- stdout -----
 
     ----- stderr -----
-    Removed 20 files ([SIZE])
+    Removed [N] files ([SIZE])
     "###
     );
@@ -1710,14 +1704,18 @@ fn install_path_source_dist_cached() -> Result<()> {
     // Clear the cache, then re-run the installation in a new virtual environment.
     context.reset_venv();
 
-    uv_snapshot!(context.clean()
-        .arg("source-distribution"), @r###"
+    let filters = std::iter::once(("Removed \\d+ files?", "Removed [N] files"))
+        .chain(context.filters())
+        .collect::<Vec<_>>();
+    uv_snapshot!(
+        filters,
+        context.clean().arg("source-distribution"), @r###"
     success: true
     exit_code: 0
 
     ----- stdout -----
 
     ----- stderr -----
-    Removed 19 files ([SIZE])
+    Removed [N] files ([SIZE])
     "###
     );
@@ -1800,23 +1798,18 @@ fn install_path_built_dist_cached() -> Result<()> {
     // Clear the cache, then re-run the installation in a new virtual environment.
     context.reset_venv();
 
-    let filters = if cfg!(windows) {
-        // We do not display sizes on Windows
-        [("Removed 1 file", "Removed 1 file ([SIZE])")]
-            .into_iter()
-            .chain(context.filters())
-            .collect()
-    } else {
-        context.filters()
-    };
-    uv_snapshot!(filters, context.clean()
-        .arg("tomli"), @r###"
+    let filters = std::iter::once(("Removed \\d+ files?", "Removed [N] files"))
+        .chain(context.filters())
+        .collect::<Vec<_>>();
+    uv_snapshot!(
+        filters,
+        context.clean().arg("tomli"), @r###"
     success: true
     exit_code: 0
 
     ----- stdout -----
 
     ----- stderr -----
-    Removed 11 files ([SIZE])
+    Removed [N] files ([SIZE])
     "###
     );
@@ -1849,7 +1842,7 @@ fn install_url_built_dist_cached() -> Result<()> {
     let requirements_txt = context.temp_dir.child("requirements.txt");
     requirements_txt.write_str("tqdm @ https://files.pythonhosted.org/packages/00/e5/f12a80907d0884e6dff9c16d0c0114d81b8cd07dc3ae54c5e962cc83037e/tqdm-4.66.1-py3-none-any.whl")?;
 
-    let filters = if cfg!(windows) {
+    let context_filters = if cfg!(windows) {
         [("warning: The package `tqdm` requires `colorama ; sys_platform == 'win32'`, but it's not installed\n", "")]
             .into_iter()
             .chain(context.filters())
@@ -1857,7 +1850,7 @@
     } else {
         context.filters()
     };
-    uv_snapshot!(filters, context.pip_sync()
+    uv_snapshot!(context_filters, context.pip_sync()
         .arg("requirements.txt")
         .arg("--strict"), @r###"
     success: true
@@ -1877,7 +1870,7 @@
     // Re-run the installation in a new virtual environment.
     context.reset_venv();
 
-    uv_snapshot!(filters, context.pip_sync()
+    uv_snapshot!(context_filters, context.pip_sync()
         .arg("requirements.txt")
         .arg("--strict")
     , @r###"
@@ -1897,18 +1890,22 @@
     // Clear the cache, then re-run the installation in a new virtual environment.
     context.reset_venv();
 
-    uv_snapshot!(context.clean()
-        .arg("tqdm"), @r###"
+    let filters = std::iter::once(("Removed \\d+ files?", "Removed [N] files"))
+        .chain(context_filters.clone())
+        .collect::<Vec<_>>();
+    uv_snapshot!(
+        filters,
+        context.clean().arg("tqdm"), @r###"
     success: true
     exit_code: 0
 
     ----- stdout -----
 
     ----- stderr -----
-    Removed 43 files ([SIZE])
+    Removed [N] files ([SIZE])
     "###
     );
 
-    uv_snapshot!(filters, context.pip_sync()
+    uv_snapshot!(context_filters, context.pip_sync()
         .arg("requirements.txt")
         .arg("--strict")
     , @r###"
```