Automatically infer the PyTorch index via `--torch-backend=auto` (#12070)

## Summary

This is a prototype that I'm considering shipping under `--preview`,
based on [`light-the-torch`](https://github.com/pmeier/light-the-torch).

`light-the-torch` patches pip to pull PyTorch packages from the PyTorch
indexes automatically. And, in particular, `light-the-torch` will query
the installed CUDA drivers to determine which indexes are compatible
with your system.

This PR implements equivalent behavior under `--torch-backend auto`,
though you can also set `--torch-backend cpu`, etc. for convenience.
When enabled, the registry client will fetch from the appropriate
PyTorch index when it sees a package from the PyTorch ecosystem (and
ignore any other configured indexes, _unless_ the package is explicitly
pinned to a different index).

Right now, this is only implemented in the `uv pip` CLI, since it
doesn't quite fit into the lockfile APIs given that it relies on feature
detection on the currently-running machine.

## Test Plan

On macOS, you can test this with (e.g.):

```shell
UV_TORCH_BACKEND=auto UV_CUDA_DRIVER_VERSION=450.80.2 cargo run \
  pip install torch --python-platform linux --python-version 3.12
```

On a GPU-enabled EC2 machine:

```shell
ubuntu@ip-172-31-47-149:~/uv$ UV_TORCH_BACKEND=auto cargo run pip install torch -v
    Finished `dev` profile [unoptimized + debuginfo] target(s) in 0.31s
     Running `target/debug/uv pip install torch -v`
DEBUG uv 0.6.6 (e95ca063b 2025-03-14)
DEBUG Searching for default Python interpreter in virtual environments
DEBUG Found `cpython-3.13.0-linux-x86_64-gnu` at `/home/ubuntu/uv/.venv/bin/python3` (virtual environment)
DEBUG Using Python 3.13.0 environment at: .venv
DEBUG Acquired lock for `.venv`
DEBUG At least one requirement is not satisfied: torch
warning: The `--torch-backend` setting is experimental and may change without warning. Pass `--preview` to disable this warning.
DEBUG Detected CUDA driver version from `/sys/module/nvidia/version`: 550.144.3
...
```
This commit is contained in:
Charlie Marsh 2025-03-19 07:37:08 -07:00 committed by GitHub
parent e40c551b80
commit 5173b59b50
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
31 changed files with 1289 additions and 29 deletions

22
Cargo.lock generated
View File

@ -4625,6 +4625,7 @@ dependencies = [
"uv-shell", "uv-shell",
"uv-static", "uv-static",
"uv-tool", "uv-tool",
"uv-torch",
"uv-trampoline-builder", "uv-trampoline-builder",
"uv-types", "uv-types",
"uv-version", "uv-version",
@ -4844,6 +4845,7 @@ dependencies = [
"uv-resolver", "uv-resolver",
"uv-settings", "uv-settings",
"uv-static", "uv-static",
"uv-torch",
"uv-version", "uv-version",
"uv-warnings", "uv-warnings",
] ]
@ -4897,6 +4899,7 @@ dependencies = [
"uv-pypi-types", "uv-pypi-types",
"uv-small-str", "uv-small-str",
"uv-static", "uv-static",
"uv-torch",
"uv-version", "uv-version",
"uv-warnings", "uv-warnings",
] ]
@ -5700,6 +5703,7 @@ dependencies = [
"uv-python", "uv-python",
"uv-resolver", "uv-resolver",
"uv-static", "uv-static",
"uv-torch",
"uv-warnings", "uv-warnings",
] ]
@ -5771,6 +5775,24 @@ dependencies = [
"uv-virtualenv", "uv-virtualenv",
] ]
[[package]]
name = "uv-torch"
version = "0.1.0"
dependencies = [
"clap",
"either",
"fs-err 3.1.0",
"schemars",
"serde",
"thiserror 2.0.12",
"tracing",
"uv-distribution-types",
"uv-normalize",
"uv-pep440",
"uv-platform-tags",
"uv-static",
]
[[package]] [[package]]
name = "uv-trampoline-builder" name = "uv-trampoline-builder"
version = "0.0.1" version = "0.0.1"

View File

@ -63,6 +63,7 @@ uv-small-str = { path = "crates/uv-small-str" }
uv-state = { path = "crates/uv-state" } uv-state = { path = "crates/uv-state" }
uv-static = { path = "crates/uv-static" } uv-static = { path = "crates/uv-static" }
uv-tool = { path = "crates/uv-tool" } uv-tool = { path = "crates/uv-tool" }
uv-torch = { path = "crates/uv-torch" }
uv-trampoline-builder = { path = "crates/uv-trampoline-builder" } uv-trampoline-builder = { path = "crates/uv-trampoline-builder" }
uv-types = { path = "crates/uv-types" } uv-types = { path = "crates/uv-types" }
uv-version = { path = "crates/uv-version" } uv-version = { path = "crates/uv-version" }

View File

@ -5,6 +5,7 @@ doc-valid-idents = [
"CPython", "CPython",
"GraalPy", "GraalPy",
"ReFS", "ReFS",
"PyTorch",
".." # Include the defaults ".." # Include the defaults
] ]

View File

@ -28,6 +28,7 @@ uv-python = { workspace = true, features = ["clap", "schemars"]}
uv-resolver = { workspace = true, features = ["clap"] } uv-resolver = { workspace = true, features = ["clap"] }
uv-settings = { workspace = true, features = ["schemars"] } uv-settings = { workspace = true, features = ["schemars"] }
uv-static = { workspace = true } uv-static = { workspace = true }
uv-torch = { workspace = true, features = ["clap"] }
uv-version = { workspace = true } uv-version = { workspace = true }
uv-warnings = { workspace = true } uv-warnings = { workspace = true }

View File

@ -21,6 +21,7 @@ use uv_pypi_types::VerbatimParsedUrl;
use uv_python::{PythonDownloads, PythonPreference, PythonVersion}; use uv_python::{PythonDownloads, PythonPreference, PythonVersion};
use uv_resolver::{AnnotationStyle, ExcludeNewer, ForkStrategy, PrereleaseMode, ResolutionMode}; use uv_resolver::{AnnotationStyle, ExcludeNewer, ForkStrategy, PrereleaseMode, ResolutionMode};
use uv_static::EnvVars; use uv_static::EnvVars;
use uv_torch::TorchMode;
pub mod comma; pub mod comma;
pub mod compat; pub mod compat;
@ -1290,6 +1291,21 @@ pub struct PipCompileArgs {
#[arg(long, overrides_with("emit_index_annotation"), hide = true)] #[arg(long, overrides_with("emit_index_annotation"), hide = true)]
pub no_emit_index_annotation: bool, pub no_emit_index_annotation: bool,
/// The backend to use when fetching packages in the PyTorch ecosystem (e.g., `cpu`, `cu126`, or `auto`).
///
/// When set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem,
/// and will instead use the defined backend.
///
/// For example, when set to `cpu`, uv will use the CPU-only PyTorch index; when set to `cu126`,
/// uv will use the PyTorch index for CUDA 12.6.
///
/// The `auto` mode will attempt to detect the appropriate PyTorch index based on the currently
/// installed CUDA drivers.
///
/// This option is in preview and may change in any future release.
#[arg(long, value_enum, env = EnvVars::UV_TORCH_BACKEND)]
pub torch_backend: Option<TorchMode>,
#[command(flatten)] #[command(flatten)]
pub compat_args: compat::PipCompileCompatArgs, pub compat_args: compat::PipCompileCompatArgs,
} }
@ -1531,6 +1547,21 @@ pub struct PipSyncArgs {
#[arg(long)] #[arg(long)]
pub dry_run: bool, pub dry_run: bool,
/// The backend to use when fetching packages in the PyTorch ecosystem (e.g., `cpu`, `cu126`, or `auto`).
///
/// When set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem,
/// and will instead use the defined backend.
///
/// For example, when set to `cpu`, uv will use the CPU-only PyTorch index; when set to `cu126`,
/// uv will use the PyTorch index for CUDA 12.6.
///
/// The `auto` mode will attempt to detect the appropriate PyTorch index based on the currently
/// installed CUDA drivers.
///
/// This option is in preview and may change in any future release.
#[arg(long, value_enum, env = EnvVars::UV_TORCH_BACKEND)]
pub torch_backend: Option<TorchMode>,
#[command(flatten)] #[command(flatten)]
pub compat_args: compat::PipSyncCompatArgs, pub compat_args: compat::PipSyncCompatArgs,
} }
@ -1831,6 +1862,21 @@ pub struct PipInstallArgs {
#[arg(long)] #[arg(long)]
pub dry_run: bool, pub dry_run: bool,
/// The backend to use when fetching packages in the PyTorch ecosystem (e.g., `cpu`, `cu126`, or `auto`)
///
/// When set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem,
/// and will instead use the defined backend.
///
/// For example, when set to `cpu`, uv will use the CPU-only PyTorch index; when set to `cu126`,
/// uv will use the PyTorch index for CUDA 12.6.
///
/// The `auto` mode will attempt to detect the appropriate PyTorch index based on the currently
/// installed CUDA drivers.
///
/// This option is in preview and may change in any future release.
#[arg(long, value_enum, env = EnvVars::UV_TORCH_BACKEND)]
pub torch_backend: Option<TorchMode>,
#[command(flatten)] #[command(flatten)]
pub compat_args: compat::PipInstallCompatArgs, pub compat_args: compat::PipInstallCompatArgs,
} }

View File

@ -25,6 +25,7 @@ uv-platform-tags = { workspace = true }
uv-pypi-types = { workspace = true } uv-pypi-types = { workspace = true }
uv-small-str = { workspace = true } uv-small-str = { workspace = true }
uv-static = { workspace = true } uv-static = { workspace = true }
uv-torch = { workspace = true }
uv-version = { workspace = true } uv-version = { workspace = true }
uv-warnings = { workspace = true } uv-warnings = { workspace = true }

View File

@ -15,12 +15,6 @@ use tracing::{info_span, instrument, trace, warn, Instrument};
use url::Url; use url::Url;
use uv_auth::UrlAuthPolicies; use uv_auth::UrlAuthPolicies;
use crate::base_client::{BaseClientBuilder, ExtraMiddleware};
use crate::cached_client::CacheControl;
use crate::html::SimpleHtml;
use crate::remote_metadata::wheel_metadata_from_remote_zip;
use crate::rkyvutil::OwnedArchive;
use crate::{BaseClient, CachedClient, CachedClientError, Error, ErrorKind};
use uv_cache::{Cache, CacheBucket, CacheEntry, WheelCache}; use uv_cache::{Cache, CacheBucket, CacheEntry, WheelCache};
use uv_configuration::KeyringProviderType; use uv_configuration::KeyringProviderType;
use uv_configuration::{IndexStrategy, TrustedHost}; use uv_configuration::{IndexStrategy, TrustedHost};
@ -35,12 +29,21 @@ use uv_pep508::MarkerEnvironment;
use uv_platform_tags::Platform; use uv_platform_tags::Platform;
use uv_pypi_types::{ResolutionMetadata, SimpleJson}; use uv_pypi_types::{ResolutionMetadata, SimpleJson};
use uv_small_str::SmallString; use uv_small_str::SmallString;
use uv_torch::TorchStrategy;
use crate::base_client::{BaseClientBuilder, ExtraMiddleware};
use crate::cached_client::CacheControl;
use crate::html::SimpleHtml;
use crate::remote_metadata::wheel_metadata_from_remote_zip;
use crate::rkyvutil::OwnedArchive;
use crate::{BaseClient, CachedClient, CachedClientError, Error, ErrorKind};
/// A builder for an [`RegistryClient`]. /// A builder for an [`RegistryClient`].
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct RegistryClientBuilder<'a> { pub struct RegistryClientBuilder<'a> {
index_urls: IndexUrls, index_urls: IndexUrls,
index_strategy: IndexStrategy, index_strategy: IndexStrategy,
torch_backend: Option<TorchStrategy>,
cache: Cache, cache: Cache,
base_client_builder: BaseClientBuilder<'a>, base_client_builder: BaseClientBuilder<'a>,
} }
@ -50,6 +53,7 @@ impl RegistryClientBuilder<'_> {
Self { Self {
index_urls: IndexUrls::default(), index_urls: IndexUrls::default(),
index_strategy: IndexStrategy::default(), index_strategy: IndexStrategy::default(),
torch_backend: None,
cache, cache,
base_client_builder: BaseClientBuilder::new(), base_client_builder: BaseClientBuilder::new(),
} }
@ -69,6 +73,12 @@ impl<'a> RegistryClientBuilder<'a> {
self self
} }
#[must_use]
pub fn torch_backend(mut self, torch_backend: Option<TorchStrategy>) -> Self {
self.torch_backend = torch_backend;
self
}
#[must_use] #[must_use]
pub fn keyring(mut self, keyring_type: KeyringProviderType) -> Self { pub fn keyring(mut self, keyring_type: KeyringProviderType) -> Self {
self.base_client_builder = self.base_client_builder.keyring(keyring_type); self.base_client_builder = self.base_client_builder.keyring(keyring_type);
@ -154,6 +164,7 @@ impl<'a> RegistryClientBuilder<'a> {
RegistryClient { RegistryClient {
index_urls: self.index_urls, index_urls: self.index_urls,
index_strategy: self.index_strategy, index_strategy: self.index_strategy,
torch_backend: self.torch_backend,
cache: self.cache, cache: self.cache,
connectivity, connectivity,
client, client,
@ -175,6 +186,7 @@ impl<'a> RegistryClientBuilder<'a> {
RegistryClient { RegistryClient {
index_urls: self.index_urls, index_urls: self.index_urls,
index_strategy: self.index_strategy, index_strategy: self.index_strategy,
torch_backend: self.torch_backend,
cache: self.cache, cache: self.cache,
connectivity, connectivity,
client, client,
@ -190,6 +202,7 @@ impl<'a> TryFrom<BaseClientBuilder<'a>> for RegistryClientBuilder<'a> {
Ok(Self { Ok(Self {
index_urls: IndexUrls::default(), index_urls: IndexUrls::default(),
index_strategy: IndexStrategy::default(), index_strategy: IndexStrategy::default(),
torch_backend: None,
cache: Cache::temp()?, cache: Cache::temp()?,
base_client_builder: value, base_client_builder: value,
}) })
@ -203,6 +216,8 @@ pub struct RegistryClient {
index_urls: IndexUrls, index_urls: IndexUrls,
/// The strategy to use when fetching across multiple indexes. /// The strategy to use when fetching across multiple indexes.
index_strategy: IndexStrategy, index_strategy: IndexStrategy,
/// The strategy to use when selecting a PyTorch backend, if any.
torch_backend: Option<TorchStrategy>,
/// The underlying HTTP client. /// The underlying HTTP client.
client: CachedClient, client: CachedClient,
/// Used for the remote wheel METADATA cache. /// Used for the remote wheel METADATA cache.
@ -239,6 +254,15 @@ impl RegistryClient {
self.timeout self.timeout
} }
/// Return the appropriate index URLs for the given [`PackageName`].
///
/// If a PyTorch backend strategy is configured, it takes precedence: the strategy's
/// index URLs (if any) replace the configured indexes entirely for this package.
/// Otherwise, the configured indexes are returned as-is.
fn index_urls_for(&self, package_name: &PackageName) -> impl Iterator<Item = &IndexUrl> {
self.torch_backend
.as_ref()
// NOTE(review): presumably `TorchStrategy::index_urls` yields `None` for packages
// outside the PyTorch ecosystem, so non-PyTorch packages fall through to the
// configured indexes — confirm against `uv-torch`.
.and_then(|torch_backend| torch_backend.index_urls(package_name))
.map(Either::Left)
.unwrap_or_else(|| Either::Right(self.index_urls.indexes().map(Index::url)))
}
/// Fetch a package from the `PyPI` simple API. /// Fetch a package from the `PyPI` simple API.
/// ///
/// "simple" here refers to [PEP 503 Simple Repository API](https://peps.python.org/pep-0503/) /// "simple" here refers to [PEP 503 Simple Repository API](https://peps.python.org/pep-0503/)
@ -252,23 +276,24 @@ impl RegistryClient {
capabilities: &IndexCapabilities, capabilities: &IndexCapabilities,
download_concurrency: &Semaphore, download_concurrency: &Semaphore,
) -> Result<Vec<(&'index IndexUrl, OwnedArchive<SimpleMetadata>)>, Error> { ) -> Result<Vec<(&'index IndexUrl, OwnedArchive<SimpleMetadata>)>, Error> {
// If `--no-index` is specified, avoid fetching regardless of whether the index is implicit,
// explicit, etc.
if self.index_urls.no_index() {
return Err(ErrorKind::NoIndex(package_name.to_string()).into());
}
let indexes = if let Some(index) = index { let indexes = if let Some(index) = index {
Either::Left(std::iter::once(index)) Either::Left(std::iter::once(index))
} else { } else {
Either::Right(self.index_urls.indexes().map(Index::url)) Either::Right(self.index_urls_for(package_name))
}; };
let mut it = indexes.peekable();
if it.peek().is_none() {
return Err(ErrorKind::NoIndex(package_name.to_string()).into());
}
let mut results = Vec::new(); let mut results = Vec::new();
match self.index_strategy { match self.index_strategy {
// If we're searching for the first index that contains the package, fetch serially. // If we're searching for the first index that contains the package, fetch serially.
IndexStrategy::FirstIndex => { IndexStrategy::FirstIndex => {
for index in it { for index in indexes {
let _permit = download_concurrency.acquire().await; let _permit = download_concurrency.acquire().await;
if let Some(metadata) = self if let Some(metadata) = self
.simple_single_index(package_name, index, capabilities) .simple_single_index(package_name, index, capabilities)
@ -282,7 +307,7 @@ impl RegistryClient {
// Otherwise, fetch concurrently. // Otherwise, fetch concurrently.
IndexStrategy::UnsafeBestMatch | IndexStrategy::UnsafeFirstMatch => { IndexStrategy::UnsafeBestMatch | IndexStrategy::UnsafeFirstMatch => {
results = futures::stream::iter(it) results = futures::stream::iter(indexes)
.map(|index| async move { .map(|index| async move {
let _permit = download_concurrency.acquire().await; let _permit = download_concurrency.acquire().await;
let metadata = self let metadata = self

View File

@ -528,6 +528,11 @@ impl<'a> IndexUrls {
) )
} }
} }
/// Return the `--no-index` flag.
pub fn no_index(&self) -> bool {
self.no_index
}
} }
bitflags::bitflags! { bitflags::bitflags! {

View File

@ -1332,7 +1332,7 @@ impl<InstalledPackages: InstalledPackagesProvider> ResolverState<InstalledPackag
/// apply it in two cases: /// apply it in two cases:
/// ///
/// 1. Local versions, where the non-local version has greater platform coverage. The intent is /// 1. Local versions, where the non-local version has greater platform coverage. The intent is
/// such that, if we're resolving `PyTorch`, and we choose `torch==2.5.2+cpu`, we want to /// such that, if we're resolving PyTorch, and we choose `torch==2.5.2+cpu`, we want to
/// fork so that we can select `torch==2.5.2` on macOS (since the `+cpu` variant doesn't /// fork so that we can select `torch==2.5.2` on macOS (since the `+cpu` variant doesn't
/// include any macOS wheels). /// include any macOS wheels).
/// 2. Platforms that the user explicitly marks as "required" (opt-in). For example, the user /// 2. Platforms that the user explicitly marks as "required" (opt-in). For example, the user

View File

@ -17,19 +17,20 @@ workspace = true
[dependencies] [dependencies]
uv-cache-info = { workspace = true, features = ["schemars"] } uv-cache-info = { workspace = true, features = ["schemars"] }
uv-configuration = { workspace = true, features = ["schemars", "clap"] } uv-configuration = { workspace = true, features = ["schemars"] }
uv-dirs = { workspace = true } uv-dirs = { workspace = true }
uv-distribution-types = { workspace = true, features = ["schemars"] } uv-distribution-types = { workspace = true, features = ["schemars"] }
uv-fs = { workspace = true } uv-fs = { workspace = true }
uv-install-wheel = { workspace = true, features = ["schemars", "clap"] } uv-install-wheel = { workspace = true, features = ["schemars"] }
uv-macros = { workspace = true } uv-macros = { workspace = true }
uv-normalize = { workspace = true, features = ["schemars"] } uv-normalize = { workspace = true, features = ["schemars"] }
uv-options-metadata = { workspace = true } uv-options-metadata = { workspace = true }
uv-pep508 = { workspace = true } uv-pep508 = { workspace = true }
uv-pypi-types = { workspace = true } uv-pypi-types = { workspace = true }
uv-python = { workspace = true, features = ["schemars", "clap"] } uv-python = { workspace = true, features = ["schemars"] }
uv-resolver = { workspace = true, features = ["schemars", "clap"] } uv-resolver = { workspace = true, features = ["schemars"] }
uv-static = { workspace = true } uv-static = { workspace = true }
uv-torch = { workspace = true, features = ["schemars"] }
uv-warnings = { workspace = true } uv-warnings = { workspace = true }
clap = { workspace = true } clap = { workspace = true }

View File

@ -12,6 +12,7 @@ use uv_install_wheel::LinkMode;
use uv_pypi_types::{SchemaConflicts, SupportedEnvironments}; use uv_pypi_types::{SchemaConflicts, SupportedEnvironments};
use uv_python::{PythonDownloads, PythonPreference, PythonVersion}; use uv_python::{PythonDownloads, PythonPreference, PythonVersion};
use uv_resolver::{AnnotationStyle, ExcludeNewer, ForkStrategy, PrereleaseMode, ResolutionMode}; use uv_resolver::{AnnotationStyle, ExcludeNewer, ForkStrategy, PrereleaseMode, ResolutionMode};
use uv_torch::TorchMode;
use crate::{FilesystemOptions, Options, PipOptions}; use crate::{FilesystemOptions, Options, PipOptions};
@ -95,6 +96,7 @@ impl_combine_or!(SchemaConflicts);
impl_combine_or!(String); impl_combine_or!(String);
impl_combine_or!(SupportedEnvironments); impl_combine_or!(SupportedEnvironments);
impl_combine_or!(TargetTriple); impl_combine_or!(TargetTriple);
impl_combine_or!(TorchMode);
impl_combine_or!(TrustedPublishing); impl_combine_or!(TrustedPublishing);
impl_combine_or!(Url); impl_combine_or!(Url);
impl_combine_or!(bool); impl_combine_or!(bool);

View File

@ -19,6 +19,7 @@ use uv_pypi_types::{SupportedEnvironments, VerbatimParsedUrl};
use uv_python::{PythonDownloads, PythonPreference, PythonVersion}; use uv_python::{PythonDownloads, PythonPreference, PythonVersion};
use uv_resolver::{AnnotationStyle, ExcludeNewer, ForkStrategy, PrereleaseMode, ResolutionMode}; use uv_resolver::{AnnotationStyle, ExcludeNewer, ForkStrategy, PrereleaseMode, ResolutionMode};
use uv_static::EnvVars; use uv_static::EnvVars;
use uv_torch::TorchMode;
/// A `pyproject.toml` with an (optional) `[tool.uv]` section. /// A `pyproject.toml` with an (optional) `[tool.uv]` section.
#[allow(dead_code)] #[allow(dead_code)]
@ -1543,6 +1544,26 @@ pub struct PipOptions {
"# "#
)] )]
pub reinstall_package: Option<Vec<PackageName>>, pub reinstall_package: Option<Vec<PackageName>>,
/// The backend to use when fetching packages in the PyTorch ecosystem.
///
/// When set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem,
/// and will instead use the defined backend.
///
/// For example, when set to `cpu`, uv will use the CPU-only PyTorch index; when set to `cu126`,
/// uv will use the PyTorch index for CUDA 12.6.
///
/// The `auto` mode will attempt to detect the appropriate PyTorch index based on the currently
/// installed CUDA drivers.
///
/// This option is in preview and may change in any future release.
#[option(
default = "null",
value_type = "str",
example = r#"
torch-backend = "auto"
"#
)]
pub torch_backend: Option<TorchMode>,
} }
impl PipOptions { impl PipOptions {

View File

@ -678,4 +678,11 @@ impl EnvVars {
/// ///
/// This is a quasi-standard variable, described, e.g., in `ncurses(3x)`. /// This is a quasi-standard variable, described, e.g., in `ncurses(3x)`.
pub const COLUMNS: &'static str = "COLUMNS"; pub const COLUMNS: &'static str = "COLUMNS";
/// The CUDA driver version to assume when inferring the PyTorch backend.
#[attr_hidden]
pub const UV_CUDA_DRIVER_VERSION: &'static str = "UV_CUDA_DRIVER_VERSION";
/// Equivalent to the `--torch-backend` command-line argument (e.g., `cpu`, `cu126`, or `auto`).
pub const UV_TORCH_BACKEND: &'static str = "UV_TORCH_BACKEND";
} }

View File

@ -0,0 +1,28 @@
[package]
name = "uv-torch"
version = "0.1.0"
edition.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
authors.workspace = true
license.workspace = true
[dependencies]
uv-distribution-types = { workspace = true }
uv-normalize = { workspace = true }
uv-pep440 = { workspace = true }
uv-platform-tags = { workspace = true }
uv-static = { workspace = true }
clap = { workspace = true, optional = true }
either = { workspace = true }
fs-err = { workspace = true }
schemars = { workspace = true, optional = true }
serde = { workspace = true }
thiserror = { workspace = true }
tracing = { workspace = true }
[lints]
workspace = true

View File

@ -0,0 +1,142 @@
use std::str::FromStr;
use tracing::debug;
use uv_pep440::Version;
use uv_static::EnvVars;
/// An error that can occur while detecting an accelerator or parsing its driver version.
#[derive(Debug, thiserror::Error)]
pub enum AcceleratorError {
    /// An I/O failure while reading a driver metadata file (e.g., `/sys/module/nvidia/version`).
    #[error(transparent)]
    Io(#[from] std::io::Error),
    /// A detected driver version string could not be parsed as a version.
    #[error(transparent)]
    Version(#[from] uv_pep440::VersionParseError),
    /// The output of `nvidia-smi` was not valid UTF-8.
    #[error(transparent)]
    Utf8(#[from] std::string::FromUtf8Error),
}
/// A hardware accelerator detected on (or assumed for) the current machine.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Accelerator {
    /// An NVIDIA GPU, identified by the installed CUDA driver version (e.g., `550.144.03`).
    Cuda { driver_version: Version },
}
impl std::fmt::Display for Accelerator {
    /// Render the accelerator for user-facing output, e.g., `CUDA 550.144.03`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        // `Cuda` is currently the only variant, so the pattern is irrefutable.
        let Self::Cuda { driver_version } = self;
        write!(f, "CUDA {driver_version}")
    }
}
impl Accelerator {
    /// Detect the CUDA driver version from the system.
    ///
    /// Query, in order:
    /// 1. The `UV_CUDA_DRIVER_VERSION` environment variable.
    /// 2. `/sys/module/nvidia/version`, which contains the driver version (e.g., `550.144.03`).
    /// 3. `/proc/driver/nvidia/version`, which contains the driver version among other information.
    /// 4. `nvidia-smi --query-gpu=driver_version --format=csv,noheader`.
    ///
    /// Returns `Ok(None)` if no CUDA driver could be found.
    ///
    /// # Errors
    ///
    /// Returns an error if a detection source exists but cannot be read or parsed.
    pub fn detect() -> Result<Option<Self>, AcceleratorError> {
        // Read from `UV_CUDA_DRIVER_VERSION`; an explicit override takes precedence over
        // system detection (useful for testing and cross-targeting).
        if let Ok(driver_version) = std::env::var(EnvVars::UV_CUDA_DRIVER_VERSION) {
            let driver_version = Version::from_str(&driver_version)?;
            debug!("Detected CUDA driver version from `UV_CUDA_DRIVER_VERSION`: {driver_version}");
            return Ok(Some(Self::Cuda { driver_version }));
        }
        // Read from `/sys/module/nvidia/version`.
        match fs_err::read_to_string("/sys/module/nvidia/version") {
            Ok(content) => {
                return match parse_sys_module_nvidia_version(&content) {
                    Ok(driver_version) => {
                        debug!("Detected CUDA driver version from `/sys/module/nvidia/version`: {driver_version}");
                        Ok(Some(Self::Cuda { driver_version }))
                    }
                    Err(e) => Err(e),
                }
            }
            // A missing file just means this source is unavailable; fall through to the next.
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
            Err(e) => return Err(e.into()),
        }
        // Read from `/proc/driver/nvidia/version`.
        match fs_err::read_to_string("/proc/driver/nvidia/version") {
            Ok(content) => {
                match parse_proc_driver_nvidia_version(&content) {
                    Ok(Some(driver_version)) => {
                        debug!("Detected CUDA driver version from `/proc/driver/nvidia/version`: {driver_version}");
                        return Ok(Some(Self::Cuda { driver_version }));
                    }
                    Ok(None) => {
                        // Unexpected file layout; fall through to `nvidia-smi` rather than failing.
                        debug!("Failed to parse CUDA driver version from `/proc/driver/nvidia/version`");
                    }
                    Err(e) => return Err(e),
                }
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {}
            Err(e) => return Err(e.into()),
        }
        // Query `nvidia-smi`.
        if let Ok(output) = std::process::Command::new("nvidia-smi")
            .arg("--query-gpu=driver_version")
            .arg("--format=csv,noheader")
            .output()
        {
            if output.status.success() {
                // On multi-GPU systems, `nvidia-smi` emits one line per GPU; take the first,
                // since a single installed driver serves all GPUs. Parsing the entire stdout
                // as one version would fail on such systems.
                let stdout = String::from_utf8(output.stdout)?;
                let first_line = stdout.lines().next().unwrap_or_default();
                let driver_version = Version::from_str(first_line.trim())?;
                debug!("Detected CUDA driver version from `nvidia-smi`: {driver_version}");
                return Ok(Some(Self::Cuda { driver_version }));
            }
            debug!(
                "Failed to query CUDA driver version with `nvidia-smi` with status `{}`: {}",
                output.status,
                String::from_utf8_lossy(&output.stderr)
            );
        }
        debug!("Failed to detect CUDA driver version");
        Ok(None)
    }
}
/// Parse the CUDA driver version from the content of `/sys/module/nvidia/version`.
///
/// The file contains a single bare version string, e.g.:
/// ```text
/// 550.144.03
/// ```
fn parse_sys_module_nvidia_version(content: &str) -> Result<Version, AcceleratorError> {
    // Trim surrounding whitespace (the file typically ends with a newline) and parse.
    Ok(Version::from_str(content.trim())?)
}
/// Parse the CUDA driver version from the content of `/proc/driver/nvidia/version`.
///
/// The version appears on the first (`NVRM version:`) line, e.g.:
/// ```text
/// NVRM version: NVIDIA UNIX Open Kernel Module for x86_64 550.144.03 Release Build (dvs-builder@U16-I3-D08-1-2) Mon Dec 30 17:26:13 UTC 2024
/// GCC version: gcc version 12.3.0 (Ubuntu 12.3.0-1ubuntu1~22.04)
/// ```
///
/// Returns `Ok(None)` if no version-like token is present on the first line.
///
/// # Errors
///
/// Returns an error if a candidate token is found but fails to parse as a version.
fn parse_proc_driver_nvidia_version(content: &str) -> Result<Option<Version>, AcceleratorError> {
    // Restrict the search to the first line: later lines (e.g., `GCC version: ...`)
    // contain unrelated version numbers.
    let Some(line) = content.lines().next() else {
        return Ok(None);
    };
    // The header wording varies across driver generations ("UNIX x86_64 Kernel Module"
    // vs. "UNIX Open Kernel Module for x86_64"), so a fixed token position is unreliable
    // (and whitespace-sensitive splitting picks up `version:` instead of the version).
    // Scan for the first token that looks like a dotted version number.
    for token in line.split_whitespace() {
        if token.contains('.') && token.chars().next().is_some_and(|c| c.is_ascii_digit()) {
            return Ok(Some(Version::from_str(token)?));
        }
    }
    Ok(None)
}
#[cfg(test)]
mod tests {
    use super::*;
    /// `/proc/driver/nvidia/version` parsing should extract the driver version from both
    /// the newer "Open Kernel Module" header and the older "Kernel Module" header.
    #[test]
    fn proc_driver_nvidia_version() {
        // Newer (open kernel module) header format.
        let content = "NVRM version: NVIDIA UNIX Open Kernel Module for x86_64 550.144.03 Release Build (dvs-builder@U16-I3-D08-1-2) Mon Dec 30 17:26:13 UTC 2024\nGCC version: gcc version 12.3.0 (Ubuntu 12.3.0-1ubuntu1~22.04)";
        let result = parse_proc_driver_nvidia_version(content).unwrap();
        assert_eq!(result, Some(Version::from_str("550.144.03").unwrap()));
        // Older (proprietary kernel module) header format.
        let content = "NVRM version: NVIDIA UNIX x86_64 Kernel Module 375.74 Wed Jun 14 01:39:39 PDT 2017\nGCC version: gcc version 5.4.0 20160609 (Ubuntu 5.4.0-6ubuntu1~16.04.4)";
        let result = parse_proc_driver_nvidia_version(content).unwrap();
        assert_eq!(result, Some(Version::from_str("375.74").unwrap()));
    }
}

View File

@ -0,0 +1,417 @@
//! `uv-torch` is a library for determining the appropriate PyTorch index based on the operating
//! system and CUDA driver version.
//!
//! This library is derived from `light-the-torch` by Philip Meier, which is available under the
//! following BSD-3 Clause license:
//!
//! ```text
//! BSD 3-Clause License
//!
//! Copyright (c) 2020, Philip Meier
//! All rights reserved.
//!
//! Redistribution and use in source and binary forms, with or without
//! modification, are permitted provided that the following conditions are met:
//!
//! 1. Redistributions of source code must retain the above copyright notice, this
//! list of conditions and the following disclaimer.
//!
//! 2. Redistributions in binary form must reproduce the above copyright notice,
//! this list of conditions and the following disclaimer in the documentation
//! and/or other materials provided with the distribution.
//!
//! 3. Neither the name of the copyright holder nor the names of its
//! contributors may be used to endorse or promote products derived from
//! this software without specific prior written permission.
//!
//! THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
//! AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
//! IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
//! DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
//! FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
//! DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
//! SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
//! CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
//! OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
//! OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//! ```
//!
use std::str::FromStr;
use std::sync::LazyLock;
use either::Either;
use uv_distribution_types::IndexUrl;
use uv_normalize::PackageName;
use uv_pep440::Version;
use uv_platform_tags::Os;
use crate::{Accelerator, AcceleratorError};
/// The PyTorch backend, as selected by the user (e.g., via `--torch-backend` or
/// `UV_TORCH_BACKEND`).
///
/// Serialized in kebab-case (e.g., `auto`, `cpu`, `cu126`).
#[derive(Debug, Copy, Clone, Eq, PartialEq, serde::Deserialize, serde::Serialize)]
#[cfg_attr(feature = "clap", derive(clap::ValueEnum))]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "kebab-case")]
pub enum TorchMode {
    /// Select the appropriate PyTorch index based on the operating system and CUDA driver version.
    Auto,
    /// Use the CPU-only PyTorch index.
    Cpu,
    /// Use the PyTorch index for CUDA 12.6.
    Cu126,
    /// Use the PyTorch index for CUDA 12.5.
    Cu125,
    /// Use the PyTorch index for CUDA 12.4.
    Cu124,
    /// Use the PyTorch index for CUDA 12.3.
    Cu123,
    /// Use the PyTorch index for CUDA 12.2.
    Cu122,
    /// Use the PyTorch index for CUDA 12.1.
    Cu121,
    /// Use the PyTorch index for CUDA 12.0.
    Cu120,
    /// Use the PyTorch index for CUDA 11.8.
    Cu118,
    /// Use the PyTorch index for CUDA 11.7.
    Cu117,
    /// Use the PyTorch index for CUDA 11.6.
    Cu116,
    /// Use the PyTorch index for CUDA 11.5.
    Cu115,
    /// Use the PyTorch index for CUDA 11.4.
    Cu114,
    /// Use the PyTorch index for CUDA 11.3.
    Cu113,
    /// Use the PyTorch index for CUDA 11.2.
    Cu112,
    /// Use the PyTorch index for CUDA 11.1.
    Cu111,
    /// Use the PyTorch index for CUDA 11.0.
    Cu110,
    /// Use the PyTorch index for CUDA 10.2.
    Cu102,
    /// Use the PyTorch index for CUDA 10.1.
    Cu101,
    /// Use the PyTorch index for CUDA 10.0.
    Cu100,
    /// Use the PyTorch index for CUDA 9.2.
    Cu92,
    /// Use the PyTorch index for CUDA 9.1.
    Cu91,
    /// Use the PyTorch index for CUDA 9.0.
    Cu90,
    /// Use the PyTorch index for CUDA 8.0.
    Cu80,
}
/// The strategy to use when determining the appropriate PyTorch index.
///
/// Resolved from a user-facing [`TorchMode`]: `auto` becomes [`TorchStrategy::Auto`] when an
/// accelerator is detected; every other mode maps to a fixed [`TorchStrategy::Backend`].
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum TorchStrategy {
    /// Select the appropriate PyTorch index based on the operating system and CUDA driver version.
    Auto { os: Os, driver_version: Version },
    /// Use the specified PyTorch index.
    Backend(TorchBackend),
}
impl TorchStrategy {
/// Determine the [`TorchStrategy`] from the given [`TorchMode`], [`Os`], and [`Accelerator`].
pub fn from_mode(mode: TorchMode, os: &Os) -> Result<Self, AcceleratorError> {
match mode {
TorchMode::Auto => {
if let Some(Accelerator::Cuda { driver_version }) = Accelerator::detect()? {
Ok(Self::Auto {
os: os.clone(),
driver_version: driver_version.clone(),
})
} else {
Ok(Self::Backend(TorchBackend::Cpu))
}
}
TorchMode::Cpu => Ok(Self::Backend(TorchBackend::Cpu)),
TorchMode::Cu126 => Ok(Self::Backend(TorchBackend::Cu126)),
TorchMode::Cu125 => Ok(Self::Backend(TorchBackend::Cu125)),
TorchMode::Cu124 => Ok(Self::Backend(TorchBackend::Cu124)),
TorchMode::Cu123 => Ok(Self::Backend(TorchBackend::Cu123)),
TorchMode::Cu122 => Ok(Self::Backend(TorchBackend::Cu122)),
TorchMode::Cu121 => Ok(Self::Backend(TorchBackend::Cu121)),
TorchMode::Cu120 => Ok(Self::Backend(TorchBackend::Cu120)),
TorchMode::Cu118 => Ok(Self::Backend(TorchBackend::Cu118)),
TorchMode::Cu117 => Ok(Self::Backend(TorchBackend::Cu117)),
TorchMode::Cu116 => Ok(Self::Backend(TorchBackend::Cu116)),
TorchMode::Cu115 => Ok(Self::Backend(TorchBackend::Cu115)),
TorchMode::Cu114 => Ok(Self::Backend(TorchBackend::Cu114)),
TorchMode::Cu113 => Ok(Self::Backend(TorchBackend::Cu113)),
TorchMode::Cu112 => Ok(Self::Backend(TorchBackend::Cu112)),
TorchMode::Cu111 => Ok(Self::Backend(TorchBackend::Cu111)),
TorchMode::Cu110 => Ok(Self::Backend(TorchBackend::Cu110)),
TorchMode::Cu102 => Ok(Self::Backend(TorchBackend::Cu102)),
TorchMode::Cu101 => Ok(Self::Backend(TorchBackend::Cu101)),
TorchMode::Cu100 => Ok(Self::Backend(TorchBackend::Cu100)),
TorchMode::Cu92 => Ok(Self::Backend(TorchBackend::Cu92)),
TorchMode::Cu91 => Ok(Self::Backend(TorchBackend::Cu91)),
TorchMode::Cu90 => Ok(Self::Backend(TorchBackend::Cu90)),
TorchMode::Cu80 => Ok(Self::Backend(TorchBackend::Cu80)),
}
}
/// Return the appropriate index URLs for the given [`TorchStrategy`] and [`PackageName`].
pub fn index_urls(
&self,
package_name: &PackageName,
) -> Option<impl Iterator<Item = &IndexUrl>> {
if !matches!(
package_name.as_str(),
"torch"
| "torch-model-archiver"
| "torch-tb-profiler"
| "torcharrow"
| "torchaudio"
| "torchcsprng"
| "torchdata"
| "torchdistx"
| "torchserve"
| "torchtext"
| "torchvision"
| "pytorch-triton"
) {
return None;
}
match self {
TorchStrategy::Auto { os, driver_version } => {
// If this is a GPU-enabled package, and CUDA drivers are installed, use PyTorch's CUDA
// indexes.
//
// See: https://github.com/pmeier/light-the-torch/blob/33397cbe45d07b51ad8ee76b004571a4c236e37f/light_the_torch/_patch.py#L36-L49
match os {
Os::Manylinux { .. } | Os::Musllinux { .. } => {
Some(Either::Left(Either::Left(
LINUX_DRIVERS
.iter()
.filter_map(move |(backend, version)| {
if driver_version >= version {
Some(backend.index_url())
} else {
None
}
})
.chain(std::iter::once(TorchBackend::Cpu.index_url())),
)))
}
Os::Windows => Some(Either::Left(Either::Right(
WINDOWS_CUDA_VERSIONS
.iter()
.filter_map(move |(backend, version)| {
if driver_version >= version {
Some(backend.index_url())
} else {
None
}
})
.chain(std::iter::once(TorchBackend::Cpu.index_url())),
))),
Os::Macos { .. }
| Os::FreeBsd { .. }
| Os::NetBsd { .. }
| Os::OpenBsd { .. }
| Os::Dragonfly { .. }
| Os::Illumos { .. }
| Os::Haiku { .. }
| Os::Android { .. } => Some(Either::Right(std::iter::once(
TorchBackend::Cpu.index_url(),
))),
}
}
TorchStrategy::Backend(backend) => {
Some(Either::Right(std::iter::once(backend.index_url())))
}
}
}
}
/// The available backends for PyTorch.
///
/// Each variant corresponds to a dedicated PyTorch wheel index: `Cpu` is the
/// CPU-only index, and the `CuXYZ` variants are the CUDA indexes, named after
/// the CUDA version they target (e.g. `Cu126` is CUDA 12.6).
//
// `Copy` is derived for consistency with `TorchMode`: the enum is fieldless,
// so copying is trivially cheap.
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum TorchBackend {
    Cpu,
    Cu126,
    Cu125,
    Cu124,
    Cu123,
    Cu122,
    Cu121,
    Cu120,
    Cu118,
    Cu117,
    Cu116,
    Cu115,
    Cu114,
    Cu113,
    Cu112,
    Cu111,
    Cu110,
    Cu102,
    Cu101,
    Cu100,
    Cu92,
    Cu91,
    Cu90,
    Cu80,
}
impl TorchBackend {
    /// Return the appropriate index URL for the given [`TorchBackend`].
    ///
    /// Each URL is a lazily-initialized static, so the returned reference is
    /// `'static` and repeated calls are cheap.
    fn index_url(&self) -> &'static IndexUrl {
        match self {
            Self::Cpu => &CPU_INDEX_URL,
            Self::Cu126 => &CU126_INDEX_URL,
            Self::Cu125 => &CU125_INDEX_URL,
            Self::Cu124 => &CU124_INDEX_URL,
            Self::Cu123 => &CU123_INDEX_URL,
            Self::Cu122 => &CU122_INDEX_URL,
            Self::Cu121 => &CU121_INDEX_URL,
            Self::Cu120 => &CU120_INDEX_URL,
            Self::Cu118 => &CU118_INDEX_URL,
            Self::Cu117 => &CU117_INDEX_URL,
            Self::Cu116 => &CU116_INDEX_URL,
            Self::Cu115 => &CU115_INDEX_URL,
            Self::Cu114 => &CU114_INDEX_URL,
            Self::Cu113 => &CU113_INDEX_URL,
            Self::Cu112 => &CU112_INDEX_URL,
            Self::Cu111 => &CU111_INDEX_URL,
            Self::Cu110 => &CU110_INDEX_URL,
            Self::Cu102 => &CU102_INDEX_URL,
            Self::Cu101 => &CU101_INDEX_URL,
            Self::Cu100 => &CU100_INDEX_URL,
            Self::Cu92 => &CU92_INDEX_URL,
            Self::Cu91 => &CU91_INDEX_URL,
            Self::Cu90 => &CU90_INDEX_URL,
            Self::Cu80 => &CU80_INDEX_URL,
        }
    }
}
/// Linux CUDA driver versions and the corresponding CUDA versions.
///
/// Each entry maps a CUDA backend to the minimum Linux driver version it
/// requires. Entries are ordered from newest to oldest CUDA version;
/// `TorchStrategy::index_urls` iterates this table in order, so earlier
/// (newer) entries take precedence when multiple backends are compatible.
///
/// See: <https://github.com/pmeier/light-the-torch/blob/33397cbe45d07b51ad8ee76b004571a4c236e37f/light_the_torch/_cb.py#L150-L213>
static LINUX_DRIVERS: LazyLock<[(TorchBackend, Version); 23]> = LazyLock::new(|| {
    [
        // Table 2 from
        // https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
        (TorchBackend::Cu126, Version::new([525, 60, 13])),
        (TorchBackend::Cu125, Version::new([525, 60, 13])),
        (TorchBackend::Cu124, Version::new([525, 60, 13])),
        (TorchBackend::Cu123, Version::new([525, 60, 13])),
        (TorchBackend::Cu122, Version::new([525, 60, 13])),
        (TorchBackend::Cu121, Version::new([525, 60, 13])),
        (TorchBackend::Cu120, Version::new([525, 60, 13])),
        // Table 2 from
        // https://docs.nvidia.com/cuda/archive/11.8.0/cuda-toolkit-release-notes/index.html
        (TorchBackend::Cu118, Version::new([450, 80, 2])),
        (TorchBackend::Cu117, Version::new([450, 80, 2])),
        (TorchBackend::Cu116, Version::new([450, 80, 2])),
        (TorchBackend::Cu115, Version::new([450, 80, 2])),
        (TorchBackend::Cu114, Version::new([450, 80, 2])),
        (TorchBackend::Cu113, Version::new([450, 80, 2])),
        (TorchBackend::Cu112, Version::new([450, 80, 2])),
        (TorchBackend::Cu111, Version::new([450, 80, 2])),
        (TorchBackend::Cu110, Version::new([450, 36, 6])),
        // Table 1 from
        // https://docs.nvidia.com/cuda/archive/10.2/cuda-toolkit-release-notes/index.html
        (TorchBackend::Cu102, Version::new([440, 33])),
        (TorchBackend::Cu101, Version::new([418, 39])),
        (TorchBackend::Cu100, Version::new([410, 48])),
        (TorchBackend::Cu92, Version::new([396, 26])),
        (TorchBackend::Cu91, Version::new([390, 46])),
        (TorchBackend::Cu90, Version::new([384, 81])),
        (TorchBackend::Cu80, Version::new([375, 26])),
    ]
});
/// Windows CUDA driver versions and the corresponding CUDA versions.
///
/// Each entry maps a CUDA backend to the minimum Windows driver version it
/// requires. Entries are ordered from newest to oldest CUDA version;
/// `TorchStrategy::index_urls` iterates this table in order, so earlier
/// (newer) entries take precedence when multiple backends are compatible.
///
/// See: <https://github.com/pmeier/light-the-torch/blob/33397cbe45d07b51ad8ee76b004571a4c236e37f/light_the_torch/_cb.py#L150-L213>
static WINDOWS_CUDA_VERSIONS: LazyLock<[(TorchBackend, Version); 23]> = LazyLock::new(|| {
    [
        // Table 2 from
        // https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
        (TorchBackend::Cu126, Version::new([528, 33])),
        (TorchBackend::Cu125, Version::new([528, 33])),
        (TorchBackend::Cu124, Version::new([528, 33])),
        (TorchBackend::Cu123, Version::new([528, 33])),
        (TorchBackend::Cu122, Version::new([528, 33])),
        (TorchBackend::Cu121, Version::new([528, 33])),
        (TorchBackend::Cu120, Version::new([528, 33])),
        // Table 2 from
        // https://docs.nvidia.com/cuda/archive/11.8.0/cuda-toolkit-release-notes/index.html
        (TorchBackend::Cu118, Version::new([452, 39])),
        (TorchBackend::Cu117, Version::new([452, 39])),
        (TorchBackend::Cu116, Version::new([452, 39])),
        (TorchBackend::Cu115, Version::new([452, 39])),
        (TorchBackend::Cu114, Version::new([452, 39])),
        (TorchBackend::Cu113, Version::new([452, 39])),
        (TorchBackend::Cu112, Version::new([452, 39])),
        (TorchBackend::Cu111, Version::new([452, 39])),
        (TorchBackend::Cu110, Version::new([451, 22])),
        // Table 1 from
        // https://docs.nvidia.com/cuda/archive/10.2/cuda-toolkit-release-notes/index.html
        (TorchBackend::Cu102, Version::new([441, 22])),
        (TorchBackend::Cu101, Version::new([418, 96])),
        (TorchBackend::Cu100, Version::new([411, 31])),
        (TorchBackend::Cu92, Version::new([398, 26])),
        (TorchBackend::Cu91, Version::new([391, 29])),
        (TorchBackend::Cu90, Version::new([385, 54])),
        (TorchBackend::Cu80, Version::new([376, 51])),
    ]
});
static CPU_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cpu").unwrap());
static CU126_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu126").unwrap());
static CU125_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu125").unwrap());
static CU124_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu124").unwrap());
static CU123_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu123").unwrap());
static CU122_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu122").unwrap());
static CU121_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu121").unwrap());
static CU120_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu120").unwrap());
static CU118_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu118").unwrap());
static CU117_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu117").unwrap());
static CU116_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu116").unwrap());
static CU115_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu115").unwrap());
static CU114_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu114").unwrap());
static CU113_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu113").unwrap());
static CU112_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu112").unwrap());
static CU111_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu111").unwrap());
static CU110_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu110").unwrap());
static CU102_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu102").unwrap());
static CU101_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu101").unwrap());
static CU100_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu100").unwrap());
static CU92_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu92").unwrap());
static CU91_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu91").unwrap());
static CU90_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu90").unwrap());
static CU80_INDEX_URL: LazyLock<IndexUrl> =
LazyLock::new(|| IndexUrl::from_str("https://download.pytorch.org/whl/cu80").unwrap());

View File

@ -0,0 +1,5 @@
mod accelerator;
mod backend;
pub use accelerator::*;
pub use backend::*;

View File

@ -51,6 +51,7 @@ uv-settings = { workspace = true, features = ["schemars"] }
uv-shell = { workspace = true } uv-shell = { workspace = true }
uv-static = { workspace = true } uv-static = { workspace = true }
uv-tool = { workspace = true } uv-tool = { workspace = true }
uv-torch = { workspace = true }
uv-trampoline-builder = { workspace = true } uv-trampoline-builder = { workspace = true }
uv-types = { workspace = true } uv-types = { workspace = true }
uv-version = { workspace = true } uv-version = { workspace = true }

View File

@ -38,6 +38,7 @@ use uv_resolver::{
InMemoryIndex, OptionsBuilder, PrereleaseMode, PythonRequirement, RequiresPython, InMemoryIndex, OptionsBuilder, PrereleaseMode, PythonRequirement, RequiresPython,
ResolutionMode, ResolverEnvironment, ResolutionMode, ResolverEnvironment,
}; };
use uv_torch::{TorchMode, TorchStrategy};
use uv_types::{BuildIsolation, EmptyInstalledPackages, HashStrategy}; use uv_types::{BuildIsolation, EmptyInstalledPackages, HashStrategy};
use uv_warnings::warn_user; use uv_warnings::warn_user;
use uv_workspace::WorkspaceCache; use uv_workspace::WorkspaceCache;
@ -81,6 +82,7 @@ pub(crate) async fn pip_compile(
include_index_annotation: bool, include_index_annotation: bool,
index_locations: IndexLocations, index_locations: IndexLocations,
index_strategy: IndexStrategy, index_strategy: IndexStrategy,
torch_backend: Option<TorchMode>,
dependency_metadata: DependencyMetadata, dependency_metadata: DependencyMetadata,
keyring_provider: KeyringProviderType, keyring_provider: KeyringProviderType,
network_settings: &NetworkSettings, network_settings: &NetworkSettings,
@ -337,11 +339,28 @@ pub(crate) async fn pip_compile(
} }
} }
// Determine the PyTorch backend.
let torch_backend = torch_backend.map(|mode| {
if preview.is_disabled() {
warn_user!("The `--torch-backend` setting is experimental and may change without warning. Pass `--preview` to disable this warning.");
}
TorchStrategy::from_mode(
mode,
python_platform
.map(TargetTriple::platform)
.as_ref()
.unwrap_or(interpreter.platform())
.os(),
)
}).transpose()?;
// Initialize the registry client. // Initialize the registry client.
let client = RegistryClientBuilder::try_from(client_builder)? let client = RegistryClientBuilder::try_from(client_builder)?
.cache(cache.clone()) .cache(cache.clone())
.index_urls(index_locations.index_urls()) .index_urls(index_locations.index_urls())
.index_strategy(index_strategy) .index_strategy(index_strategy)
.torch_backend(torch_backend)
.markers(interpreter.markers()) .markers(interpreter.markers())
.platform(interpreter.platform()) .platform(interpreter.platform())
.build(); .build();

View File

@ -34,7 +34,9 @@ use uv_resolver::{
DependencyMode, ExcludeNewer, FlatIndex, OptionsBuilder, PrereleaseMode, PythonRequirement, DependencyMode, ExcludeNewer, FlatIndex, OptionsBuilder, PrereleaseMode, PythonRequirement,
ResolutionMode, ResolverEnvironment, ResolutionMode, ResolverEnvironment,
}; };
use uv_torch::{TorchMode, TorchStrategy};
use uv_types::{BuildIsolation, HashStrategy}; use uv_types::{BuildIsolation, HashStrategy};
use uv_warnings::warn_user;
use uv_workspace::WorkspaceCache; use uv_workspace::WorkspaceCache;
use crate::commands::pip::loggers::{DefaultInstallLogger, DefaultResolveLogger, InstallLogger}; use crate::commands::pip::loggers::{DefaultInstallLogger, DefaultResolveLogger, InstallLogger};
@ -63,6 +65,7 @@ pub(crate) async fn pip_install(
upgrade: Upgrade, upgrade: Upgrade,
index_locations: IndexLocations, index_locations: IndexLocations,
index_strategy: IndexStrategy, index_strategy: IndexStrategy,
torch_backend: Option<TorchMode>,
dependency_metadata: DependencyMetadata, dependency_metadata: DependencyMetadata,
keyring_provider: KeyringProviderType, keyring_provider: KeyringProviderType,
network_settings: &NetworkSettings, network_settings: &NetworkSettings,
@ -332,11 +335,28 @@ pub(crate) async fn pip_install(
} }
} }
// Determine the PyTorch backend.
let torch_backend = torch_backend.map(|mode| {
if preview.is_disabled() {
warn_user!("The `--torch-backend` setting is experimental and may change without warning. Pass `--preview` to disable this warning.");
}
TorchStrategy::from_mode(
mode,
python_platform
.map(TargetTriple::platform)
.as_ref()
.unwrap_or(interpreter.platform())
.os(),
)
}).transpose()?;
// Initialize the registry client. // Initialize the registry client.
let client = RegistryClientBuilder::try_from(client_builder)? let client = RegistryClientBuilder::try_from(client_builder)?
.cache(cache.clone()) .cache(cache.clone())
.index_urls(index_locations.index_urls()) .index_urls(index_locations.index_urls())
.index_strategy(index_strategy) .index_strategy(index_strategy)
.torch_backend(torch_backend)
.markers(interpreter.markers()) .markers(interpreter.markers())
.platform(interpreter.platform()) .platform(interpreter.platform())
.build(); .build();

View File

@ -29,7 +29,9 @@ use uv_resolver::{
DependencyMode, ExcludeNewer, FlatIndex, OptionsBuilder, PrereleaseMode, PythonRequirement, DependencyMode, ExcludeNewer, FlatIndex, OptionsBuilder, PrereleaseMode, PythonRequirement,
ResolutionMode, ResolverEnvironment, ResolutionMode, ResolverEnvironment,
}; };
use uv_torch::{TorchMode, TorchStrategy};
use uv_types::{BuildIsolation, HashStrategy}; use uv_types::{BuildIsolation, HashStrategy};
use uv_warnings::warn_user;
use uv_workspace::WorkspaceCache; use uv_workspace::WorkspaceCache;
use crate::commands::pip::loggers::{DefaultInstallLogger, DefaultResolveLogger}; use crate::commands::pip::loggers::{DefaultInstallLogger, DefaultResolveLogger};
@ -52,6 +54,7 @@ pub(crate) async fn pip_sync(
hash_checking: Option<HashCheckingMode>, hash_checking: Option<HashCheckingMode>,
index_locations: IndexLocations, index_locations: IndexLocations,
index_strategy: IndexStrategy, index_strategy: IndexStrategy,
torch_backend: Option<TorchMode>,
dependency_metadata: DependencyMetadata, dependency_metadata: DependencyMetadata,
keyring_provider: KeyringProviderType, keyring_provider: KeyringProviderType,
network_settings: &NetworkSettings, network_settings: &NetworkSettings,
@ -260,11 +263,28 @@ pub(crate) async fn pip_sync(
} }
} }
// Determine the PyTorch backend.
let torch_backend = torch_backend.map(|mode| {
if preview.is_disabled() {
warn_user!("The `--torch-backend` setting is experimental and may change without warning. Pass `--preview` to disable this warning.");
}
TorchStrategy::from_mode(
mode,
python_platform
.map(TargetTriple::platform)
.as_ref()
.unwrap_or(interpreter.platform())
.os(),
)
}).transpose()?;
// Initialize the registry client. // Initialize the registry client.
let client = RegistryClientBuilder::try_from(client_builder)? let client = RegistryClientBuilder::try_from(client_builder)?
.cache(cache.clone()) .cache(cache.clone())
.index_urls(index_locations.index_urls()) .index_urls(index_locations.index_urls())
.index_strategy(index_strategy) .index_strategy(index_strategy)
.torch_backend(torch_backend)
.markers(interpreter.markers()) .markers(interpreter.markers())
.platform(interpreter.platform()) .platform(interpreter.platform())
.build(); .build();

View File

@ -440,6 +440,7 @@ async fn run(mut cli: Cli) -> Result<ExitStatus> {
args.settings.emit_index_annotation, args.settings.emit_index_annotation,
args.settings.index_locations, args.settings.index_locations,
args.settings.index_strategy, args.settings.index_strategy,
args.settings.torch_backend,
args.settings.dependency_metadata, args.settings.dependency_metadata,
args.settings.keyring_provider, args.settings.keyring_provider,
&globals.network_settings, &globals.network_settings,
@ -507,6 +508,7 @@ async fn run(mut cli: Cli) -> Result<ExitStatus> {
args.settings.hash_checking, args.settings.hash_checking,
args.settings.index_locations, args.settings.index_locations,
args.settings.index_strategy, args.settings.index_strategy,
args.settings.torch_backend,
args.settings.dependency_metadata, args.settings.dependency_metadata,
args.settings.keyring_provider, args.settings.keyring_provider,
&globals.network_settings, &globals.network_settings,
@ -653,6 +655,7 @@ async fn run(mut cli: Cli) -> Result<ExitStatus> {
args.settings.upgrade, args.settings.upgrade,
args.settings.index_locations, args.settings.index_locations,
args.settings.index_strategy, args.settings.index_strategy,
args.settings.torch_backend,
args.settings.dependency_metadata, args.settings.dependency_metadata,
args.settings.keyring_provider, args.settings.keyring_provider,
&globals.network_settings, &globals.network_settings,

View File

@ -42,6 +42,7 @@ use uv_settings::{
ResolverInstallerOptions, ResolverOptions, ResolverInstallerOptions, ResolverOptions,
}; };
use uv_static::EnvVars; use uv_static::EnvVars;
use uv_torch::TorchMode;
use uv_warnings::warn_user_once; use uv_warnings::warn_user_once;
use uv_workspace::pyproject::DependencyType; use uv_workspace::pyproject::DependencyType;
@ -1661,6 +1662,7 @@ impl PipCompileSettings {
no_emit_marker_expression, no_emit_marker_expression,
emit_index_annotation, emit_index_annotation,
no_emit_index_annotation, no_emit_index_annotation,
torch_backend,
compat_args: _, compat_args: _,
} = args; } = args;
@ -1759,6 +1761,7 @@ impl PipCompileSettings {
emit_marker_expression: flag(emit_marker_expression, no_emit_marker_expression), emit_marker_expression: flag(emit_marker_expression, no_emit_marker_expression),
emit_index_annotation: flag(emit_index_annotation, no_emit_index_annotation), emit_index_annotation: flag(emit_index_annotation, no_emit_index_annotation),
annotation_style, annotation_style,
torch_backend,
..PipOptions::from(resolver) ..PipOptions::from(resolver)
}, },
filesystem, filesystem,
@ -1810,6 +1813,7 @@ impl PipSyncSettings {
strict, strict,
no_strict, no_strict,
dry_run, dry_run,
torch_backend,
compat_args: _, compat_args: _,
} = *args; } = *args;
@ -1844,6 +1848,7 @@ impl PipSyncSettings {
python_version, python_version,
python_platform, python_platform,
strict: flag(strict, no_strict), strict: flag(strict, no_strict),
torch_backend,
..PipOptions::from(installer) ..PipOptions::from(installer)
}, },
filesystem, filesystem,
@ -1911,6 +1916,7 @@ impl PipInstallSettings {
strict, strict,
no_strict, no_strict,
dry_run, dry_run,
torch_backend,
compat_args: _, compat_args: _,
} = args; } = args;
@ -2001,6 +2007,7 @@ impl PipInstallSettings {
python_platform, python_platform,
require_hashes: flag(require_hashes, no_require_hashes), require_hashes: flag(require_hashes, no_require_hashes),
verify_hashes: flag(verify_hashes, no_verify_hashes), verify_hashes: flag(verify_hashes, no_verify_hashes),
torch_backend,
..PipOptions::from(installer) ..PipOptions::from(installer)
}, },
filesystem, filesystem,
@ -2621,6 +2628,7 @@ pub(crate) struct PipSettings {
pub(crate) prefix: Option<Prefix>, pub(crate) prefix: Option<Prefix>,
pub(crate) index_strategy: IndexStrategy, pub(crate) index_strategy: IndexStrategy,
pub(crate) keyring_provider: KeyringProviderType, pub(crate) keyring_provider: KeyringProviderType,
pub(crate) torch_backend: Option<TorchMode>,
pub(crate) no_build_isolation: bool, pub(crate) no_build_isolation: bool,
pub(crate) no_build_isolation_package: Vec<PackageName>, pub(crate) no_build_isolation_package: Vec<PackageName>,
pub(crate) build_options: BuildOptions, pub(crate) build_options: BuildOptions,
@ -2682,6 +2690,7 @@ impl PipSettings {
no_index, no_index,
find_links, find_links,
index_strategy, index_strategy,
torch_backend,
keyring_provider, keyring_provider,
no_build, no_build,
no_binary, no_binary,
@ -2871,6 +2880,7 @@ impl PipSettings {
.config_settings .config_settings
.combine(config_settings) .combine(config_settings)
.unwrap_or_default(), .unwrap_or_default(),
torch_backend: args.torch_backend.combine(torch_backend),
python_version: args.python_version.combine(python_version), python_version: args.python_version.combine(python_version),
python_platform: args.python_platform.combine(python_platform), python_platform: args.python_platform.combine(python_platform),
universal: args.universal.combine(universal).unwrap_or_default(), universal: args.universal.combine(universal).unwrap_or_default(),

View File

@ -15223,9 +15223,7 @@ fn lock_explicit_default_index() -> Result<()> {
DEBUG Searching for a compatible version of project @ file://[TEMP_DIR]/ (<0.1.0 | >0.1.0) DEBUG Searching for a compatible version of project @ file://[TEMP_DIR]/ (<0.1.0 | >0.1.0)
DEBUG No compatible version found for: project DEBUG No compatible version found for: project
× No solution found when resolving dependencies: × No solution found when resolving dependencies:
Because anyio was not found in the provided package locations and your project depends on anyio, we can conclude that your project's requirements are unsatisfiable. Because anyio was not found in the package registry and your project depends on anyio, we can conclude that your project's requirements are unsatisfiable.
hint: Packages were unavailable because index lookups were disabled and no additional package locations were provided (try: `--find-links <uri>`)
"#); "#);
let lock = fs_err::read_to_string(context.temp_dir.join("uv.lock")).unwrap(); let lock = fs_err::read_to_string(context.temp_dir.join("uv.lock")).unwrap();
@ -24883,9 +24881,9 @@ fn lock_pytorch_cpu() -> Result<()> {
Ok(()) Ok(())
} }
/// Ensure that the `PyTorch` index-specific forks don't use the PyPI preference. If we solve a PyPI /// Ensure that the PyTorch index-specific forks don't use the PyPI preference. If we solve a PyPI
/// fork first, and reuse the preferences, we'll end up selecting `2.2.2` (rather than `2.2.2+cpu`) /// fork first, and reuse the preferences, we'll end up selecting `2.2.2` (rather than `2.2.2+cpu`)
/// in the `PyTorch` forks. /// in the PyTorch forks.
/// ///
/// Regression test for: <https://github.com/astral-sh/uv/issues/10772> /// Regression test for: <https://github.com/astral-sh/uv/issues/10772>
#[test] #[test]

View File

@ -5354,7 +5354,7 @@ fn find_links_directory() -> Result<()> {
Ok(()) Ok(())
} }
/// Compile using `--find-links` with a URL by resolving `tqdm` from the `PyTorch` wheels index. /// Compile using `--find-links` with a URL by resolving `tqdm` from the PyTorch wheels index.
#[test] #[test]
fn find_links_url() -> Result<()> { fn find_links_url() -> Result<()> {
let context = TestContext::new("3.12").with_exclude_newer("2025-01-30T00:00:00Z"); let context = TestContext::new("3.12").with_exclude_newer("2025-01-30T00:00:00Z");
@ -5409,7 +5409,7 @@ fn find_links_env_var() -> Result<()> {
Ok(()) Ok(())
} }
/// Compile using `--find-links` with a URL by resolving `tqdm` from the `PyTorch` wheels index, /// Compile using `--find-links` with a URL by resolving `tqdm` from the PyTorch wheels index,
/// with the URL itself provided in a `requirements.txt` file. /// with the URL itself provided in a `requirements.txt` file.
#[test] #[test]
fn find_links_requirements_txt() -> Result<()> { fn find_links_requirements_txt() -> Result<()> {

View File

@ -5835,7 +5835,7 @@ fn already_installed_local_path_dependent() {
.arg(root_path.join("second_local")) .arg(root_path.join("second_local"))
.arg(root_path.join("first_local")) .arg(root_path.join("first_local"))
.arg("--reinstall-package") .arg("--reinstall-package")
.arg("first-local"), @r###" .arg("first-local"), @r"
success: true success: true
exit_code: 0 exit_code: 0
----- stdout ----- ----- stdout -----
@ -5847,7 +5847,7 @@ fn already_installed_local_path_dependent() {
Installed 2 packages in [TIME] Installed 2 packages in [TIME]
~ first-local==0.1.0 (from file://[WORKSPACE]/scripts/packages/dependent_locals/first_local) ~ first-local==0.1.0 (from file://[WORKSPACE]/scripts/packages/dependent_locals/first_local)
~ second-local==0.1.0 (from file://[WORKSPACE]/scripts/packages/dependent_locals/second_local) ~ second-local==0.1.0 (from file://[WORKSPACE]/scripts/packages/dependent_locals/second_local)
"### "
); );
// Request upgrade of the first package // Request upgrade of the first package
@ -8506,16 +8506,20 @@ fn avoid_cached_wheel() {
.venv() .venv()
.arg("--python") .arg("--python")
.arg("3.10") .arg("3.10")
.arg(".venv-3.10")
.assert() .assert()
.success(); .success();
uv_snapshot!(context.filters(), context.pip_install() uv_snapshot!(context.filters(), context.pip_install()
.arg("--python")
.arg(".venv-3.10")
.arg("multiprocess"), @r" .arg("multiprocess"), @r"
success: true success: true
exit_code: 0 exit_code: 0
----- stdout ----- ----- stdout -----
----- stderr ----- ----- stderr -----
Using Python 3.10.[X] environment at: .venv-3.10
Resolved 2 packages in [TIME] Resolved 2 packages in [TIME]
Prepared 2 packages in [TIME] Prepared 2 packages in [TIME]
Installed 2 packages in [TIME] Installed 2 packages in [TIME]
@ -8529,19 +8533,21 @@ fn avoid_cached_wheel() {
.venv() .venv()
.arg("--python") .arg("--python")
.arg("3.11") .arg("3.11")
.arg(".venv-3.11")
.assert() .assert()
.success(); .success();
// `multiprocessing` should be re-downloaded (i.e., we should have a `Prepare` step here). // `multiprocessing` should be re-downloaded (i.e., we should have a `Prepare` step here).
uv_snapshot!(context.filters(), context.pip_install() uv_snapshot!(context.filters(), context.pip_install()
.arg("--python") .arg("--python")
.arg("3.11") .arg(".venv-3.11")
.arg("multiprocess"), @r" .arg("multiprocess"), @r"
success: true success: true
exit_code: 0 exit_code: 0
----- stdout ----- ----- stdout -----
----- stderr ----- ----- stderr -----
Using Python 3.11.[X] environment at: .venv-3.11
Resolved 2 packages in [TIME] Resolved 2 packages in [TIME]
Prepared 1 package in [TIME] Prepared 1 package in [TIME]
Installed 2 packages in [TIME] Installed 2 packages in [TIME]

View File

@ -154,6 +154,7 @@ fn resolve_uv_toml() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -312,6 +313,7 @@ fn resolve_uv_toml() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -471,6 +473,7 @@ fn resolve_uv_toml() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -662,6 +665,7 @@ fn resolve_pyproject_toml() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -791,6 +795,7 @@ fn resolve_pyproject_toml() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -960,6 +965,7 @@ fn resolve_pyproject_toml() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -1173,6 +1179,7 @@ fn resolve_index_url() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -1395,6 +1402,7 @@ fn resolve_index_url() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -1578,6 +1586,7 @@ fn resolve_find_links() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -1729,6 +1738,7 @@ fn resolve_top_level() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -1934,6 +1944,7 @@ fn resolve_top_level() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -2122,6 +2133,7 @@ fn resolve_top_level() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -2272,6 +2284,7 @@ fn resolve_user_configuration() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -2406,6 +2419,7 @@ fn resolve_user_configuration() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -2540,6 +2554,7 @@ fn resolve_user_configuration() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -2676,6 +2691,7 @@ fn resolve_user_configuration() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -2994,6 +3010,7 @@ fn resolve_poetry_toml() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -3187,6 +3204,7 @@ fn resolve_both() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -3470,6 +3488,7 @@ fn resolve_config_file() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -3698,6 +3717,7 @@ fn resolve_skip_empty() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -3835,6 +3855,7 @@ fn resolve_skip_empty() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -3991,6 +4012,7 @@ fn allow_insecure_host() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -4202,6 +4224,7 @@ fn index_priority() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -4392,6 +4415,7 @@ fn index_priority() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -4588,6 +4612,7 @@ fn index_priority() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -4779,6 +4804,7 @@ fn index_priority() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -4977,6 +5003,7 @@ fn index_priority() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -5168,6 +5195,7 @@ fn index_priority() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -5310,6 +5338,7 @@ fn verify_hashes() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -5438,6 +5467,7 @@ fn verify_hashes() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -5564,6 +5594,7 @@ fn verify_hashes() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -5692,6 +5723,7 @@ fn verify_hashes() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -5818,6 +5850,7 @@ fn verify_hashes() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {
@ -5945,6 +5978,7 @@ fn verify_hashes() -> anyhow::Result<()> {
prefix: None, prefix: None,
index_strategy: FirstIndex, index_strategy: FirstIndex,
keyring_provider: Disabled, keyring_provider: Disabled,
torch_backend: None,
no_build_isolation: false, no_build_isolation: false,
no_build_isolation_package: [], no_build_isolation_package: [],
build_options: BuildOptions { build_options: BuildOptions {

View File

@ -384,6 +384,10 @@ Specifies the "bin" directory for installing tool executables.
Specifies the directory where uv stores managed tools. Specifies the directory where uv stores managed tools.
### `UV_TORCH_BACKEND`
Equivalent to the `--torch-backend` command-line argument (e.g., `cpu`, `cu126`, or `auto`).
### `UV_UNMANAGED_INSTALL` ### `UV_UNMANAGED_INSTALL`
Used in ephemeral environments like CI to install uv to a specific path while preventing Used in ephemeral environments like CI to install uv to a specific path while preventing

View File

@ -5935,6 +5935,70 @@ uv pip compile [OPTIONS] <SRC_FILE|--group <GROUP>>
<p>By default, uv uses the virtual environment in the current working directory or any parent directory, falling back to searching for a Python executable in <code>PATH</code>. The <code>--system</code> option instructs uv to avoid using a virtual environment Python and restrict its search to the system path.</p> <p>By default, uv uses the virtual environment in the current working directory or any parent directory, falling back to searching for a Python executable in <code>PATH</code>. The <code>--system</code> option instructs uv to avoid using a virtual environment Python and restrict its search to the system path.</p>
<p>May also be set with the <code>UV_SYSTEM_PYTHON</code> environment variable.</p> <p>May also be set with the <code>UV_SYSTEM_PYTHON</code> environment variable.</p>
</dd><dt id="uv-pip-compile--torch-backend"><a href="#uv-pip-compile--torch-backend"><code>--torch-backend</code></a> <i>torch-backend</i></dt><dd><p>The backend to use when fetching packages in the PyTorch ecosystem (e.g., <code>cpu</code>, <code>cu126</code>, or <code>auto</code>).</p>
<p>When set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem, and will instead use the defined backend.</p>
<p>For example, when set to <code>cpu</code>, uv will use the CPU-only PyTorch index; when set to <code>cu126</code>, uv will use the PyTorch index for CUDA 12.6.</p>
<p>The <code>auto</code> mode will attempt to detect the appropriate PyTorch index based on the currently installed CUDA drivers.</p>
<p>This option is in preview and may change in any future release.</p>
<p>May also be set with the <code>UV_TORCH_BACKEND</code> environment variable.</p>
<p>Possible values:</p>
<ul>
<li><code>auto</code>: Select the appropriate PyTorch index based on the operating system and CUDA driver version</li>
<li><code>cpu</code>: Use the CPU-only PyTorch index</li>
<li><code>cu126</code>: Use the PyTorch index for CUDA 12.6</li>
<li><code>cu125</code>: Use the PyTorch index for CUDA 12.5</li>
<li><code>cu124</code>: Use the PyTorch index for CUDA 12.4</li>
<li><code>cu123</code>: Use the PyTorch index for CUDA 12.3</li>
<li><code>cu122</code>: Use the PyTorch index for CUDA 12.2</li>
<li><code>cu121</code>: Use the PyTorch index for CUDA 12.1</li>
<li><code>cu120</code>: Use the PyTorch index for CUDA 12.0</li>
<li><code>cu118</code>: Use the PyTorch index for CUDA 11.8</li>
<li><code>cu117</code>: Use the PyTorch index for CUDA 11.7</li>
<li><code>cu116</code>: Use the PyTorch index for CUDA 11.6</li>
<li><code>cu115</code>: Use the PyTorch index for CUDA 11.5</li>
<li><code>cu114</code>: Use the PyTorch index for CUDA 11.4</li>
<li><code>cu113</code>: Use the PyTorch index for CUDA 11.3</li>
<li><code>cu112</code>: Use the PyTorch index for CUDA 11.2</li>
<li><code>cu111</code>: Use the PyTorch index for CUDA 11.1</li>
<li><code>cu110</code>: Use the PyTorch index for CUDA 11.0</li>
<li><code>cu102</code>: Use the PyTorch index for CUDA 10.2</li>
<li><code>cu101</code>: Use the PyTorch index for CUDA 10.1</li>
<li><code>cu100</code>: Use the PyTorch index for CUDA 10.0</li>
<li><code>cu92</code>: Use the PyTorch index for CUDA 9.2</li>
<li><code>cu91</code>: Use the PyTorch index for CUDA 9.1</li>
<li><code>cu90</code>: Use the PyTorch index for CUDA 9.0</li>
<li><code>cu80</code>: Use the PyTorch index for CUDA 8.0</li>
</ul>
</dd><dt id="uv-pip-compile--universal"><a href="#uv-pip-compile--universal"><code>--universal</code></a></dt><dd><p>Perform a universal resolution, attempting to generate a single <code>requirements.txt</code> output file that is compatible with all operating systems, architectures, and Python implementations.</p> </dd><dt id="uv-pip-compile--universal"><a href="#uv-pip-compile--universal"><code>--universal</code></a></dt><dd><p>Perform a universal resolution, attempting to generate a single <code>requirements.txt</code> output file that is compatible with all operating systems, architectures, and Python implementations.</p>
<p>In universal mode, the current Python version (or user-provided <code>--python-version</code>) will be treated as a lower bound. For example, <code>--universal --python-version 3.7</code> would produce a universal resolution for Python 3.7 and later.</p> <p>In universal mode, the current Python version (or user-provided <code>--python-version</code>) will be treated as a lower bound. For example, <code>--universal --python-version 3.7</code> would produce a universal resolution for Python 3.7 and later.</p>
@ -6339,6 +6403,70 @@ uv pip sync [OPTIONS] <SRC_FILE>...
<p>May also be set with the <code>UV_SYSTEM_PYTHON</code> environment variable.</p> <p>May also be set with the <code>UV_SYSTEM_PYTHON</code> environment variable.</p>
</dd><dt id="uv-pip-sync--target"><a href="#uv-pip-sync--target"><code>--target</code></a> <i>target</i></dt><dd><p>Install packages into the specified directory, rather than into the virtual or system Python environment. The packages will be installed at the top-level of the directory</p> </dd><dt id="uv-pip-sync--target"><a href="#uv-pip-sync--target"><code>--target</code></a> <i>target</i></dt><dd><p>Install packages into the specified directory, rather than into the virtual or system Python environment. The packages will be installed at the top-level of the directory</p>
</dd><dt id="uv-pip-sync--torch-backend"><a href="#uv-pip-sync--torch-backend"><code>--torch-backend</code></a> <i>torch-backend</i></dt><dd><p>The backend to use when fetching packages in the PyTorch ecosystem (e.g., <code>cpu</code>, <code>cu126</code>, or <code>auto</code>).</p>
<p>When set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem, and will instead use the defined backend.</p>
<p>For example, when set to <code>cpu</code>, uv will use the CPU-only PyTorch index; when set to <code>cu126</code>, uv will use the PyTorch index for CUDA 12.6.</p>
<p>The <code>auto</code> mode will attempt to detect the appropriate PyTorch index based on the currently installed CUDA drivers.</p>
<p>This option is in preview and may change in any future release.</p>
<p>May also be set with the <code>UV_TORCH_BACKEND</code> environment variable.</p>
<p>Possible values:</p>
<ul>
<li><code>auto</code>: Select the appropriate PyTorch index based on the operating system and CUDA driver version</li>
<li><code>cpu</code>: Use the CPU-only PyTorch index</li>
<li><code>cu126</code>: Use the PyTorch index for CUDA 12.6</li>
<li><code>cu125</code>: Use the PyTorch index for CUDA 12.5</li>
<li><code>cu124</code>: Use the PyTorch index for CUDA 12.4</li>
<li><code>cu123</code>: Use the PyTorch index for CUDA 12.3</li>
<li><code>cu122</code>: Use the PyTorch index for CUDA 12.2</li>
<li><code>cu121</code>: Use the PyTorch index for CUDA 12.1</li>
<li><code>cu120</code>: Use the PyTorch index for CUDA 12.0</li>
<li><code>cu118</code>: Use the PyTorch index for CUDA 11.8</li>
<li><code>cu117</code>: Use the PyTorch index for CUDA 11.7</li>
<li><code>cu116</code>: Use the PyTorch index for CUDA 11.6</li>
<li><code>cu115</code>: Use the PyTorch index for CUDA 11.5</li>
<li><code>cu114</code>: Use the PyTorch index for CUDA 11.4</li>
<li><code>cu113</code>: Use the PyTorch index for CUDA 11.3</li>
<li><code>cu112</code>: Use the PyTorch index for CUDA 11.2</li>
<li><code>cu111</code>: Use the PyTorch index for CUDA 11.1</li>
<li><code>cu110</code>: Use the PyTorch index for CUDA 11.0</li>
<li><code>cu102</code>: Use the PyTorch index for CUDA 10.2</li>
<li><code>cu101</code>: Use the PyTorch index for CUDA 10.1</li>
<li><code>cu100</code>: Use the PyTorch index for CUDA 10.0</li>
<li><code>cu92</code>: Use the PyTorch index for CUDA 9.2</li>
<li><code>cu91</code>: Use the PyTorch index for CUDA 9.1</li>
<li><code>cu90</code>: Use the PyTorch index for CUDA 9.0</li>
<li><code>cu80</code>: Use the PyTorch index for CUDA 8.0</li>
</ul>
</dd><dt id="uv-pip-sync--verbose"><a href="#uv-pip-sync--verbose"><code>--verbose</code></a>, <code>-v</code></dt><dd><p>Use verbose output.</p> </dd><dt id="uv-pip-sync--verbose"><a href="#uv-pip-sync--verbose"><code>--verbose</code></a>, <code>-v</code></dt><dd><p>Use verbose output.</p>
<p>You can configure fine-grained logging using the <code>RUST_LOG</code> environment variable. (&lt;https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives&gt;)</p> <p>You can configure fine-grained logging using the <code>RUST_LOG</code> environment variable. (&lt;https://docs.rs/tracing-subscriber/latest/tracing_subscriber/filter/struct.EnvFilter.html#directives&gt;)</p>
@ -6819,6 +6947,70 @@ uv pip install [OPTIONS] <PACKAGE|--requirements <REQUIREMENTS>|--editable <EDIT
<p>May also be set with the <code>UV_SYSTEM_PYTHON</code> environment variable.</p> <p>May also be set with the <code>UV_SYSTEM_PYTHON</code> environment variable.</p>
</dd><dt id="uv-pip-install--target"><a href="#uv-pip-install--target"><code>--target</code></a> <i>target</i></dt><dd><p>Install packages into the specified directory, rather than into the virtual or system Python environment. The packages will be installed at the top-level of the directory</p> </dd><dt id="uv-pip-install--target"><a href="#uv-pip-install--target"><code>--target</code></a> <i>target</i></dt><dd><p>Install packages into the specified directory, rather than into the virtual or system Python environment. The packages will be installed at the top-level of the directory</p>
</dd><dt id="uv-pip-install--torch-backend"><a href="#uv-pip-install--torch-backend"><code>--torch-backend</code></a> <i>torch-backend</i></dt><dd><p>The backend to use when fetching packages in the PyTorch ecosystem (e.g., <code>cpu</code>, <code>cu126</code>, or <code>auto</code>).</p>
<p>When set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem, and will instead use the defined backend.</p>
<p>For example, when set to <code>cpu</code>, uv will use the CPU-only PyTorch index; when set to <code>cu126</code>, uv will use the PyTorch index for CUDA 12.6.</p>
<p>The <code>auto</code> mode will attempt to detect the appropriate PyTorch index based on the currently installed CUDA drivers.</p>
<p>This option is in preview and may change in any future release.</p>
<p>May also be set with the <code>UV_TORCH_BACKEND</code> environment variable.</p>
<p>Possible values:</p>
<ul>
<li><code>auto</code>: Select the appropriate PyTorch index based on the operating system and CUDA driver version</li>
<li><code>cpu</code>: Use the CPU-only PyTorch index</li>
<li><code>cu126</code>: Use the PyTorch index for CUDA 12.6</li>
<li><code>cu125</code>: Use the PyTorch index for CUDA 12.5</li>
<li><code>cu124</code>: Use the PyTorch index for CUDA 12.4</li>
<li><code>cu123</code>: Use the PyTorch index for CUDA 12.3</li>
<li><code>cu122</code>: Use the PyTorch index for CUDA 12.2</li>
<li><code>cu121</code>: Use the PyTorch index for CUDA 12.1</li>
<li><code>cu120</code>: Use the PyTorch index for CUDA 12.0</li>
<li><code>cu118</code>: Use the PyTorch index for CUDA 11.8</li>
<li><code>cu117</code>: Use the PyTorch index for CUDA 11.7</li>
<li><code>cu116</code>: Use the PyTorch index for CUDA 11.6</li>
<li><code>cu115</code>: Use the PyTorch index for CUDA 11.5</li>
<li><code>cu114</code>: Use the PyTorch index for CUDA 11.4</li>
<li><code>cu113</code>: Use the PyTorch index for CUDA 11.3</li>
<li><code>cu112</code>: Use the PyTorch index for CUDA 11.2</li>
<li><code>cu111</code>: Use the PyTorch index for CUDA 11.1</li>
<li><code>cu110</code>: Use the PyTorch index for CUDA 11.0</li>
<li><code>cu102</code>: Use the PyTorch index for CUDA 10.2</li>
<li><code>cu101</code>: Use the PyTorch index for CUDA 10.1</li>
<li><code>cu100</code>: Use the PyTorch index for CUDA 10.0</li>
<li><code>cu92</code>: Use the PyTorch index for CUDA 9.2</li>
<li><code>cu91</code>: Use the PyTorch index for CUDA 9.1</li>
<li><code>cu90</code>: Use the PyTorch index for CUDA 9.0</li>
<li><code>cu80</code>: Use the PyTorch index for CUDA 8.0</li>
</ul>
</dd><dt id="uv-pip-install--upgrade"><a href="#uv-pip-install--upgrade"><code>--upgrade</code></a>, <code>-U</code></dt><dd><p>Allow package upgrades, ignoring pinned versions in any existing output file. Implies <code>--refresh</code></p> </dd><dt id="uv-pip-install--upgrade"><a href="#uv-pip-install--upgrade"><code>--upgrade</code></a>, <code>-U</code></dt><dd><p>Allow package upgrades, ignoring pinned versions in any existing output file. Implies <code>--refresh</code></p>
</dd><dt id="uv-pip-install--upgrade-package"><a href="#uv-pip-install--upgrade-package"><code>--upgrade-package</code></a>, <code>-P</code> <i>upgrade-package</i></dt><dd><p>Allow upgrades for a specific package, ignoring pinned versions in any existing output file. Implies <code>--refresh-package</code></p> </dd><dt id="uv-pip-install--upgrade-package"><a href="#uv-pip-install--upgrade-package"><code>--upgrade-package</code></a>, <code>-P</code> <i>upgrade-package</i></dt><dd><p>Allow upgrades for a specific package, ignoring pinned versions in any existing output file. Implies <code>--refresh-package</code></p>

View File

@ -3275,6 +3275,43 @@ environment. The packages will be installed at the top-level of the directory.
--- ---
#### [`torch-backend`](#pip_torch-backend) {: #pip_torch-backend }
<span id="torch-backend"></span>
The backend to use when fetching packages in the PyTorch ecosystem.
When set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem,
and will instead use the defined backend.
For example, when set to `cpu`, uv will use the CPU-only PyTorch index; when set to `cu126`,
uv will use the PyTorch index for CUDA 12.6.
The `auto` mode will attempt to detect the appropriate PyTorch index based on the currently
installed CUDA drivers.
This option is in preview and may change in any future release.
**Default value**: `null`
**Type**: `str`
**Example usage**:
=== "pyproject.toml"
```toml
[tool.uv.pip]
torch-backend = "auto"
```
=== "uv.toml"
```toml
[pip]
torch-backend = "auto"
```
---
#### [`universal`](#pip_universal) {: #pip_universal } #### [`universal`](#pip_universal) {: #pip_universal }
<span id="universal"></span> <span id="universal"></span>

191
uv.schema.json generated
View File

@ -1382,6 +1382,17 @@
"null" "null"
] ]
}, },
"torch-backend": {
"description": "The backend to use when fetching packages in the PyTorch ecosystem.\n\nWhen set, uv will ignore the configured index URLs for packages in the PyTorch ecosystem, and will instead use the defined backend.\n\nFor example, when set to `cpu`, uv will use the CPU-only PyTorch index; when set to `cu126`, uv will use the PyTorch index for CUDA 12.6.\n\nThe `auto` mode will attempt to detect the appropriate PyTorch index based on the currently installed CUDA drivers.\n\nThis option is in preview and may change in any future release.",
"anyOf": [
{
"$ref": "#/definitions/TorchMode"
},
{
"type": "null"
}
]
},
"universal": { "universal": {
"description": "Perform a universal resolution, attempting to generate a single `requirements.txt` output file that is compatible with all operating systems, architectures, and Python implementations.\n\nIn universal mode, the current Python version (or user-provided `--python-version`) will be treated as a lower bound. For example, `--universal --python-version 3.7` would produce a universal resolution for Python 3.7 and later.", "description": "Perform a universal resolution, attempting to generate a single `requirements.txt` output file that is compatible with all operating systems, architectures, and Python implementations.\n\nIn universal mode, the current Python version (or user-provided `--python-version`) will be treated as a lower bound. For example, `--universal --python-version 3.7` would produce a universal resolution for Python 3.7 and later.",
"type": [ "type": [
@ -2201,6 +2212,186 @@
}, },
"additionalProperties": false "additionalProperties": false
}, },
"TorchMode": {
"description": "The strategy to use when determining the appropriate PyTorch index.",
"oneOf": [
{
"description": "Select the appropriate PyTorch index based on the operating system and CUDA driver version.",
"type": "string",
"enum": [
"auto"
]
},
{
"description": "Use the CPU-only PyTorch index.",
"type": "string",
"enum": [
"cpu"
]
},
{
"description": "Use the PyTorch index for CUDA 12.6.",
"type": "string",
"enum": [
"cu126"
]
},
{
"description": "Use the PyTorch index for CUDA 12.5.",
"type": "string",
"enum": [
"cu125"
]
},
{
"description": "Use the PyTorch index for CUDA 12.4.",
"type": "string",
"enum": [
"cu124"
]
},
{
"description": "Use the PyTorch index for CUDA 12.3.",
"type": "string",
"enum": [
"cu123"
]
},
{
"description": "Use the PyTorch index for CUDA 12.2.",
"type": "string",
"enum": [
"cu122"
]
},
{
"description": "Use the PyTorch index for CUDA 12.1.",
"type": "string",
"enum": [
"cu121"
]
},
{
"description": "Use the PyTorch index for CUDA 12.0.",
"type": "string",
"enum": [
"cu120"
]
},
{
"description": "Use the PyTorch index for CUDA 11.8.",
"type": "string",
"enum": [
"cu118"
]
},
{
"description": "Use the PyTorch index for CUDA 11.7.",
"type": "string",
"enum": [
"cu117"
]
},
{
"description": "Use the PyTorch index for CUDA 11.6.",
"type": "string",
"enum": [
"cu116"
]
},
{
"description": "Use the PyTorch index for CUDA 11.5.",
"type": "string",
"enum": [
"cu115"
]
},
{
"description": "Use the PyTorch index for CUDA 11.4.",
"type": "string",
"enum": [
"cu114"
]
},
{
"description": "Use the PyTorch index for CUDA 11.3.",
"type": "string",
"enum": [
"cu113"
]
},
{
"description": "Use the PyTorch index for CUDA 11.2.",
"type": "string",
"enum": [
"cu112"
]
},
{
"description": "Use the PyTorch index for CUDA 11.1.",
"type": "string",
"enum": [
"cu111"
]
},
{
"description": "Use the PyTorch index for CUDA 11.0.",
"type": "string",
"enum": [
"cu110"
]
},
{
"description": "Use the PyTorch index for CUDA 10.2.",
"type": "string",
"enum": [
"cu102"
]
},
{
"description": "Use the PyTorch index for CUDA 10.1.",
"type": "string",
"enum": [
"cu101"
]
},
{
"description": "Use the PyTorch index for CUDA 10.0.",
"type": "string",
"enum": [
"cu100"
]
},
{
"description": "Use the PyTorch index for CUDA 9.2.",
"type": "string",
"enum": [
"cu92"
]
},
{
"description": "Use the PyTorch index for CUDA 9.1.",
"type": "string",
"enum": [
"cu91"
]
},
{
"description": "Use the PyTorch index for CUDA 9.0.",
"type": "string",
"enum": [
"cu90"
]
},
{
"description": "Use the PyTorch index for CUDA 8.0.",
"type": "string",
"enum": [
"cu80"
]
}
]
},
"TrustedHost": { "TrustedHost": {
"description": "A host or host-port pair.", "description": "A host or host-port pair.",
"type": "string" "type": "string"