Switch from vendored PubGrub to a fork (#438)

A fork will let us stay up to date with the upstream while replaying our
work on top of it.

I expect a similar workflow to the RustPython-Parser fork we maintained,
except that I wrote an automation to create tags for each commit on the
fork (https://github.com/zanieb/pubgrub/pull/2) so we do not need to
manually tag and document each commit.

To update with the upstream:

- Rebase our fork's `main` branch on top of the latest changes in
upstream's `dev` branch
- Force push, overwriting our `main` branch history
- Change the commit hash here to the last commit on `main` in our fork

Since we automatically tag each commit on the fork, we should never lose
the commits that are dropped from `main` during rebase.
This commit is contained in:
Zanie Blue 2023-11-16 13:49:19 -06:00 committed by GitHub
parent e41ec12239
commit 832058dbba
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
38 changed files with 4 additions and 11769 deletions

1
Cargo.lock generated
View File

@ -2287,6 +2287,7 @@ dependencies = [
[[package]] [[package]]
name = "pubgrub" name = "pubgrub"
version = "0.2.1" version = "0.2.1"
source = "git+https://github.com/zanieb/pubgrub?rev=46f1214fe6b7886709a35d8d2f2c0e1b56433b26#46f1214fe6b7886709a35d8d2f2c0e1b56433b26"
dependencies = [ dependencies = [
"indexmap 2.1.0", "indexmap 2.1.0",
"log", "log",

View File

@ -1,6 +1,5 @@
[workspace] [workspace]
members = ["crates/*"] members = ["crates/*"]
exclude = ["vendor/pubgrub"]
resolver = "2" resolver = "2"
[workspace.package] [workspace.package]
@ -49,6 +48,7 @@ once_cell = { version = "1.18.0" }
petgraph = { version = "0.6.4" } petgraph = { version = "0.6.4" }
platform-info = { version = "2.0.2" } platform-info = { version = "2.0.2" }
plist = { version = "1.6.0" } plist = { version = "1.6.0" }
pubgrub = { git = "https://github.com/zanieb/pubgrub", rev = "46f1214fe6b7886709a35d8d2f2c0e1b56433b26" }
pyproject-toml = { version = "0.8.0" } pyproject-toml = { version = "0.8.0" }
rand = { version = "0.8.5" } rand = { version = "0.8.5" }
rayon = { version = "1.8.0" } rayon = { version = "1.8.0" }

View File

@ -20,7 +20,6 @@ pep440_rs = { path = "../pep440-rs" }
pep508_rs = { path = "../pep508-rs" } pep508_rs = { path = "../pep508-rs" }
platform-host = { path = "../platform-host" } platform-host = { path = "../platform-host" }
platform-tags = { path = "../platform-tags" } platform-tags = { path = "../platform-tags" }
pubgrub = { path = "../../vendor/pubgrub" }
puffin-client = { path = "../puffin-client" } puffin-client = { path = "../puffin-client" }
puffin-dispatch = { path = "../puffin-dispatch" } puffin-dispatch = { path = "../puffin-dispatch" }
puffin-distribution = { path = "../puffin-distribution" } puffin-distribution = { path = "../puffin-distribution" }
@ -45,6 +44,7 @@ futures = { workspace = true }
indicatif = { workspace = true } indicatif = { workspace = true }
itertools = { workspace = true } itertools = { workspace = true }
miette = { workspace = true, features = ["fancy"] } miette = { workspace = true, features = ["fancy"] }
pubgrub = { workspace = true }
pyproject-toml = { workspace = true } pyproject-toml = { workspace = true }
tempfile = { workspace = true } tempfile = { workspace = true }
thiserror = { workspace = true } thiserror = { workspace = true }

View File

@ -16,7 +16,6 @@ pep440_rs = { path = "../pep440-rs" }
pep508_rs = { path = "../pep508-rs" } pep508_rs = { path = "../pep508-rs" }
platform-host = { path = "../platform-host" } platform-host = { path = "../platform-host" }
platform-tags = { path = "../platform-tags" } platform-tags = { path = "../platform-tags" }
pubgrub = { path = "../../vendor/pubgrub" }
puffin-cache = { path = "../puffin-cache" } puffin-cache = { path = "../puffin-cache" }
puffin-client = { path = "../puffin-client" } puffin-client = { path = "../puffin-client" }
puffin-distribution = { path = "../puffin-distribution" } puffin-distribution = { path = "../puffin-distribution" }
@ -37,6 +36,7 @@ fxhash = { workspace = true }
itertools = { workspace = true } itertools = { workspace = true }
once_cell = { workspace = true } once_cell = { workspace = true }
petgraph = { workspace = true } petgraph = { workspace = true }
pubgrub = { workspace = true }
tempfile = { workspace = true } tempfile = { workspace = true }
thiserror = { workspace = true } thiserror = { workspace = true }
tokio = { workspace = true } tokio = { workspace = true }

View File

@ -1,170 +0,0 @@
# Changelog
All notable changes to this project will be documented in this file.
## Unreleased [(diff)][unreleased-diff]
## [0.2.1] - 2021-06-30 - [(diff with 0.2.0)][0.2.0-diff]
This release is focused on performance improvements and code readability, without any change to the public API.
The code tends to be simpler around tricky parts of the algorithm such as conflict resolution.
Some data structures have been rewritten (with no unsafe) to lower memory usage.
Depending on scenarios, version 0.2.1 is 3 to 8 times faster than 0.2.0.
As an example, solving all elm package versions existing went from 580ms to 175ms on my laptop.
While solving a specific subset of packages from crates.io went from 2.5s to 320ms on my laptop.
Below are listed all the important changes in the internal parts of the API.
#### Added
- New `SmallVec` data structure (with no unsafe) using fixed size arrays for up to 2 entries.
- New `SmallMap` data structure (with no unsafe) using fixed size arrays for up to 2 entries.
- New `Arena` data structure (with no unsafe) backed by a `Vec` and indexed with `Id<T>` where `T` is phantom data.
#### Changed
- Updated the `large_case` benchmark to run with both u16 and string package identifiers in registries.
- Use the new `Arena` for the incompatibility store, and use its `Id<T>` identifiers to reference incompatibilities instead of full owned copies in the `incompatibilities` field of the solver `State`.
- Save satisfier indices of each package involved in an incompatibility when looking for its satisfier. This speeds up the search for the previous satisfier.
- Early unit propagation loop restart at the first conflict found instead of continuing evaluation for the current package.
- Index incompatibilities by package in a hash map instead of using a vec.
- Keep track of already contradicted incompatibilities in a `Set` until the next backtrack to speed up unit propagation.
- Unify `history` and `memory` in `partial_solution` under a unique hash map indexed by packages. This should speed up access to relevant terms in conflict resolution.
## [0.2.0] - 2020-11-19 - [(diff with 0.1.0)][0.1.0-diff]
This release brings many important improvements to PubGrub.
The gist of it is:
- A bug in the algorithm's implementation was [fixed](https://github.com/pubgrub-rs/pubgrub/pull/23).
- The solver is now implemented in a `resolve` function taking as argument
an implementer of the `DependencyProvider` trait,
which has more control over the decision making process.
- End-to-end property testing of large synthetic registries was added.
- More than 10x performance improvement.
### Changes affecting the public API
#### Added
- Links to code items in the code documentation.
- New `"serde"` feature that allows serializing some library types, useful for making simple reproducible bug reports.
- New variants for `error::PubGrubError` which are `DependencyOnTheEmptySet`,
`SelfDependency`, `ErrorChoosingPackageVersion` and `ErrorInShouldCancel`.
- New `type_alias::Map` defined as `rustc_hash::FxHashMap`.
- New `type_alias::SelectedDependencies<P, V>` defined as `Map<P, V>`.
- The types `Dependencies` and `DependencyConstraints` were introduced to clarify intent.
- New function `choose_package_with_fewest_versions` to help implement
the `choose_package_version` method of a `DependencyProvider`.
- Implement `FromStr` for `SemanticVersion`.
- Add the `VersionParseError` type for parsing of semantic versions.
#### Changed
- The `Solver` trait was replaced by a `DependencyProvider` trait
which now must implement a `choose_package_version` method
instead of `list_available_versions`.
So it now has the ability to choose a package in addition to a version.
The `DependencyProvider` also has a new optional method `should_cancel`
that may be used to stop the solver if needed.
- The `choose_package_version` and `get_dependencies` methods of the
`DependencyProvider` trait now take an immutable reference to `self`.
Interior mutability can be used by implementors if mutability is needed.
- The `Solver.run` method was thus replaced by a free function `solver::resolve`
taking a dependency provider as first argument.
- The `OfflineSolver` is thus replaced by an `OfflineDependencyProvider`.
- `SemanticVersion` now takes `u32` instead of `usize` for its 3 parts.
- `NumberVersion` now uses `u32` instead of `usize`.
#### Removed
- `ErrorRetrievingVersions` variant of `error::PubGrubError`.
### Changes in the internal parts of the API
#### Added
- `benches/large_case.rs` enables benchmarking of serialized registries of packages.
- `examples/caching_dependency_provider.rs` an example dependency provider caching dependencies.
- `PackageTerm<P, V> = (P, Term<V>)` new type alias for readability.
- `Memory.term_intersection_for_package(&mut self, package: &P) -> Option<&Term<V>>`
- New types were introduced for conflict resolution in `internal::partial_solution`
to clarify the intent and return values of some functions.
Those types are `DatedAssignment` and `SatisfierAndPreviousHistory`.
- `PartialSolution.term_intersection_for_package` calling the same function
from its `memory`.
- New property tests for ranges: `negate_contains_opposite`, `intesection_contains_both`
and `union_contains_either`.
- A large synthetic test case was added in `test-examples/`.
- A new test example `double_choices` was added
for the detection of a bug (fixed) in the implementation.
- Property testing of big synthetic datasets was added in `tests/proptest.rs`.
- Comparison of PubGrub solver and a SAT solver
was added with `tests/sat_dependency_provider.rs`.
- Other regression and unit tests were added to `tests/tests.rs`.
#### Changed
- CI workflow was improved (`./github/workflows/`), including a check for
[Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) and
[Clippy](https://github.com/rust-lang/rust-clippy) for source code linting.
- Using SPDX license identifiers instead of MPL-2.0 classic file headers.
- `State.incompatibilities` is now wrapped inside a `Rc`.
- `DecisionLevel(u32)` is used in place of `usize` for partial solution decision levels.
- `State.conflict_resolution` now also returns the almost satisfied package
to avoid an unnecessary call to `self.partial_solution.relation(...)` after conflict resolution.
- `Kind::NoVersion` renamed to `Kind::NoVersions` and all other usage of `noversion`
has been changed to `no_versions`.
- Variants of the `incompatibility::Relation` enum have changed.
- Incompatibility now uses a deterministic hasher to store packages in its hash map.
- `incompatibility.relation(...)` now takes a function as argument to avoid computations
of unnecessary terms intersections.
- `Memory` now uses a deterministic hasher instead of the default one.
- `memory::PackageAssignments` is now an enum instead of a struct.
- Derivations in a `PackageAssignments` keep a precomputed intersection of derivation terms.
- `potential_packages` method now returns a `Range`
instead of a `Term` for the versions constraint of each package.
- `PartialSolution.relation` now takes `&mut self` instead of `&self`
to be able to store computation of terms intersection.
- `Term.accept_version` was renamed `Term.contains`.
- The `satisfied_by` and `contradicted_by` methods of a `Term`
now directly takes a reference to the intersection of other terms.
Same for `relation_with`.
#### Removed
- `term` field of an `Assignment::Derivation` variant.
- `Memory.all_terms` method was removed.
- `Memory.remove_decision` method was removed in favor of a check before using `Memory.add_decision`.
- `PartialSolution` methods `pick_package` and `pick_version` have been removed
since control was given back to the dependency provider to choose a package version.
- `PartialSolution` methods `remove_last_decision` and `satisfies_any_of` were removed
in favor of a preventive check before calling `add_decision`.
- `Term.is_negative`.
#### Fixed
- Prior cause computation (`incompatibility::prior_cause`) now uses the intersection of package terms
instead of their union, which was an implementation error.
## [0.1.0] - 2020-10-01
### Added
- `README.md` as the home page of this repository.
- `LICENSE`, code is provided under the MPL 2.0 license.
- `Cargo.toml` configuration of this Rust project.
- `src/` containing all the source code for this first implementation of PubGrub in Rust.
- `tests/` containing test end-to-end examples.
- `examples/` other examples, not in the form of tests.
- `.gitignore` configured for a Rust project.
- `.github/workflows/` CI to automatically build, test and document on push and pull requests.
[0.2.1]: https://github.com/pubgrub-rs/pubgrub/releases/tag/v0.2.1
[0.2.0]: https://github.com/pubgrub-rs/pubgrub/releases/tag/v0.2.0
[0.1.0]: https://github.com/pubgrub-rs/pubgrub/releases/tag/v0.1.0
[unreleased-diff]: https://github.com/pubgrub-rs/pubgrub/compare/release...dev
[0.2.0-diff]: https://github.com/pubgrub-rs/pubgrub/compare/v0.2.0...v0.2.1
[0.1.0-diff]: https://github.com/pubgrub-rs/pubgrub/compare/v0.1.0...v0.2.0

View File

@ -1,40 +0,0 @@
# SPDX-License-Identifier: MPL-2.0
[package]
name = "pubgrub"
version = "0.2.1"
authors = [
"Matthieu Pizenberg <matthieu.pizenberg@gmail.com>",
"Alex Tokarev <aleksator@gmail.com>",
"Jacob Finkelman <Eh2406@wayne.edu>",
]
edition = "2021"
description = "PubGrub version solving algorithm"
readme = "README.md"
repository = "https://github.com/pubgrub-rs/pubgrub"
license = "MPL-2.0"
keywords = ["dependency", "pubgrub", "semver", "solver", "version"]
categories = ["algorithms"]
include = ["Cargo.toml", "LICENSE", "README.md", "src/**", "tests/**", "examples/**", "benches/**"]
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
indexmap = "2.0.2"
priority-queue = "1.1.1"
thiserror = "1.0"
rustc-hash = "1.1.0"
serde = { version = "1.0", features = ["derive"], optional = true }
log = "0.4.14" # for debug logs in tests
[dev-dependencies]
proptest = "0.10.1"
ron = "0.6"
varisat = "0.2.2"
criterion = "0.3"
env_logger = "0.9.0"
[[bench]]
name = "large_case"
harness = false
required-features = ["serde"]

373
vendor/pubgrub/LICENSE vendored
View File

@ -1,373 +0,0 @@
Mozilla Public License Version 2.0
==================================
1. Definitions
--------------
1.1. "Contributor"
means each individual or legal entity that creates, contributes to
the creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used
by a Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached
the notice in Exhibit A, the Executable Form of such Source Code
Form, and Modifications of such Source Code Form, in each case
including portions thereof.
1.5. "Incompatible With Secondary Licenses"
means
(a) that the initial Contributor has attached the notice described
in Exhibit B to the Covered Software; or
(b) that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the
terms of a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in
a separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible,
whether at the time of the initial grant or subsequently, any and
all of the rights conveyed by this License.
1.10. "Modifications"
means any of the following:
(a) any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered
Software; or
(b) any new file in Source Code Form that contains any Covered
Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the
License, by the making, using, selling, offering for sale, having
made, import, or transfer of either its Contributions or its
Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU
Lesser General Public License, Version 2.1, the GNU Affero General
Public License, Version 3.0, or any later versions of those
licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that
controls, is controlled by, or is under common control with You. For
purposes of this definition, "control" means (a) the power, direct
or indirect, to cause the direction or management of such entity,
whether by contract or otherwise, or (b) ownership of more than
fifty percent (50%) of the outstanding shares or beneficial
ownership of such entity.
2. License Grants and Conditions
--------------------------------
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
(a) under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
(b) under Patent Claims of such Contributor to make, use, sell, offer
for sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
(a) for any code that a Contributor has removed from Covered Software;
or
(b) for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
(c) under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights
to grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted
in Section 2.1.
3. Responsibilities
-------------------
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
(a) such Covered Software must also be made available in Source Code
Form, as described in Section 3.1, and You must inform recipients of
the Executable Form how they can obtain a copy of such Source Code
Form by reasonable means in a timely manner, at a charge no more
than the cost of distribution to the recipient; and
(b) You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter
the recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty,
or limitations of liability) contained within the Source Code Form of
the Covered Software, except that You may alter any license notices to
the extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
---------------------------------------------------
If it is impossible for You to comply with any of the terms of this
License with respect to some or all of the Covered Software due to
statute, judicial order, or regulation then You must: (a) comply with
the terms of this License to the maximum extent possible; and (b)
describe the limitations and the code they affect. Such description must
be placed in a text file included with all distributions of the Covered
Software under this License. Except to the extent prohibited by statute
or regulation, such description must be sufficiently detailed for a
recipient of ordinary skill to be able to understand it.
5. Termination
--------------
5.1. The rights granted under this License will terminate automatically
if You fail to comply with any of its terms. However, if You become
compliant, then the rights granted under this License from a particular
Contributor are reinstated (a) provisionally, unless and until such
Contributor explicitly and finally terminates Your grants, and (b) on an
ongoing basis, if such Contributor fails to notify You of the
non-compliance by some reasonable means prior to 60 days after You have
come back into compliance. Moreover, Your grants from a particular
Contributor are reinstated on an ongoing basis if such Contributor
notifies You of the non-compliance by some reasonable means, this is the
first time You have received notice of non-compliance with this License
from such Contributor, and You become compliant prior to 30 days after
Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all
end user license agreements (excluding distributors and resellers) which
have been validly granted by You or Your distributors under this License
prior to termination shall survive termination.
************************************************************************
* *
* 6. Disclaimer of Warranty *
* ------------------------- *
* *
* Covered Software is provided under this License on an "as is" *
* basis, without warranty of any kind, either expressed, implied, or *
* statutory, including, without limitation, warranties that the *
* Covered Software is free of defects, merchantable, fit for a *
* particular purpose or non-infringing. The entire risk as to the *
* quality and performance of the Covered Software is with You. *
* Should any Covered Software prove defective in any respect, You *
* (not any Contributor) assume the cost of any necessary servicing, *
* repair, or correction. This disclaimer of warranty constitutes an *
* essential part of this License. No use of any Covered Software is *
* authorized under this License except under this disclaimer. *
* *
************************************************************************
************************************************************************
* *
* 7. Limitation of Liability *
* -------------------------- *
* *
* Under no circumstances and under no legal theory, whether tort *
* (including negligence), contract, or otherwise, shall any *
* Contributor, or anyone who distributes Covered Software as *
* permitted above, be liable to You for any direct, indirect, *
* special, incidental, or consequential damages of any character *
* including, without limitation, damages for lost profits, loss of *
* goodwill, work stoppage, computer failure or malfunction, or any *
* and all other commercial damages or losses, even if such party *
* shall have been informed of the possibility of such damages. This *
* limitation of liability shall not apply to liability for death or *
* personal injury resulting from such party's negligence to the *
* extent applicable law prohibits such limitation. Some *
* jurisdictions do not allow the exclusion or limitation of *
* incidental or consequential damages, so this exclusion and *
* limitation may not apply to You. *
* *
************************************************************************
8. Litigation
-------------
Any litigation relating to this License may be brought only in the
courts of a jurisdiction where the defendant maintains its principal
place of business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions.
Nothing in this Section shall prevent a party's ability to bring
cross-claims or counter-claims.
9. Miscellaneous
----------------
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides
that the language of a contract shall be construed against the drafter
shall not be used to construe this License against a Contributor.
10. Versions of the License
---------------------------
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
-------------------------------------------
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular
file, then You may include the notice in a location (such as a LICENSE
file in a relevant directory) where a recipient would be likely to look
for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
---------------------------------------------------------
This Source Code Form is "Incompatible With Secondary Licenses", as
defined by the Mozilla Public License, v. 2.0.

View File

@ -1,83 +0,0 @@
# PubGrub version solving algorithm
![license](https://img.shields.io/crates/l/pubgrub.svg)
[![crates.io](https://img.shields.io/crates/v/pubgrub.svg?logo=rust)][crates]
[![docs.rs](https://img.shields.io/badge/docs.rs-pubgrub-yellow)][docs]
[![guide](https://img.shields.io/badge/guide-pubgrub-pink?logo=read-the-docs)][guide]
Version solving consists in efficiently finding a set of packages and versions
that satisfy all the constraints of a given project's dependencies.
In addition, when that is not possible,
PubGrub tries to provide a very human-readable and clear
explanation as to why that failed.
The [introductory blog post about PubGrub][medium-pubgrub] presents
one such example of failure explanation:
```txt
Because dropdown >=2.0.0 depends on icons >=2.0.0 and
root depends on icons <2.0.0, dropdown >=2.0.0 is forbidden.
And because menu >=1.1.0 depends on dropdown >=2.0.0,
menu >=1.1.0 is forbidden.
And because menu <1.1.0 depends on dropdown >=1.0.0 <2.0.0
which depends on intl <4.0.0, every version of menu
requires intl <4.0.0.
So, because root depends on both menu >=1.0.0 and intl >=5.0.0,
version solving failed.
```
This pubgrub crate provides a Rust implementation of PubGrub.
It is generic and works for any type of dependency system
as long as packages (P) and versions (V) implement
the provided `Package` and `Version` traits.
## Using the pubgrub crate
A [guide][guide] with both high-level explanations and
in-depth algorithm details is available online.
The [API documentation is available on docs.rs][docs].
A version of the [API docs for the unreleased functionality][docs-dev] from `dev` branch is also
accessible for convenience.
## Contributing
Discussion and development happens here on GitHub and on our
[Zulip stream](https://rust-lang.zulipchat.com/#narrow/stream/260232-t-cargo.2FPubGrub).
Please join in!
Remember to always be considerate of others,
who may have different native languages, cultures and experiences.
We want everyone to feel welcomed,
let us know with a private message on Zulip if you don't feel that way.
## PubGrub
PubGrub is a version solving algorithm,
written in 2018 by Natalie Weizenbaum
for the Dart package manager.
It is supposed to be very fast and to explain errors
more clearly than the alternatives.
An introductory blog post was
[published on Medium][medium-pubgrub] by its author.
The detailed explanation of the algorithm is
[provided on GitHub][github-pubgrub],
and complemented by the ["Internals" section of our guide][guide-internals].
The foundation of the algorithm is based on ASP (Answer Set Programming),
and a book called
"[Answer Set Solving in Practice][potassco-book]"
by Martin Gebser, Roland Kaminski, Benjamin Kaufmann and Torsten Schaub.
[crates]: https://crates.io/crates/pubgrub
[guide]: https://pubgrub-rs-guide.netlify.app/
[guide-internals]: https://pubgrub-rs-guide.netlify.app/internals/intro.html
[docs]: https://docs.rs/pubgrub
[docs-dev]: https://pubgrub-rs.github.io/pubgrub/pubgrub/
[medium-pubgrub]: https://medium.com/@nex3/pubgrub-2fb6470504f
[github-pubgrub]: https://github.com/dart-lang/pub/blob/master/doc/solver.md
[potassco-book]: https://potassco.org/book/

View File

@ -1,54 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use std::time::Duration;
extern crate criterion;
use self::criterion::*;
use pubgrub::package::Package;
use pubgrub::range::Range;
use pubgrub::solver::{resolve, OfflineDependencyProvider};
use pubgrub::version::{NumberVersion, SemanticVersion};
use pubgrub::version_set::VersionSet;
use serde::de::Deserialize;
/// Deserializes a recorded dependency registry from a RON string (`case`) and
/// measures the cost of resolving every (package, version) pair it contains.
///
/// Panics if the RON input is malformed — acceptable in a benchmark driver.
fn bench<'a, P: Package + Deserialize<'a>, VS: VersionSet + Deserialize<'a>>(
    b: &mut Bencher,
    case: &'a str,
) where
    <VS as VersionSet>::V: Deserialize<'a>,
{
    // `case` is already a `&str`; the extra borrow (`&case`) was redundant.
    let dependency_provider: OfflineDependencyProvider<P, VS> = ron::de::from_str(case).unwrap();
    b.iter(|| {
        for p in dependency_provider.packages() {
            for n in dependency_provider.versions(p).unwrap() {
                // The resolution outcome is irrelevant; only its cost is measured.
                let _ = resolve(&dependency_provider, p.clone(), n.clone());
            }
        }
    });
}
/// Registers one benchmark per recorded test case found in `test-examples/`,
/// dispatching on the file-name suffix to pick the package/version types.
fn bench_nested(c: &mut Criterion) {
    let mut group = c.benchmark_group("large_cases");
    group.measurement_time(Duration::from_secs(20));
    for entry in std::fs::read_dir("test-examples").unwrap() {
        let path = entry.unwrap().path();
        let file_name = path.file_name().unwrap().to_string_lossy();
        let contents = std::fs::read_to_string(&path).unwrap();
        // The suffix encodes which (P, VS) instantiation the case was recorded with.
        if file_name.ends_with("u16_NumberVersion.ron") {
            group.bench_function(file_name, |b| {
                bench::<u16, Range<NumberVersion>>(b, &contents);
            });
        } else if file_name.ends_with("str_SemanticVersion.ron") {
            group.bench_function(file_name, |b| {
                bench::<&str, Range<SemanticVersion>>(b, &contents);
            });
        }
    }
    group.finish();
}
// Wire the benchmark group into criterion's generated `main` entry point.
criterion_group!(benches, bench_nested);
criterion_main!(benches);

View File

@ -1,67 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use pubgrub::error::PubGrubError;
use pubgrub::range::Range;
use pubgrub::report::{DefaultStringReporter, Reporter};
use pubgrub::solver::{resolve, OfflineDependencyProvider};
use pubgrub::version::SemanticVersion;
type SemVS = Range<SemanticVersion>;
// https://github.com/dart-lang/pub/blob/master/doc/solver.md#branching-error-reporting
/// Reproduces the "branching error reporting" walkthrough from pub's solver
/// documentation: resolution fails and the derivation tree is printed.
fn main() {
    let mut provider = OfflineDependencyProvider::<&str, SemVS>::new();
    // root 1.0.0 -> foo ^1.0.0
    provider.add_dependencies("root", (1, 0, 0), [
        ("foo", Range::from_range_bounds((1, 0, 0)..(2, 0, 0))),
    ]);
    // foo 1.0.0 -> a ^1.0.0 and b ^1.0.0
    provider.add_dependencies("foo", (1, 0, 0), [
        ("a", Range::from_range_bounds((1, 0, 0)..(2, 0, 0))),
        ("b", Range::from_range_bounds((1, 0, 0)..(2, 0, 0))),
    ]);
    // foo 1.1.0 -> x ^1.0.0 and y ^1.0.0
    provider.add_dependencies("foo", (1, 1, 0), [
        ("x", Range::from_range_bounds((1, 0, 0)..(2, 0, 0))),
        ("y", Range::from_range_bounds((1, 0, 0)..(2, 0, 0))),
    ]);
    // a 1.0.0 -> b ^2.0.0
    provider.add_dependencies("a", (1, 0, 0), [
        ("b", Range::from_range_bounds((2, 0, 0)..(3, 0, 0))),
    ]);
    // b has no dependencies at either available version.
    provider.add_dependencies("b", (1, 0, 0), []);
    provider.add_dependencies("b", (2, 0, 0), []);
    // x 1.0.0 -> y ^2.0.0
    provider.add_dependencies("x", (1, 0, 0), [
        ("y", Range::from_range_bounds((2, 0, 0)..(3, 0, 0))),
    ]);
    // y has no dependencies at either available version.
    provider.add_dependencies("y", (1, 0, 0), []);
    provider.add_dependencies("y", (2, 0, 0), []);
    // Resolution is expected to fail: report why and exit non-zero.
    match resolve(&provider, "root", (1, 0, 0)) {
        Ok(sol) => println!("{:?}", sol),
        Err(PubGrubError::NoSolution(mut derivation_tree)) => {
            derivation_tree.collapse_no_versions();
            eprintln!("{}", DefaultStringReporter::report(&derivation_tree));
            std::process::exit(1);
        }
        Err(err) => panic!("{:?}", err),
    };
}

View File

@ -1,90 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use std::cell::RefCell;
use std::error::Error;
use pubgrub::package::Package;
use pubgrub::range::Range;
use pubgrub::solver::{resolve, Dependencies, DependencyProvider, OfflineDependencyProvider};
use pubgrub::version::NumberVersion;
use pubgrub::version_set::VersionSet;
type NumVS = Range<NumberVersion>;
// An example implementing caching dependency provider that will
// store queried dependencies in memory and check them before querying more from remote.
struct CachingDependencyProvider<P: Package, VS: VersionSet, DP: DependencyProvider<P, VS>> {
    // Source of truth, queried only on cache misses.
    remote_dependencies: DP,
    // Dependencies already learned from the remote. `RefCell` provides interior
    // mutability since `DependencyProvider::get_dependencies` takes `&self`.
    cached_dependencies: RefCell<OfflineDependencyProvider<P, VS>>,
}
impl<P: Package, VS: VersionSet, DP: DependencyProvider<P, VS>>
    CachingDependencyProvider<P, VS, DP>
{
    /// Wraps `remote_dependencies_provider` with an initially empty in-memory cache.
    pub fn new(remote_dependencies_provider: DP) -> Self {
        let empty_cache = OfflineDependencyProvider::new();
        Self {
            remote_dependencies: remote_dependencies_provider,
            cached_dependencies: RefCell::new(empty_cache),
        }
    }
}
impl<P: Package, VS: VersionSet, DP: DependencyProvider<P, VS>> DependencyProvider<P, VS>
    for CachingDependencyProvider<P, VS, DP>
{
    // Caches dependencies if they were already queried
    fn get_dependencies(
        &self,
        package: &P,
        version: &VS::V,
    ) -> Result<Dependencies<P, VS>, Box<dyn Error + Send + Sync>> {
        let mut cache = self.cached_dependencies.borrow_mut();
        match cache.get_dependencies(package, version) {
            // Cache miss: ask the remote provider and remember known answers.
            Ok(Dependencies::Unknown) => {
                let dependencies = self.remote_dependencies.get_dependencies(package, version);
                match dependencies {
                    Ok(Dependencies::Known(dependencies)) => {
                        cache.add_dependencies(
                            package.clone(),
                            version.clone(),
                            dependencies.clone(),
                        );
                        Ok(Dependencies::Known(dependencies))
                    }
                    // Unknown remote answers are not cached, so they will be re-queried.
                    Ok(Dependencies::Unknown) => Ok(Dependencies::Unknown),
                    error @ Err(_) => error,
                }
            }
            // Cache hit: answer without touching the remote provider.
            dependencies @ Ok(_) => dependencies,
            error @ Err(_) => error,
        }
    }

    // Version choice is delegated to the remote provider (not cached).
    fn choose_version(
        &self,
        package: &P,
        range: &VS,
    ) -> Result<Option<VS::V>, Box<dyn Error + Send + Sync>> {
        self.remote_dependencies.choose_version(package, range)
    }

    // Package prioritization is delegated to the remote provider as well.
    type Priority = DP::Priority;
    fn prioritize(&self, package: &P, range: &VS) -> Self::Priority {
        self.remote_dependencies.prioritize(package, range)
    }
}
/// Demonstrates the caching provider against a locally simulated "remote".
fn main() {
    // Simulated remote registry: only "root" v1 exists, with no dependencies.
    let mut remote = OfflineDependencyProvider::<&str, NumVS>::new();
    remote.add_dependencies("root", 1, Vec::new());
    // Wrap it in the caching layer and resolve through the cache.
    let provider = CachingDependencyProvider::new(remote);
    let solution = resolve(&provider, "root", 1);
    println!("Solution: {:?}", solution);
}

View File

@ -1,26 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use pubgrub::range::Range;
use pubgrub::solver::{resolve, OfflineDependencyProvider};
use pubgrub::version::NumberVersion;
type NumVS = Range<NumberVersion>;
// `root` depends on `menu` and `icons`
// `menu` depends on `dropdown`
// `dropdown` depends on `icons`
// `icons` has no dependency
/// Minimal resolution example over the graph:
///   root -> menu, icons; menu -> dropdown; dropdown -> icons; icons -> (nothing)
fn main() {
    let mut provider = OfflineDependencyProvider::<&str, NumVS>::new();
    provider.add_dependencies("root", 1, [("menu", Range::full()), ("icons", Range::full())]);
    provider.add_dependencies("menu", 1, [("dropdown", Range::full())]);
    provider.add_dependencies("dropdown", 1, [("icons", Range::full())]);
    provider.add_dependencies("icons", 1, []);
    // Solve for root v1 and print the resulting assignment.
    let solution = resolve(&provider, "root", 1);
    println!("Solution: {:?}", solution);
}

View File

@ -1,83 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use pubgrub::error::PubGrubError;
use pubgrub::range::Range;
use pubgrub::report::{DefaultStringReporter, Reporter};
use pubgrub::solver::{resolve, OfflineDependencyProvider};
use pubgrub::version::SemanticVersion;
type SemVS = Range<SemanticVersion>;
// `root` depends on `menu`, `icons 1.0.0` and `intl 5.0.0`
// `menu 1.0.0` depends on `dropdown < 2.0.0`
// `menu >= 1.1.0` depends on `dropdown >= 2.0.0`
// `dropdown 1.8.0` depends on `intl 3.0.0`
// `dropdown >= 2.0.0` depends on `icons 2.0.0`
// `icons` has no dependency
// `intl` has no dependency
/// Conflict example: root pins icons to 1.0.0 and intl to 5.0.0, which is
/// incompatible with every usable menu/dropdown combination, so resolution
/// fails and a human-readable explanation is printed.
fn main() {
    let mut dependency_provider = OfflineDependencyProvider::<&str, SemVS>::new();
    // Direct dependencies: any menu, icons pinned to 1.0.0, intl pinned to 5.0.0.
    dependency_provider.add_dependencies("root", (1, 0, 0), [
        ("menu", Range::full()),
        ("icons", Range::singleton((1, 0, 0))),
        ("intl", Range::singleton((5, 0, 0))),
    ]);
    // menu 1.0.0 accepts dropdown < 2; every later 1.x requires dropdown >= 2.
    dependency_provider.add_dependencies("menu", (1, 0, 0), [
        ("dropdown", Range::from_range_bounds(..(2, 0, 0))),
    ]);
    for minor in [1, 2, 3, 4, 5] {
        dependency_provider.add_dependencies("menu", (1, minor, 0), [
            ("dropdown", Range::from_range_bounds((2, 0, 0)..)),
        ]);
    }
    // dropdown 1.8.0 needs intl 3.0.0 exactly; every dropdown 2.x needs icons 2.0.0.
    dependency_provider.add_dependencies("dropdown", (1, 8, 0), [
        ("intl", Range::singleton((3, 0, 0))),
    ]);
    for minor in [0, 1, 2, 3] {
        dependency_provider.add_dependencies("dropdown", (2, minor, 0), [
            ("icons", Range::singleton((2, 0, 0))),
        ]);
    }
    // Leaf packages: icons and intl have no dependencies.
    dependency_provider.add_dependencies("icons", (1, 0, 0), []);
    dependency_provider.add_dependencies("icons", (2, 0, 0), []);
    dependency_provider.add_dependencies("intl", (3, 0, 0), []);
    dependency_provider.add_dependencies("intl", (4, 0, 0), []);
    dependency_provider.add_dependencies("intl", (5, 0, 0), []);
    // Resolution fails here; print the derivation of the conflict.
    match resolve(&dependency_provider, "root", (1, 0, 0)) {
        Ok(sol) => println!("{:?}", sol),
        Err(PubGrubError::NoSolution(mut derivation_tree)) => {
            derivation_tree.collapse_no_versions();
            eprintln!("{}", DefaultStringReporter::report(&derivation_tree));
        }
        Err(err) => panic!("{:?}", err),
    };
}

View File

@ -1,74 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use pubgrub::error::PubGrubError;
use pubgrub::range::Range;
use pubgrub::report::{DefaultStringReporter, Reporter};
use pubgrub::solver::{resolve, OfflineDependencyProvider};
use pubgrub::version::SemanticVersion;
type SemVS = Range<SemanticVersion>;
// `root` depends on `menu` and `icons 1.0.0`
// `menu 1.0.0` depends on `dropdown < 2.0.0`
// `menu >= 1.1.0` depends on `dropdown >= 2.0.0`
// `dropdown 1.8.0` has no dependency
// `dropdown >= 2.0.0` depends on `icons 2.0.0`
// `icons` has no dependency
/// Successful resolution example: root pins icons to 1.0.0, which forces the
/// solver onto menu 1.0.0 + dropdown 1.8.0 (the only dropdown below 2.0.0).
fn main() {
    let mut dependency_provider = OfflineDependencyProvider::<&str, SemVS>::new();
    // Direct dependencies: any menu and icons pinned to 1.0.0.
    dependency_provider.add_dependencies("root", (1, 0, 0), [
        ("menu", Range::full()),
        ("icons", Range::singleton((1, 0, 0))),
    ]);
    // menu 1.0.0 accepts dropdown < 2; every later 1.x requires dropdown >= 2.
    dependency_provider.add_dependencies("menu", (1, 0, 0), [
        ("dropdown", Range::from_range_bounds(..(2, 0, 0))),
    ]);
    for minor in [1, 2, 3, 4, 5] {
        dependency_provider.add_dependencies("menu", (1, minor, 0), [
            ("dropdown", Range::from_range_bounds((2, 0, 0)..)),
        ]);
    }
    // dropdown 1.8.0 is dependency-free; every dropdown 2.x needs icons 2.0.0.
    dependency_provider.add_dependencies("dropdown", (1, 8, 0), []);
    for minor in [0, 1, 2, 3] {
        dependency_provider.add_dependencies("dropdown", (2, minor, 0), [
            ("icons", Range::singleton((2, 0, 0))),
        ]);
    }
    // icons has no dependency at either version.
    dependency_provider.add_dependencies("icons", (1, 0, 0), []);
    dependency_provider.add_dependencies("icons", (2, 0, 0), []);
    // Solve; on failure print a human-readable derivation of the conflict.
    match resolve(&dependency_provider, "root", (1, 0, 0)) {
        Ok(sol) => println!("{:?}", sol),
        Err(PubGrubError::NoSolution(mut derivation_tree)) => {
            derivation_tree.collapse_no_versions();
            eprintln!("{}", DefaultStringReporter::report(&derivation_tree));
        }
        Err(err) => panic!("{:?}", err),
    };
}

View File

@ -1,49 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use pubgrub::error::PubGrubError;
use pubgrub::range::Range;
use pubgrub::report::{DefaultStringReporter, Reporter};
use pubgrub::solver::{resolve, OfflineDependencyProvider};
use pubgrub::version::SemanticVersion;
type SemVS = Range<SemanticVersion>;
// https://github.com/dart-lang/pub/blob/master/doc/solver.md#linear-error-reporting
/// Reproduces the "linear error reporting" walkthrough from pub's solver
/// documentation: resolution fails and the derivation tree is printed.
fn main() {
    let mut provider = OfflineDependencyProvider::<&str, SemVS>::new();
    // root 1.0.0 -> foo ^1.0.0 and baz ^1.0.0
    provider.add_dependencies("root", (1, 0, 0), [
        ("foo", Range::from_range_bounds((1, 0, 0)..(2, 0, 0))),
        ("baz", Range::from_range_bounds((1, 0, 0)..(2, 0, 0))),
    ]);
    // foo 1.0.0 -> bar ^2.0.0
    provider.add_dependencies("foo", (1, 0, 0), [
        ("bar", Range::from_range_bounds((2, 0, 0)..(3, 0, 0))),
    ]);
    // bar 2.0.0 -> baz ^3.0.0
    provider.add_dependencies("bar", (2, 0, 0), [
        ("baz", Range::from_range_bounds((3, 0, 0)..(4, 0, 0))),
    ]);
    // baz has no dependencies at either available version.
    provider.add_dependencies("baz", (1, 0, 0), []);
    provider.add_dependencies("baz", (3, 0, 0), []);
    // Resolution is expected to fail: report why and exit non-zero.
    match resolve(&provider, "root", (1, 0, 0)) {
        Ok(sol) => println!("{:?}", sol),
        Err(PubGrubError::NoSolution(mut derivation_tree)) => {
            derivation_tree.collapse_no_versions();
            eprintln!("{}", DefaultStringReporter::report(&derivation_tree));
            std::process::exit(1);
        }
        Err(err) => panic!("{:?}", err),
    };
}

View File

@ -1,28 +0,0 @@
# Creation of a new release
This is taking the 0.2.1 release as an example.
## GitHub stuff
- Checkout the prep-v0.2.1 branch
- Update the release date in the changelog and push to the PR.
- Squash merge the PR to the dev branch
- Check that the merged PR is passing the tests on the dev branch
- Pull the updated dev locally
- Switch to the release branch
- Locally merge dev into release in fast-forward mode; we want to keep the history of commits and the merge point.
- `git tag -a v0.2.1 -m "v0.2.1: mostly perf improvements"`
- (Optional) cryptographically sign the tag
- On GitHub, edit the branch protection setting for release: uncheck include admin, and save
- Push the release branch to GitHub: `git push --follow-tags`
- Reset the release branch protection to include admins
- On GitHub, create a release from that tag.
## Crates.io stuff
- `cargo publish --dry-run`
- `cargo publish`
## Community stuff
Talk about the awesome new features of the new release online.

View File

@ -1,61 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Handling pubgrub errors.
use thiserror::Error;
use crate::package::Package;
use crate::report::DerivationTree;
use crate::version_set::VersionSet;
/// Errors that may occur while solving dependencies.
#[derive(Error, Debug)]
pub enum PubGrubError<P: Package, VS: VersionSet> {
    /// There is no solution for this set of dependencies.
    /// The derivation tree explains step by step why resolution failed.
    #[error("No solution")]
    NoSolution(DerivationTree<P, VS>),

    /// Error arising when the implementer of
    /// [DependencyProvider](crate::solver::DependencyProvider)
    /// returned an error in the method
    /// [get_dependencies](crate::solver::DependencyProvider::get_dependencies).
    #[error("Retrieving dependencies of {package} {version} failed")]
    ErrorRetrievingDependencies {
        /// Package whose dependencies we want.
        package: P,
        /// Version of the package for which we want the dependencies.
        version: VS::V,
        /// Error raised by the implementer of
        /// [DependencyProvider](crate::solver::DependencyProvider).
        source: Box<dyn std::error::Error + Send + Sync>,
    },

    /// Error arising when the implementer of
    /// [DependencyProvider](crate::solver::DependencyProvider)
    /// returned a dependency on the requested package.
    /// This technically means that the package directly depends on itself,
    /// and is clearly some kind of mistake.
    #[error("{package} {version} depends on itself")]
    SelfDependency {
        /// Package whose dependencies we want.
        package: P,
        /// Version of the package for which we want the dependencies.
        version: VS::V,
    },

    /// Error arising when the implementer of
    /// [DependencyProvider](crate::solver::DependencyProvider)
    /// returned an error in the method
    /// [choose_version](crate::solver::DependencyProvider::choose_version).
    #[error("Decision making failed")]
    ErrorChoosingPackageVersion(Box<dyn std::error::Error + Send + Sync>),

    /// Error arising when the implementer of [DependencyProvider](crate::solver::DependencyProvider)
    /// returned an error in the method [should_cancel](crate::solver::DependencyProvider::should_cancel).
    #[error("We should cancel")]
    ErrorInShouldCancel(Box<dyn std::error::Error + Send + Sync>),

    /// Something unexpected happened.
    /// The string carries a free-form description of the failure.
    #[error("{0}")]
    Failure(String),
}

View File

@ -1,122 +0,0 @@
use std::{
fmt,
hash::{Hash, Hasher},
marker::PhantomData,
ops::{Index, Range},
};
/// The index of a value allocated in an arena that holds `T`s.
///
/// The Clone, Copy and other traits are defined manually because
/// deriving them adds some additional constraints on the `T` generic type
/// that we actually don't need since it is phantom.
///
/// <https://github.com/rust-lang/rust/issues/26925>
pub struct Id<T> {
    // Index into the arena's backing `Vec`.
    raw: u32,
    // Marker tying this id to `T` without storing a `T`.
    _ty: PhantomData<fn() -> T>,
}
// Manual trait impls: since `T` is phantom, equality/hashing/copying only
// involve the `raw` index and need no bounds on `T` (see type-level comment).
impl<T> Clone for Id<T> {
    fn clone(&self) -> Self {
        *self
    }
}
impl<T> Copy for Id<T> {}
impl<T> PartialEq for Id<T> {
    fn eq(&self, other: &Id<T>) -> bool {
        self.raw == other.raw
    }
}
impl<T> Eq for Id<T> {}
impl<T> Hash for Id<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.raw.hash(state)
    }
}
impl<T> fmt::Debug for Id<T> {
    /// Prints `Id::<TypeName>(raw)`, trimming any module path off the type name.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let full_name = std::any::type_name::<T>();
        let short_name = match full_name.rfind(':') {
            Some(pos) => &full_name[pos + 1..],
            None => full_name,
        };
        write!(f, "Id::<{}>({})", short_name, self.raw)
    }
}
impl<T> Id<T> {
pub fn into_raw(self) -> usize {
self.raw as usize
}
fn from(n: u32) -> Self {
Self {
raw: n,
_ty: PhantomData,
}
}
pub fn range_to_iter(range: Range<Self>) -> impl Iterator<Item = Self> {
let start = range.start.raw;
let end = range.end.raw;
(start..end).map(Self::from)
}
}
/// Yet another index-based arena.
///
/// An arena is a kind of simple grow-only allocator, backed by a `Vec`
/// where all items have the same lifetime, making it easier
/// to have references between those items.
/// They are all dropped at once when the arena is dropped.
#[derive(Clone, PartialEq, Eq)]
pub struct Arena<T> {
    // Backing storage; `Id<T>::raw` indexes directly into this `Vec`.
    data: Vec<T>,
}
impl<T: fmt::Debug> fmt::Debug for Arena<T> {
    /// Debug-prints the arena as a struct with its length and contents.
    fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
        let mut debug = fmt.debug_struct("Arena");
        debug.field("len", &self.data.len());
        debug.field("data", &self.data);
        debug.finish()
    }
}
impl<T> Arena<T> {
    /// Creates an empty arena.
    pub fn new() -> Arena<T> {
        Arena { data: Vec::new() }
    }

    /// Stores `value` and returns the id under which it can be retrieved.
    pub fn alloc(&mut self, value: T) -> Id<T> {
        // The id is the position the value will occupy after the push.
        let next_id = Id::from(self.data.len() as u32);
        self.data.push(value);
        next_id
    }

    /// Stores every item of `values`, returning the id range covering them all.
    pub fn alloc_iter<I: Iterator<Item = T>>(&mut self, values: I) -> Range<Id<T>> {
        let start = Id::from(self.data.len() as u32);
        for value in values {
            self.data.push(value);
        }
        let end = Id::from(self.data.len() as u32);
        Range { start, end }
    }
}
// Indexing by a single id returns a reference to the allocated value.
impl<T> Index<Id<T>> for Arena<T> {
    type Output = T;
    fn index(&self, id: Id<T>) -> &T {
        &self.data[id.raw as usize]
    }
}
// Indexing by an id range returns the contiguous slice of values,
// e.g. the range produced by `alloc_iter`.
impl<T> Index<Range<Id<T>>> for Arena<T> {
    type Output = [T];
    fn index(&self, id: Range<Id<T>>) -> &[T] {
        &self.data[(id.start.raw as usize)..(id.end.raw as usize)]
    }
}

View File

@ -1,273 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Core model and functions
//! to write a functional PubGrub algorithm.
use std::collections::HashSet as Set;
use crate::error::PubGrubError;
use crate::internal::arena::Arena;
use crate::internal::incompatibility::{IncompId, Incompatibility, Relation};
use crate::internal::partial_solution::SatisfierSearch::{
DifferentDecisionLevels, SameDecisionLevels,
};
use crate::internal::partial_solution::{DecisionLevel, PartialSolution};
use crate::internal::small_vec::SmallVec;
use crate::package::Package;
use crate::report::DerivationTree;
use crate::type_aliases::{DependencyConstraints, Map};
use crate::version_set::VersionSet;
/// Current state of the PubGrub algorithm.
#[derive(Clone)]
pub struct State<P: Package, VS: VersionSet, Priority: Ord + Clone> {
    // Package and version chosen as the root of the resolution.
    root_package: P,
    root_version: VS::V,
    // Incompatibility ids indexed by every package they mention, so the same
    // id appears under several keys (see `merge_incompatibility`).
    pub incompatibilities: Map<P, Vec<IncompId<P, VS>>>,
    /// Store the ids of incompatibilities that are already contradicted
    /// and will stay that way until the next conflict and backtrack is operated.
    contradicted_incompatibilities: rustc_hash::FxHashSet<IncompId<P, VS>>,
    /// Partial solution.
    /// TODO: remove pub.
    pub partial_solution: PartialSolution<P, VS, Priority>,
    /// The store is the reference storage for all incompatibilities.
    pub incompatibility_store: Arena<Incompatibility<P, VS>>,
    /// This is a stack of work to be done in `unit_propagation`.
    /// It can definitely be a local variable to that method, but
    /// this way we can reuse the same allocation for better performance.
    unit_propagation_buffer: SmallVec<P>,
}
impl<P: Package, VS: VersionSet, Priority: Ord + Clone> State<P, VS, Priority> {
    /// Initialization of PubGrub state.
    pub fn init(root_package: P, root_version: VS::V) -> Self {
        let mut incompatibility_store = Arena::new();
        // Seed the solver with the "not root" incompatibility so the first
        // decision must pick the root package at the root version.
        let not_root_id = incompatibility_store.alloc(Incompatibility::not_root(
            root_package.clone(),
            root_version.clone(),
        ));
        let mut incompatibilities = Map::default();
        incompatibilities.insert(root_package.clone(), vec![not_root_id]);
        Self {
            root_package,
            root_version,
            incompatibilities,
            contradicted_incompatibilities: rustc_hash::FxHashSet::default(),
            partial_solution: PartialSolution::empty(),
            incompatibility_store,
            unit_propagation_buffer: SmallVec::Empty,
        }
    }

    /// Add an incompatibility to the state.
    pub fn add_incompatibility(&mut self, incompat: Incompatibility<P, VS>) {
        let id = self.incompatibility_store.alloc(incompat);
        self.merge_incompatibility(id);
    }

    /// Add an incompatibility to the state.
    ///
    /// One incompatibility is allocated per dependency of `package` at
    /// `version`; the returned range identifies the newly created ids.
    pub fn add_incompatibility_from_dependencies(
        &mut self,
        package: P,
        version: VS::V,
        deps: &DependencyConstraints<P, VS>,
    ) -> std::ops::Range<IncompId<P, VS>> {
        // Create incompatibilities and allocate them in the store.
        let new_incompats_id_range = self
            .incompatibility_store
            .alloc_iter(deps.iter().map(|dep| {
                Incompatibility::from_dependency(package.clone(), version.clone(), dep)
            }));
        // Merge the newly created incompatibilities with the older ones.
        for id in IncompId::range_to_iter(new_incompats_id_range.clone()) {
            self.merge_incompatibility(id);
        }
        new_incompats_id_range
    }

    /// Unit propagation is the core mechanism of the solving algorithm.
    /// CF <https://github.com/dart-lang/pub/blob/master/doc/solver.md#unit-propagation>
    pub fn unit_propagation(&mut self, package: P) -> Result<(), PubGrubError<P, VS>> {
        self.unit_propagation_buffer.clear();
        self.unit_propagation_buffer.push(package);
        while let Some(current_package) = self.unit_propagation_buffer.pop() {
            // Iterate over incompatibilities in reverse order
            // to evaluate first the newest incompatibilities.
            let mut conflict_id = None;
            // We only care about incompatibilities if it contains the current package.
            for &incompat_id in self.incompatibilities[&current_package].iter().rev() {
                // Skip incompatibilities known to be contradicted since the last backtrack.
                if self.contradicted_incompatibilities.contains(&incompat_id) {
                    continue;
                }
                let current_incompat = &self.incompatibility_store[incompat_id];
                match self.partial_solution.relation(current_incompat) {
                    // If the partial solution satisfies the incompatibility
                    // we must perform conflict resolution.
                    Relation::Satisfied => {
                        log::info!(
                            "Start conflict resolution because incompat satisfied:\n {}",
                            current_incompat
                        );
                        conflict_id = Some(incompat_id);
                        break;
                    }
                    Relation::AlmostSatisfied(package_almost) => {
                        // The almost-satisfied package must be re-propagated.
                        self.unit_propagation_buffer.push(package_almost.clone());
                        // Add (not term) to the partial solution with incompat as cause.
                        self.partial_solution.add_derivation(
                            package_almost,
                            incompat_id,
                            &self.incompatibility_store,
                        );
                        // With the partial solution updated, the incompatibility is now contradicted.
                        self.contradicted_incompatibilities.insert(incompat_id);
                    }
                    Relation::Contradicted(_) => {
                        self.contradicted_incompatibilities.insert(incompat_id);
                    }
                    // Inconclusive relations require no action here.
                    _ => {}
                }
            }
            if let Some(incompat_id) = conflict_id {
                let (package_almost, root_cause) = self.conflict_resolution(incompat_id)?;
                // Conflict resolution backtracked, so pending work is stale.
                self.unit_propagation_buffer.clear();
                self.unit_propagation_buffer.push(package_almost.clone());
                // Add to the partial solution with incompat as cause.
                self.partial_solution.add_derivation(
                    package_almost,
                    root_cause,
                    &self.incompatibility_store,
                );
                // After conflict resolution and the partial solution update,
                // the root cause incompatibility is now contradicted.
                self.contradicted_incompatibilities.insert(root_cause);
            }
        }
        // If there are no more changed packages, unit propagation is done.
        Ok(())
    }

    /// Return the root cause and the backtracked model.
    /// CF <https://github.com/dart-lang/pub/blob/master/doc/solver.md#unit-propagation>
    fn conflict_resolution(
        &mut self,
        incompatibility: IncompId<P, VS>,
    ) -> Result<(P, IncompId<P, VS>), PubGrubError<P, VS>> {
        let mut current_incompat_id = incompatibility;
        let mut current_incompat_changed = false;
        loop {
            // A terminal incompatibility means resolution failed entirely.
            if self.incompatibility_store[current_incompat_id]
                .is_terminal(&self.root_package, &self.root_version)
            {
                return Err(PubGrubError::NoSolution(
                    self.build_derivation_tree(current_incompat_id),
                ));
            } else {
                let (package, satisfier_search_result) = self.partial_solution.satisfier_search(
                    &self.incompatibility_store[current_incompat_id],
                    &self.incompatibility_store,
                );
                match satisfier_search_result {
                    DifferentDecisionLevels {
                        previous_satisfier_level,
                    } => {
                        self.backtrack(
                            current_incompat_id,
                            current_incompat_changed,
                            previous_satisfier_level,
                        );
                        log::info!("backtrack to {:?}", previous_satisfier_level);
                        return Ok((package, current_incompat_id));
                    }
                    SameDecisionLevels { satisfier_cause } => {
                        // Derive a new incompatibility and keep resolving with it.
                        let prior_cause = Incompatibility::prior_cause(
                            current_incompat_id,
                            satisfier_cause,
                            &package,
                            &self.incompatibility_store,
                        );
                        log::info!("prior cause: {}", prior_cause);
                        current_incompat_id = self.incompatibility_store.alloc(prior_cause);
                        current_incompat_changed = true;
                    }
                }
            }
        }
    }

    /// Backtracking.
    fn backtrack(
        &mut self,
        incompat: IncompId<P, VS>,
        incompat_changed: bool,
        decision_level: DecisionLevel,
    ) {
        self.partial_solution
            .backtrack(decision_level, &self.incompatibility_store);
        // Contradictions recorded before the backtrack may no longer hold.
        self.contradicted_incompatibilities.clear();
        if incompat_changed {
            self.merge_incompatibility(incompat);
        }
    }

    /// Add this incompatibility into the set of all incompatibilities.
    ///
    /// Pub collapses identical dependencies from adjacent package versions
    /// into individual incompatibilities.
    /// This substantially reduces the total number of incompatibilities
    /// and makes it much easier for Pub to reason about multiple versions of packages at once.
    ///
    /// For example, rather than representing
    /// foo 1.0.0 depends on bar ^1.0.0 and
    /// foo 1.1.0 depends on bar ^1.0.0
    /// as two separate incompatibilities,
    /// they are collapsed together into the single incompatibility {foo ^1.0.0, not bar ^1.0.0}
    /// (provided that no other version of foo exists between 1.0.0 and 2.0.0).
    /// We could collapse them into { foo (1.0.0 1.1.0), not bar ^1.0.0 }
    /// without having to check the existence of other versions though.
    ///
    /// Here we do the simple stupid thing of just growing the Vec.
    /// It may not be trivial since those incompatibilities
    /// may already have derived others.
    fn merge_incompatibility(&mut self, id: IncompId<P, VS>) {
        for (pkg, term) in self.incompatibility_store[id].iter() {
            // An "any" term would make the incompatibility vacuous; debug-check it.
            if cfg!(debug_assertions) {
                assert_ne!(term, &crate::term::Term::any());
            }
            self.incompatibilities
                .entry(pkg.clone())
                .or_default()
                .push(id);
        }
    }

    // Error reporting #########################################################

    fn build_derivation_tree(&self, incompat: IncompId<P, VS>) -> DerivationTree<P, VS> {
        let shared_ids = self.find_shared_ids(incompat);
        Incompatibility::build_derivation_tree(incompat, &shared_ids, &self.incompatibility_store)
    }

    // Collects the incompatibility ids reachable from `incompat` through more
    // than one derivation path, so the report can reference them by number.
    fn find_shared_ids(&self, incompat: IncompId<P, VS>) -> Set<IncompId<P, VS>> {
        let mut all_ids = Set::new();
        let mut shared_ids = Set::new();
        let mut stack = vec![incompat];
        while let Some(i) = stack.pop() {
            if let Some((id1, id2)) = self.incompatibility_store[i].causes() {
                if all_ids.contains(&i) {
                    shared_ids.insert(i);
                } else {
                    all_ids.insert(i);
                    stack.push(id1);
                    stack.push(id2);
                }
            }
        }
        shared_ids
    }
}

View File

@ -1,300 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! An incompatibility is a set of terms for different packages
//! that should never be satisfied all together.
use std::collections::HashSet as Set;
use std::fmt;
use crate::internal::arena::{Arena, Id};
use crate::internal::small_map::SmallMap;
use crate::package::Package;
use crate::report::{DefaultStringReporter, DerivationTree, Derived, External};
use crate::term::{self, Term};
use crate::version_set::VersionSet;
/// An incompatibility is a set of terms for different packages
/// that should never be satisfied all together.
/// An incompatibility usually originates from a package dependency.
/// For example, if package A at version 1 depends on package B
/// at version 2, you can never have both terms `A = 1`
/// and `not B = 2` satisfied at the same time in a partial solution.
/// This would mean that we found a solution with package A at version 1
/// but not with package B at version 2.
/// Yet A at version 1 depends on B at version 2 so this is not possible.
/// Therefore, the set `{ A = 1, not B = 2 }` is an incompatibility,
/// defined from dependencies of A at version 1.
///
/// Incompatibilities can also be derived from two other incompatibilities
/// during conflict resolution. More about all this in
/// [PubGrub documentation](https://github.com/dart-lang/pub/blob/master/doc/solver.md#incompatibility).
#[derive(Debug, Clone)]
pub struct Incompatibility<P: Package, VS: VersionSet> {
    // The terms of the incompatibility: at most one term per package,
    // since `SmallMap` is keyed by package.
    package_terms: SmallMap<P, Term<VS>>,
    // Why this incompatibility exists (root pick, missing versions, dependency, derivation...).
    pub kind: Kind<P, VS>,
}

/// Type alias of unique identifiers for incompatibilities.
pub type IncompId<P, VS> = Id<Incompatibility<P, VS>>;
/// Origin of an [Incompatibility].
#[derive(Debug, Clone)]
pub enum Kind<P: Package, VS: VersionSet> {
    /// Initial incompatibility aiming at picking the root package for the first decision.
    NotRoot(P, VS::V),
    /// There are no versions in the given range for this package.
    NoVersions(P, VS),
    /// Dependencies of the package are unavailable for versions in that range.
    UnavailableDependencies(P, VS),
    /// Incompatibility coming from the dependencies of a given package.
    /// NOTE(review): field order looks like (dependent, dependent range,
    /// dependency, dependency range) — confirm against `from_dependency`.
    FromDependencyOf(P, VS, P, VS),
    /// Derived from two causes. Stores cause ids.
    DerivedFrom(IncompId<P, VS>, IncompId<P, VS>),
}
/// A Relation describes how a set of terms can be compared to an incompatibility.
/// Typically, the set of terms comes from the partial solution.
#[derive(Eq, PartialEq, Debug)]
pub enum Relation<P: Package> {
    /// We say that a set of terms S satisfies an incompatibility I
    /// if S satisfies every term in I.
    Satisfied,
    /// We say that S contradicts I
    /// if S contradicts at least one term in I.
    /// (The payload is the contradicting package.)
    Contradicted(P),
    /// If S satisfies all but one of I's terms and is inconclusive for the remaining term,
    /// we say S "almost satisfies" I and we call the remaining term the "unsatisfied term".
    /// (The payload is the package of the unsatisfied term.)
    AlmostSatisfied(P),
    /// Otherwise, we say that their relation is inconclusive.
    Inconclusive,
}
impl<P: Package, VS: VersionSet> Incompatibility<P, VS> {
    /// Create the initial "not Root" incompatibility.
    pub fn not_root(package: P, version: VS::V) -> Self {
        Self {
            package_terms: SmallMap::One([(
                package.clone(),
                Term::Negative(VS::singleton(version.clone())),
            )]),
            kind: Kind::NotRoot(package, version),
        }
    }
    /// Create an incompatibility to remember
    /// that a given set does not contain any version.
    ///
    /// Panics when given a negative term: only a positive term describes
    /// a concretely requested set of versions.
    pub fn no_versions(package: P, term: Term<VS>) -> Self {
        let set = match &term {
            Term::Positive(r) => r.clone(),
            // NOTE(review): this panic message reads backwards; the actual
            // invariant is that the *given* term must be positive.
            Term::Negative(_) => panic!("No version should have a positive term"),
        };
        Self {
            package_terms: SmallMap::One([(package.clone(), term)]),
            kind: Kind::NoVersions(package, set),
        }
    }
    /// Create an incompatibility to remember
    /// that a package version is not selectable
    /// because its list of dependencies is unavailable.
    pub fn unavailable_dependencies(package: P, version: VS::V) -> Self {
        let set = VS::singleton(version);
        Self {
            package_terms: SmallMap::One([(package.clone(), Term::Positive(set.clone()))]),
            kind: Kind::UnavailableDependencies(package, set),
        }
    }
    /// Build an incompatibility from a given dependency.
    pub fn from_dependency(package: P, version: VS::V, dep: (&P, &VS)) -> Self {
        let set1 = VS::singleton(version);
        let (p2, set2) = dep;
        Self {
            // An empty dependency set cannot be satisfied at all, so only
            // the depending package appears among the terms.
            package_terms: if set2 == &VS::empty() {
                SmallMap::One([(package.clone(), Term::Positive(set1.clone()))])
            } else {
                SmallMap::Two([
                    (package.clone(), Term::Positive(set1.clone())),
                    (p2.clone(), Term::Negative(set2.clone())),
                ])
            },
            kind: Kind::FromDependencyOf(package, set1, p2.clone(), set2.clone()),
        }
    }
    /// Prior cause of two incompatibilities using the rule of resolution.
    pub fn prior_cause(
        incompat: Id<Self>,
        satisfier_cause: Id<Self>,
        package: &P,
        incompatibility_store: &Arena<Self>,
    ) -> Self {
        let kind = Kind::DerivedFrom(incompat, satisfier_cause);
        // Start from the terms of `incompat`, minus the pivot package.
        let mut package_terms = incompatibility_store[incompat].package_terms.clone();
        let t1 = package_terms.remove(package).unwrap();
        let satisfier_cause_terms = &incompatibility_store[satisfier_cause].package_terms;
        // Intersect the remaining terms with the satisfier cause's terms
        // (excluding the pivot, which is handled below via union).
        package_terms.merge(
            satisfier_cause_terms.iter().filter(|(p, _)| p != &package),
            |t1, t2| Some(t1.intersection(t2)),
        );
        // The pivot package is only kept when the union of its two terms
        // carries information (i.e. is not `Term::any()`).
        let term = t1.union(satisfier_cause_terms.get(package).unwrap());
        if term != Term::any() {
            package_terms.insert(package.clone(), term);
        }
        Self {
            package_terms,
            kind,
        }
    }
    /// Check if an incompatibility should mark the end of the algorithm
    /// because it satisfies the root package.
    pub fn is_terminal(&self, root_package: &P, root_version: &VS::V) -> bool {
        if self.package_terms.len() == 0 {
            true
        } else if self.package_terms.len() > 1 {
            false
        } else {
            // Exactly one term: terminal iff it is the root package's term
            // and it contains the root version.
            let (package, term) = self.package_terms.iter().next().unwrap();
            (package == root_package) && term.contains(root_version)
        }
    }
    /// Get the term related to a given package (if it exists).
    pub fn get(&self, package: &P) -> Option<&Term<VS>> {
        self.package_terms.get(package)
    }
    /// Iterate over packages.
    pub fn iter(&self) -> impl Iterator<Item = (&P, &Term<VS>)> {
        self.package_terms.iter()
    }
    // Reporting ###############################################################
    /// Retrieve parent causes if of type DerivedFrom.
    pub fn causes(&self) -> Option<(Id<Self>, Id<Self>)> {
        match self.kind {
            Kind::DerivedFrom(id1, id2) => Some((id1, id2)),
            _ => None,
        }
    }
    /// Build a derivation tree for error reporting.
    ///
    /// Recurses through `DerivedFrom` causes; every other kind maps to an
    /// `External` leaf node.
    pub fn build_derivation_tree(
        self_id: Id<Self>,
        shared_ids: &Set<Id<Self>>,
        store: &Arena<Self>,
    ) -> DerivationTree<P, VS> {
        match &store[self_id].kind {
            Kind::DerivedFrom(id1, id2) => {
                let cause1 = Self::build_derivation_tree(*id1, shared_ids, store);
                let cause2 = Self::build_derivation_tree(*id2, shared_ids, store);
                let derived = Derived {
                    terms: store[self_id].package_terms.as_map(),
                    shared_id: shared_ids.get(&self_id).map(|id| id.into_raw()),
                    cause1: Box::new(cause1),
                    cause2: Box::new(cause2),
                };
                DerivationTree::Derived(derived)
            }
            Kind::NotRoot(package, version) => {
                DerivationTree::External(External::NotRoot(package.clone(), version.clone()))
            }
            Kind::NoVersions(package, set) => {
                DerivationTree::External(External::NoVersions(package.clone(), set.clone()))
            }
            Kind::UnavailableDependencies(package, set) => DerivationTree::External(
                External::UnavailableDependencies(package.clone(), set.clone()),
            ),
            Kind::FromDependencyOf(package, set, dep_package, dep_set) => {
                DerivationTree::External(External::FromDependencyOf(
                    package.clone(),
                    set.clone(),
                    dep_package.clone(),
                    dep_set.clone(),
                ))
            }
        }
    }
}
impl<'a, P: Package, VS: VersionSet + 'a> Incompatibility<P, VS> {
    /// Compute how a set of terms (typically the partial solution) relates
    /// to this incompatibility. Cf. the definition of the Relation enum.
    pub fn relation(&self, terms: impl Fn(&P) -> Option<&'a Term<VS>>) -> Relation<P> {
        let mut current = Relation::Satisfied;
        for (package, incompat_term) in self.package_terms.iter() {
            match terms(package).map(|term| incompat_term.relation_with(term)) {
                Some(term::Relation::Contradicted) => {
                    // A single contradicted term contradicts the whole incompatibility.
                    return Relation::Contradicted(package.clone());
                }
                Some(term::Relation::Satisfied) => continue,
                None | Some(term::Relation::Inconclusive) => {
                    // If a package is not present, the intersection is the same as [Term::any].
                    // According to the rules of satisfactions, the relation would be inconclusive.
                    // It could also be satisfied if the incompatibility term was also [Term::any],
                    // but we systematically remove those from incompatibilities
                    // so we're safe on that front.
                    current = if matches!(current, Relation::Satisfied) {
                        Relation::AlmostSatisfied(package.clone())
                    } else {
                        Relation::Inconclusive
                    };
                }
            }
        }
        current
    }
}
impl<P: Package, VS: VersionSet> fmt::Display for Incompatibility<P, VS> {
    /// Render the incompatibility's terms via the default reporter.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let rendered = DefaultStringReporter::string_terms(&self.package_terms.as_map());
        f.write_str(&rendered)
    }
}
// TESTS #######################################################################
#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::range::Range;
    use crate::term::tests::strategy as term_strat;
    use crate::type_aliases::Map;
    use proptest::prelude::*;
    proptest! {
        /// For any three different packages p1, p2 and p3,
        /// for any three terms t1, t2 and t3,
        /// if we have the two following incompatibilities:
        ///    { p1: t1, p2: not t2 }
        ///    { p2: t2, p3: t3 }
        /// the rule of resolution says that we can deduce the following incompatibility:
        ///    { p1: t1, p3: t3 }
        #[test]
        fn rule_of_resolution(t1 in term_strat(), t2 in term_strat(), t3 in term_strat()) {
            let mut store = Arena::new();
            // The `kind` values below are placeholders: the assertion only
            // checks the resulting package_terms, never the kind.
            let i1 = store.alloc(Incompatibility {
                package_terms: SmallMap::Two([("p1", t1.clone()), ("p2", t2.negate())]),
                kind: Kind::UnavailableDependencies("0", Range::full())
            });
            let i2 = store.alloc(Incompatibility {
                package_terms: SmallMap::Two([("p2", t2), ("p3", t3.clone())]),
                kind: Kind::UnavailableDependencies("0", Range::full())
            });
            let mut i3 = Map::default();
            i3.insert("p1", t1);
            i3.insert("p3", t3);
            let i_resolution = Incompatibility::prior_cause(i1, i2, &"p2", &store);
            assert_eq!(i_resolution.package_terms.as_map(), i3);
        }
    }
}

View File

@ -1,10 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Non exposed modules.
pub mod arena;
pub mod core;
// Incompatibility type, its constructors, and derivation-tree reporting.
pub mod incompatibility;
// Partial solution: package assignments, decisions and derivations.
pub mod partial_solution;
// Inline-optimized map (0/1/2 entries inline, hash map beyond).
pub mod small_map;
// Inline-optimized vector (0/1/2 elements inline, Vec beyond).
pub mod small_vec;

View File

@ -1,591 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! A Memory acts like a structured partial solution
//! where terms are regrouped by package in a [Map](crate::type_aliases::Map).
use std::fmt::Display;
use std::hash::BuildHasherDefault;
use priority_queue::PriorityQueue;
use rustc_hash::FxHasher;
use crate::internal::arena::Arena;
use crate::internal::incompatibility::{IncompId, Incompatibility, Relation};
use crate::internal::small_map::SmallMap;
use crate::package::Package;
use crate::term::Term;
use crate::type_aliases::SelectedDependencies;
use crate::version_set::VersionSet;
use super::small_vec::SmallVec;
// Insertion-order-preserving map keyed with the fast FxHasher.
// NOTE(review): the `Fnv` in the name is historical — the hasher used is Fx, not FNV.
type FnvIndexMap<K, V> = indexmap::IndexMap<K, V, BuildHasherDefault<rustc_hash::FxHasher>>;
/// Depth of decisions taken so far in the partial solution (0 = none).
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq)]
pub struct DecisionLevel(pub u32);
impl DecisionLevel {
    /// The next (one deeper) decision level.
    pub fn increment(self) -> Self {
        Self(self.0 + 1)
    }
}
/// The partial solution contains all package assignments,
/// organized by package and historically ordered.
#[derive(Clone, Debug)]
pub struct PartialSolution<P: Package, VS: VersionSet, Priority: Ord + Clone> {
    /// Monotonically increasing counter stamped onto every assignment.
    next_global_index: u32,
    current_decision_level: DecisionLevel,
    /// `package_assignments` is primarily a HashMap from a package to its
    /// `PackageAssignments`. But it can also keep the items in an order.
    /// We maintain three sections in this order:
    /// 1. `[..current_decision_level]` Are packages that have had a decision made sorted by the `decision_level`.
    /// This makes it very efficient to extract the solution, and to backtrack to a particular decision level.
    /// 2. `[current_decision_level..changed_this_decision_level]` Are packages that have **not** had their assignments
    /// changed since the last time `prioritize` has been called. Within this range there is no sorting.
    /// 3. `[changed_this_decision_level..]` Contains all packages that **have** had their assignments changed since
    /// the last time `prioritize` has been called. The inverse is not necessarily true, some packages in the range
    /// did not have a change. Within this range there is no sorting.
    package_assignments: FnvIndexMap<P, PackageAssignments<P, VS>>,
    /// `prioritized_potential_packages` is primarily a HashMap from a package with no decision and a positive assignment
    /// to its `Priority`. But, it also maintains a max heap of packages by `Priority` order.
    prioritized_potential_packages: PriorityQueue<P, Priority, BuildHasherDefault<FxHasher>>,
    changed_this_decision_level: usize,
}
impl<P: Package, VS: VersionSet, Priority: Ord + Clone> Display
    for PartialSolution<P, VS, Priority>
{
    /// Debug rendering: the global index, the current decision level, and
    /// one entry per package assignment (sorted so the output does not
    /// depend on map iteration order).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut assignments: Vec<_> = self
            .package_assignments
            .iter()
            .map(|(p, pa)| format!("{}: {}", p, pa))
            .collect();
        assignments.sort();
        write!(
            f,
            // Fixed typo: was "package_assignements".
            "next_global_index: {}\ncurrent_decision_level: {:?}\npackage_assignments:\n{}",
            self.next_global_index,
            self.current_decision_level,
            // NOTE(review): "\t\n" puts the tab *before* each newline;
            // "\n\t" (indent each following line) was probably intended —
            // kept as-is here to limit the change to the spelling fix.
            assignments.join("\t\n")
        )
    }
}
/// Package assignments contain the potential decision and derivations
/// that have already been made for a given package,
/// as well as the intersection of terms by all of these.
#[derive(Clone, Debug)]
struct PackageAssignments<P: Package, VS: VersionSet> {
    /// Lowest decision level at which this package received an assignment.
    smallest_decision_level: DecisionLevel,
    /// Highest decision level at which this package received an assignment.
    highest_decision_level: DecisionLevel,
    /// History of derivations for this package, in assignment order.
    dated_derivations: SmallVec<DatedDerivation<P, VS>>,
    /// Running intersection of all assignments (decision included).
    assignments_intersection: AssignmentsIntersection<VS>,
}
impl<P: Package, VS: VersionSet> Display for PackageAssignments<P, VS> {
    /// Render the decision-level range, the derivation history, and the
    /// accumulated assignments intersection.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut derivations = Vec::with_capacity(self.dated_derivations.len());
        for dated_derivation in self.dated_derivations.iter() {
            derivations.push(dated_derivation.to_string());
        }
        write!(
            f,
            "decision range: {:?}..{:?}\nderivations:\n {}\n,assignments_intersection: {}",
            self.smallest_decision_level,
            self.highest_decision_level,
            derivations.join("\n "),
            self.assignments_intersection
        )
    }
}
/// A derivation stamped with when it happened.
#[derive(Clone, Debug)]
pub struct DatedDerivation<P: Package, VS: VersionSet> {
    /// Position of this assignment in the global assignment history.
    global_index: u32,
    /// Decision level at the time the derivation was added.
    decision_level: DecisionLevel,
    /// Incompatibility that caused this derivation.
    cause: IncompId<P, VS>,
}
impl<P: Package, VS: VersionSet> Display for DatedDerivation<P, VS> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{:?}, cause: {:?}", self.decision_level, self.cause)
    }
}
/// Either a decision that has been made for a package,
/// or the running intersection of its derivation terms.
#[derive(Clone, Debug)]
enum AssignmentsIntersection<VS: VersionSet> {
    /// (global assignment index, decided version, `Term::exact` of that version).
    Decision((u32, VS::V, Term<VS>)),
    /// Intersection of all derivation terms seen so far (no decision yet).
    Derivations(Term<VS>),
}
impl<VS: VersionSet> Display for AssignmentsIntersection<VS> {
    /// Render either the decision (level and version) or the derivations term.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Derivations(term) => write!(f, "Derivations term: {}", term),
            Self::Decision((lvl, version, _)) => {
                write!(f, "Decision: level {}, v = {}", lvl, version)
            }
        }
    }
}
/// Outcome of the satisfier search (see `PartialSolution::satisfier_search`):
/// whether the satisfier and the previous satisfier sit at different
/// decision levels or at the same one.
#[derive(Clone, Debug)]
pub enum SatisfierSearch<P: Package, VS: VersionSet> {
    /// The two satisfiers are at different decision levels.
    DifferentDecisionLevels {
        /// Decision level of the previous satisfier.
        previous_satisfier_level: DecisionLevel,
    },
    /// The two satisfiers are at the same decision level.
    SameDecisionLevels {
        /// Cause of the satisfier assignment.
        satisfier_cause: IncompId<P, VS>,
    },
}
impl<P: Package, VS: VersionSet, Priority: Ord + Clone> PartialSolution<P, VS, Priority> {
    /// Initialize an empty PartialSolution.
    pub fn empty() -> Self {
        Self {
            next_global_index: 0,
            current_decision_level: DecisionLevel(0),
            package_assignments: FnvIndexMap::default(),
            prioritized_potential_packages: PriorityQueue::default(),
            changed_this_decision_level: 0,
        }
    }
    /// Add a decision.
    pub fn add_decision(&mut self, package: P, version: VS::V) {
        // Check that add_decision is never used in the wrong context.
        if cfg!(debug_assertions) {
            match self.package_assignments.get_mut(&package) {
                None => panic!("Derivations must already exist"),
                Some(pa) => match &pa.assignments_intersection {
                    // Cannot be called when a decision has already been taken.
                    AssignmentsIntersection::Decision(_) => panic!("Already existing decision"),
                    // Cannot be called if the version is not contained in the terms' intersection.
                    AssignmentsIntersection::Derivations(term) => {
                        debug_assert!(
                            term.contains(&version),
                            "{}: {} was expected to be contained in {}",
                            package,
                            version,
                            term,
                        )
                    }
                },
            }
            assert_eq!(
                self.changed_this_decision_level,
                self.package_assignments.len()
            );
        }
        let new_idx = self.current_decision_level.0 as usize;
        self.current_decision_level = self.current_decision_level.increment();
        let (old_idx, _, pa) = self
            .package_assignments
            .get_full_mut(&package)
            .expect("Derivations must already exist");
        pa.highest_decision_level = self.current_decision_level;
        pa.assignments_intersection = AssignmentsIntersection::Decision((
            self.next_global_index,
            version.clone(),
            Term::exact(version),
        ));
        // Maintain that the beginning of the `package_assignments` has all decisions in sorted order.
        if new_idx != old_idx {
            self.package_assignments.swap_indices(new_idx, old_idx);
        }
        self.next_global_index += 1;
    }
    /// Add a derivation.
    pub fn add_derivation(
        &mut self,
        package: P,
        cause: IncompId<P, VS>,
        store: &Arena<Incompatibility<P, VS>>,
    ) {
        use indexmap::map::Entry;
        // The derived term for `package` is the negation of its term
        // in the causing incompatibility.
        let term = store[cause].get(&package).unwrap().negate();
        let dated_derivation = DatedDerivation {
            global_index: self.next_global_index,
            decision_level: self.current_decision_level,
            cause,
        };
        self.next_global_index += 1;
        let pa_last_index = self.package_assignments.len().saturating_sub(1);
        match self.package_assignments.entry(package) {
            Entry::Occupied(mut occupied) => {
                let idx = occupied.index();
                let pa = occupied.get_mut();
                pa.highest_decision_level = self.current_decision_level;
                match &mut pa.assignments_intersection {
                    // Check that add_derivation is never called in the wrong context.
                    AssignmentsIntersection::Decision(_) => {
                        panic!("add_derivation should not be called after a decision")
                    }
                    AssignmentsIntersection::Derivations(t) => {
                        *t = t.intersection(&term);
                        if t.is_positive() {
                            // we can use `swap_indices` to make `changed_this_decision_level` only go down by 1
                            // but the copying is slower than the larger search
                            self.changed_this_decision_level =
                                std::cmp::min(self.changed_this_decision_level, idx);
                        }
                    }
                }
                pa.dated_derivations.push(dated_derivation);
            }
            Entry::Vacant(v) => {
                if term.is_positive() {
                    self.changed_this_decision_level =
                        std::cmp::min(self.changed_this_decision_level, pa_last_index);
                }
                v.insert(PackageAssignments {
                    smallest_decision_level: self.current_decision_level,
                    highest_decision_level: self.current_decision_level,
                    dated_derivations: SmallVec::One([dated_derivation]),
                    assignments_intersection: AssignmentsIntersection::Derivations(term),
                });
            }
        }
    }
    /// Iterate over packages with a positive assignment and no decision yet,
    /// restricted to those whose assignments changed since the last
    /// prioritization pass (or all of them right after a backtrack).
    pub fn prioritized_packages(&self) -> impl Iterator<Item = (&P, &VS)> {
        let check_all = self.changed_this_decision_level
            == self.current_decision_level.0.saturating_sub(1) as usize;
        let current_decision_level = self.current_decision_level;
        self.package_assignments
            .get_range(self.changed_this_decision_level..)
            .unwrap()
            .iter()
            .filter(move |(_, pa)| {
                // We only actually need to update the package if it has been changed
                // since the last time we called prioritize.
                // Which means its highest decision level is the current decision level,
                // or if we backtracked in the mean time.
                check_all || pa.highest_decision_level == current_decision_level
            })
            .filter_map(|(p, pa)| pa.assignments_intersection.potential_package_filter(p))
    }
    /// Recompute priorities for packages whose assignments changed and pop
    /// the package with the highest priority, if any.
    pub fn pick_highest_priority_pkg(
        &mut self,
        prioritizer: impl Fn(&P, &VS) -> Priority,
    ) -> Option<P> {
        let check_all = self.changed_this_decision_level
            == self.current_decision_level.0.saturating_sub(1) as usize;
        let current_decision_level = self.current_decision_level;
        let prioritized_potential_packages = &mut self.prioritized_potential_packages;
        self.package_assignments
            .get_range(self.changed_this_decision_level..)
            .unwrap()
            .iter()
            .filter(|(_, pa)| {
                // We only actually need to update the package if it has been changed
                // since the last time we called prioritize.
                // Which means its highest decision level is the current decision level,
                // or if we backtracked in the mean time.
                check_all || pa.highest_decision_level == current_decision_level
            })
            .filter_map(|(p, pa)| pa.assignments_intersection.potential_package_filter(p))
            .for_each(|(p, r)| {
                let priority = prioritizer(p, r);
                prioritized_potential_packages.push(p.clone(), priority);
            });
        self.changed_this_decision_level = self.package_assignments.len();
        prioritized_potential_packages.pop().map(|(p, _)| p)
    }
    /// If a partial solution has, for every positive derivation,
    /// a corresponding decision that satisfies that assignment,
    /// it's a total solution and version solving has succeeded.
    pub fn extract_solution(&self) -> SelectedDependencies<P, VS::V> {
        // Decisions occupy the first `current_decision_level` slots
        // (kept sorted by `add_decision`).
        self.package_assignments
            .iter()
            .take(self.current_decision_level.0 as usize)
            .map(|(p, pa)| match &pa.assignments_intersection {
                AssignmentsIntersection::Decision((_, v, _)) => (p.clone(), v.clone()),
                AssignmentsIntersection::Derivations(_) => {
                    panic!("Derivations in the Decision part")
                }
            })
            .collect()
    }
    /// Backtrack the partial solution to a given decision level.
    pub fn backtrack(
        &mut self,
        decision_level: DecisionLevel,
        store: &Arena<Incompatibility<P, VS>>,
    ) {
        self.current_decision_level = decision_level;
        self.package_assignments.retain(|p, pa| {
            if pa.smallest_decision_level > decision_level {
                // Remove all entries that have a smallest decision level higher than the backtrack target.
                false
            } else if pa.highest_decision_level <= decision_level {
                // Do not change entries older than the backtrack decision level target.
                true
            } else {
                // smallest_decision_level <= decision_level < highest_decision_level
                //
                // Since decision_level < highest_decision_level,
                // We can be certain that there will be no decision in this package assignments
                // after backtracking, because such decision would have been the last
                // assignment and it would have the "highest_decision_level".
                // Truncate the history.
                while pa.dated_derivations.last().map(|dd| dd.decision_level) > Some(decision_level)
                {
                    pa.dated_derivations.pop();
                }
                debug_assert!(!pa.dated_derivations.is_empty());
                // Update highest_decision_level.
                pa.highest_decision_level = pa.dated_derivations.last().unwrap().decision_level;
                // Recompute the assignments intersection.
                pa.assignments_intersection = AssignmentsIntersection::Derivations(
                    pa.dated_derivations
                        .iter()
                        .fold(Term::any(), |acc, dated_derivation| {
                            let term = store[dated_derivation.cause].get(p).unwrap().negate();
                            acc.intersection(&term)
                        }),
                );
                true
            }
        });
        // Throw away all stored priority levels, and mark that they all need to be recomputed.
        self.prioritized_potential_packages.clear();
        self.changed_this_decision_level = self.current_decision_level.0.saturating_sub(1) as usize;
    }
    /// We can add the version to the partial solution as a decision
    /// if it doesn't produce any conflict with the new incompatibilities.
    /// In practice I think it can only produce a conflict if one of the dependencies
    /// (which are used to make the new incompatibilities)
    /// is already in the partial solution with an incompatible version.
    pub fn add_version(
        &mut self,
        package: P,
        version: VS::V,
        new_incompatibilities: std::ops::Range<IncompId<P, VS>>,
        store: &Arena<Incompatibility<P, VS>>,
    ) {
        let exact = Term::exact(version.clone());
        let not_satisfied = |incompat: &Incompatibility<P, VS>| {
            incompat.relation(|p| {
                if p == &package {
                    Some(&exact)
                } else {
                    self.term_intersection_for_package(p)
                }
            }) != Relation::Satisfied
        };
        // Check none of the dependencies (new_incompatibilities)
        // would create a conflict (be satisfied).
        if store[new_incompatibilities].iter().all(not_satisfied) {
            log::info!("add_decision: {} @ {}", package, version);
            self.add_decision(package, version);
        } else {
            log::info!(
                "not adding {} @ {} because of its dependencies",
                package,
                version
            );
        }
    }
    /// Check if the terms in the partial solution satisfy the incompatibility.
    pub fn relation(&self, incompat: &Incompatibility<P, VS>) -> Relation<P> {
        incompat.relation(|package| self.term_intersection_for_package(package))
    }
    /// Retrieve intersection of terms related to package.
    pub fn term_intersection_for_package(&self, package: &P) -> Option<&Term<VS>> {
        self.package_assignments
            .get(package)
            .map(|pa| pa.assignments_intersection.term())
    }
    /// Figure out if the satisfier and previous satisfier are of different decision levels.
    pub fn satisfier_search(
        &self,
        incompat: &Incompatibility<P, VS>,
        store: &Arena<Incompatibility<P, VS>>,
    ) -> (P, SatisfierSearch<P, VS>) {
        let satisfied_map = Self::find_satisfier(incompat, &self.package_assignments, store);
        // The satisfier is the assignment with the highest global index.
        let (satisfier_package, &(satisfier_index, _, satisfier_decision_level)) = satisfied_map
            .iter()
            .max_by_key(|(_p, (_, global_index, _))| global_index)
            .unwrap();
        let satisfier_package = satisfier_package.clone();
        let previous_satisfier_level = Self::find_previous_satisfier(
            incompat,
            &satisfier_package,
            satisfied_map,
            &self.package_assignments,
            store,
        );
        if previous_satisfier_level < satisfier_decision_level {
            let search_result = SatisfierSearch::DifferentDecisionLevels {
                previous_satisfier_level,
            };
            (satisfier_package, search_result)
        } else {
            let satisfier_pa = self.package_assignments.get(&satisfier_package).unwrap();
            let dd = &satisfier_pa.dated_derivations[satisfier_index];
            let search_result = SatisfierSearch::SameDecisionLevels {
                satisfier_cause: dd.cause,
            };
            (satisfier_package, search_result)
        }
    }
    /// A satisfier is the earliest assignment in partial solution such that the incompatibility
    /// is satisfied by the partial solution up to and including that assignment.
    ///
    /// Returns a map indicating for each package term, when that was first satisfied in history.
    /// If we effectively found a satisfier, the returned map must be the same size that incompat.
    ///
    /// Question: This is possible since we added a "global_index" to every dated_derivation.
    /// It would be nice if we could get rid of it, but I don't know if then it will be possible
    /// to return a coherent previous_satisfier_level.
    fn find_satisfier(
        incompat: &Incompatibility<P, VS>,
        package_assignments: &FnvIndexMap<P, PackageAssignments<P, VS>>,
        store: &Arena<Incompatibility<P, VS>>,
    ) -> SmallMap<P, (usize, u32, DecisionLevel)> {
        let mut satisfied = SmallMap::Empty;
        for (package, incompat_term) in incompat.iter() {
            let pa = package_assignments.get(package).expect("Must exist");
            satisfied.insert(
                package.clone(),
                pa.satisfier(package, incompat_term, Term::any(), store),
            );
        }
        satisfied
    }
    /// Earliest assignment in the partial solution before satisfier
    /// such that incompatibility is satisfied by the partial solution up to
    /// and including that assignment plus satisfier.
    fn find_previous_satisfier(
        incompat: &Incompatibility<P, VS>,
        satisfier_package: &P,
        mut satisfied_map: SmallMap<P, (usize, u32, DecisionLevel)>,
        package_assignments: &FnvIndexMap<P, PackageAssignments<P, VS>>,
        store: &Arena<Incompatibility<P, VS>>,
    ) -> DecisionLevel {
        // First, let's retrieve the previous derivations and the initial accum_term.
        let satisfier_pa = package_assignments.get(satisfier_package).unwrap();
        let (satisfier_index, _gidx, _dl) = satisfied_map.get_mut(satisfier_package).unwrap();
        // When the satisfier index points past the derivations, the satisfier
        // was the decision itself; otherwise it is a derivation in history.
        let accum_term = if *satisfier_index == satisfier_pa.dated_derivations.len() {
            match &satisfier_pa.assignments_intersection {
                AssignmentsIntersection::Derivations(_) => panic!("must be a decision"),
                AssignmentsIntersection::Decision((_, _, term)) => term.clone(),
            }
        } else {
            let dd = &satisfier_pa.dated_derivations[*satisfier_index];
            store[dd.cause].get(satisfier_package).unwrap().negate()
        };
        let incompat_term = incompat
            .get(satisfier_package)
            .expect("satisfier package not in incompat");
        satisfied_map.insert(
            satisfier_package.clone(),
            satisfier_pa.satisfier(satisfier_package, incompat_term, accum_term, store),
        );
        // Finally, let's identify the decision level of that previous satisfier.
        let (_, &(_, _, decision_level)) = satisfied_map
            .iter()
            .max_by_key(|(_p, (_, global_index, _))| global_index)
            .unwrap();
        decision_level.max(DecisionLevel(1))
    }
}
impl<P: Package, VS: VersionSet> PackageAssignments<P, VS> {
    /// Walk this package's derivation history, intersecting terms into
    /// `start_term`, until the accumulated term becomes a subset of
    /// `incompat_term` (i.e. satisfies it). Returns the (index into
    /// `dated_derivations`, global index, decision level) of the satisfying
    /// assignment; if no derivation satisfies it, the decision must.
    fn satisfier(
        &self,
        package: &P,
        incompat_term: &Term<VS>,
        start_term: Term<VS>,
        store: &Arena<Incompatibility<P, VS>>,
    ) -> (usize, u32, DecisionLevel) {
        // Term where we accumulate intersections until incompat_term is satisfied.
        let mut accum_term = start_term;
        // Indicate if we found a satisfier in the list of derivations, otherwise it will be the decision.
        for (idx, dated_derivation) in self.dated_derivations.iter().enumerate() {
            let this_term = store[dated_derivation.cause].get(package).unwrap().negate();
            accum_term = accum_term.intersection(&this_term);
            if accum_term.subset_of(incompat_term) {
                // We found the derivation causing satisfaction.
                return (
                    idx,
                    dated_derivation.global_index,
                    dated_derivation.decision_level,
                );
            }
        }
        // If it wasn't found in the derivations,
        // it must be the decision which is last (if called in the right context).
        match self.assignments_intersection {
            AssignmentsIntersection::Decision((global_index, _, _)) => (
                // Index one-past-the-end signals "the decision" to callers.
                self.dated_derivations.len(),
                global_index,
                self.highest_decision_level,
            ),
            AssignmentsIntersection::Derivations(_) => {
                unreachable!(
                    concat!(
                        "while processing package {}: ",
                        "accum_term = {} isn't a subset of incompat_term = {}, ",
                        "which means the last assignment should have been a decision, ",
                        "but instead it was a derivation. This shouldn't be possible! ",
                        "(Maybe your Version ordering is broken?)"
                    ),
                    package, accum_term, incompat_term
                )
            }
        }
    }
}
impl<VS: VersionSet> AssignmentsIntersection<VS> {
    /// Returns the term intersection of all assignments (decision included).
    fn term(&self) -> &Term<VS> {
        match self {
            Self::Derivations(term) => term,
            Self::Decision((_, _, term)) => term,
        }
    }
    /// A package is a potential pick if there isn't an already
    /// selected version (no "decision")
    /// and if it contains at least one positive derivation term
    /// in the partial solution.
    fn potential_package_filter<'a, P: Package>(
        &'a self,
        package: &'a P,
    ) -> Option<(&'a P, &'a VS)> {
        match self {
            Self::Decision(_) => None,
            Self::Derivations(term_intersection) if term_intersection.is_positive() => {
                Some((package, term_intersection.unwrap_positive()))
            }
            Self::Derivations(_) => None,
        }
    }
}

View File

@ -1,195 +0,0 @@
use crate::type_aliases::Map;
use std::hash::Hash;
/// A map optimized for holding zero, one, or two entries inline;
/// it only spills to a heap-allocated hash map beyond that.
#[derive(Debug, Clone)]
pub enum SmallMap<K, V> {
    /// No entries.
    Empty,
    /// Exactly one entry, stored inline.
    One([(K, V); 1]),
    /// Exactly two entries, stored inline.
    Two([(K, V); 2]),
    /// Three or more entries, spilled to a hash map.
    Flexible(Map<K, V>),
}
impl<K: PartialEq + Eq + Hash, V> SmallMap<K, V> {
    /// Look up the value stored under `key`, if any.
    pub fn get(&self, key: &K) -> Option<&V> {
        match self {
            Self::Empty => None,
            Self::One(entries) => entries.iter().find(|(k, _)| k == key).map(|(_, v)| v),
            Self::Two(entries) => entries.iter().find(|(k, _)| k == key).map(|(_, v)| v),
            Self::Flexible(data) => data.get(key),
        }
    }
    /// Look up a mutable reference to the value stored under `key`, if any.
    pub fn get_mut(&mut self, key: &K) -> Option<&mut V> {
        match self {
            Self::Empty => None,
            Self::One(entries) => entries.iter_mut().find(|(k, _)| k == key).map(|(_, v)| v),
            Self::Two(entries) => entries.iter_mut().find(|(k, _)| k == key).map(|(_, v)| v),
            Self::Flexible(data) => data.get_mut(key),
        }
    }
    /// Remove `key` from the map, returning its value when it was present.
    ///
    /// Shrinks the inline representation (`Two` -> `One` -> `Empty`); a
    /// spilled map stays `Flexible`.
    pub fn remove(&mut self, key: &K) -> Option<V> {
        // Move the current representation out, compute the next one plus
        // the removed value, then write the representation back.
        let (next, removed) = match std::mem::take(self) {
            Self::Empty => (Self::Empty, None),
            Self::One([(k, v)]) => {
                if key == &k {
                    (Self::Empty, Some(v))
                } else {
                    (Self::One([(k, v)]), None)
                }
            }
            Self::Two([(k1, v1), (k2, v2)]) => {
                if key == &k1 {
                    (Self::One([(k2, v2)]), Some(v1))
                } else if key == &k2 {
                    (Self::One([(k1, v1)]), Some(v2))
                } else {
                    (Self::Two([(k1, v1), (k2, v2)]), None)
                }
            }
            Self::Flexible(mut data) => {
                let value = data.remove(key);
                (Self::Flexible(data), value)
            }
        };
        *self = next;
        removed
    }
    /// Insert `value` under `key`, overwriting any previous value.
    ///
    /// Grows the inline representation (`Empty` -> `One` -> `Two`) and
    /// spills to a `Flexible` hash map on the third distinct key.
    pub fn insert(&mut self, key: K, value: V) {
        let next = match std::mem::take(self) {
            Self::Empty => Self::One([(key, value)]),
            Self::One([(k, v)]) => {
                if key == k {
                    Self::One([(k, value)])
                } else {
                    Self::Two([(k, v), (key, value)])
                }
            }
            Self::Two([(k1, v1), (k2, v2)]) => {
                if key == k1 {
                    Self::Two([(k1, value), (k2, v2)])
                } else if key == k2 {
                    Self::Two([(k1, v1), (k2, value)])
                } else {
                    let mut data: Map<K, V> = Map::with_capacity_and_hasher(3, Default::default());
                    data.insert(key, value);
                    data.insert(k1, v1);
                    data.insert(k2, v2);
                    Self::Flexible(data)
                }
            }
            Self::Flexible(mut data) => {
                data.insert(key, value);
                Self::Flexible(data)
            }
        };
        *self = next;
    }
}
impl<K: Clone + PartialEq + Eq + Hash, V: Clone> SmallMap<K, V> {
    /// Merge two hash maps.
    ///
    /// When a key is common to both,
    /// apply the provided function to both values.
    /// If the result is None, remove that key from the merged map,
    /// otherwise add the content of the Some(_).
    pub fn merge<'a>(
        &'a mut self,
        map_2: impl Iterator<Item = (&'a K, &'a V)>,
        f: impl Fn(&V, &V) -> Option<V>,
    ) {
        for (key, incoming) in map_2 {
            match self.get_mut(key) {
                Some(existing) => match f(existing, incoming) {
                    Some(merged) => *existing = merged,
                    None => {
                        self.remove(key);
                    }
                },
                None => self.insert(key.clone(), incoming.clone()),
            }
        }
    }
}
impl<K, V> Default for SmallMap<K, V> {
    /// The default small map is empty.
    fn default() -> Self {
        Self::Empty
    }
}
impl<K, V> SmallMap<K, V> {
    /// Number of entries in the map.
    pub fn len(&self) -> usize {
        match self {
            Self::Empty => 0,
            Self::One(_) => 1,
            Self::Two(_) => 2,
            Self::Flexible(data) => data.len(),
        }
    }

    /// Returns true when the map holds no entries.
    ///
    /// Provided alongside `len` per convention (clippy's
    /// `len_without_is_empty`); backward-compatible addition.
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
}
impl<K: Eq + Hash + Clone, V: Clone> SmallMap<K, V> {
pub fn as_map(&self) -> Map<K, V> {
match self {
Self::Empty => Map::default(),
Self::One([(k, v)]) => {
let mut map = Map::with_capacity_and_hasher(1, Default::default());
map.insert(k.clone(), v.clone());
map
}
Self::Two(data) => {
let mut map = Map::with_capacity_and_hasher(2, Default::default());
for (k, v) in data {
map.insert(k.clone(), v.clone());
}
map
}
Self::Flexible(data) => data.clone(),
}
}
}
/// Iterator over a `SmallMap`: either over the inline array storage
/// or over the spilled hash map.
enum IterSmallMap<'a, K, V> {
    /// Iterating the inline (`One`/`Two`) or empty storage.
    Inline(std::slice::Iter<'a, (K, V)>),
    /// Iterating the `Flexible` hash-map storage.
    Map(std::collections::hash_map::Iter<'a, K, V>),
}
impl<'a, K: 'a, V: 'a> Iterator for IterSmallMap<'a, K, V> {
    type Item = (&'a K, &'a V);
    fn next(&mut self) -> Option<Self::Item> {
        match self {
            // Convert `&(K, V)` into `(&K, &V)` to match the map iterator's item type.
            IterSmallMap::Inline(inner) => inner.next().map(|(k, v)| (k, v)),
            IterSmallMap::Map(inner) => inner.next(),
        }
    }
}
impl<K, V> SmallMap<K, V> {
    /// Iterate over shared references to the entries, in whatever order the
    /// current representation stores them.
    pub fn iter(&self) -> impl Iterator<Item = (&K, &V)> {
        match self {
            Self::Flexible(data) => IterSmallMap::Map(data.iter()),
            Self::Empty => IterSmallMap::Inline([].iter()),
            Self::One(data) => IterSmallMap::Inline(data.iter()),
            Self::Two(data) => IterSmallMap::Inline(data.iter()),
        }
    }
}

View File

@ -1,222 +0,0 @@
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ops::Deref;
/// A vector that stores zero, one, or two elements inline (no heap
/// allocation), falling back to a `Vec` for three or more.
#[derive(Clone)]
pub enum SmallVec<T> {
    // No elements, no allocation.
    Empty,
    // Exactly one element, stored inline.
    One([T; 1]),
    // Exactly two elements, stored inline.
    Two([T; 2]),
    // Three or more elements, heap-allocated.
    Flexible(Vec<T>),
}
impl<T> SmallVec<T> {
    /// An empty collection.
    pub fn empty() -> Self {
        Self::Empty
    }

    /// A collection holding a single element.
    pub fn one(t: T) -> Self {
        Self::One([t])
    }

    /// View the elements as a slice, regardless of representation.
    pub fn as_slice(&self) -> &[T] {
        match self {
            Self::Empty => &[],
            Self::One(inline) => inline,
            Self::Two(inline) => inline,
            Self::Flexible(vec) => vec,
        }
    }

    /// Append an element, promoting to the next representation once the
    /// inline capacity is exceeded.
    pub fn push(&mut self, new: T) {
        let next = match std::mem::take(self) {
            Self::Empty => Self::One([new]),
            Self::One([a]) => Self::Two([a, new]),
            Self::Two([a, b]) => Self::Flexible(vec![a, b, new]),
            Self::Flexible(mut vec) => {
                vec.push(new);
                Self::Flexible(vec)
            }
        };
        *self = next;
    }

    /// Remove and return the last element, if any.
    ///
    /// A `Flexible` representation stays `Flexible` even when it shrinks,
    /// keeping its heap allocation for reuse.
    pub fn pop(&mut self) -> Option<T> {
        let (next, popped) = match std::mem::take(self) {
            Self::Empty => (Self::Empty, None),
            Self::One([a]) => (Self::Empty, Some(a)),
            Self::Two([a, b]) => (Self::One([a]), Some(b)),
            Self::Flexible(mut vec) => {
                let out = vec.pop();
                (Self::Flexible(vec), out)
            }
        };
        *self = next;
        popped
    }

    /// Remove all elements. A `Flexible` representation keeps its
    /// allocation; inline representations simply become `Empty`.
    pub fn clear(&mut self) {
        if let Self::Flexible(mut vec) = std::mem::take(self) {
            vec.clear();
            *self = Self::Flexible(vec);
        }
        // Otherwise `take` already left `Self::Empty` behind.
    }

    /// Iterate over shared references to the elements.
    pub fn iter(&self) -> std::slice::Iter<'_, T> {
        self.as_slice().iter()
    }
}
impl<T> Default for SmallVec<T> {
fn default() -> Self {
Self::Empty
}
}
impl<T> Deref for SmallVec<T> {
type Target = [T];
fn deref(&self) -> &Self::Target {
self.as_slice()
}
}
impl<'a, T> IntoIterator for &'a SmallVec<T> {
    type Item = &'a T;
    type IntoIter = std::slice::Iter<'a, T>;

    /// Borrowing iteration is just iteration over the backing slice.
    fn into_iter(self) -> Self::IntoIter {
        self.as_slice().iter()
    }
}
impl<T: PartialEq> PartialEq for SmallVec<T> {
    /// Two vectors are equal when their elements are, regardless of which
    /// internal representation each one uses.
    fn eq(&self, other: &Self) -> bool {
        self.as_slice() == other.as_slice()
    }
}

impl<T: Eq> Eq for SmallVec<T> {}
impl<T: fmt::Debug> fmt::Debug for SmallVec<T> {
    /// Format like a slice, hiding the internal representation.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(self.as_slice(), f)
    }
}
impl<T: Hash> Hash for SmallVec<T> {
    /// Hash as (length, elements) so that equal collections hash equally
    /// regardless of internal representation.
    fn hash<H: Hasher>(&self, state: &mut H) {
        let slice = self.as_slice();
        slice.len().hash(state);
        Hash::hash_slice(slice, state);
    }
}
#[cfg(feature = "serde")]
impl<T: serde::Serialize> serde::Serialize for SmallVec<T> {
    // Serialize as a plain sequence, identical to serializing a slice of the
    // elements; the internal representation is not observable on the wire.
    fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        serde::Serialize::serialize(self.as_slice(), s)
    }
}
#[cfg(feature = "serde")]
impl<'de, T: serde::Deserialize<'de>> serde::Deserialize<'de> for SmallVec<T> {
    fn deserialize<D: serde::Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
        // Visitor that builds a `SmallVec` element by element, so the
        // representation upgrades (One -> Two -> Flexible) as it grows.
        struct SmallVecVisitor<T> {
            // Ties the visitor to the element type without storing one.
            marker: std::marker::PhantomData<T>,
        }
        impl<'de, T> serde::de::Visitor<'de> for SmallVecVisitor<T>
        where
            T: serde::Deserialize<'de>,
        {
            type Value = SmallVec<T>;
            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("a sequence")
            }
            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
            where
                A: serde::de::SeqAccess<'de>,
            {
                let mut values = SmallVec::empty();
                while let Some(value) = seq.next_element()? {
                    values.push(value);
                }
                Ok(values)
            }
        }
        let visitor = SmallVecVisitor {
            marker: Default::default(),
        };
        d.deserialize_seq(visitor)
    }
}
impl<T> IntoIterator for SmallVec<T> {
    type Item = T;
    type IntoIter = SmallVecIntoIter<T>;

    /// Owning iteration wraps the matching owned iterator for each variant.
    fn into_iter(self) -> Self::IntoIter {
        match self {
            Self::Empty => SmallVecIntoIter::Empty,
            Self::One(inline) => SmallVecIntoIter::One(inline.into_iter()),
            Self::Two(inline) => SmallVecIntoIter::Two(inline.into_iter()),
            Self::Flexible(vec) => SmallVecIntoIter::Flexible(vec.into_iter()),
        }
    }
}
/// Owning iterator for `SmallVec`, with one variant per representation.
pub enum SmallVecIntoIter<T> {
    Empty,
    One(<[T; 1] as IntoIterator>::IntoIter),
    Two(<[T; 2] as IntoIterator>::IntoIter),
    Flexible(<Vec<T> as IntoIterator>::IntoIter),
}
impl<T> Iterator for SmallVecIntoIter<T> {
type Item = T;
fn next(&mut self) -> Option<Self::Item> {
match self {
SmallVecIntoIter::Empty => None,
SmallVecIntoIter::One(it) => it.next(),
SmallVecIntoIter::Two(it) => it.next(),
SmallVecIntoIter::Flexible(it) => it.next(),
}
}
}
// TESTS #######################################################################
#[cfg(test)]
pub mod tests {
    use super::*;
    use proptest::prelude::*;
    proptest! {
        // Differential test: replay a random script of pushes (`Some(i)`)
        // and pops (`None`) against both `SmallVec` and a plain `Vec`,
        // checking that they agree after every step.
        #[test]
        fn push_and_pop(commands: Vec<Option<u8>>) {
            let mut v = vec![];
            let mut sv = SmallVec::Empty;
            for command in commands {
                match command {
                    Some(i) => {
                        v.push(i);
                        sv.push(i);
                    }
                    None => {
                        assert_eq!(v.pop(), sv.pop());
                    }
                }
                assert_eq!(v.as_slice(), sv.as_slice());
            }
        }
    }
}

View File

@ -1,232 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! PubGrub version solving algorithm.
//!
//! Version solving consists in efficiently finding a set of packages and versions
//! that satisfy all the constraints of a given project dependencies.
//! In addition, when that is not possible,
//! we should try to provide a very human-readable and clear
//! explanation as to why that failed.
//!
//! # Package and Version traits
//!
//! All the code in this crate is manipulating packages and versions, and for this to work
//! we defined a [Package](package::Package) and [Version](version::Version) traits
//! that are used as bounds on most of the exposed types and functions.
//!
//! Package identifiers needs to implement our [Package](package::Package) trait,
//! which is automatic if the type already implements
//! [Clone] + [Eq] + [Hash] + [Debug] + [Display](std::fmt::Display).
//! So things like [String] will work out of the box.
//!
//! Our [Version](version::Version) trait requires
//! [Clone] + [Ord] + [Debug] + [Display](std::fmt::Display)
//! and also the definition of two methods,
//! [lowest() -> Self](version::Version::lowest) which returns the lowest version existing,
//! and [bump(&self) -> Self](version::Version::bump) which returns the next smallest version
//! strictly higher than the current one.
//! For convenience, this library already provides
//! two implementations of [Version](version::Version).
//! The first one is [NumberVersion](version::NumberVersion), basically a newtype for [u32].
//! The second one is [SemanticVersion](version::SemanticVersion)
//! that implements semantic versioning rules.
//!
//! # Basic example
//!
//! Let's imagine that we are building a user interface
//! with a menu containing dropdowns with some icons,
//! icons that we are also directly using in other parts of the interface.
//! For this scenario our direct dependencies are `menu` and `icons`,
//! but the complete set of dependencies looks like follows:
//!
//! - `root` depends on `menu` and `icons`
//! - `menu` depends on `dropdown`
//! - `dropdown` depends on `icons`
//! - `icons` has no dependency
//!
//! We can model that scenario with this library as follows
//! ```
//! # use pubgrub::solver::{OfflineDependencyProvider, resolve};
//! # use pubgrub::version::NumberVersion;
//! # use pubgrub::range::Range;
//!
//! type NumVS = Range<NumberVersion>;
//!
//! let mut dependency_provider = OfflineDependencyProvider::<&str, NumVS>::new();
//!
//! dependency_provider.add_dependencies(
//! "root", 1, [("menu", Range::full()), ("icons", Range::full())],
//! );
//! dependency_provider.add_dependencies("menu", 1, [("dropdown", Range::full())]);
//! dependency_provider.add_dependencies("dropdown", 1, [("icons", Range::full())]);
//! dependency_provider.add_dependencies("icons", 1, []);
//!
//! // Run the algorithm.
//! let solution = resolve(&dependency_provider, "root", 1).unwrap();
//! ```
//!
//! # DependencyProvider trait
//!
//! In our previous example we used the
//! [OfflineDependencyProvider](solver::OfflineDependencyProvider),
//! which is a basic implementation of the [DependencyProvider](solver::DependencyProvider) trait.
//!
//! But we might want to implement the [DependencyProvider](solver::DependencyProvider)
//! trait for our own type.
//! Let's say that we will use [String] for packages,
//! and [SemanticVersion](version::SemanticVersion) for versions.
//! This may be done quite easily by implementing the three following functions.
//! ```
//! # use pubgrub::solver::{DependencyProvider, Dependencies};
//! # use pubgrub::version::SemanticVersion;
//! # use pubgrub::range::Range;
//! # use pubgrub::type_aliases::Map;
//! # use std::error::Error;
//! # use std::borrow::Borrow;
//! #
//! # struct MyDependencyProvider;
//! #
//! type SemVS = Range<SemanticVersion>;
//!
//! impl DependencyProvider<String, SemVS> for MyDependencyProvider {
//! fn choose_version(&self, package: &String, range: &SemVS) -> Result<Option<SemanticVersion>, Box<dyn Error + Send + Sync>> {
//! unimplemented!()
//! }
//!
//! type Priority = usize;
//! fn prioritize(&self, package: &String, range: &SemVS) -> Self::Priority {
//! unimplemented!()
//! }
//!
//! fn get_dependencies(
//! &self,
//! package: &String,
//! version: &SemanticVersion,
//! ) -> Result<Dependencies<String, SemVS>, Box<dyn Error + Send + Sync>> {
//! unimplemented!()
//! }
//! }
//! ```
//!
//! The first method
//! [choose_version](crate::solver::DependencyProvider::choose_version)
//! chooses a version compatible with the provided range for a package.
//! The second method
//! [prioritize](crate::solver::DependencyProvider::prioritize)
//! decides in which order different packages should be chosen.
//! Usually prioritizing packages
//! with the fewest number of compatible versions speeds up resolution.
//! But in general you are free to employ whatever strategy suits you best
//! to pick a package and a version.
//!
//! The third method [get_dependencies](crate::solver::DependencyProvider::get_dependencies)
//! aims at retrieving the dependencies of a given package at a given version.
//! Returns [None] if dependencies are unknown.
//!
//! In a real scenario, these methods may involve reading the file system
//! or doing network requests, so you may want to hold a cache in your
//! [DependencyProvider](solver::DependencyProvider) implementation.
//! How exactly this could be achieved is shown in `CachingDependencyProvider`
//! (see `examples/caching_dependency_provider.rs`).
//! You could also use the [OfflineDependencyProvider](solver::OfflineDependencyProvider)
//! type defined by the crate as guidance,
//! but you are free to use whatever approach makes sense in your situation.
//!
//! # Solution and error reporting
//!
//! When everything goes well, the algorithm finds and returns the complete
//! set of direct and indirect dependencies satisfying all the constraints.
//! The packages and versions selected are returned as
//! [SelectedDependencies<P, V>](type_aliases::SelectedDependencies).
//! But sometimes there is no solution because dependencies are incompatible.
//! In such cases, [resolve(...)](solver::resolve) returns a
//! [PubGrubError::NoSolution(derivation_tree)](error::PubGrubError::NoSolution),
//! where the provided derivation tree is a custom binary tree
//! containing the full chain of reasons why there is no solution.
//!
//! All the items in the tree are called incompatibilities
//! and may be of two types, either "external" or "derived".
//! Leaves of the tree are external incompatibilities,
//! and nodes are derived.
//! External incompatibilities have reasons that are independent
//! of the way this algorithm is implemented such as
//! - dependencies: "package_a" at version 1 depends on "package_b" at version 4
//! - missing dependencies: dependencies of "package_a" are unknown
//! - absence of version: there is no version of "package_a" in the range [3.1.0 4.0.0[
//!
//! Derived incompatibilities are obtained during the algorithm execution by deduction,
//! such as if "a" depends on "b" and "b" depends on "c", "a" depends on "c".
//!
//! This crate defines a [Reporter](crate::report::Reporter) trait, with an associated
//! [Output](crate::report::Reporter::Output) type and a single method.
//! ```
//! # use pubgrub::package::Package;
//! # use pubgrub::version_set::VersionSet;
//! # use pubgrub::report::DerivationTree;
//! #
//! pub trait Reporter<P: Package, VS: VersionSet> {
//! type Output;
//!
//! fn report(derivation_tree: &DerivationTree<P, VS>) -> Self::Output;
//! }
//! ```
//! Implementing a [Reporter](crate::report::Reporter) may involve a lot of heuristics
//! to make the output human-readable and natural.
//! For convenience, we provide a default implementation
//! [DefaultStringReporter](crate::report::DefaultStringReporter)
//! that outputs the report as a [String].
//! You may use it as follows:
//! ```
//! # use pubgrub::solver::{resolve, OfflineDependencyProvider};
//! # use pubgrub::report::{DefaultStringReporter, Reporter};
//! # use pubgrub::error::PubGrubError;
//! # use pubgrub::version::NumberVersion;
//! # use pubgrub::range::Range;
//! #
//! # type NumVS = Range<NumberVersion>;
//! #
//! # let dependency_provider = OfflineDependencyProvider::<&str, NumVS>::new();
//! # let root_package = "root";
//! # let root_version = 1;
//! #
//! match resolve(&dependency_provider, root_package, root_version) {
//! Ok(solution) => println!("{:?}", solution),
//! Err(PubGrubError::NoSolution(mut derivation_tree)) => {
//! derivation_tree.collapse_no_versions();
//! eprintln!("{}", DefaultStringReporter::report(&derivation_tree));
//! }
//! Err(err) => panic!("{:?}", err),
//! };
//! ```
//! Notice that we also used
//! [collapse_no_versions()](crate::report::DerivationTree::collapse_no_versions) above.
//! This method simplifies the derivation tree to get rid of the
//! [NoVersions](crate::report::External::NoVersions)
//! external incompatibilities in the derivation tree.
//! So instead of seeing things like this in the report:
//! ```txt
//! Because there is no version of foo in 1.0.1 <= v < 2.0.0
//! and foo 1.0.0 depends on bar 2.0.0 <= v < 3.0.0,
//! foo 1.0.0 <= v < 2.0.0 depends on bar 2.0.0 <= v < 3.0.0.
//! ```
//! you may have directly:
//! ```txt
//! foo 1.0.0 <= v < 2.0.0 depends on bar 2.0.0 <= v < 3.0.0.
//! ```
//! Beware though that if you are using some kind of offline mode
//! with a cache, you may want to know that some versions
//! do not exist in your cache.
#![allow(clippy::all, unreachable_pub)]
pub mod error;
pub mod package;
pub mod range;
pub mod report;
pub mod solver;
pub mod term;
pub mod type_aliases;
pub mod version;
pub mod version_set;
mod internal;

View File

@ -1,17 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Trait for identifying packages.
//! Automatically implemented for types implementing
//! [Clone] + [Eq] + [Hash] + [Debug] + [Display].
use std::fmt::{Debug, Display};
use std::hash::Hash;
/// Trait for identifying packages.
/// Automatically implemented for types already implementing
/// [Clone] + [Eq] + [Hash] + [Debug] + [Display].
pub trait Package: Clone + Eq + Hash + Debug + Display {}

/// Blanket implementation: any type satisfying the required bounds is a
/// valid package identifier, so e.g. [String] works out of the box.
impl<P: Clone + Eq + Hash + Debug + Display> Package for P {}

View File

@ -1,613 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Ranges are constraints defining sets of versions.
//!
//! Concretely, those constraints correspond to any set of versions
//! representable as the concatenation, union, and complement
//! of the ranges building blocks.
//!
//! Those building blocks are:
//! - [empty()](Range::empty): the empty set
//! - [full()](Range::full): the set of all possible versions
//! - [singleton(v)](Range::singleton): the set containing only the version v
//! - [higher_than(v)](Range::higher_than): the set defined by `v <= versions`
//! - [strictly_higher_than(v)](Range::strictly_higher_than): the set defined by `v < versions`
//! - [lower_than(v)](Range::lower_than): the set defined by `versions <= v`
//! - [strictly_lower_than(v)](Range::strictly_lower_than): the set defined by `versions < v`
//! - [between(v1, v2)](Range::between): the set defined by `v1 <= versions < v2`
//!
//! Ranges can be created from any type that implements [`Ord`] + [`Clone`].
//!
//! In order to advance the solver front, comparisons of versions sets are necessary in the algorithm.
//! To do those comparisons between two sets S1 and S2 we use the mathematical property that S1 ⊂ S2 if and only if S1 ∩ S2 == S1.
//! We can thus compute an intersection and evaluate an equality to answer if S1 is a subset of S2.
//! But this means that the implementation of equality must be correct semantically.
//! In practice, if equality is derived automatically, this means sets must have unique representations.
//!
//! By migrating from a custom representation for discrete sets in v0.2
//! to a generic bounded representation for continuous sets in v0.3
//! we are potentially breaking that assumption in two ways:
//!
//! 1. Minimal and maximal `Unbounded` values can be replaced by their equivalent if it exists.
//! 2. Simplifying adjacent bounds of discrete sets cannot be detected and automated in the generic intersection code.
//!
//! An example for each can be given when `T` is `u32`.
//! First, we can have both segments `S1 = (Unbounded, Included(42u32))` and `S2 = (Included(0), Included(42u32))`
//! that represent the same segment but are structurally different.
//! Thus, a derived equality check would answer `false` to `S1 == S2` while it's true.
//!
//! Second both segments `S1 = (Included(1), Included(5))` and `S2 = (Included(1), Included(3)) + (Included(4), Included(5))` are equal.
//! But without asking the user to provide a `bump` function for discrete sets,
//! the algorithm is not able to tell that the space between the right `Included(3)` bound and the left `Included(4)` bound is empty.
//! Thus the algorithm is not able to reduce S2 to its canonical S1 form while computing sets operations like intersections in the generic code.
//!
//! This is likely to lead to user facing theoretically correct but practically nonsensical ranges,
//! like (Unbounded, Excluded(0)) or (Excluded(6), Excluded(7)).
//! In general nonsensical inputs often lead to hard to track bugs.
//! But as far as we can tell this should work in practice.
//! So for now this crate only provides an implementation for continuous ranges.
//! With the v0.3 api the user could choose to bring back the discrete implementation from v0.2, as documented in the guide.
//! If doing so regularly fixes bugs seen by users, we will bring it back into the core library.
//! If we do not see practical bugs, or we get a formal proof that the code cannot lead to error states, then we may remove this warning.
use crate::{internal::small_vec::SmallVec, version_set::VersionSet};
use std::ops::RangeBounds;
use std::{
fmt::{Debug, Display, Formatter},
ops::Bound::{self, Excluded, Included, Unbounded},
};
/// A Range represents multiple intervals of a continuous range of monotone increasing
/// values.
///
/// Internally stored as a list of `(start, end)` bound pairs, kept sorted and
/// disjoint (see `check_invariants`); the small-vector backing avoids heap
/// allocation for the common one- or two-segment case.
#[derive(Debug, Clone, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize))]
#[cfg_attr(feature = "serde", serde(transparent))]
pub struct Range<V> {
    segments: SmallVec<Interval<V>>,
}
/// A single contiguous interval: a pair of lower and upper bounds.
type Interval<V> = (Bound<V>, Bound<V>);
impl<V> Range<V> {
/// Empty set of versions.
pub fn empty() -> Self {
Self {
segments: SmallVec::empty(),
}
}
/// Set of all possible versions
pub fn full() -> Self {
Self {
segments: SmallVec::one((Unbounded, Unbounded)),
}
}
/// Set of all versions higher or equal to some version
pub fn higher_than(v: impl Into<V>) -> Self {
Self {
segments: SmallVec::one((Included(v.into()), Unbounded)),
}
}
/// Set of all versions higher to some version
pub fn strictly_higher_than(v: impl Into<V>) -> Self {
Self {
segments: SmallVec::one((Excluded(v.into()), Unbounded)),
}
}
/// Set of all versions lower to some version
pub fn strictly_lower_than(v: impl Into<V>) -> Self {
Self {
segments: SmallVec::one((Unbounded, Excluded(v.into()))),
}
}
/// Set of all versions lower or equal to some version
pub fn lower_than(v: impl Into<V>) -> Self {
Self {
segments: SmallVec::one((Unbounded, Included(v.into()))),
}
}
/// Set of versions greater or equal to `v1` but less than `v2`.
pub fn between(v1: impl Into<V>, v2: impl Into<V>) -> Self {
Self {
segments: SmallVec::one((Included(v1.into()), Excluded(v2.into()))),
}
}
}
impl<V: Clone> Range<V> {
    /// Set containing exactly one version
    pub fn singleton(v: impl Into<V>) -> Self {
        let v = v.into();
        Self {
            segments: SmallVec::one((Included(v.clone()), Included(v))),
        }
    }
    /// Returns the complement of this Range.
    pub fn complement(&self) -> Self {
        match self.segments.first() {
            // Complement of ∅ is ∞
            None => Self::full(),
            // Complement of ∞ is ∅
            Some((Unbounded, Unbounded)) => Self::empty(),
            // First high bound is +∞
            Some((Included(v), Unbounded)) => Self::strictly_lower_than(v.clone()),
            Some((Excluded(v), Unbounded)) => Self::lower_than(v.clone()),
            // Lower bound is -∞: negate the remaining segments, starting the
            // first gap just past this segment's (flipped) upper bound.
            Some((Unbounded, Included(v))) => {
                Self::negate_segments(Excluded(v.clone()), &self.segments[1..])
            }
            Some((Unbounded, Excluded(v))) => {
                Self::negate_segments(Included(v.clone()), &self.segments[1..])
            }
            // Both ends bounded: the complement gains an unbounded head gap.
            Some((Included(_), Included(_)))
            | Some((Included(_), Excluded(_)))
            | Some((Excluded(_), Included(_)))
            | Some((Excluded(_), Excluded(_))) => Self::negate_segments(Unbounded, &self.segments),
        }
    }
    /// Helper function performing the negation of intervals in segments.
    ///
    /// Walks the sorted, disjoint segments and emits the gaps between them:
    /// `start` is the lower bound of the gap currently being built, and each
    /// segment bound is flipped (Included <-> Excluded) to close one gap and
    /// open the next.
    fn negate_segments(start: Bound<V>, segments: &[Interval<V>]) -> Self {
        let mut complement_segments: SmallVec<Interval<V>> = SmallVec::empty();
        let mut start = start;
        for (v1, v2) in segments {
            complement_segments.push((
                start,
                match v1 {
                    Included(v) => Excluded(v.clone()),
                    Excluded(v) => Included(v.clone()),
                    // An unbounded lower bound can only occur in the first
                    // segment, which callers have already handled.
                    Unbounded => unreachable!(),
                },
            ));
            start = match v2 {
                Included(v) => Excluded(v.clone()),
                Excluded(v) => Included(v.clone()),
                Unbounded => Unbounded,
            }
        }
        // A trailing gap extends to +∞ unless the last segment was unbounded.
        if !matches!(start, Unbounded) {
            complement_segments.push((start, Unbounded));
        }
        Self {
            segments: complement_segments,
        }
    }
}
impl<V: Ord> Range<V> {
    /// Convert to something that can be used with
    /// [BTreeMap::range](std::collections::BTreeMap::range).
    /// All versions contained in self will be within the returned bounds, but
    /// the bounds may also admit versions not contained in self.
    /// Returns None if the range is empty.
    pub fn bounding_range(&self) -> Option<(Bound<&V>, Bound<&V>)> {
        let (start, _) = self.segments.first()?;
        let (_, end) = self
            .segments
            .last()
            .expect("if there is a first element, there must be a last element");
        Some((bound_as_ref(start), bound_as_ref(end)))
    }

    /// Returns true if this Range contains the specified value.
    pub fn contains(&self, v: &V) -> bool {
        // A value is contained if any one segment admits it.
        self.segments.iter().any(|segment| match segment {
            (Unbounded, Unbounded) => true,
            (Unbounded, Included(end)) => v <= end,
            (Unbounded, Excluded(end)) => v < end,
            (Included(start), Unbounded) => start <= v,
            (Included(start), Included(end)) => start <= v && v <= end,
            (Included(start), Excluded(end)) => start <= v && v < end,
            (Excluded(start), Unbounded) => start < v,
            (Excluded(start), Included(end)) => start < v && v <= end,
            (Excluded(start), Excluded(end)) => start < v && v < end,
        })
    }

    /// Construct a simple range from anything that impls [RangeBounds] like `v1..v2`.
    pub fn from_range_bounds<R, IV>(bounds: R) -> Self
    where
        R: RangeBounds<IV>,
        IV: Clone + Into<V>,
    {
        // Convert a borrowed bound of `IV` into an owned bound of `V`.
        let convert = |bound: Bound<&IV>| match bound {
            Included(v) => Included(v.clone().into()),
            Excluded(v) => Excluded(v.clone().into()),
            Unbounded => Unbounded,
        };
        let start = convert(bounds.start_bound());
        let end = convert(bounds.end_bound());
        // An inverted or empty interval collapses to the empty range.
        if !valid_segment(&start, &end) {
            return Self::empty();
        }
        Self {
            segments: SmallVec::one((start, end)),
        }
    }

    /// Debug-only check that segments are sorted, disjoint, and non-empty.
    fn check_invariants(self) -> Self {
        if cfg!(debug_assertions) {
            for window in self.segments.as_slice().windows(2) {
                match (&window[0].1, &window[1].0) {
                    // Two exclusive bounds may touch; anything else must
                    // leave a strict gap between consecutive segments.
                    (Excluded(l_end), Excluded(r_start)) => assert!(l_end <= r_start),
                    (Included(l_end), Included(r_start))
                    | (Included(l_end), Excluded(r_start))
                    | (Excluded(l_end), Included(r_start)) => assert!(l_end < r_start),
                    // Unbounded may only appear at the outermost positions.
                    (_, Unbounded) | (Unbounded, _) => panic!(),
                }
            }
            for (s, e) in self.segments.iter() {
                assert!(valid_segment(s, e));
            }
        }
        self
    }
}
/// Implementation of [`Bound::as_ref`] which is currently marked as unstable.
fn bound_as_ref<V>(bound: &Bound<V>) -> Bound<&V> {
    match *bound {
        Included(ref v) => Included(v),
        Excluded(ref v) => Excluded(v),
        Unbounded => Unbounded,
    }
}
/// A segment is valid when it admits at least one value: the start must lie
/// below the end, or equal to it when both ends are inclusive.
fn valid_segment<T: PartialOrd>(start: &Bound<T>, end: &Bound<T>) -> bool {
    match (start, end) {
        // An unbounded side can never make the segment empty.
        (Unbounded, _) | (_, Unbounded) => true,
        // Both inclusive: a single point `s == e` is still allowed.
        (Included(s), Included(e)) => s <= e,
        // Any exclusive side requires a strictly smaller start.
        (Included(s), Excluded(e)) | (Excluded(s), Included(e)) | (Excluded(s), Excluded(e)) => {
            s < e
        }
    }
}
impl<V: Ord + Clone> Range<V> {
    /// Computes the union of this `Range` and another.
    ///
    /// Implemented via De Morgan: `A ∪ B = ¬(¬A ∩ ¬B)`.
    pub fn union(&self, other: &Self) -> Self {
        self.complement()
            .intersection(&other.complement())
            .complement()
            .check_invariants()
    }
    /// Computes the intersection of two sets of versions.
    ///
    /// Sweeps both (sorted) segment lists in lockstep, emitting the overlap
    /// of the current pair of segments and advancing whichever side's
    /// segment ends at the emitted upper bound.
    pub fn intersection(&self, other: &Self) -> Self {
        let mut segments: SmallVec<Interval<V>> = SmallVec::empty();
        let mut left_iter = self.segments.iter().peekable();
        let mut right_iter = other.segments.iter().peekable();
        while let (Some((left_start, left_end)), Some((right_start, right_end))) =
            (left_iter.peek(), right_iter.peek())
        {
            // The overlap starts at the larger of the two lower bounds; when
            // they coincide, the exclusive bound wins since it rejects the
            // boundary value.
            let start = match (left_start, right_start) {
                (Included(l), Included(r)) => Included(std::cmp::max(l, r)),
                (Excluded(l), Excluded(r)) => Excluded(std::cmp::max(l, r)),
                (Included(i), Excluded(e)) | (Excluded(e), Included(i)) if i <= e => Excluded(e),
                (Included(i), Excluded(e)) | (Excluded(e), Included(i)) if e < i => Included(i),
                (s, Unbounded) | (Unbounded, s) => bound_as_ref(s),
                // The two guards above are exhaustive for the mixed cases.
                _ => unreachable!(),
            }
            .cloned();
            // ...and ends at the smaller of the two upper bounds, with the
            // same tie-breaking in favor of the exclusive bound.
            let end = match (left_end, right_end) {
                (Included(l), Included(r)) => Included(std::cmp::min(l, r)),
                (Excluded(l), Excluded(r)) => Excluded(std::cmp::min(l, r)),
                (Included(i), Excluded(e)) | (Excluded(e), Included(i)) if i >= e => Excluded(e),
                (Included(i), Excluded(e)) | (Excluded(e), Included(i)) if e > i => Included(i),
                (s, Unbounded) | (Unbounded, s) => bound_as_ref(s),
                _ => unreachable!(),
            }
            .cloned();
            // Consume whichever segment(s) this overlap exhausted.
            left_iter.next_if(|(_, e)| e == &end);
            right_iter.next_if(|(_, e)| e == &end);
            // Degenerate (empty) overlaps are dropped.
            if valid_segment(&start, &end) {
                segments.push((start, end))
            }
        }
        Self { segments }.check_invariants()
    }
}
impl<T: Debug + Display + Clone + Eq + Ord> VersionSet for Range<T> {
    type V = T;

    // Every trait operation delegates to the inherent `Range` method of the
    // same name.
    fn empty() -> Self {
        Range::empty()
    }
    fn full() -> Self {
        Range::full()
    }
    fn singleton(v: Self::V) -> Self {
        Range::singleton(v)
    }
    fn complement(&self) -> Self {
        Range::complement(self)
    }
    fn intersection(&self, other: &Self) -> Self {
        Range::intersection(self, other)
    }
    fn union(&self, other: &Self) -> Self {
        Range::union(self, other)
    }
    fn contains(&self, v: &Self::V) -> bool {
        Range::contains(self, v)
    }
}
// REPORT ######################################################################
impl<V: Display + Eq> Display for Range<V> {
    /// Render the range as a comma-separated list of segment constraints.
    /// The empty range renders as an empty string.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        for (idx, segment) in self.segments.iter().enumerate() {
            // Separate consecutive segments with a comma.
            if idx > 0 {
                write!(f, ", ")?;
            }
            match segment {
                (Unbounded, Unbounded) => write!(f, "*")?,
                (Unbounded, Included(v)) => write!(f, "<={v}")?,
                (Unbounded, Excluded(v)) => write!(f, "<{v}")?,
                (Included(v), Unbounded) => write!(f, ">={v}")?,
                // A degenerate segment [v, v] is a pinned version.
                (Included(v), Included(b)) if v == b => write!(f, "=={v}")?,
                // NOTE: the missing space here is preserved from the original
                // output format.
                (Included(v), Included(b)) => write!(f, ">={v},<={b}")?,
                (Included(v), Excluded(b)) => write!(f, ">={v}, <{b}")?,
                (Excluded(v), Unbounded) => write!(f, ">{v}")?,
                (Excluded(v), Included(b)) => write!(f, ">{v}, <={b}")?,
                (Excluded(v), Excluded(b)) => write!(f, ">{v}, <{b}")?,
            }
        }
        Ok(())
    }
}
// SERIALIZATION ###############################################################
#[cfg(feature = "serde")]
impl<'de, V: serde::Deserialize<'de>> serde::Deserialize<'de> for Range<V> {
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // This enables conversion from the "old" discrete implementation of `Range` to the new
        // bounded one.
        //
        // Serialization is always performed in the new format.
        #[derive(serde::Deserialize)]
        #[serde(untagged)]
        enum EitherInterval<V> {
            // New format: an explicit pair of bounds.
            B(Bound<V>, Bound<V>),
            // Old discrete format: a start version and an optional end,
            // meaning `[start, end)` or `[start, +∞)`.
            D(V, Option<V>),
        }
        let bounds: SmallVec<EitherInterval<V>> = serde::Deserialize::deserialize(deserializer)?;
        let mut segments = SmallVec::Empty;
        for i in bounds {
            match i {
                EitherInterval::B(l, r) => segments.push((l, r)),
                EitherInterval::D(l, Some(r)) => segments.push((Included(l), Excluded(r))),
                EitherInterval::D(l, None) => segments.push((Included(l), Unbounded)),
            }
        }
        Ok(Range { segments })
    }
}
// TESTS #######################################################################
#[cfg(test)]
pub mod tests {
use proptest::prelude::*;
use super::*;
/// Generate version sets from a random vector of deltas between bounds.
/// Each bound is randomly inclusive or exclusive.
pub fn strategy() -> impl Strategy<Value = Range<u32>> {
(
any::<bool>(),
prop::collection::vec(any::<(u32, bool)>(), 1..10),
)
.prop_map(|(start_unbounded, deltas)| {
let mut start = if start_unbounded {
Some(Unbounded)
} else {
None
};
let mut largest: u32 = 0;
let mut last_bound_was_inclusive = false;
let mut segments = SmallVec::Empty;
for (delta, inclusive) in deltas {
// Add the offset to the current bound
largest = match largest.checked_add(delta) {
Some(s) => s,
None => {
// Skip this offset, if it would result in a too large bound.
continue;
}
};
let current_bound = if inclusive {
Included(largest)
} else {
Excluded(largest)
};
// If we already have a start bound, the next offset defines the complete range.
// If we don't have a start bound, we have to generate one.
if let Some(start_bound) = start.take() {
// If the delta from the start bound is 0, the only authorized configuration is
// Included(x), Included(x)
if delta == 0 && !(matches!(start_bound, Included(_)) && inclusive) {
start = Some(start_bound);
continue;
}
last_bound_was_inclusive = inclusive;
segments.push((start_bound, current_bound));
} else {
// If the delta from the end bound of the last range is 0 and
// any of the last ending or current starting bound is inclusive,
// we skip the delta because they basically overlap.
if delta == 0 && (last_bound_was_inclusive || inclusive) {
continue;
}
start = Some(current_bound);
}
}
// If we still have a start bound, but didn't have enough deltas to complete another
// segment, we add an unbounded upperbound.
if let Some(start_bound) = start {
segments.push((start_bound, Unbounded));
}
return Range { segments }.check_invariants();
})
}
fn version_strat() -> impl Strategy<Value = u32> {
any::<u32>()
}
// Property-based tests checking the algebraic laws of `Range`:
// complement is an involution, intersection/union form a lattice,
// and `contains` agrees with the set operations.
proptest! {

    // Testing negate ----------------------------------

    #[test]
    fn negate_is_different(range in strategy()) {
        assert_ne!(range.complement(), range);
    }

    #[test]
    fn double_negate_is_identity(range in strategy()) {
        assert_eq!(range.complement().complement(), range);
    }

    #[test]
    fn negate_contains_opposite(range in strategy(), version in version_strat()) {
        assert_ne!(range.contains(&version), range.complement().contains(&version));
    }

    // Testing intersection ----------------------------

    #[test]
    fn intersection_is_symmetric(r1 in strategy(), r2 in strategy()) {
        assert_eq!(r1.intersection(&r2), r2.intersection(&r1));
    }

    #[test]
    fn intersection_with_any_is_identity(range in strategy()) {
        assert_eq!(Range::full().intersection(&range), range);
    }

    #[test]
    fn intersection_with_none_is_none(range in strategy()) {
        assert_eq!(Range::empty().intersection(&range), Range::empty());
    }

    #[test]
    fn intersection_is_idempotent(r1 in strategy(), r2 in strategy()) {
        assert_eq!(r1.intersection(&r2).intersection(&r2), r1.intersection(&r2));
    }

    #[test]
    fn intersection_is_associative(r1 in strategy(), r2 in strategy(), r3 in strategy()) {
        assert_eq!(r1.intersection(&r2).intersection(&r3), r1.intersection(&r2.intersection(&r3)));
    }

    #[test]
    fn intesection_of_complements_is_none(range in strategy()) {
        assert_eq!(range.complement().intersection(&range), Range::empty());
    }

    #[test]
    fn intesection_contains_both(r1 in strategy(), r2 in strategy(), version in version_strat()) {
        assert_eq!(r1.intersection(&r2).contains(&version), r1.contains(&version) && r2.contains(&version));
    }

    // Testing union -----------------------------------

    #[test]
    fn union_of_complements_is_any(range in strategy()) {
        assert_eq!(range.complement().union(&range), Range::full());
    }

    #[test]
    fn union_contains_either(r1 in strategy(), r2 in strategy(), version in version_strat()) {
        assert_eq!(r1.union(&r2).contains(&version), r1.contains(&version) || r2.contains(&version));
    }

    // Testing contains --------------------------------

    #[test]
    fn always_contains_exact(version in version_strat()) {
        assert!(Range::singleton(version).contains(&version));
    }

    #[test]
    fn contains_negation(range in strategy(), version in version_strat()) {
        assert_ne!(range.contains(&version), range.complement().contains(&version));
    }

    #[test]
    fn contains_intersection(range in strategy(), version in version_strat()) {
        assert_eq!(range.contains(&version), range.intersection(&Range::singleton(version)) != Range::empty());
    }

    #[test]
    fn contains_bounding_range(range in strategy(), version in version_strat()) {
        if range.contains(&version) {
            assert!(range.bounding_range().map(|b| b.contains(&version)).unwrap_or(false));
        }
    }

    // Round-tripping through `from_range_bounds` must preserve membership.
    #[test]
    fn from_range_bounds(range in any::<(Bound<u32>, Bound<u32>)>(), version in version_strat()) {
        let rv: Range<_> = Range::from_range_bounds(range);
        assert_eq!(range.contains(&version), rv.contains(&version));
    }

    #[test]
    fn from_range_bounds_round_trip(range in any::<(Bound<u32>, Bound<u32>)>()) {
        let rv: Range<u32> = Range::from_range_bounds(range);
        let rv2: Range<u32> = rv.bounding_range().map(Range::from_range_bounds::<_, u32>).unwrap_or_else(Range::empty);
        assert_eq!(rv, rv2);
    }
}
}

View File

@ -1,481 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Build a report as clear as possible as to why
//! dependency solving failed.
use std::fmt;
use std::ops::{Deref, DerefMut};
use crate::package::Package;
use crate::term::Term;
use crate::type_aliases::Map;
use crate::version_set::VersionSet;
/// Reporter trait: turns a failed resolution's derivation tree
/// into some output (e.g. a human-readable explanation).
pub trait Reporter<P: Package, VS: VersionSet> {
    /// Output type of the report.
    type Output;

    /// Generate a report from the derivation tree
    /// describing the resolution failure.
    fn report(derivation_tree: &DerivationTree<P, VS>) -> Self::Output;
}
/// Derivation tree resulting in the impossibility
/// to solve the dependencies of our root package.
/// Leaves are [External] incompatibilities; inner nodes are [Derived] ones.
#[derive(Debug, Clone)]
pub enum DerivationTree<P: Package, VS: VersionSet> {
    /// External incompatibility.
    External(External<P, VS>),
    /// Incompatibility derived from two others.
    Derived(Derived<P, VS>),
}
/// Incompatibilities that are not derived from others,
/// they have their own reason.
#[derive(Debug, Clone)]
pub enum External<P: Package, VS: VersionSet> {
    /// Initial incompatibility aiming at picking the root package for the first decision.
    NotRoot(P, VS::V),
    /// There are no versions in the given set for this package.
    NoVersions(P, VS),
    /// Dependencies of the package are unavailable for versions in that set.
    UnavailableDependencies(P, VS),
    /// Incompatibility coming from the dependencies of a given package:
    /// the first (package, set) depends on the second (package, set).
    FromDependencyOf(P, VS, P, VS),
}
/// Incompatibility derived from two others.
#[derive(Debug, Clone)]
pub struct Derived<P: Package, VS: VersionSet> {
    /// Terms of the incompatibility.
    pub terms: Map<P, Term<VS>>,
    /// Indicate if that incompatibility is present multiple times
    /// in the derivation tree.
    /// If that is the case, it has a unique id, provided in that option.
    /// Then, we may want to only explain it once,
    /// and refer to the explanation for the other times.
    pub shared_id: Option<usize>,
    /// First of the two incompatibilities this one was derived from.
    pub cause1: Box<DerivationTree<P, VS>>,
    /// Second of the two incompatibilities this one was derived from.
    pub cause2: Box<DerivationTree<P, VS>>,
}
impl<P: Package, VS: VersionSet> DerivationTree<P, VS> {
    /// Merge the [NoVersions](External::NoVersions) external incompatibilities
    /// with the other one they are matched with
    /// in a derived incompatibility.
    /// This cleans up quite nicely the generated report.
    /// You might want to do this if you know that the
    /// [DependencyProvider](crate::solver::DependencyProvider)
    /// was not run in some kind of offline mode that may not
    /// have access to all versions existing.
    pub fn collapse_no_versions(&mut self) {
        match self {
            // Leaf nodes have nothing to collapse.
            DerivationTree::External(_) => {}
            DerivationTree::Derived(derived) => {
                match (derived.cause1.deref_mut(), derived.cause2.deref_mut()) {
                    // If one cause is a NoVersions leaf, collapse the sibling
                    // first, then try to fold the NoVersions set into it.
                    (DerivationTree::External(External::NoVersions(p, r)), ref mut cause2) => {
                        cause2.collapse_no_versions();
                        // merge_no_versions returns None when the merge is not
                        // possible; in that case keep `self` unchanged.
                        *self = cause2
                            .clone()
                            .merge_no_versions(p.to_owned(), r.to_owned())
                            .unwrap_or_else(|| self.to_owned());
                    }
                    (ref mut cause1, DerivationTree::External(External::NoVersions(p, r))) => {
                        cause1.collapse_no_versions();
                        *self = cause1
                            .clone()
                            .merge_no_versions(p.to_owned(), r.to_owned())
                            .unwrap_or_else(|| self.to_owned());
                    }
                    // Otherwise, recurse into both children.
                    _ => {
                        derived.cause1.collapse_no_versions();
                        derived.cause2.collapse_no_versions();
                    }
                }
            }
        }
    }

    /// Fold a NoVersions(package, set) incompatibility into this node,
    /// widening the matching version set with a union where possible.
    fn merge_no_versions(self, package: P, set: VS) -> Option<Self> {
        match self {
            // TODO: take care of the Derived case.
            // Once done, we can remove the Option.
            DerivationTree::Derived(_) => Some(self),
            DerivationTree::External(External::NotRoot(_, _)) => {
                panic!("How did we end up with a NoVersions merged with a NotRoot?")
            }
            DerivationTree::External(External::NoVersions(_, r)) => Some(DerivationTree::External(
                External::NoVersions(package, set.union(&r)),
            )),
            DerivationTree::External(External::UnavailableDependencies(_, r)) => Some(
                DerivationTree::External(External::UnavailableDependencies(package, set.union(&r))),
            ),
            DerivationTree::External(External::FromDependencyOf(p1, r1, p2, r2)) => {
                // Widen whichever side of the dependency edge mentions `package`.
                if p1 == package {
                    Some(DerivationTree::External(External::FromDependencyOf(
                        p1,
                        r1.union(&set),
                        p2,
                        r2,
                    )))
                } else {
                    Some(DerivationTree::External(External::FromDependencyOf(
                        p1,
                        r1,
                        p2,
                        r2.union(&set),
                    )))
                }
            }
        }
    }
}
impl<P: Package, VS: VersionSet> fmt::Display for External<P, VS> {
    /// Render the external incompatibility as a human-readable sentence.
    /// A set equal to the full set is omitted from the message for brevity.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::NotRoot(package, version) => {
                write!(f, "we are solving dependencies of {} {}", package, version)
            }
            Self::NoVersions(package, set) => {
                if set != &VS::full() {
                    write!(f, "there is no version of {} in {}", package, set)
                } else {
                    write!(f, "there is no available version for {}", package)
                }
            }
            Self::UnavailableDependencies(package, set) => {
                if set != &VS::full() {
                    write!(
                        f,
                        "dependencies of {} at version {} are unavailable",
                        package, set
                    )
                } else {
                    write!(f, "dependencies of {} are unavailable", package)
                }
            }
            Self::FromDependencyOf(p, set_p, dep, set_dep) => {
                // Dispatch on which of the two sets is the full set.
                match (set_p == &VS::full(), set_dep == &VS::full()) {
                    (true, true) => write!(f, "{} depends on {}", p, dep),
                    (true, false) => write!(f, "{} depends on {} {}", p, dep, set_dep),
                    (false, true) => write!(f, "{} {} depends on {}", p, set_p, dep),
                    (false, false) => write!(f, "{} {} depends on {} {}", p, set_p, dep, set_dep),
                }
            }
        }
    }
}
/// Default reporter able to generate an explanation as a [String].
pub struct DefaultStringReporter {
    /// Number of explanations already with a line reference.
    ref_count: usize,
    /// Shared nodes that have already been marked with a line reference.
    /// The incompatibility ids are the keys, and the line references are the values.
    shared_with_ref: Map<usize, usize>,
    /// Accumulated lines of the report already generated.
    lines: Vec<String>,
}
impl DefaultStringReporter {
    /// Initialize the reporter.
    fn new() -> Self {
        Self {
            ref_count: 0,
            shared_with_ref: Map::default(),
            lines: Vec::new(),
        }
    }

    /// Build the explanation for `derived`, then register a line
    /// reference for it if it is a shared node not referenced yet.
    fn build_recursive<P: Package, VS: VersionSet>(&mut self, derived: &Derived<P, VS>) {
        self.build_recursive_helper(derived);
        if let Some(id) = derived.shared_id {
            if self.shared_with_ref.get(&id).is_none() {
                self.add_line_ref();
                self.shared_with_ref.insert(id, self.ref_count);
            }
        };
    }

    /// Dispatch on the shapes of the two causes of `current`
    /// to produce the clearest possible chain of explanations.
    fn build_recursive_helper<P: Package, VS: VersionSet>(&mut self, current: &Derived<P, VS>) {
        match (current.cause1.deref(), current.cause2.deref()) {
            (DerivationTree::External(external1), DerivationTree::External(external2)) => {
                // Simplest case, we just combine two external incompatibilities.
                self.lines.push(Self::explain_both_external(
                    external1,
                    external2,
                    &current.terms,
                ));
            }
            (DerivationTree::Derived(derived), DerivationTree::External(external)) => {
                // One cause is derived, so we explain this first
                // then we add the one-line external part
                // and finally conclude with the current incompatibility.
                self.report_one_each(derived, external, &current.terms);
            }
            (DerivationTree::External(external), DerivationTree::Derived(derived)) => {
                self.report_one_each(derived, external, &current.terms);
            }
            (DerivationTree::Derived(derived1), DerivationTree::Derived(derived2)) => {
                // This is the most complex case since both causes are also derived.
                match (
                    self.line_ref_of(derived1.shared_id),
                    self.line_ref_of(derived2.shared_id),
                ) {
                    // If both causes already have been referenced (shared_id),
                    // the explanation simply uses those references.
                    (Some(ref1), Some(ref2)) => self.lines.push(Self::explain_both_ref(
                        ref1,
                        derived1,
                        ref2,
                        derived2,
                        &current.terms,
                    )),
                    // Otherwise, if one only has a line number reference,
                    // we recursively call the one without reference and then
                    // add the one with reference to conclude.
                    (Some(ref1), None) => {
                        self.build_recursive(derived2);
                        self.lines
                            .push(Self::and_explain_ref(ref1, derived1, &current.terms));
                    }
                    (None, Some(ref2)) => {
                        self.build_recursive(derived1);
                        self.lines
                            .push(Self::and_explain_ref(ref2, derived2, &current.terms));
                    }
                    // Finally, if no line reference exists yet,
                    // we call recursively the first one and then,
                    // - if this was a shared node, it will get a line ref
                    //   and we can simply recall this with the current node.
                    // - otherwise, we add a line reference to it,
                    //   recursively call on the second node,
                    //   and finally conclude.
                    (None, None) => {
                        self.build_recursive(derived1);
                        if derived1.shared_id.is_some() {
                            self.lines.push("".into());
                            self.build_recursive(current);
                        } else {
                            self.add_line_ref();
                            let ref1 = self.ref_count;
                            self.lines.push("".into());
                            self.build_recursive(derived2);
                            self.lines
                                .push(Self::and_explain_ref(ref1, derived1, &current.terms));
                        }
                    }
                }
            }
        }
    }

    /// Report a derived and an external incompatibility.
    ///
    /// The result will depend on the fact that the derived incompatibility
    /// has already been explained or not.
    fn report_one_each<P: Package, VS: VersionSet>(
        &mut self,
        derived: &Derived<P, VS>,
        external: &External<P, VS>,
        current_terms: &Map<P, Term<VS>>,
    ) {
        match self.line_ref_of(derived.shared_id) {
            Some(ref_id) => self.lines.push(Self::explain_ref_and_external(
                ref_id,
                derived,
                external,
                current_terms,
            )),
            None => self.report_recurse_one_each(derived, external, current_terms),
        }
    }

    /// Report one derived (without a line ref yet) and one external.
    fn report_recurse_one_each<P: Package, VS: VersionSet>(
        &mut self,
        derived: &Derived<P, VS>,
        external: &External<P, VS>,
        current_terms: &Map<P, Term<VS>>,
    ) {
        match (derived.cause1.deref(), derived.cause2.deref()) {
            // If the derived cause has itself one external prior cause,
            // we can chain the external explanations.
            (DerivationTree::Derived(prior_derived), DerivationTree::External(prior_external)) => {
                self.build_recursive(prior_derived);
                self.lines.push(Self::and_explain_prior_and_external(
                    prior_external,
                    external,
                    current_terms,
                ));
            }
            // If the derived cause has itself one external prior cause,
            // we can chain the external explanations.
            (DerivationTree::External(prior_external), DerivationTree::Derived(prior_derived)) => {
                self.build_recursive(prior_derived);
                self.lines.push(Self::and_explain_prior_and_external(
                    prior_external,
                    external,
                    current_terms,
                ));
            }
            _ => {
                self.build_recursive(derived);
                self.lines
                    .push(Self::and_explain_external(external, current_terms));
            }
        }
    }

    // String explanations #####################################################

    /// Simplest case, we just combine two external incompatibilities.
    fn explain_both_external<P: Package, VS: VersionSet>(
        external1: &External<P, VS>,
        external2: &External<P, VS>,
        current_terms: &Map<P, Term<VS>>,
    ) -> String {
        // TODO: order should be chosen to make it more logical.
        format!(
            "Because {} and {}, {}.",
            external1,
            external2,
            Self::string_terms(current_terms)
        )
    }

    /// Both causes have already been explained so we use their refs.
    fn explain_both_ref<P: Package, VS: VersionSet>(
        ref_id1: usize,
        derived1: &Derived<P, VS>,
        ref_id2: usize,
        derived2: &Derived<P, VS>,
        current_terms: &Map<P, Term<VS>>,
    ) -> String {
        // TODO: order should be chosen to make it more logical.
        format!(
            "Because {} ({}) and {} ({}), {}.",
            Self::string_terms(&derived1.terms),
            ref_id1,
            Self::string_terms(&derived2.terms),
            ref_id2,
            Self::string_terms(current_terms)
        )
    }

    /// One cause is derived (already explained so one-line),
    /// the other is a one-line external cause,
    /// and finally we conclude with the current incompatibility.
    fn explain_ref_and_external<P: Package, VS: VersionSet>(
        ref_id: usize,
        derived: &Derived<P, VS>,
        external: &External<P, VS>,
        current_terms: &Map<P, Term<VS>>,
    ) -> String {
        // TODO: order should be chosen to make it more logical.
        format!(
            "Because {} ({}) and {}, {}.",
            Self::string_terms(&derived.terms),
            ref_id,
            external,
            Self::string_terms(current_terms)
        )
    }

    /// Add an external cause to the chain of explanations.
    fn and_explain_external<P: Package, VS: VersionSet>(
        external: &External<P, VS>,
        current_terms: &Map<P, Term<VS>>,
    ) -> String {
        format!(
            "And because {}, {}.",
            external,
            Self::string_terms(current_terms)
        )
    }

    /// Add an already explained incompat to the chain of explanations.
    fn and_explain_ref<P: Package, VS: VersionSet>(
        ref_id: usize,
        derived: &Derived<P, VS>,
        current_terms: &Map<P, Term<VS>>,
    ) -> String {
        format!(
            "And because {} ({}), {}.",
            Self::string_terms(&derived.terms),
            ref_id,
            Self::string_terms(current_terms)
        )
    }

    /// Add an already explained incompat to the chain of explanations.
    fn and_explain_prior_and_external<P: Package, VS: VersionSet>(
        prior_external: &External<P, VS>,
        external: &External<P, VS>,
        current_terms: &Map<P, Term<VS>>,
    ) -> String {
        format!(
            "And because {} and {}, {}.",
            prior_external,
            external,
            Self::string_terms(current_terms)
        )
    }

    /// Try to print terms of an incompatibility in a human-readable way.
    pub fn string_terms<P: Package, VS: VersionSet>(terms: &Map<P, Term<VS>>) -> String {
        let terms_vec: Vec<_> = terms.iter().collect();
        match terms_vec.as_slice() {
            [] => "version solving failed".into(),
            // TODO: special case when that unique package is root.
            [(package, Term::Positive(range))] => format!("{} {} is forbidden", package, range),
            [(package, Term::Negative(range))] => format!("{} {} is mandatory", package, range),
            // A positive/negative pair reads naturally as a dependency edge.
            [(p1, Term::Positive(r1)), (p2, Term::Negative(r2))] => {
                External::FromDependencyOf(p1, r1.clone(), p2, r2.clone()).to_string()
            }
            [(p1, Term::Negative(r1)), (p2, Term::Positive(r2))] => {
                External::FromDependencyOf(p2, r2.clone(), p1, r1.clone()).to_string()
            }
            slice => {
                let str_terms: Vec<_> = slice.iter().map(|(p, t)| format!("{} {}", p, t)).collect();
                str_terms.join(", ") + " are incompatible"
            }
        }
    }

    // Helper functions ########################################################

    /// Attach a new numeric reference "(n)" to the last generated line.
    fn add_line_ref(&mut self) {
        let new_count = self.ref_count + 1;
        self.ref_count = new_count;
        if let Some(line) = self.lines.last_mut() {
            *line = format!("{} ({})", line, new_count);
        }
    }

    /// Line reference previously assigned to a shared node, if any.
    fn line_ref_of(&self, shared_id: Option<usize>) -> Option<usize> {
        shared_id.and_then(|id| self.shared_with_ref.get(&id).cloned())
    }
}
impl<P: Package, VS: VersionSet> Reporter<P, VS> for DefaultStringReporter {
    type Output = String;

    /// Walk the derivation tree and render it as a multi-line explanation.
    fn report(derivation_tree: &DerivationTree<P, VS>) -> Self::Output {
        match derivation_tree {
            // A derived root needs the full recursive build.
            DerivationTree::Derived(derived) => {
                let mut reporter = Self::new();
                reporter.build_recursive(derived);
                reporter.lines.join("\n")
            }
            // A single external incompatibility is its own one-line report.
            DerivationTree::External(external) => external.to_string(),
        }
    }
}

View File

@ -1,374 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! PubGrub version solving algorithm.
//!
//! It consists in efficiently finding a set of packages and versions
//! that satisfy all the constraints of a given project dependencies.
//! In addition, when that is not possible,
//! PubGrub tries to provide a very human-readable and clear
//! explanation as to why that failed.
//! Below is an example of explanation present in
//! the introductory blog post about PubGrub
//!
//! ```txt
//! Because dropdown >=2.0.0 depends on icons >=2.0.0 and
//! root depends on icons <2.0.0, dropdown >=2.0.0 is forbidden.
//!
//! And because menu >=1.1.0 depends on dropdown >=2.0.0,
//! menu >=1.1.0 is forbidden.
//!
//! And because menu <1.1.0 depends on dropdown >=1.0.0 <2.0.0
//! which depends on intl <4.0.0, every version of menu
//! requires intl <4.0.0.
//!
//! So, because root depends on both menu >=1.0.0 and intl >=5.0.0,
//! version solving failed.
//! ```
//!
//! The algorithm is generic and works for any type of dependency system
//! as long as packages (P) and versions (V) implement
//! the [Package] and [Version](crate::version::Version) traits.
//! [Package] is strictly equivalent and automatically generated
//! for any type that implement [Clone] + [Eq] + [Hash] + [Debug] + [Display](std::fmt::Display).
//! [Version](crate::version::Version) simply states that versions are ordered,
//! that there should be
//! a minimal [lowest](crate::version::Version::lowest) version (like 0.0.0 in semantic versions),
//! and that for any version, it is possible to compute
//! what the next version closest to this one is ([bump](crate::version::Version::bump)).
//! For semantic versions, [bump](crate::version::Version::bump) corresponds to
//! an increment of the patch number.
//!
//! ## API
//!
//! ```
//! # use pubgrub::solver::{resolve, OfflineDependencyProvider};
//! # use pubgrub::version::NumberVersion;
//! # use pubgrub::error::PubGrubError;
//! # use pubgrub::range::Range;
//! #
//! # type NumVS = Range<NumberVersion>;
//! #
//! # fn try_main() -> Result<(), PubGrubError<&'static str, NumVS>> {
//! # let dependency_provider = OfflineDependencyProvider::<&str, NumVS>::new();
//! # let package = "root";
//! # let version = 1;
//! let solution = resolve(&dependency_provider, package, version)?;
//! # Ok(())
//! # }
//! # fn main() {
//! # assert!(matches!(try_main(), Err(PubGrubError::NoSolution(_))));
//! # }
//! ```
//!
//! Where `dependency_provider` supplies the list of available packages and versions,
//! as well as the dependencies of every available package
//! by implementing the [DependencyProvider] trait.
//! The call to [resolve] for a given package at a given version
//! will compute the set of packages and versions needed
//! to satisfy the dependencies of that package and version pair.
//! If there is no solution, the reason will be provided as clear as possible.
use std::cmp::Reverse;
use std::collections::{BTreeMap, BTreeSet as Set};
use std::error::Error;
use crate::error::PubGrubError;
pub use crate::internal::core::State;
pub use crate::internal::incompatibility::{Incompatibility, Kind};
use crate::package::Package;
use crate::type_aliases::{DependencyConstraints, Map, SelectedDependencies};
use crate::version_set::VersionSet;
use log::{debug, info};
/// Main function of the library.
/// Finds a set of packages satisfying dependency bounds for a given package + version pair.
pub fn resolve<P: Package, VS: VersionSet>(
    dependency_provider: &impl DependencyProvider<P, VS>,
    package: P,
    version: impl Into<VS::V>,
) -> Result<SelectedDependencies<P, VS::V>, PubGrubError<P, VS>> {
    let mut state = State::init(package.clone(), version.into());
    // (package, version) pairs whose dependency incompatibilities were already
    // added, so backtracking does not re-add them.
    let mut added_dependencies: Map<P, Set<VS::V>> = Map::default();
    let mut next = package;
    loop {
        // Give the provider a chance to abort (timeout, user cancellation...).
        dependency_provider
            .should_cancel()
            .map_err(|err| PubGrubError::ErrorInShouldCancel(err))?;

        info!("unit_propagation: {}", &next);
        state.unit_propagation(next)?;

        debug!(
            "Partial solution after unit propagation: {}",
            state.partial_solution
        );

        // No undecided package left means the partial solution is complete.
        let Some(highest_priority_pkg) = state
            .partial_solution
            .pick_highest_priority_pkg(|p, r| dependency_provider.prioritize(p, r))
        else {
            return Ok(state.partial_solution.extract_solution());
        };
        next = highest_priority_pkg;

        let term_intersection = state
            .partial_solution
            .term_intersection_for_package(&next)
            .ok_or_else(|| {
                PubGrubError::Failure("a package was chosen but we don't have a term.".into())
            })?;
        let decision = dependency_provider
            .choose_version(&next, term_intersection.unwrap_positive())
            .map_err(PubGrubError::ErrorChoosingPackageVersion)?;
        info!("DP chose: {} @ {:?}", next, decision);

        // Pick the next compatible version.
        let v = match decision {
            None => {
                // No version fits: record the fact and keep propagating.
                let inc = Incompatibility::no_versions(next.clone(), term_intersection.clone());
                state.add_incompatibility(inc);
                continue;
            }
            Some(x) => x,
        };

        // The provider must choose a version inside the requested set.
        if !term_intersection.contains(&v) {
            return Err(PubGrubError::ErrorChoosingPackageVersion(
                "choose_package_version picked an incompatible version".into(),
            ));
        }

        let is_new_dependency = added_dependencies
            .entry(next.clone())
            .or_default()
            .insert(v.clone());

        if is_new_dependency {
            // Retrieve that package dependencies.
            let p = &next;
            let dependencies = dependency_provider.get_dependencies(p, &v).map_err(|err| {
                PubGrubError::ErrorRetrievingDependencies {
                    package: p.clone(),
                    version: v.clone(),
                    source: err,
                }
            })?;

            let known_dependencies = match dependencies {
                Dependencies::Unknown => {
                    state.add_incompatibility(Incompatibility::unavailable_dependencies(
                        p.clone(),
                        v.clone(),
                    ));
                    continue;
                }
                // A package depending on itself is an input error.
                Dependencies::Known(x) if x.contains_key(p) => {
                    return Err(PubGrubError::SelfDependency {
                        package: p.clone(),
                        version: v,
                    });
                }
                Dependencies::Known(x) => x,
            };

            // Add that package and version if the dependencies are not problematic.
            let dep_incompats = state.add_incompatibility_from_dependencies(
                p.clone(),
                v.clone(),
                &known_dependencies,
            );

            state.partial_solution.add_version(
                p.clone(),
                v,
                dep_incompats,
                &state.incompatibility_store,
            );
        } else {
            // `dep_incompats` are already in `incompatibilities` so we know there are not satisfied
            // terms and can add the decision directly.
            info!("add_decision (not first time): {} @ {}", &next, v);
            state.partial_solution.add_decision(next.clone(), v);
        }
    }
}
/// An enum used by [DependencyProvider] that holds information about package dependencies.
/// For each [Package] there is a set of versions allowed as a dependency.
#[derive(Clone)]
pub enum Dependencies<P: Package, VS: VersionSet> {
    /// Package dependencies are unavailable (e.g. metadata could not be fetched).
    Unknown,
    /// Container for all available package versions.
    Known(DependencyConstraints<P, VS>),
}
/// Trait that allows the algorithm to retrieve available packages and their dependencies.
/// An implementor needs to be supplied to the [resolve] function.
pub trait DependencyProvider<P: Package, VS: VersionSet> {
    /// [Decision making](https://github.com/dart-lang/pub/blob/master/doc/solver.md#decision-making)
    /// is the process of choosing the next package
    /// and version that will be appended to the partial solution.
    ///
    /// Every time such a decision must be made, the resolver looks at all the potential valid
    /// packages that have changed, and asks the dependency provider how important each one is.
    /// For each one it calls `prioritize` with the name of the package and the current set of
    /// acceptable versions.
    /// The resolver will then pick the package with the highest priority from all the potential valid
    /// packages.
    ///
    /// The strategy employed to prioritize packages
    /// cannot change the existence of a solution or not,
    /// but can drastically change the performances of the solver,
    /// or the properties of the solution.
    /// The documentation of Pub (PubGrub implementation for the dart programming language)
    /// states the following:
    ///
    /// > Pub chooses the latest matching version of the package
    /// > with the fewest versions that match the outstanding constraint.
    /// > This tends to find conflicts earlier if any exist,
    /// > since these packages will run out of versions to try more quickly.
    /// > But there's likely room for improvement in these heuristics.
    ///
    /// Note: the resolver may call this even when the range has not changed,
    /// if it is more efficient for the resolver's internal data structures.
    fn prioritize(&self, package: &P, range: &VS) -> Self::Priority;

    /// The type returned from `prioritize`. The resolver does not care what type this is
    /// as long as it can pick a largest one and clone it.
    ///
    /// [std::cmp::Reverse] can be useful if you want to pick the package with
    /// the fewest versions that match the outstanding constraint.
    type Priority: Ord + Clone;

    /// Once the resolver has found the highest `Priority` package from all potential valid
    /// packages, it needs to know what version of that package to use. The most common pattern
    /// is to select the largest version that the range contains.
    fn choose_version(
        &self,
        package: &P,
        range: &VS,
    ) -> Result<Option<VS::V>, Box<dyn Error + Send + Sync>>;

    /// Retrieves the package dependencies.
    /// Return [Dependencies::Unknown] if its dependencies are unknown.
    fn get_dependencies(
        &self,
        package: &P,
        version: &VS::V,
    ) -> Result<Dependencies<P, VS>, Box<dyn Error + Send + Sync>>;

    /// This is called fairly regularly during the resolution,
    /// if it returns an Err then resolution will be terminated.
    /// This is helpful if you want to add some form of early termination like a timeout,
    /// or you want to add some form of user feedback if things are taking a while.
    /// If not provided the resolver will run as long as needed.
    fn should_cancel(&self) -> Result<(), Box<dyn Error + Send + Sync>> {
        Ok(())
    }
}
/// A basic implementation of [DependencyProvider]
/// backed entirely by in-memory data.
#[derive(Debug, Clone, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(
    feature = "serde",
    serde(bound(
        serialize = "VS::V: serde::Serialize, VS: serde::Serialize, P: serde::Serialize",
        deserialize = "VS::V: serde::Deserialize<'de>, VS: serde::Deserialize<'de>, P: serde::Deserialize<'de>"
    ))
)]
#[cfg_attr(feature = "serde", serde(transparent))]
pub struct OfflineDependencyProvider<P: Package, VS: VersionSet> {
    // Nested map: package -> (version -> that version's dependency constraints).
    // BTreeMap keeps versions sorted, giving deterministic iteration order.
    dependencies: Map<P, BTreeMap<VS::V, DependencyConstraints<P, VS>>>,
}
impl<P: Package, VS: VersionSet> OfflineDependencyProvider<P, VS> {
/// Creates an empty OfflineDependencyProvider with no dependencies.
pub fn new() -> Self {
Self {
dependencies: Map::default(),
}
}
/// Registers the dependencies of a package and version pair.
/// Dependencies must be added with a single call to
/// [add_dependencies](OfflineDependencyProvider::add_dependencies).
/// All subsequent calls to
/// [add_dependencies](OfflineDependencyProvider::add_dependencies) for a given
/// package version pair will replace the dependencies by the new ones.
///
/// The API does not allow to add dependencies one at a time to uphold an assumption that
/// [OfflineDependencyProvider.get_dependencies(p, v)](OfflineDependencyProvider::get_dependencies)
/// provides all dependencies of a given package (p) and version (v) pair.
pub fn add_dependencies<I: IntoIterator<Item = (P, VS)>>(
&mut self,
package: P,
version: impl Into<VS::V>,
dependencies: I,
) {
let package_deps = dependencies.into_iter().collect();
let v = version.into();
*self
.dependencies
.entry(package)
.or_default()
.entry(v)
.or_default() = package_deps;
}
/// Lists packages that have been saved.
pub fn packages(&self) -> impl Iterator<Item = &P> {
self.dependencies.keys()
}
/// Lists versions of saved packages in sorted order.
/// Returns [None] if no information is available regarding that package.
pub fn versions(&self, package: &P) -> Option<impl Iterator<Item = &VS::V>> {
self.dependencies.get(package).map(|k| k.keys())
}
/// Lists dependencies of a given package and version.
/// Returns [None] if no information is available regarding that package and version pair.
fn dependencies(&self, package: &P, version: &VS::V) -> Option<DependencyConstraints<P, VS>> {
self.dependencies.get(package)?.get(version).cloned()
}
}
/// An implementation of [DependencyProvider] that
/// contains all dependency information available in memory.
/// Packages are currently prioritized with the fewest matching versions first,
/// though that heuristic may change in future versions.
/// Versions are picked newest first.
impl<P: Package, VS: VersionSet> DependencyProvider<P, VS> for OfflineDependencyProvider<P, VS> {
    type Priority = Reverse<usize>;

    /// Prioritize by the reversed count of versions matching `range`,
    /// so packages with fewer candidate versions are decided first.
    fn prioritize(&self, package: &P, range: &VS) -> Self::Priority {
        let candidate_count = self.dependencies.get(package).map_or(0, |versions| {
            versions.keys().filter(|v| range.contains(v)).count()
        });
        Reverse(candidate_count)
    }

    /// Choose the newest known version of `package` contained in `range`.
    fn choose_version(
        &self,
        package: &P,
        range: &VS,
    ) -> Result<Option<VS::V>, Box<dyn Error + Send + Sync>> {
        let newest_matching = self
            .dependencies
            .get(package)
            .and_then(|versions| versions.keys().rev().find(|v| range.contains(v)).cloned());
        Ok(newest_matching)
    }

    /// Look up dependencies; unknown (package, version) pairs
    /// yield [Dependencies::Unknown].
    fn get_dependencies(
        &self,
        package: &P,
        version: &VS::V,
    ) -> Result<Dependencies<P, VS>, Box<dyn Error + Send + Sync>> {
        Ok(self
            .dependencies(package, version)
            .map_or(Dependencies::Unknown, Dependencies::Known))
    }
}

View File

@ -1,211 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! A term is the fundamental unit of operation of the PubGrub algorithm.
//! It is a positive or negative expression regarding a set of versions.
use crate::version_set::VersionSet;
use std::fmt::{self, Display};
/// A positive or negative expression regarding a set of versions.
/// This is the fundamental unit the PubGrub algorithm operates on.
#[derive(Debug, Clone, Eq, PartialEq)]
pub enum Term<VS: VersionSet> {
    /// For example, "1.0.0 <= v < 2.0.0" is a positive expression
    /// that is evaluated true if a version is selected
    /// and comprised between version 1.0.0 and version 2.0.0.
    Positive(VS),
    /// The term "not v < 3.0.0" is a negative expression
    /// that is evaluated true if a version is selected >= 3.0.0
    /// or if no version is selected at all.
    Negative(VS),
}
/// Base methods.
impl<VS: VersionSet> Term<VS> {
    /// A term that is always true.
    pub(crate) fn any() -> Self {
        Self::Negative(VS::empty())
    }

    /// A term that is never true.
    pub(crate) fn empty() -> Self {
        Self::Positive(VS::empty())
    }

    /// A positive term containing exactly that version.
    pub(crate) fn exact(version: VS::V) -> Self {
        Self::Positive(VS::singleton(version))
    }

    /// Simply check if a term is positive.
    pub(crate) fn is_positive(&self) -> bool {
        matches!(self, Self::Positive(_))
    }

    /// Negate a term.
    /// Evaluation of a negated term always returns
    /// the opposite of the evaluation of the original one.
    pub(crate) fn negate(&self) -> Self {
        match self {
            Self::Negative(set) => Self::Positive(set.clone()),
            Self::Positive(set) => Self::Negative(set.clone()),
        }
    }

    /// Evaluate a term regarding a given choice of version.
    pub(crate) fn contains(&self, v: &VS::V) -> bool {
        match self {
            Self::Positive(set) => set.contains(v),
            Self::Negative(set) => !set.contains(v),
        }
    }

    /// Unwrap the set contained in a positive term.
    /// Will panic if used on a negative set.
    pub fn unwrap_positive(&self) -> &VS {
        match self {
            Self::Positive(set) => set,
            Self::Negative(_) => panic!("Negative term cannot unwrap positive set"),
        }
    }
}
/// Set operations with terms.
impl<VS: VersionSet> Term<VS> {
    /// Compute the intersection of two terms.
    /// If at least one term is positive, the intersection is also positive.
    pub(crate) fn intersection(&self, other: &Self) -> Self {
        match (self, other) {
            (Self::Positive(r1), Self::Positive(r2)) => Self::Positive(r1.intersection(r2)),
            // Positive ∩ Negative: restrict the positive set to the
            // complement of the negative one.
            (Self::Positive(r1), Self::Negative(r2)) => {
                Self::Positive(r1.intersection(&r2.complement()))
            }
            (Self::Negative(r1), Self::Positive(r2)) => {
                Self::Positive(r1.complement().intersection(r2))
            }
            // Negative ∩ Negative: by De Morgan, the complement of the union,
            // expressed here as a negative term over the union.
            (Self::Negative(r1), Self::Negative(r2)) => Self::Negative(r1.union(r2)),
        }
    }

    /// Compute the union of two terms.
    /// If at least one term is negative, the union is also negative.
    pub(crate) fn union(&self, other: &Self) -> Self {
        // Derived from intersection via De Morgan: A ∪ B = ¬(¬A ∩ ¬B).
        (self.negate().intersection(&other.negate())).negate()
    }

    /// Indicate if this term is a subset of another term.
    /// Just like for sets, we say that t1 is a subset of t2
    /// if and only if t1 ∩ t2 = t1.
    pub(crate) fn subset_of(&self, other: &Self) -> bool {
        self == &self.intersection(other)
    }
}
/// Describe a relation between a set of terms S and another term t.
///
/// As a shorthand, we say that a term v
/// satisfies or contradicts a term t if {v} satisfies or contradicts it.
///
/// Produced by [Term::relation_with].
pub(crate) enum Relation {
    /// We say that a set of terms S "satisfies" a term t
    /// if t must be true whenever every term in S is true.
    Satisfied,
    /// Conversely, S "contradicts" t if t must be false
    /// whenever every term in S is true.
    Contradicted,
    /// If neither of these is true we say that S is "inconclusive" for t.
    Inconclusive,
}
/// Relation between terms.
impl<VS: VersionSet> Term<VS> {
    /// Check if a set of terms satisfies this term.
    ///
    /// We say that a set of terms S "satisfies" a term t
    /// if t must be true whenever every term in S is true.
    ///
    /// It turns out that this can also be expressed with set operations:
    /// S satisfies t if and only if ⋂ S ⊆ t
    #[cfg(test)]
    fn satisfied_by(&self, terms_intersection: &Self) -> bool {
        terms_intersection.subset_of(self)
    }

    /// Check if a set of terms contradicts this term.
    ///
    /// We say that a set of terms S "contradicts" a term t
    /// if t must be false whenever every term in S is true.
    ///
    /// It turns out that this can also be expressed with set operations:
    /// S contradicts t if and only if ⋂ S is disjoint with t,
    /// i.e. (⋂ S) ⋂ t = ∅
    #[cfg(test)]
    fn contradicted_by(&self, terms_intersection: &Self) -> bool {
        Self::empty() == terms_intersection.intersection(self)
    }

    /// Check if a set of terms satisfies or contradicts a given term.
    /// Otherwise the relation is inconclusive.
    pub(crate) fn relation_with(&self, other_terms_intersection: &Self) -> Relation {
        let intersected = self.intersection(other_terms_intersection);
        // ⋂ S ⊆ t  ⇔  t is satisfied (see `satisfied_by`).
        if &intersected == other_terms_intersection {
            return Relation::Satisfied;
        }
        // (⋂ S) ⋂ t = ∅  ⇔  t is contradicted (see `contradicted_by`).
        if intersected == Self::empty() {
            return Relation::Contradicted;
        }
        Relation::Inconclusive
    }
}
// Trivial `AsRef` implementation: a term borrows as itself.
impl<VS: VersionSet> AsRef<Self> for Term<VS> {
    fn as_ref(&self) -> &Self {
        self
    }
}
// REPORT ######################################################################
impl<VS: VersionSet + Display> Display for Term<VS> {
    /// Positive terms display as their set;
    /// negative terms wrap the set in a "Not ( … )" marker.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            // Delegate directly to the set's own Display implementation.
            Self::Positive(set) => Display::fmt(set, f),
            Self::Negative(set) => write!(f, "Not ( {} )", set),
        }
    }
}
// TESTS #######################################################################
#[cfg(test)]
pub mod tests {
    use super::*;
    use crate::range::Range;
    use proptest::prelude::*;

    /// Proptest strategy producing arbitrary terms, either positive
    /// or negative, over `Range<u32>` sets.
    pub fn strategy() -> impl Strategy<Value = Term<Range<u32>>> {
        prop_oneof![
            crate::range::tests::strategy().prop_map(Term::Positive),
            crate::range::tests::strategy().prop_map(Term::Negative),
        ]
    }

    proptest! {
        // Testing relation --------------------------------
        #[test]
        fn relation_with(term1 in strategy(), term2 in strategy()) {
            // `relation_with` must agree with the set-based definitions
            // checked by `satisfied_by` and `contradicted_by`.
            match term1.relation_with(&term2) {
                Relation::Satisfied => assert!(term1.satisfied_by(&term2)),
                Relation::Contradicted => assert!(term1.contradicted_by(&term2)),
                Relation::Inconclusive => {
                    assert!(!term1.satisfied_by(&term2));
                    assert!(!term1.contradicted_by(&term2));
                }
            }
        }
    }
}

View File

@ -1,17 +0,0 @@
// SPDX-License-Identifier: MPL-2.0

//! Publicly exported type aliases.

/// Map implementation used by the library (a `rustc_hash::FxHashMap`).
pub type Map<K, V> = rustc_hash::FxHashMap<K, V>;

/// Concrete dependencies picked by the library during [resolve](crate::solver::resolve)
/// from [DependencyConstraints]: one selected version per package.
pub type SelectedDependencies<P, V> = Map<P, V>;

/// Holds information about all possible versions a given package can accept.
/// There is a difference in semantics between an empty map
/// inside [DependencyConstraints] and [Dependencies::Unknown](crate::solver::Dependencies::Unknown):
/// the former means the package has no dependency and it is a known fact,
/// while the latter means they could not be fetched by the [DependencyProvider](crate::solver::DependencyProvider).
pub type DependencyConstraints<P, VS> = Map<P, VS>;

View File

@ -1,289 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! Traits and implementations to create and compare versions.
use std::fmt::{self, Debug, Display};
use std::str::FromStr;
use thiserror::Error;
/// Versions have a minimal version (a "0" version)
/// and are ordered such that every version has a next one.
///
/// Implemented below for [SemanticVersion] and [NumberVersion].
pub trait Version: Clone + Ord + Debug + Display {
    /// Returns the lowest version.
    fn lowest() -> Self;
    /// Returns the next version, the smallest strictly higher version.
    fn bump(&self) -> Self;
}
/// Type for semantic versions: major.minor.patch.
///
/// Only the three numeric components are modeled; pre-release and
/// build-metadata segments are not representable.
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
pub struct SemanticVersion {
    major: u32,
    minor: u32,
    patch: u32,
}
#[cfg(feature = "serde")]
impl serde::Serialize for SemanticVersion {
    /// Serialize through the `Display` representation ("major.minor.patch").
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_str(&self.to_string())
    }
}
#[cfg(feature = "serde")]
impl<'de> serde::Deserialize<'de> for SemanticVersion {
    /// Deserialize a "major.minor.patch" string, mapping parse failures
    /// into a serde error.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        String::deserialize(deserializer)?
            .parse()
            .map_err(serde::de::Error::custom)
    }
}
// Constructors
impl SemanticVersion {
    /// Create a version with "major", "minor" and "patch" values.
    /// `version = major.minor.patch`
    pub fn new(major: u32, minor: u32, patch: u32) -> Self {
        Self { major, minor, patch }
    }

    /// Version 0.0.0.
    pub fn zero() -> Self {
        Self::new(0, 0, 0)
    }

    /// Version 1.0.0.
    pub fn one() -> Self {
        Self::new(1, 0, 0)
    }

    /// Version 2.0.0.
    pub fn two() -> Self {
        Self::new(2, 0, 0)
    }
}
// Convert a tuple (major, minor, patch) into a version.
impl From<(u32, u32, u32)> for SemanticVersion {
    fn from((major, minor, patch): (u32, u32, u32)) -> Self {
        // Destructure directly in the parameter pattern.
        Self::new(major, minor, patch)
    }
}
// Convert a &(major, minor, patch) into a version.
impl From<&(u32, u32, u32)> for SemanticVersion {
    fn from(&(major, minor, patch): &(u32, u32, u32)) -> Self {
        // All components are `Copy`, so the reference pattern copies them out.
        Self::new(major, minor, patch)
    }
}
// Convert an &version into a version.
impl From<&SemanticVersion> for SemanticVersion {
    fn from(version: &SemanticVersion) -> Self {
        // `SemanticVersion` is `Copy`, so dereferencing is a cheap bitwise copy.
        *version
    }
}
// Convert a version into a tuple (major, minor, patch).
impl From<SemanticVersion> for (u32, u32, u32) {
fn from(v: SemanticVersion) -> Self {
(v.major, v.minor, v.patch)
}
}
// Bump versions.
impl SemanticVersion {
    /// Bump the patch number of a version.
    pub fn bump_patch(self) -> Self {
        Self {
            patch: self.patch + 1,
            ..self
        }
    }

    /// Bump the minor number of a version; the patch number resets to 0.
    pub fn bump_minor(self) -> Self {
        Self {
            minor: self.minor + 1,
            patch: 0,
            ..self
        }
    }

    /// Bump the major number of a version; minor and patch reset to 0.
    pub fn bump_major(self) -> Self {
        Self::new(self.major + 1, 0, 0)
    }
}
/// Error creating [SemanticVersion] from [String].
///
/// Returned by the [FromStr] implementation of [SemanticVersion].
#[derive(Error, Debug, PartialEq, Eq)]
pub enum VersionParseError {
    /// [SemanticVersion] must contain major, minor, patch versions.
    #[error("version {full_version} must contain 3 numbers separated by dot")]
    NotThreeParts {
        /// [SemanticVersion] that was being parsed.
        full_version: String,
    },
    /// Wrapper around [ParseIntError](core::num::ParseIntError).
    #[error("cannot parse '{version_part}' in '{full_version}' as u32: {parse_error}")]
    ParseIntError {
        /// [SemanticVersion] that was being parsed.
        full_version: String,
        /// A version part where parsing failed.
        version_part: String,
        /// A specific error resulted from parsing a part of the version as [u32].
        parse_error: String,
    },
}
impl FromStr for SemanticVersion {
    type Err = VersionParseError;

    /// Parse a "major.minor.patch" string; anything other than exactly
    /// three dot-separated u32 components is an error.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Parse one component, keeping the full input as error context.
        let parse_u32 = |part: &str| {
            part.parse::<u32>().map_err(|e| Self::Err::ParseIntError {
                full_version: s.to_string(),
                version_part: part.to_string(),
                parse_error: e.to_string(),
            })
        };
        let parts: Vec<&str> = s.split('.').collect();
        // Exactly three components required ("1.2" and "1.2.3." both fail,
        // since a trailing dot yields a fourth, empty component).
        let &[major, minor, patch] = parts.as_slice() else {
            return Err(Self::Err::NotThreeParts {
                full_version: s.to_string(),
            });
        };
        Ok(Self::new(
            parse_u32(major)?,
            parse_u32(minor)?,
            parse_u32(patch)?,
        ))
    }
}
#[test]
fn from_str_for_semantic_version() {
    let parse = |str: &str| str.parse::<SemanticVersion>();
    // Round-trip: Display output must parse back successfully.
    assert!(parse(
        &SemanticVersion {
            major: 0,
            minor: 1,
            patch: 0
        }
        .to_string()
    )
    .is_ok());
    assert!(parse("1.2.3").is_ok());
    // Non-numeric component.
    assert_eq!(
        parse("1.abc.3"),
        Err(VersionParseError::ParseIntError {
            full_version: "1.abc.3".to_owned(),
            version_part: "abc".to_owned(),
            parse_error: "invalid digit found in string".to_owned(),
        })
    );
    // Negative numbers are rejected by the u32 parser.
    assert_eq!(
        parse("1.2.-3"),
        Err(VersionParseError::ParseIntError {
            full_version: "1.2.-3".to_owned(),
            version_part: "-3".to_owned(),
            parse_error: "invalid digit found in string".to_owned(),
        })
    );
    // Component exceeding u32::MAX.
    assert_eq!(
        parse("1.2.9876543210"),
        Err(VersionParseError::ParseIntError {
            full_version: "1.2.9876543210".to_owned(),
            version_part: "9876543210".to_owned(),
            parse_error: "number too large to fit in target type".to_owned(),
        })
    );
    // Too few components.
    assert_eq!(
        parse("1.2"),
        Err(VersionParseError::NotThreeParts {
            full_version: "1.2".to_owned(),
        })
    );
    // Trailing dot produces a fourth (empty) component.
    assert_eq!(
        parse("1.2.3."),
        Err(VersionParseError::NotThreeParts {
            full_version: "1.2.3.".to_owned(),
        })
    );
}
impl Display for SemanticVersion {
    /// Render as "major.minor.patch".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let Self {
            major,
            minor,
            patch,
        } = self;
        write!(f, "{}.{}.{}", major, minor, patch)
    }
}
// Implement Version for SemanticVersion.
impl Version for SemanticVersion {
    fn lowest() -> Self {
        // 0.0.0 is the smallest semantic version.
        Self::zero()
    }
    fn bump(&self) -> Self {
        // The smallest strictly-higher version increments the patch number.
        self.bump_patch()
    }
}
/// Simplest versions possible, just a positive number.
#[derive(Debug, Copy, Clone, Ord, PartialOrd, Eq, PartialEq, Hash)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize,))]
// Serialized as the bare number rather than a one-field struct.
#[cfg_attr(feature = "serde", serde(transparent))]
pub struct NumberVersion(pub u32);
// Convert a u32 into a version.
impl From<u32> for NumberVersion {
    fn from(v: u32) -> Self {
        Self(v)
    }
}
// Convert a &u32 into a version.
impl From<&u32> for NumberVersion {
    fn from(v: &u32) -> Self {
        Self(*v)
    }
}
// Convert an &version into a version (NumberVersion is Copy).
impl From<&NumberVersion> for NumberVersion {
    fn from(v: &NumberVersion) -> Self {
        *v
    }
}
// Convert a version back into the underlying u32.
impl From<NumberVersion> for u32 {
    fn from(version: NumberVersion) -> Self {
        version.0
    }
}
impl Display for NumberVersion {
    /// Render exactly like the underlying integer.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        Display::fmt(&self.0, f)
    }
}
impl Version for NumberVersion {
    fn lowest() -> Self {
        // 0 is the smallest number version.
        Self(0)
    }
    fn bump(&self) -> Self {
        // The next version is simply the next integer.
        Self(self.0 + 1)
    }
}

View File

@ -1,60 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
//! As its name suggests, the [VersionSet] trait describes sets of versions.
//!
//! One needs to define
//! - the associate type for versions,
//! - two constructors for the empty set and a singleton set,
//! - the complement and intersection set operations,
//! - and a function to evaluate membership of versions.
//!
//! Two functions are automatically derived, thanks to the mathematical properties of sets.
//! You can overwrite those implementations, but we highly recommend that you don't,
//! except if you are confident in a correct implementation that brings much performance gains.
//!
//! It is also extremely important that the `Eq` trait is correctly implemented.
//! In particular, you can only use `#[derive(Eq, PartialEq)]` if `Eq` is strictly equivalent to the
//! structural equality, i.e. if version sets have canonical representations.
//! Such problems may arise if your implementations of `complement()` and `intersection()` do not
//! return canonical representations so be careful there.
use std::fmt::{Debug, Display};
/// Trait describing sets of versions.
pub trait VersionSet: Debug + Display + Clone + Eq {
    /// Version type associated with the sets manipulated.
    type V: Debug + Display + Clone + Ord;

    // Constructors
    /// Constructor for an empty set containing no version.
    fn empty() -> Self;
    /// Constructor for a set containing exactly one version.
    fn singleton(v: Self::V) -> Self;

    // Operations
    /// Compute the complement of this set.
    fn complement(&self) -> Self;
    /// Compute the intersection with another set.
    fn intersection(&self, other: &Self) -> Self;

    // Membership
    /// Evaluate membership of a version in this set.
    fn contains(&self, v: &Self::V) -> bool;

    // Automatically implemented functions ###########################

    /// Constructor for the set containing all versions.
    /// Automatically implemented as `Self::empty().complement()`.
    fn full() -> Self {
        Self::empty().complement()
    }

    /// Compute the union with another set.
    /// Thanks to set properties (De Morgan), this is automatically implemented as:
    /// `self.complement().intersection(&other.complement()).complement()`
    fn union(&self, other: &Self) -> Self {
        self.complement()
            .intersection(&other.complement())
            .complement()
    }
}

File diff suppressed because it is too large Load Diff

View File

@ -1,211 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use pubgrub::range::Range;
use pubgrub::solver::{resolve, OfflineDependencyProvider};
use pubgrub::type_aliases::Map;
use pubgrub::version::{NumberVersion, SemanticVersion};
/// Shorthand for ranges over plain integer versions.
type NumVS = Range<NumberVersion>;
/// Shorthand for ranges over semantic versions.
type SemVS = Range<SemanticVersion>;
use log::LevelFilter;
use std::io::Write;
/// Set up `env_logger` for a test: capture everything (`Trace` level),
/// print only the message text, and route output through the test harness.
fn init_log() {
    // `try_init` errors if a logger is already installed;
    // the result is deliberately discarded so repeated calls are harmless.
    let _ = env_logger::builder()
        .filter_level(LevelFilter::Trace)
        .format(|buf, record| writeln!(buf, "{}", record.args()))
        .is_test(true)
        .try_init();
}
#[test]
/// https://github.com/dart-lang/pub/blob/master/doc/solver.md#no-conflicts
fn no_conflict() {
    init_log();
    let mut dependency_provider = OfflineDependencyProvider::<&str, SemVS>::new();
    // root 1.0.0 depends on foo >=1.0.0 <2.0.0
    #[rustfmt::skip]
    dependency_provider.add_dependencies(
        "root", (1, 0, 0),
        [("foo", Range::between((1, 0, 0), (2, 0, 0)))],
    );
    // foo 1.0.0 depends on bar >=1.0.0 <2.0.0
    #[rustfmt::skip]
    dependency_provider.add_dependencies(
        "foo", (1, 0, 0),
        [("bar", Range::between((1, 0, 0), (2, 0, 0)))],
    );
    // bar has two versions and no dependencies.
    dependency_provider.add_dependencies("bar", (1, 0, 0), []);
    dependency_provider.add_dependencies("bar", (2, 0, 0), []);
    // Run the algorithm.
    let computed_solution = resolve(&dependency_provider, "root", (1, 0, 0)).unwrap();
    // Solution.
    let mut expected_solution = Map::default();
    expected_solution.insert("root", (1, 0, 0).into());
    expected_solution.insert("foo", (1, 0, 0).into());
    expected_solution.insert("bar", (1, 0, 0).into());
    // Comparing the true solution with the one computed by the algorithm.
    assert_eq!(expected_solution, computed_solution);
}
#[test]
/// https://github.com/dart-lang/pub/blob/master/doc/solver.md#avoiding-conflict-during-decision-making
fn avoiding_conflict_during_decision_making() {
    init_log();
    let mut dependency_provider = OfflineDependencyProvider::<&str, SemVS>::new();
    // root 1.0.0 depends on foo ^1.0.0 and bar ^1.0.0
    #[rustfmt::skip]
    dependency_provider.add_dependencies(
        "root", (1, 0, 0),
        [
            ("foo", Range::between((1, 0, 0), (2, 0, 0))),
            ("bar", Range::between((1, 0, 0), (2, 0, 0))),
        ],
    );
    // foo 1.1.0 depends on bar ^2.0.0, which conflicts with root's bar ^1.0.0.
    #[rustfmt::skip]
    dependency_provider.add_dependencies(
        "foo", (1, 1, 0),
        [("bar", Range::between((2, 0, 0), (3, 0, 0)))],
    );
    dependency_provider.add_dependencies("foo", (1, 0, 0), []);
    dependency_provider.add_dependencies("bar", (1, 0, 0), []);
    dependency_provider.add_dependencies("bar", (1, 1, 0), []);
    dependency_provider.add_dependencies("bar", (2, 0, 0), []);
    // Run the algorithm.
    let computed_solution = resolve(&dependency_provider, "root", (1, 0, 0)).unwrap();
    // Solution: foo 1.0.0 is selected to avoid the bar conflict.
    let mut expected_solution = Map::default();
    expected_solution.insert("root", (1, 0, 0).into());
    expected_solution.insert("foo", (1, 0, 0).into());
    expected_solution.insert("bar", (1, 1, 0).into());
    // Comparing the true solution with the one computed by the algorithm.
    assert_eq!(expected_solution, computed_solution);
}
#[test]
/// https://github.com/dart-lang/pub/blob/master/doc/solver.md#performing-conflict-resolution
fn conflict_resolution() {
    init_log();
    let mut dependency_provider = OfflineDependencyProvider::<&str, SemVS>::new();
    // root 1.0.0 depends on foo >=1.0.0
    #[rustfmt::skip]
    dependency_provider.add_dependencies(
        "root", (1, 0, 0),
        [("foo", Range::higher_than((1, 0, 0)))],
    );
    // foo 2.0.0 depends on bar ^1.0.0
    #[rustfmt::skip]
    dependency_provider.add_dependencies(
        "foo", (2, 0, 0),
        [("bar", Range::between((1, 0, 0), (2, 0, 0)))],
    );
    dependency_provider.add_dependencies("foo", (1, 0, 0), []);
    // bar 1.0.0 depends on foo ^1.0.0, conflicting with foo 2.0.0.
    #[rustfmt::skip]
    dependency_provider.add_dependencies(
        "bar", (1, 0, 0),
        [("foo", Range::between((1, 0, 0), (2, 0, 0)))],
    );
    // Run the algorithm.
    let computed_solution = resolve(&dependency_provider, "root", (1, 0, 0)).unwrap();
    // Solution: foo 1.0.0 (without bar) after backtracking from foo 2.0.0.
    let mut expected_solution = Map::default();
    expected_solution.insert("root", (1, 0, 0).into());
    expected_solution.insert("foo", (1, 0, 0).into());
    // Comparing the true solution with the one computed by the algorithm.
    assert_eq!(expected_solution, computed_solution);
}
#[test]
/// https://github.com/dart-lang/pub/blob/master/doc/solver.md#conflict-resolution-with-a-partial-satisfier
fn conflict_with_partial_satisfier() {
    init_log();
    let mut dependency_provider = OfflineDependencyProvider::<&str, SemVS>::new();
    #[rustfmt::skip]
    // root 1.0.0 depends on foo ^1.0.0 and target ^2.0.0
    dependency_provider.add_dependencies(
        "root", (1, 0, 0),
        [
            ("foo", Range::between((1, 0, 0), (2, 0, 0))),
            ("target", Range::between((2, 0, 0), (3, 0, 0))),
        ],
    );
    #[rustfmt::skip]
    // foo 1.1.0 depends on left ^1.0.0 and right ^1.0.0
    dependency_provider.add_dependencies(
        "foo", (1, 1, 0),
        [
            ("left", Range::between((1, 0, 0), (2, 0, 0))),
            ("right", Range::between((1, 0, 0), (2, 0, 0))),
        ],
    );
    dependency_provider.add_dependencies("foo", (1, 0, 0), []);
    #[rustfmt::skip]
    // left 1.0.0 depends on shared >=1.0.0
    dependency_provider.add_dependencies(
        "left", (1, 0, 0),
        [("shared", Range::higher_than((1, 0, 0)))],
    );
    #[rustfmt::skip]
    // right 1.0.0 depends on shared <2.0.0
    dependency_provider.add_dependencies(
        "right", (1, 0, 0),
        [("shared", Range::strictly_lower_than((2, 0, 0)))],
    );
    dependency_provider.add_dependencies("shared", (2, 0, 0), []);
    #[rustfmt::skip]
    // shared 1.0.0 depends on target ^1.0.0
    dependency_provider.add_dependencies(
        "shared", (1, 0, 0),
        [("target", Range::between((1, 0, 0), (2, 0, 0)))],
    );
    dependency_provider.add_dependencies("target", (2, 0, 0), []);
    dependency_provider.add_dependencies("target", (1, 0, 0), []);
    // Run the algorithm.
    let computed_solution = resolve(&dependency_provider, "root", (1, 0, 0)).unwrap();
    // Solution: foo 1.0.0 avoids the left/right/shared chain entirely,
    // so target 2.0.0 can satisfy root.
    let mut expected_solution = Map::default();
    expected_solution.insert("root", (1, 0, 0).into());
    expected_solution.insert("foo", (1, 0, 0).into());
    expected_solution.insert("target", (2, 0, 0).into());
    // Comparing the true solution with the one computed by the algorithm.
    assert_eq!(expected_solution, computed_solution);
}
#[test]
/// a0 dep on b and c
/// b0 dep on d0
/// b1 dep on d1 (not existing)
/// c0 has no dep
/// c1 dep on d2 (not existing)
/// d0 has no dep
///
/// Solution: a0, b0, c0, d0
fn double_choices() {
    init_log();
    let mut dependency_provider = OfflineDependencyProvider::<&str, NumVS>::new();
    dependency_provider.add_dependencies("a", 0, [("b", Range::full()), ("c", Range::full())]);
    dependency_provider.add_dependencies("b", 0, [("d", Range::singleton(0))]);
    // b1 and c1 require d versions that are never registered below.
    dependency_provider.add_dependencies("b", 1, [("d", Range::singleton(1))]);
    dependency_provider.add_dependencies("c", 0, []);
    dependency_provider.add_dependencies("c", 1, [("d", Range::singleton(2))]);
    dependency_provider.add_dependencies("d", 0, []);
    // Solution.
    let mut expected_solution = Map::default();
    expected_solution.insert("a", 0.into());
    expected_solution.insert("b", 0.into());
    expected_solution.insert("c", 0.into());
    expected_solution.insert("d", 0.into());
    // Run the algorithm.
    let computed_solution = resolve(&dependency_provider, "a", 0).unwrap();
    assert_eq!(expected_solution, computed_solution);
}

View File

@ -1,619 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use std::{collections::BTreeSet as Set, error::Error};
use pubgrub::error::PubGrubError;
use pubgrub::package::Package;
use pubgrub::range::Range;
use pubgrub::report::{DefaultStringReporter, DerivationTree, External, Reporter};
use pubgrub::solver::{resolve, Dependencies, DependencyProvider, OfflineDependencyProvider};
use pubgrub::type_aliases::SelectedDependencies;
use pubgrub::version::{NumberVersion, SemanticVersion};
use pubgrub::version_set::VersionSet;
use proptest::collection::{btree_map, btree_set, vec};
use proptest::prelude::*;
use proptest::sample::Index;
use proptest::string::string_regex;
use crate::sat_dependency_provider::SatResolve;
mod sat_dependency_provider;
/// The same as [OfflineDependencyProvider] but takes versions from the opposite end:
/// if [OfflineDependencyProvider] returns versions from newest to oldest, this returns them from oldest to newest.
// Newtype wrapper; every call is forwarded except `choose_version`.
#[derive(Clone)]
struct OldestVersionsDependencyProvider<P: Package, VS: VersionSet>(
    OfflineDependencyProvider<P, VS>,
);
impl<P: Package, VS: VersionSet> DependencyProvider<P, VS>
    for OldestVersionsDependencyProvider<P, VS>
{
    /// Forwarded unchanged to the wrapped provider.
    fn get_dependencies(
        &self,
        p: &P,
        v: &VS::V,
    ) -> Result<Dependencies<P, VS>, Box<dyn Error + Send + Sync>> {
        self.0.get_dependencies(p, v)
    }

    /// Pick the first version in the wrapped provider's iteration order
    /// that fits `range` — i.e. the opposite end from the default strategy
    /// (see the struct docs).
    fn choose_version(
        &self,
        package: &P,
        range: &VS,
    ) -> Result<Option<VS::V>, Box<dyn Error + Send + Sync>> {
        Ok(self
            .0
            .versions(package)
            .into_iter()
            .flatten()
            .find(|&v| range.contains(v))
            .cloned())
    }

    type Priority = <OfflineDependencyProvider<P, VS> as DependencyProvider<P, VS>>::Priority;

    /// Forwarded unchanged to the wrapped provider.
    fn prioritize(&self, package: &P, range: &VS) -> Self::Priority {
        self.0.prioritize(package, range)
    }
}
/// The same as DP but it has a timeout.
#[derive(Clone)]
struct TimeoutDependencyProvider<DP> {
    /// The wrapped provider that all calls are forwarded to.
    dp: DP,
    /// Creation time, used to enforce a wall-clock limit in `should_cancel`.
    start_time: std::time::Instant,
    /// Number of `should_cancel` calls seen so far.
    call_count: std::cell::Cell<u64>,
    /// Budget of `should_cancel` calls before the test is failed.
    max_calls: u64,
}
impl<DP> TimeoutDependencyProvider<DP> {
    /// Wrap `dp`, allowing at most `max_calls` cancellation checks.
    fn new(dp: DP, max_calls: u64) -> Self {
        let start_time = std::time::Instant::now();
        let call_count = std::cell::Cell::new(0);
        Self {
            dp,
            start_time,
            call_count,
            max_calls,
        }
    }
}
impl<P: Package, VS: VersionSet, DP: DependencyProvider<P, VS>> DependencyProvider<P, VS>
    for TimeoutDependencyProvider<DP>
{
    /// Forwarded unchanged to the wrapped provider.
    fn get_dependencies(
        &self,
        p: &P,
        v: &VS::V,
    ) -> Result<Dependencies<P, VS>, Box<dyn Error + Send + Sync>> {
        self.dp.get_dependencies(p, v)
    }

    /// Panics (failing the test) if resolution has run for 60 s of
    /// wall-clock time or exceeded the configured call budget.
    fn should_cancel(&self) -> Result<(), Box<dyn Error + Send + Sync>> {
        assert!(self.start_time.elapsed().as_secs() < 60);
        let calls = self.call_count.get();
        assert!(calls < self.max_calls);
        self.call_count.set(calls + 1);
        Ok(())
    }

    /// Forwarded unchanged to the wrapped provider.
    fn choose_version(
        &self,
        package: &P,
        range: &VS,
    ) -> Result<Option<VS::V>, Box<dyn Error + Send + Sync>> {
        self.dp.choose_version(package, range)
    }

    type Priority = DP::Priority;

    /// Forwarded unchanged to the wrapped provider.
    fn prioritize(&self, package: &P, range: &VS) -> Self::Priority {
        self.dp.prioritize(package, range)
    }
}
/// Resolve through a [TimeoutDependencyProvider] capped at 50_000
/// `should_cancel` calls, so runaway resolutions fail fast in tests.
fn timeout_resolve<P: Package, VS: VersionSet, DP: DependencyProvider<P, VS>>(
    dependency_provider: DP,
    name: P,
    version: impl Into<VS::V>,
) -> Result<SelectedDependencies<P, VS::V>, PubGrubError<P, VS>> {
    resolve(
        &TimeoutDependencyProvider::new(dependency_provider, 50_000),
        name,
        version,
    )
}
/// Shorthand for ranges over plain integer versions.
type NumVS = Range<NumberVersion>;
/// Shorthand for ranges over semantic versions.
type SemVS = Range<SemanticVersion>;
#[test]
#[should_panic]
fn should_cancel_can_panic() {
    let mut dependency_provider = OfflineDependencyProvider::<_, NumVS>::new();
    dependency_provider.add_dependencies(0, 0, [(666, Range::full())]);
    // Run the algorithm. With a call budget of 1, the second
    // `should_cancel` check inside `TimeoutDependencyProvider` panics.
    let _ = resolve(
        &TimeoutDependencyProvider::new(dependency_provider, 1),
        0,
        0,
    );
}
/// Strategy generating short alphanumeric package names,
/// excluding the reserved names "root" and "bad".
fn string_names() -> impl Strategy<Value = String> {
    string_regex("[A-Za-z][A-Za-z0-9_-]{0,5}")
        .unwrap()
        .prop_filter("reserved names", |n| {
            // root is the name of the thing being compiled
            // so it would be confusing to have it in the index
            // bad is a name reserved for a dep that won't work
            n != "root" && n != "bad"
        })
}
/// This generates a random registry index.
/// Unlike vec((Name, Ver, vec((Name, VerRq), ..), ..)
/// This strategy has a high probability of having valid dependencies.
///
/// Returns the generated provider plus a sample of (package, version)
/// pairs to use as resolution roots ("complicated" cases).
pub fn registry_strategy<N: Package + Ord>(
    name: impl Strategy<Value = N>,
) -> impl Strategy<Value = (OfflineDependencyProvider<N, NumVS>, Vec<(N, NumberVersion)>)> {
    let max_crates = 40;
    let max_versions = 15;
    let shrinkage = 40;
    let complicated_len = 10usize;

    // A version is an integer in 0..max_versions.
    let a_version = ..(max_versions as u32);

    let list_of_versions = btree_set(a_version, 1..=max_versions)
        .prop_map(move |ver| ver.into_iter().collect::<Vec<_>>());

    let list_of_crates_with_versions = btree_map(name, list_of_versions, 1..=max_crates);

    // each version of each crate can depend on each crate smaller then it.
    // In theory shrinkage should be 2, but in practice we get better trees with a larger value.
    let max_deps = max_versions * (max_crates * (max_crates - 1)) / shrinkage;

    let raw_version_range = (any::<Index>(), any::<Index>());
    let raw_dependency = (any::<Index>(), any::<Index>(), raw_version_range);

    // Resolve two proptest indices into an ordered (min, max) pair.
    fn order_index(a: Index, b: Index, size: usize) -> (usize, usize) {
        use std::cmp::{max, min};
        let (a, b) = (a.index(size), b.index(size));
        (min(a, b), max(a, b))
    }

    let list_of_raw_dependency = vec(raw_dependency, ..=max_deps);

    // By default a package depends only on other packages that have a smaller name,
    // this helps make sure that all things in the resulting index are DAGs.
    // If this is true then the DAG is maintained with grater instead.
    let reverse_alphabetical = any::<bool>().no_shrink();

    (
        list_of_crates_with_versions,
        list_of_raw_dependency,
        reverse_alphabetical,
        1..(complicated_len + 1),
    )
        .prop_map(
            move |(crate_vers_by_name, raw_dependencies, reverse_alphabetical, complicated_len)| {
                // Flatten to one entry per (package, version), with an
                // initially-empty dependency list.
                let mut list_of_pkgid: Vec<((N, NumberVersion), Vec<(N, NumVS)>)> =
                    crate_vers_by_name
                        .iter()
                        .flat_map(|(name, vers)| {
                            vers.iter()
                                .map(move |x| ((name.clone(), NumberVersion::from(x)), vec![]))
                        })
                        .collect();
                let len_all_pkgid = list_of_pkgid.len();
                for (a, b, (c, d)) in raw_dependencies {
                    let (a, b) = order_index(a, b, len_all_pkgid);
                    let (a, b) = if reverse_alphabetical { (b, a) } else { (a, b) };
                    let ((dep_name, _), _) = list_of_pkgid[a].to_owned();
                    // Skip self-dependencies.
                    if (list_of_pkgid[b].0).0 == dep_name {
                        continue;
                    }
                    let s = &crate_vers_by_name[&dep_name];
                    let s_last_index = s.len() - 1;
                    let (c, d) = order_index(c, d, s.len() + 1);
                    // Translate the two indices (c, d) into a version range
                    // over dep_name's actual version list.
                    list_of_pkgid[b].1.push((
                        dep_name,
                        if c > s_last_index {
                            Range::empty()
                        } else if c == 0 && d >= s_last_index {
                            Range::full()
                        } else if c == 0 {
                            Range::strictly_lower_than(s[d] + 1)
                        } else if d >= s_last_index {
                            Range::higher_than(s[c])
                        } else if c == d {
                            Range::singleton(s[c])
                        } else {
                            Range::between(s[c], s[d] + 1)
                        },
                    ));
                }
                let mut dependency_provider = OfflineDependencyProvider::<N, NumVS>::new();
                let complicated_len = std::cmp::min(complicated_len, list_of_pkgid.len());
                // Sample roots from the dependency-rich end of the index.
                let complicated: Vec<_> = if reverse_alphabetical {
                    &list_of_pkgid[..complicated_len]
                } else {
                    &list_of_pkgid[(list_of_pkgid.len() - complicated_len)..]
                }
                .iter()
                .map(|(x, _)| (x.0.clone(), x.1))
                .collect();
                for ((name, ver), deps) in list_of_pkgid {
                    dependency_provider.add_dependencies(name, ver, deps);
                }
                (dependency_provider, complicated)
            },
        )
}
/// Ensures that generator makes registries with large dependency trees.
#[test]
fn meta_test_deep_trees_from_strategy() {
    use proptest::strategy::ValueTree;
    use proptest::test_runner::TestRunner;

    // Histogram over solution sizes; bucket 0 also counts failed resolutions.
    let mut dis = [0; 21];
    let strategy = registry_strategy(0u16..665);
    let mut test_runner = TestRunner::deterministic();
    for _ in 0..128 {
        let (dependency_provider, cases) = strategy
            .new_tree(&mut TestRunner::new_with_rng(
                Default::default(),
                test_runner.new_rng(),
            ))
            .unwrap()
            .current();
        for (name, ver) in cases {
            let res = resolve(&dependency_provider, name, ver);
            // A solution of size L lands in bucket min(L, 21) - 1.
            dis[res
                .as_ref()
                .map(|x| std::cmp::min(x.len(), dis.len()) - 1)
                .unwrap_or(0)] += 1;
            // Succeed as soon as every bucket has been hit at least once.
            if dis.iter().all(|&x| x > 0) {
                return;
            }
        }
    }
    panic!(
        "In {} tries we did not see a wide enough distribution of dependency trees! dis: {:?}",
        dis.iter().sum::<i32>(),
        dis
    );
}
/// Removes versions from the dependency provider where the retain function returns false.
/// Solutions are constructed as a set of versions.
/// If there are fewer versions available, there are fewer valid solutions available.
/// If there was no solution to a resolution in the original dependency provider,
/// then there must still be no solution with some options removed.
/// If there was a solution to a resolution in the original dependency provider,
/// there may not be a solution after versions are removes iif removed versions were critical for all valid solutions.
fn retain_versions<N: Package + Ord, VS: VersionSet>(
    dependency_provider: &OfflineDependencyProvider<N, VS>,
    mut retain: impl FnMut(&N, &VS::V) -> bool,
) -> OfflineDependencyProvider<N, VS> {
    let mut smaller_dependency_provider = OfflineDependencyProvider::new();

    for n in dependency_provider.packages() {
        for v in dependency_provider.versions(n).unwrap() {
            // Drop this version entirely when not retained.
            if !retain(n, v) {
                continue;
            }
            // The offline provider always knows its own dependencies,
            // so Unknown here is a bug in the test setup.
            let deps = match dependency_provider.get_dependencies(&n, &v).unwrap() {
                Dependencies::Unknown => panic!(),
                Dependencies::Known(deps) => deps,
            };
            smaller_dependency_provider.add_dependencies(n.clone(), v.clone(), deps)
        }
    }
    smaller_dependency_provider
}
/// Removes dependencies from the dependency provider where the retain function returns false.
/// Solutions are constraned by having to fulfill all the dependencies.
/// If there are fewer dependencies required, there are more valid solutions.
/// If there was a solution to a resolution in the original dependency provider,
/// then there must still be a solution after dependencies are removed.
/// If there was no solution to a resolution in the original dependency provider,
/// there may now be a solution after dependencies are removed.
fn retain_dependencies<N: Package + Ord, VS: VersionSet>(
    dependency_provider: &OfflineDependencyProvider<N, VS>,
    mut retain: impl FnMut(&N, &VS::V, &N) -> bool,
) -> OfflineDependencyProvider<N, VS> {
    let mut smaller_dependency_provider = OfflineDependencyProvider::new();

    for n in dependency_provider.packages() {
        for v in dependency_provider.versions(n).unwrap() {
            // The offline provider always knows its own dependencies,
            // so Unknown here is a bug in the test setup.
            let deps = match dependency_provider.get_dependencies(&n, &v).unwrap() {
                Dependencies::Unknown => panic!(),
                Dependencies::Known(deps) => deps,
            };
            // Keep every version, but filter its dependency edges.
            smaller_dependency_provider.add_dependencies(
                n.clone(),
                v.clone(),
                deps.iter().filter_map(|(dep, range)| {
                    if !retain(n, v, dep) {
                        None
                    } else {
                        Some((dep.clone(), range.clone()))
                    }
                }),
            );
        }
    }
    smaller_dependency_provider
}
/// If resolution of (name, ver) fails, then filtering the index down to just
/// the dependency edges mentioned in the error's derivation tree must
/// still fail — the report should contain every edge needed for the proof.
fn errors_the_same_with_only_report_dependencies<N: Package + Ord>(
    dependency_provider: OfflineDependencyProvider<N, NumVS>,
    name: N,
    ver: NumberVersion,
) {
    // Only meaningful when the full index has no solution.
    let Err(PubGrubError::NoSolution(tree)) =
        timeout_resolve(dependency_provider.clone(), name.clone(), ver)
    else {
        return;
    };

    // Walk the derivation tree collecting each
    // (package, version-set, dependency) edge it mentions.
    fn recursive<N: Package + Ord, VS: VersionSet>(
        to_retain: &mut Vec<(N, VS, N)>,
        tree: &DerivationTree<N, VS>,
    ) {
        match tree {
            DerivationTree::External(External::FromDependencyOf(n1, vs1, n2, _)) => {
                to_retain.push((n1.clone(), vs1.clone(), n2.clone()));
            }
            DerivationTree::Derived(d) => {
                recursive(to_retain, &*d.cause1);
                recursive(to_retain, &*d.cause2);
            }
            _ => {}
        }
    }

    let mut to_retain = Vec::new();
    recursive(&mut to_retain, &tree);

    // Keep only the edges that appear in the derivation tree.
    let removed_provider = retain_dependencies(&dependency_provider, |p, v, d| {
        to_retain
            .iter()
            .any(|(n1, vs1, n2)| n1 == p && vs1.contains(v) && n2 == d)
    });

    assert!(
        timeout_resolve(removed_provider.clone(), name, ver).is_err(),
        "The full index errored filtering to only dependencies in the derivation tree succeeded"
    );
}
proptest! {
    #![proptest_config(ProptestConfig {
        max_shrink_iters:
            if std::env::var("CI").is_ok() {
                // This attempts to make sure that CI will fail fast,
                0
            } else {
                // but that local builds will give a small clear test case.
                2048
            },
        result_cache: prop::test_runner::basic_result_cache,
        .. ProptestConfig::default()
    })]

    #[test]
    /// This test is mostly for profiling.
    fn prop_passes_string(
        (dependency_provider, cases) in registry_strategy(string_names())
    ) {
        for (name, ver) in cases {
            // The outcome is irrelevant; we only check that it terminates in time.
            _ = timeout_resolve(dependency_provider.clone(), name, ver);
        }
    }

    #[test]
    /// This test is mostly for profiling.
    fn prop_passes_int(
        (dependency_provider, cases) in registry_strategy(0u16..665)
    ) {
        for (name, ver) in cases {
            _ = timeout_resolve(dependency_provider.clone(), name, ver);
        }
    }

    #[test]
    /// The resolver and the SAT-based reference implementation must agree
    /// on whether each case is solvable.
    fn prop_sat_errors_the_same(
        (dependency_provider, cases) in registry_strategy(0u16..665)
    ) {
        let mut sat = SatResolve::new(&dependency_provider);
        for (name, ver) in cases {
            let res = timeout_resolve(dependency_provider.clone(), name, ver);
            sat.check_resolve(&res, &name, &ver);
        }
    }

    #[test]
    /// See [errors_the_same_with_only_report_dependencies]: the derivation tree
    /// is self-contained evidence of the conflict.
    fn prop_errors_the_same_with_only_report_dependencies(
        (dependency_provider, cases) in registry_strategy(0u16..665)
    ) {
        for (name, ver) in cases {
            errors_the_same_with_only_report_dependencies(dependency_provider.clone(), name, ver);
        }
    }

    #[test]
    /// This tests whether the algorithm is still deterministic.
    fn prop_same_on_repeated_runs(
        (dependency_provider, cases) in registry_strategy(0u16..665)
    ) {
        for (name, ver) in cases {
            let one = timeout_resolve(dependency_provider.clone(), name, ver);
            for _ in 0..3 {
                // Same input must yield the same solution, or an identical
                // human-readable error report.
                match (&one, &timeout_resolve(dependency_provider.clone(), name, ver)) {
                    (Ok(l), Ok(r)) => assert_eq!(l, r),
                    (Err(PubGrubError::NoSolution(derivation_l)), Err(PubGrubError::NoSolution(derivation_r))) => {
                        prop_assert_eq!(
                            DefaultStringReporter::report(derivation_l),
                            DefaultStringReporter::report(derivation_r)
                        )},
                    _ => panic!("not the same result")
                }
            }
        }
    }

    #[test]
    /// [ReverseDependencyProvider] changes what order the candidates
    /// are tried but not the existence of a solution.
    fn prop_reversed_version_errors_the_same(
        (dependency_provider, cases) in registry_strategy(0u16..665)
    ) {
        let reverse_provider = OldestVersionsDependencyProvider(dependency_provider.clone());
        for (name, ver) in cases {
            let l = timeout_resolve(dependency_provider.clone(), name, ver);
            let r = timeout_resolve(reverse_provider.clone(), name, ver);
            // Only solvability must match; the selected versions may differ.
            match (&l, &r) {
                (Ok(_), Ok(_)) => (),
                (Err(_), Err(_)) => (),
                _ => panic!("not the same result")
            }
        }
    }

    #[test]
    /// Removing a dependency edge can only relax constraints, so a case that
    /// resolved with the full index must still resolve afterwards.
    fn prop_removing_a_dep_cant_break(
        (dependency_provider, cases) in registry_strategy(0u16..665),
        indexes_to_remove in prop::collection::vec((any::<prop::sample::Index>(), any::<prop::sample::Index>(), any::<prop::sample::Index>()), 1..10)
    ) {
        let packages: Vec<_> = dependency_provider.packages().collect();
        let mut to_remove = Set::new();
        // Pick up to 10 concrete (package, version, dependency) edges to drop.
        for (package_idx, version_idx, dep_idx) in indexes_to_remove {
            let package = package_idx.get(&packages);
            let versions: Vec<_> = dependency_provider
                .versions(package)
                .unwrap().collect();
            let version = version_idx.get(&versions);
            let dependencies: Vec<(u16, NumVS)> = match dependency_provider
                .get_dependencies(package, version)
                .unwrap()
            {
                Dependencies::Unknown => panic!(),
                Dependencies::Known(d) => d.into_iter().collect(),
            };
            if !dependencies.is_empty() {
                to_remove.insert((package, **version, dep_idx.get(&dependencies).0));
            }
        }
        let removed_provider = retain_dependencies(
            &dependency_provider,
            |p, v, d| {!to_remove.contains(&(&p, *v, *d))}
        );
        for (name, ver) in cases {
            if timeout_resolve(dependency_provider.clone(), name, ver).is_ok() {
                prop_assert!(
                    timeout_resolve(removed_provider.clone(), name, ver).is_ok(),
                    "full index worked for `{} = \"={}\"` but removing some deps broke it!",
                    name,
                    ver,
                )
            }
        }
    }

    #[test]
    /// Unpublishing versions that are irrelevant to the outcome (not selected
    /// on success, or any versions on failure) must not change the outcome.
    fn prop_limited_independence_of_irrelevant_alternatives(
        (dependency_provider, cases) in registry_strategy(0u16..665),
        indexes_to_remove in prop::collection::vec(any::<prop::sample::Index>(), 1..10)
    ) {
        // Flatten the registry into every published (package, version) pair.
        let all_versions: Vec<(u16, NumberVersion)> = dependency_provider
            .packages()
            .flat_map(|&p| {
                dependency_provider
                    .versions(&p)
                    .unwrap()
                    .map(move |&v| (p, v))
            })
            .collect();
        let to_remove: Set<(_, _)> = indexes_to_remove.iter().map(|x| x.get(&all_versions)).cloned().collect();
        for (name, ver) in cases {
            match timeout_resolve(dependency_provider.clone(), name, ver) {
                Ok(used) => {
                    // If resolution was successful, then unpublishing a version of a crate
                    // that was not selected should not change that.
                    let smaller_dependency_provider = retain_versions(&dependency_provider, |n, v| {
                        used.get(&n) == Some(&v) // it was used
                            || to_remove.get(&(*n, *v)).is_none() // or it is not one to be removed
                    });
                    prop_assert!(
                        timeout_resolve(smaller_dependency_provider.clone(), name, ver).is_ok(),
                        "unpublishing {:?} stopped `{} = \"={}\"` from working",
                        to_remove,
                        name,
                        ver
                    )
                }
                Err(_) => {
                    // If resolution was unsuccessful, then it should stay unsuccessful
                    // even if any version of a crate is unpublished.
                    let smaller_dependency_provider = retain_versions(&dependency_provider, |n, v| {
                        to_remove.get(&(*n, *v)).is_some() // it is one to be removed
                    });
                    prop_assert!(
                        timeout_resolve(smaller_dependency_provider.clone(), name, ver).is_err(),
                        "full index did not work for `{} = \"={}\"` but unpublishing {:?} fixed it!",
                        name,
                        ver,
                        to_remove,
                    )
                }
            }
        }
    }
}
#[cfg(feature = "serde")]
#[test]
/// Replays every serialized registry under `test-examples/`, resolving each
/// (package, version) pair and cross-checking the outcome against the
/// SAT-based reference resolver. The file-name suffix selects which
/// package/version types to deserialize.
fn large_case() {
    for case in std::fs::read_dir("test-examples").unwrap() {
        let case = case.unwrap().path();
        let name = case.file_name().unwrap().to_string_lossy();
        // Progress output so a hang can be attributed to a specific file.
        eprint!("{} ", name);
        let data = std::fs::read_to_string(&case).unwrap();
        let start_time = std::time::Instant::now();
        if name.ends_with("u16_NumberVersion.ron") {
            let dependency_provider: OfflineDependencyProvider<u16, NumVS> =
                ron::de::from_str(&data).unwrap();
            let mut sat = SatResolve::new(&dependency_provider);
            for p in dependency_provider.packages() {
                for v in dependency_provider.versions(p).unwrap() {
                    let res = resolve(&dependency_provider, p.clone(), v);
                    sat.check_resolve(&res, p, v);
                }
            }
        } else if name.ends_with("str_SemanticVersion.ron") {
            let dependency_provider: OfflineDependencyProvider<&str, SemVS> =
                ron::de::from_str(&data).unwrap();
            let mut sat = SatResolve::new(&dependency_provider);
            for p in dependency_provider.packages() {
                for v in dependency_provider.versions(p).unwrap() {
                    let res = resolve(&dependency_provider, p.clone(), v);
                    sat.check_resolve(&res, p, v);
                }
            }
        }
        eprintln!(" in {}s", start_time.elapsed().as_secs())
    }
}

View File

@ -1,151 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use pubgrub::error::PubGrubError;
use pubgrub::package::Package;
use pubgrub::solver::{Dependencies, DependencyProvider, OfflineDependencyProvider};
use pubgrub::type_aliases::{Map, SelectedDependencies};
use pubgrub::version_set::VersionSet;
use varisat::ExtendFormula;
/// Adds clauses to `solver` enforcing that at most one of `vars` is true.
///
/// Small slices get direct pairwise "not both" clauses; larger slices use a
/// logarithmic binary encoding with fresh commander bits.
fn sat_at_most_one(solver: &mut impl varisat::ExtendFormula, vars: &[varisat::Var]) {
    match vars {
        // Zero or one variable: the constraint is trivially satisfied.
        [] | [_] => {}
        // Two or three variables: pairwise clauses are cheapest.
        [a, b] => solver.add_clause(&[a.negative(), b.negative()]),
        [a, b, c] => {
            solver.add_clause(&[a.negative(), b.negative()]);
            solver.add_clause(&[a.negative(), c.negative()]);
            solver.add_clause(&[b.negative(), c.negative()]);
        }
        _ => {
            // use the "Binary Encoding" from
            // https://www.it.uu.se/research/group/astra/ModRef10/papers/Alan%20M.%20Frisch%20and%20Paul%20A.%20Giannoros.%20SAT%20Encodings%20of%20the%20At-Most-k%20Constraint%20-%20ModRef%202010.pdf
            let bit_count = vars.len().ilog2() as usize + 1;
            let bit_vars: Vec<varisat::Var> = solver.new_var_iter(bit_count).collect();
            // Each var forces the commander bits to spell out its own index,
            // so two distinct active vars would be contradictory.
            for (index, var) in vars.iter().enumerate() {
                for (bit_index, &bit_var) in bit_vars.iter().enumerate() {
                    solver.add_clause(&[var.negative(), bit_var.lit(((1 << bit_index) & index) > 0)]);
                }
            }
        }
    }
}
/// Resolution can be reduced to the SAT problem. So this is an alternative implementation
/// of the resolver that uses a SAT library for the hard work. This is intended to be easy to read,
/// as compared to the real resolver. This will find a valid resolution if one exists.
///
/// The SAT library does not optimize for the newer version,
/// so the selected packages may not match the real resolver.
pub struct SatResolve<P: Package, VS: VersionSet> {
    // Incremental solver holding the encoded index; queries are answered by
    // solving under assumptions so the clause set is built only once.
    solver: varisat::Solver<'static>,
    // For each package, its published versions paired with the SAT variable
    // meaning "this exact version is selected".
    all_versions_by_p: Map<P, Vec<(VS::V, varisat::Var)>>,
}
impl<P: Package, VS: VersionSet> SatResolve<P, VS> {
    /// Encodes the whole offline index into CNF:
    /// one boolean variable per (package, version) pair, an at-most-one
    /// constraint among each package's versions, and for every dependency a
    /// clause "version active => some matching dependency version active".
    pub fn new(dp: &OfflineDependencyProvider<P, VS>) -> Self {
        let mut cnf = varisat::CnfFormula::new();
        let mut all_versions = vec![];
        let mut all_versions_by_p: Map<P, Vec<(VS::V, varisat::Var)>> = Map::default();
        for p in dp.packages() {
            let mut versions_for_p = vec![];
            for v in dp.versions(p).unwrap() {
                let new_var = cnf.new_var();
                all_versions.push((p.clone(), v.clone(), new_var));
                versions_for_p.push(new_var);
                all_versions_by_p
                    .entry(p.clone())
                    .or_default()
                    .push((v.clone(), new_var));
            }
            // no two versions of the same package
            sat_at_most_one(&mut cnf, &versions_for_p);
        }
        // active packages need each of their `deps` to be satisfied
        for (p, v, var) in &all_versions {
            let deps = match dp.get_dependencies(p, v).unwrap() {
                Dependencies::Unknown => panic!(),
                Dependencies::Known(d) => d,
            };
            for (p1, range) in &deps {
                let empty_vec = vec![];
                let mut matches: Vec<varisat::Lit> = all_versions_by_p
                    .get(p1)
                    .unwrap_or(&empty_vec)
                    .iter()
                    .filter(|(v1, _)| range.contains(v1))
                    .map(|(_, var1)| var1.positive())
                    .collect();
                // ^ the `dep` is satisfied or
                matches.push(var.negative());
                // ^ `p` is not active
                cnf.add_clause(&matches);
            }
        }
        let mut solver = varisat::Solver::new();
        solver.add_formula(&cnf);
        // We don't need to `solve` now. We know that "use nothing" will satisfy all the clauses so far.
        // But things run faster if we let it spend some time figuring out how the constraints interact before we add assumptions.
        solver
            .solve()
            .expect("docs say it can't error in default config");
        Self {
            solver,
            all_versions_by_p,
        }
    }

    /// Returns whether selecting exactly `(name, ver)` as the root is
    /// satisfiable, by assuming that version's variable true.
    pub fn resolve(&mut self, name: &P, ver: &VS::V) -> bool {
        if let Some(vers) = self.all_versions_by_p.get(name) {
            if let Some((_, var)) = vers.iter().find(|(v, _)| v == ver) {
                self.solver.assume(&[var.positive()]);
                self.solver
                    .solve()
                    .expect("docs say it can't error in default config")
            } else {
                // Version not published for this package.
                false
            }
        } else {
            // Package not in the index at all.
            false
        }
    }

    /// Returns whether `pids` is a valid solution: assumes every variable
    /// true or false according to whether its version was selected, then
    /// checks satisfiability of the full formula.
    pub fn is_valid_solution(&mut self, pids: &SelectedDependencies<P, VS::V>) -> bool {
        let mut assumption = vec![];
        for (p, vs) in &self.all_versions_by_p {
            let pid_for_p = pids.get(p);
            for (v, var) in vs {
                assumption.push(var.lit(pid_for_p == Some(v)))
            }
        }
        self.solver.assume(&assumption);
        self.solver
            .solve()
            .expect("docs say it can't error in default config")
    }

    /// Cross-checks a resolver outcome: a success must be a valid solution,
    /// and a failure must be unsatisfiable for the SAT encoding too.
    pub fn check_resolve(
        &mut self,
        res: &Result<SelectedDependencies<P, VS::V>, PubGrubError<P, VS>>,
        p: &P,
        v: &VS::V,
    ) {
        match res {
            Ok(s) => {
                assert!(self.is_valid_solution(s));
            }
            Err(_) => {
                assert!(!self.resolve(p, v));
            }
        }
    }
}

View File

@ -1,56 +0,0 @@
// SPDX-License-Identifier: MPL-2.0
use pubgrub::error::PubGrubError;
use pubgrub::range::Range;
use pubgrub::solver::{resolve, OfflineDependencyProvider};
use pubgrub::version::NumberVersion;
type NumVS = Range<NumberVersion>;
#[test]
/// Resolving the same case repeatedly must always produce the same solution.
fn same_result_on_repeated_runs() {
    // Registry: "a" 0 needs any "b" and any "c"; "b" 1 needs "c" in [0, 1).
    let mut provider = OfflineDependencyProvider::<_, NumVS>::new();
    provider.add_dependencies("c", 0, []);
    provider.add_dependencies("c", 2, []);
    provider.add_dependencies("b", 0, []);
    provider.add_dependencies("b", 1, [("c", Range::between(0, 1))]);
    provider.add_dependencies("a", 0, [("b", Range::full()), ("c", Range::full())]);
    let (package, version) = ("a", NumberVersion(0));
    let first = resolve(&provider, package, version);
    for _ in 0..10 {
        let repeat = resolve(&provider, package, version);
        match (&first, &repeat) {
            (Ok(l), Ok(r)) => assert_eq!(l, r),
            _ => panic!("not the same result"),
        }
    }
}
#[test]
/// A dependency on an empty version range is unsatisfiable, directly and
/// through a transitive dependency, and reports `NoSolution` in both cases.
fn should_always_find_a_satisfier() {
    // "a" 0 requires "b" from the empty range: impossible.
    let mut provider = OfflineDependencyProvider::<_, NumVS>::new();
    provider.add_dependencies("a", 0, [("b", Range::empty())]);
    let direct = resolve(&provider, "a", 0);
    assert!(matches!(direct, Err(PubGrubError::NoSolution { .. })));

    // "c" 0 pulls in "a", so it inherits the same conflict.
    provider.add_dependencies("c", 0, [("a", Range::full())]);
    let transitive = resolve(&provider, "c", 0);
    assert!(matches!(transitive, Err(PubGrubError::NoSolution { .. })));
}
#[test]
/// A package that lists itself as a dependency is rejected with
/// `SelfDependency` rather than resolved or reported as `NoSolution`.
fn cannot_depend_on_self() {
    let mut provider = OfflineDependencyProvider::<_, NumVS>::new();
    provider.add_dependencies("a", 0, [("a", Range::full())]);
    let outcome = resolve(&provider, "a", 0);
    assert!(matches!(outcome, Err(PubGrubError::SelfDependency { .. })));
}