SERVER-114126 Add guardrail to Petrick's method (#44523)

GitOrigin-RevId: 0f1da1772e1cdeeb0ffb65282cb72ac5735f3238
Author: Alya Carina Berciu, 2025-12-08 10:26:14 +01:00 (committed by MongoDB Bot)
Parent: cdeea9e9fe
Commit: ebaa1cbd2d
13 changed files with 231 additions and 44 deletions

View File

@@ -99,6 +99,8 @@ last-continuous:
    ticket: SERVER-109322
  - test_file: jstests/core/query/regex/regex_max_pattern_length.js
    ticket: SERVER-86326
  - test_file: jstests/core/query/boolean_simplifier_stress.js
    ticket: SERVER-114126
  suites: null
last-lts:
  all:
@@ -700,4 +702,6 @@ last-lts:
    ticket: SERVER-109322
  - test_file: jstests/core/query/regex/regex_max_pattern_length.js
    ticket: SERVER-86326
  - test_file: jstests/core/query/boolean_simplifier_stress.js
    ticket: SERVER-114126
  suites: null

View File

@@ -1,5 +1,6 @@
/**
* Test to stress the boolean simplifier.
* @tags: [requires_getmore]
*/
import {assertArrayEq} from "jstests/aggregation/extras/utils.js";
@@ -15,12 +16,14 @@ const n2_0 = {n2: 0};
const n3_0 = {n3: 0};
const n4_0 = {n4: 0};
const n5_0 = {n5: 0};
const n6_0 = {n6: 0};
const n0_1 = {n0: {$not: {$eq: 0}}};
const n1_1 = {n1: {$not: {$eq: 0}}};
const n2_1 = {n2: {$not: {$eq: 0}}};
const n3_1 = {n3: {$not: {$eq: 0}}};
const n4_1 = {n4: {$not: {$eq: 0}}};
const n5_1 = {n5: {$not: {$eq: 0}}};
const n6_1 = {n6: {$not: {$eq: 0}}};
const qmcQuery = {
"$or": [
{"$and": [n0_1, n1_1, n2_1, n3_1, n4_0, n5_1]},
@@ -661,23 +664,145 @@ assertArrayEq({
expected: [],
});
// This query should cause us to bail before we enter Petrick's method.
const petrickQuery = {
"$or": [
{"$and": [n0_1, n1_0, n2_1, n3_0, n4_1, n5_0, n6_1]},
{"$and": [n0_0, n1_0, n2_1, n3_1, n4_1, n5_0, n6_1]},
{"$and": [n0_1, n1_1, n2_0, n3_0, n4_1, n5_1, n6_0]},
{"$and": [n0_1, n1_0, n2_1, n3_1, n4_1, n5_0, n6_0]},
{"$and": [n0_1, n1_1, n2_0, n3_1, n4_1, n5_0, n6_0]},
{"$and": [n0_1, n1_0, n2_1, n3_0, n4_1, n5_1, n6_0]},
{"$and": [n0_1, n1_0, n2_1, n3_0, n4_1, n5_1, n6_0]},
{"$and": [n0_0, n1_1, n2_1, n3_0, n4_0, n5_1, n6_1]},
{"$and": [n0_0, n1_1, n2_0, n3_1, n4_0, n5_1, n6_1]},
{"$and": [n0_0, n1_1, n2_1, n3_0, n4_0, n5_1, n6_1]},
{"$and": [n0_0, n1_1, n2_1, n3_1, n4_1, n5_0, n6_0]},
{"$and": [n0_1, n1_1, n2_1, n3_0, n4_0, n5_0, n6_1]},
{"$and": [n0_0, n1_1, n2_1, n3_0, n4_1, n5_1, n6_0]},
{"$and": [n0_1, n1_1, n2_1, n3_0, n4_1, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_1, n4_0, n5_1, n6_1]},
{"$and": [n0_1, n1_0, n2_1, n3_1, n4_1, n5_0, n6_0]},
{"$and": [n0_1, n1_1, n2_0, n3_1, n4_1, n5_0, n6_0]},
{"$and": [n0_1, n1_0, n2_1, n3_1, n4_0, n5_0, n6_1]},
{"$and": [n0_1, n1_1, n2_1, n3_1, n4_0, n5_0, n6_0]},
{"$and": [n0_1, n1_0, n2_0, n3_1, n4_1, n5_1, n6_0]},
{"$and": [n0_0, n1_0, n2_1, n3_1, n4_0, n5_0, n6_1]},
{"$and": [n0_0, n1_1, n2_1, n3_1, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_1, n5_1, n6_0]},
{"$and": [n0_0, n1_0, n2_1, n3_0, n4_1, n5_0, n6_1]},
{"$and": [n0_1, n1_0, n2_0, n3_0, n4_0, n5_1, n6_1]},
{"$and": [n0_0, n1_0, n2_1, n3_1, n4_0, n5_0, n6_1]},
{"$and": [n0_1, n1_0, n2_0, n3_0, n4_0, n5_1, n6_1]},
{"$and": [n0_0, n1_0, n2_0, n3_1, n4_1, n5_0, n6_1]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_1, n5_1, n6_0]},
{"$and": [n0_1, n1_0, n2_0, n3_0, n4_1, n5_0, n6_1]},
{"$and": [n0_1, n1_1, n2_1, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_1, n1_0, n2_0, n3_1, n4_1, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_1, n3_0, n4_0, n5_1, n6_1]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_0, n5_1, n6_1]},
{"$and": [n0_1, n1_0, n2_0, n3_1, n4_0, n5_0, n6_1]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_1, n5_0, n6_1]},
{"$and": [n0_0, n1_0, n2_0, n3_1, n4_1, n5_1, n6_0]},
{"$and": [n0_1, n1_0, n2_1, n3_0, n4_0, n5_0, n6_1]},
{"$and": [n0_1, n1_0, n2_1, n3_1, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_1, n3_1, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_1, n5_0, n6_1]},
{"$and": [n0_0, n1_1, n2_1, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_1, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_1, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_0, n5_0, n6_1]},
{"$and": [n0_1, n1_0, n2_0, n3_1, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_1, n5_0, n6_0]},
{"$and": [n0_1, n1_0, n2_0, n3_1, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_1, n4_1, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_1, n6_1]},
{"$and": [n0_1, n1_0, n2_0, n3_0, n4_1, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_1, n3_0, n4_0, n5_0, n6_1]},
{"$and": [n0_0, n1_0, n2_0, n3_1, n4_1, n5_0, n6_0]},
{"$and": [n0_1, n1_1, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_0, n5_1, n6_0]},
{"$and": [n0_1, n1_0, n2_0, n3_0, n4_0, n5_0, n6_1]},
{"$and": [n0_0, n1_0, n2_0, n3_1, n4_0, n5_0, n6_1]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_1, n5_0, n6_1]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_1, n5_0, n6_1]},
{"$and": [n0_0, n1_0, n2_1, n3_0, n4_0, n5_1, n6_0]},
{"$and": [n0_0, n1_0, n2_1, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_1, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_1, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_1, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_1, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_1, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_1, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_1, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_1, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_1, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_1, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_1, n6_0]},
{"$and": [n0_1, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_1, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_1, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_1, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
{"$and": [n0_0, n1_0, n2_0, n3_0, n4_0, n5_0, n6_0]},
],
};
assertArrayEq({
actual: coll.find(petrickQuery).toArray(),
expected: [],
});
// Insert documents and try again.
const docs = [];
for (let i = 0; i < 64; i++) {
for (let i = 0; i < 128; i++) {
docs.push({
_id: i,
n0: NumberInt(i / 2),
n1: NumberInt(i / 4),
n2: NumberInt(i / 8),
n3: NumberInt(i / 16),
n4: NumberInt(i / 32),
n5: NumberInt(i / 64),
n0: NumberInt(i & 1),
n1: NumberInt((i & 2) >> 1),
n2: NumberInt((i & 4) >> 2),
n3: NumberInt((i & 8) >> 3),
n4: NumberInt((i & 16) >> 4),
n5: NumberInt((i & 32) >> 5),
n6: NumberInt((i & 64) >> 6),
});
}
assert.commandWorked(coll.insertMany(docs));
// Will match everything!
// Will match everything except a few _ids.
assertArrayEq({
actual: coll.find(qmcQuery).toArray(),
expected: docs,
expected: Array.from(docs.filter((x) => x._id != NumberInt(127) && x._id != NumberInt(63))),
});
// Will match only the following _ids.
const expectedIds = [
0, 1, 2, 3, 4, 6, 7, 8, 9, 13, 14, 15, 16, 17, 18, 23, 24, 25, 27, 29, 30, 32, 34, 36, 50, 51, 53, 54, 56, 57, 65,
66, 68, 69, 71, 72, 73, 76, 77, 80, 81, 82, 84, 85, 88, 92, 96, 97, 98, 100, 102, 106,
];
assertArrayEq({
actual: coll.find(petrickQuery).toArray(),
expected: Array.from(docs.filter((doc) => expectedIds.includes(doc._id))),
});

View File

@@ -207,11 +207,13 @@ private:
*/
class TabularPetrick {
public:
explicit TabularPetrick(const std::vector<CoveredOriginalMinterms>& data)
: _numberOfBits(data.size()), _essentialImplicants(PrimeImplicant(data.size())) {
explicit TabularPetrick(const std::vector<CoveredOriginalMinterms>& data,
size_t maxNumPrimeImplicants)
: _numberOfBits(data.size()),
_maxNumPrimeImplicants{maxNumPrimeImplicants},
_essentialImplicants(PrimeImplicant(data.size())) {
for (size_t implicantIndex = 0; implicantIndex < data.size(); ++implicantIndex) {
for (auto mintermIndex = data[implicantIndex].findFirst(); mintermIndex != Bitset::npos;
mintermIndex = data[implicantIndex].findNext(mintermIndex)) {
for (auto mintermIndex : makePopulationView(data[implicantIndex])) {
insert(mintermIndex, implicantIndex);
}
}
@@ -225,8 +227,17 @@ public:
return std::vector<PrimeImplicantIndices>{_essentialImplicants.getListOfSetBits()};
}
size_t cumulativePrimeImplicants = 0;
while (_table.size() > 1) {
const size_t size = _table.size();
const size_t nextProductSize = _table[size - 1].size() * _table[size - 2].size();
if (cumulativePrimeImplicants + nextProductSize > _maxNumPrimeImplicants) {
return {};
} else {
cumulativePrimeImplicants += nextProductSize;
}
auto productResult = _table[size - 1].product(_table[size - 2]);
_table.pop_back();
_table[_table.size() - 1].swap(productResult);
@@ -265,17 +276,18 @@ private:
}
const size_t _numberOfBits;
const size_t _maxNumPrimeImplicants;
std::vector<ImplicantSum> _table;
PrimeImplicant _essentialImplicants;
};
} // namespace
std::vector<PrimeImplicantIndices> petricksMethod(
const std::vector<CoveredOriginalMinterms>& data) {
std::vector<PrimeImplicantIndices> petricksMethod(const std::vector<CoveredOriginalMinterms>& data,
size_t maxNumPrimeImplicants) {
if (data.empty()) {
return {};
}
TabularPetrick table{data};
TabularPetrick table{data, maxNumPrimeImplicants};
return table.getMinimalCoverages();
}
} // namespace mongo::boolean_simplification
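The loop added to getMinimalCoverages() above is the guardrail itself: before each pairwise product of implicant sums it adds the worst-case size of that product to a running total, and it abandons Petrick's method, returning an empty coverage list, once the total would exceed maxNumPrimeImplicants. Below is a rough standalone sketch of that arithmetic using a hypothetical productStaysWithinLimit() helper (not part of the commit); it keeps every product at its worst-case size, whereas the real code re-measures _table after each ImplicantSum::product(), so absorption can shrink the counts.

#include <cstddef>
#include <vector>

// Hypothetical helper (not in the commit): given the number of terms in each per-minterm sum of
// prime implicants, decide whether multiplying the sums together stays within the cap.
bool productStaysWithinLimit(std::vector<std::size_t> sumSizes, std::size_t maxNumPrimeImplicants) {
    std::size_t cumulativePrimeImplicants = 0;
    while (sumSizes.size() > 1) {
        // A pairwise product can yield up to size(a) * size(b) candidate prime implicant products.
        const std::size_t nextProductSize = sumSizes.back() * sumSizes[sumSizes.size() - 2];
        if (cumulativePrimeImplicants + nextProductSize > maxNumPrimeImplicants) {
            return false;  // The guardrail above bails out of Petrick's method at this point.
        }
        cumulativePrimeImplicants += nextProductSize;
        sumSizes.pop_back();
        sumSizes.back() = nextProductSize;  // Worst case: no terms get absorbed during the product.
    }
    return true;
}

An empty result from petricksMethod() is benign for the caller: quineMcCluskey() then sees fewer than two coverages and simply keeps the prime implicants it already found (see the primeImplicantCoverages.size() < 2 branch further down in this diff).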

View File

@@ -66,6 +66,11 @@ using PrimeImplicantIndices = std::vector<uint32_t>;
* only 2 prime implicants with indices 0 and 2 are enough to cover all 6 original minterms. It is
* possible that we can get more than one coverage as output. For the given input: [[0, 1, 2], [2,
* 3], [0, 3]] two coverages are possible: [[0, 1], [0, 2]].
*
* 'maxNumPrimeImplicants' specifies a threshold for the maximum number of prime implicants we may
* produce, in order to bound both the time complexity and memory footprint of this function. If we
* hit this limit, the function returns an empty vector.
*/
std::vector<PrimeImplicantIndices> petricksMethod(const std::vector<CoveredOriginalMinterms>& data);
std::vector<PrimeImplicantIndices> petricksMethod(const std::vector<CoveredOriginalMinterms>& data,
size_t maxNumPrimeImplicants);
} // namespace mongo::boolean_simplification
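To make the coverage contract described above concrete, here is a small standalone brute-force check of the documented example input [[0, 1, 2], [2, 3], [0, 3]] (illustrative only; it does not use the library): it confirms that exactly the index pairs [0, 1] and [0, 2] cover all four original minterms, which is the kind of answer petricksMethod() derives via the tabular product, now subject to the maxNumPrimeImplicants cap.

#include <cstddef>
#include <iostream>
#include <set>
#include <vector>

int main() {
    // Coverage data from the comment above: prime implicant i covers the listed original minterms.
    const std::vector<std::vector<int>> covers{{0, 1, 2}, {2, 3}, {0, 3}};
    const std::set<int> allMinterms{0, 1, 2, 3};

    // Try every pair of prime implicants and print the pairs that cover every original minterm.
    for (std::size_t a = 0; a < covers.size(); ++a) {
        for (std::size_t b = a + 1; b < covers.size(); ++b) {
            std::set<int> covered(covers[a].begin(), covers[a].end());
            covered.insert(covers[b].begin(), covers[b].end());
            if (covered == allMinterms) {
                std::cout << "coverage: [" << a << ", " << b << "]\n";  // Prints [0, 1] and [0, 2].
            }
        }
    }
    return 0;
}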

View File

@@ -46,7 +46,7 @@ void petrick_classic(benchmark::State& state) {
};
for (auto _ : state) {
benchmark::DoNotOptimize(petricksMethod(data));
benchmark::DoNotOptimize(petricksMethod(data, 1000));
}
}
@@ -63,7 +63,7 @@ void petrick_noSimplifications(benchmark::State& state) {
}
for (auto _ : state) {
benchmark::DoNotOptimize(petricksMethod(data));
benchmark::DoNotOptimize(petricksMethod(data, 1000));
}
}
@@ -83,7 +83,7 @@ void petrick_essentialWithSimplications(benchmark::State& state) {
};
for (auto _ : state) {
benchmark::DoNotOptimize(petricksMethod(data));
benchmark::DoNotOptimize(petricksMethod(data, 1000));
}
}

View File

@@ -59,7 +59,7 @@ TEST(PetrickTest, ClassicExample) {
{0, 2, 3, 5},
};
const auto result = petricksMethod(data);
const auto result = petricksMethod(data, 1000);
ASSERT_EQ(expectedResult, result);
}
@@ -74,7 +74,7 @@ TEST(PetrickTest, OneCoverage) {
{0, 2},
};
const auto result = petricksMethod(data);
const auto result = petricksMethod(data, 1000);
ASSERT_EQ(expectedResult, result);
}
@@ -90,7 +90,7 @@ TEST(PetrickTest, TwoCoverages) {
{0, 2},
};
const auto result = petricksMethod(data);
const auto result = petricksMethod(data, 1000);
ASSERT_EQ(expectedResult, result);
}
@@ -106,7 +106,7 @@ TEST(PetrickTest, NoSimplifications) {
{0, 1, 2, 3},
};
const auto result = petricksMethod(data);
const auto result = petricksMethod(data, 1000);
ASSERT_EQ(expectedResult, result);
}
@@ -125,7 +125,7 @@ TEST(PetrickTest, ManyEssentialsWithSimplifications) {
{0, 2, 3, 4, 5},
};
const auto result = petricksMethod(data);
const auto result = petricksMethod(data, 1000);
ASSERT_EQ(expectedResult, result);
}
@@ -143,7 +143,7 @@ TEST(PetrickTest, ReorderingMultipleEssentialsWithSimplifications) {
{0, 1, 3, 5},
};
const auto result = petricksMethod(data);
const auto result = petricksMethod(data, 1000);
ASSERT_EQ(expectedResult, result);
}
@@ -156,7 +156,7 @@ TEST(PetrickTest, OneMinterm) {
{0},
};
const auto result = petricksMethod(data);
const auto result = petricksMethod(data, 1000);
ASSERT_EQ(expectedResult, result);
}
@@ -165,7 +165,7 @@ TEST(PetrickTest, NoMinterms) {
std::vector<PrimeImplicantIndices> expectedResult{};
const auto result = petricksMethod(data);
const auto result = petricksMethod(data, 1000);
ASSERT_EQ(expectedResult, result);
}
} // namespace mongo::boolean_simplification

View File

@@ -189,15 +189,17 @@ std::pair<Maxterm, std::vector<CoveredOriginalMinterms>> findPrimeImplicants(Max
return result;
}
Maxterm quineMcCluskey(Maxterm inputMaxterm) {
Maxterm quineMcCluskey(Maxterm inputMaxterm, size_t maxNumPrimeImplicants) {
auto [maxterm, maxtermCoverage] = findPrimeImplicants(std::move(inputMaxterm));
const bool allEssential = std::all_of(maxtermCoverage.begin(),
maxtermCoverage.end(),
[](const auto& cov) { return cov.size() == 1; });
if (allEssential) {
return maxterm;
}
const auto& primeImplicantCoverages = petricksMethod(maxtermCoverage);
const auto& primeImplicantCoverages = petricksMethod(maxtermCoverage, maxNumPrimeImplicants);
if (primeImplicantCoverages.size() < 2) {
return maxterm;
}

View File

@@ -66,7 +66,10 @@ std::pair<Maxterm, std::vector<CoveredOriginalMinterms>> findPrimeImplicants(Max
* predicates and the second one represents the mask (see Maxterm and Minterm documentation for
* details). This expression can be simplified to A~BCD | ~ABD | ~A~C, or [(1011, 1111), (0101,
* 1101), (0000, 1010)] in maxterm/minterm representation.
*
* 'maxNumPrimeImplicants' specifies a threshold for the maximum number of prime implicants we may
* produce during the application of Petrick's method.
*/
Maxterm quineMcCluskey(Maxterm maxterm);
Maxterm quineMcCluskey(Maxterm maxterm, size_t maxNumPrimeImplicants);
} // namespace mongo::boolean_simplification
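As a concrete illustration of the representation described above, the sketch below spells the simplified form A~BCD | ~ABD | ~A~C as a Maxterm and runs it through quineMcCluskey() with the new cap. This is a sketch only: the Minterm{values, mask} string constructor and the header path are assumptions borrowed from the unit tests elsewhere in this commit, not something this header prescribes.

#include <utility>

#include "mongo/db/query/boolean_simplification/quine_mccluskey.h"  // Header path assumed.

namespace mongo::boolean_simplification {

// Minterm{values, mask}: the mask marks which predicates participate and the values give their
// required truth value; the leftmost bit corresponds to A, matching the comment above.
Maxterm simplifyExample() {
    Maxterm input{
        Minterm{"1011", "1111"},  // A & ~B & C & D
        Minterm{"0101", "1101"},  // ~A & B & D
        Minterm{"0000", "1010"},  // ~A & ~C
    };
    // The new second argument caps how many prime implicant products Petrick's method may
    // generate; if the cap is hit, quineMcCluskey() returns the prime implicants unreduced.
    return quineMcCluskey(std::move(input), /*maxNumPrimeImplicants=*/1000);
}

}  // namespace mongo::boolean_simplification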

View File

@@ -184,7 +184,7 @@ TEST(QuineMcCluskeyTest, Test5) {
Minterm{"0000", "1010"},
};
auto actualMaxterm = quineMcCluskey(maxterm);
auto actualMaxterm = quineMcCluskey(maxterm, 1000);
ASSERT_EQ(expectedMaxterm, actualMaxterm);
}
@@ -204,17 +204,34 @@ TEST(QuineMcCluskeyTest, Test6) {
Minterm{"111"_b, mask},
};
Maxterm expectedMaxterm{
Minterm{"000", "101"},
Minterm{"100", "110"},
Minterm{"011", "011"},
};
{
// This is the expected output of findPrimeImplicants().
Maxterm expectedMaxterm{
Minterm{"000", "011"},
Minterm{"000", "101"},
Minterm{"010", "110"},
Minterm{"100", "110"},
Minterm{"011", "011"},
Minterm{"101", "101"},
};
auto actualMaxterm = quineMcCluskey(maxterm, 2);
// Validate that with a very low limit on the number of prime implicants generated by
// Petrick's method, we bail out of it gracefully.
ASSERT_EQ(expectedMaxterm, actualMaxterm);
}
auto actualMaxterm = quineMcCluskey(maxterm);
// This test asserts on one possible output: ~A~C | A~B | BC, another possible output is ~A~C |
// ~AB | AC. See the coverage output in FindPrimeImplicantsTest::Test6, the last uncovered
// minterm #5 can be covered by BC or AC. It just happens that we select the first optimal
// coverage, if we change quineMcCluskey the second one can be picked up.
ASSERT_EQ(expectedMaxterm, actualMaxterm);
{
Maxterm expectedMaxterm{
Minterm{"000", "101"},
Minterm{"100", "110"},
Minterm{"011", "011"},
};
auto actualMaxterm = quineMcCluskey(maxterm, 1000);
// This test asserts on one possible output: ~A~C | A~B | BC; another possible output is
// ~A~C | ~AB | AC. See the coverage output in FindPrimeImplicantsTest::Test6: the last
// uncovered minterm #5 can be covered by BC or AC. It just happens that we select the
// first optimal coverage; if quineMcCluskey changes, the second one could be picked instead.
ASSERT_EQ(expectedMaxterm, actualMaxterm);
}
}
} // namespace mongo::boolean_simplification

View File

@@ -816,6 +816,7 @@ std::unique_ptr<MatchExpression> optimizeMatchExpression(
ExpressionSimplifierSettings settings{
static_cast<size_t>(internalQueryMaximumNumberOfUniquePredicatesToSimplify.load()),
static_cast<size_t>(internalQueryMaximumNumberOfMintermsInSimplifier.load()),
static_cast<size_t>(internalQueryMaxNumPrimeImplicants.load()),
internalQueryMaxSizeFactorToSimplify.load(),
internalQueryDoNotOpenContainedOrsInSimplifier.load(),
/*applyQuineMcCluskey*/ true};

View File

@@ -180,7 +180,8 @@ boost::optional<Maxterm> quineMcCluskey(const BitsetTreeNode& tree,
// The simplifications use the Quine-McCluskey algorithm (x&y | x&~y = x). QMC works only
// for expressions with negations.
if (settings.applyQuineMcCluskey && containsNegations(*maxterm)) {
maxterm = boolean_simplification::quineMcCluskey(std::move(*maxterm));
maxterm = boolean_simplification::quineMcCluskey(std::move(*maxterm),
settings.maxNumPrimeImplicants);
}
return maxterm;

View File

@@ -71,17 +71,20 @@ struct ExpressionSimplifierSettings {
: ExpressionSimplifierSettings(
/*maximumNumberOfUniquePredicates*/ std::numeric_limits<size_t>::max(),
/*maximumNumberOfMinterms*/ std::numeric_limits<size_t>::max(),
/*maxNumPrimeImplicants*/ std::numeric_limits<size_t>::max(),
/*maxSizeFactor*/ 1e6,
/*doNotOpenContainedOrs*/ false,
/*applyQuineMcCluskey*/ true) {}
ExpressionSimplifierSettings(size_t maximumNumberOfUniquePredicates,
size_t maximumNumberOfMinterms,
size_t maxNumPrimeImplicants,
double maxSizeFactor,
bool doNotOpenContainedOrs,
bool applyQuineMcCluskey)
: maximumNumberOfUniquePredicates(maximumNumberOfUniquePredicates),
maximumNumberOfMinterms(maximumNumberOfMinterms),
maxNumPrimeImplicants(maxNumPrimeImplicants),
maxSizeFactor(maxSizeFactor),
doNotOpenContainedOrs(doNotOpenContainedOrs),
applyQuineMcCluskey(applyQuineMcCluskey) {}
@@ -97,6 +100,12 @@ struct ExpressionSimplifierSettings {
*/
size_t maximumNumberOfMinterms;
/**
* Petrick's method has exponential complexity, so we bail out of it when it would generate more
* than this specified upper limit of prime implicants.
*/
size_t maxNumPrimeImplicants;
/**
* If the simplified expression is larger than the original expression's size times
* `maxSizeFactor`, the simplified one will be rejected.
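Callers thread the new limit through ExpressionSimplifierSettings; the fragment below mirrors the constructor shown above and the way optimizeMatchExpression() builds its settings from the query knobs earlier in this diff. It assumes this header has been included, and the literal values are purely illustrative (10000 just matches the default of the internalQueryMaxNumPrimeImplicants parameter added below).

// Illustrative values only; in the server they come from the internalQuery* knobs.
ExpressionSimplifierSettings settings{
    /*maximumNumberOfUniquePredicates*/ 100,
    /*maximumNumberOfMinterms*/ 5000,
    /*maxNumPrimeImplicants*/ 10000,
    /*maxSizeFactor*/ 2.0,
    /*doNotOpenContainedOrs*/ false,
    /*applyQuineMcCluskey*/ true};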

View File

@@ -1455,6 +1455,14 @@ server_parameters:
default: 1000
redact: false
internalQueryMaxNumPrimeImplicants:
description: "Threshold capping number of prime implicants generated by Petrick's method."
set_at: [startup, runtime]
cpp_varname: internalQueryMaxNumPrimeImplicants
cpp_vartype: AtomicWord<int>
default: 10000
redact: false
internalQueryMaxSizeFactorToSimplify:
description: >-
If the simplified expression is larger than the original expression's size times