mirror of https://github.com/mongodb/mongo
SERVER-112898 Annotate the collection_write_path module (#44653)
GitOrigin-RevId: eff261833a58531c33c1b8e6ef640f5e8a7a6d01
parent 0d69f14a9c
commit ae781b036a
@@ -763,6 +763,7 @@ collection_write_path: # authoritative: @10gen/server-collection-write-path
   meta:
     slack: server-storage-execution
     jira: Storage Execution
+  fully_marked: true
   files:
   - src/mongo/db/collection_crud
   - src/mongo/db/commands/collection_to_capped*
@@ -54,13 +54,11 @@
 #include <memory>
 #include <utility>

 #include <boost/move/utility_core.hpp>
 #include <boost/optional/optional.hpp>

 #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage

-namespace mongo {
-namespace collection_internal {
+namespace mongo::collection_internal {
 namespace {

 struct CappedCollectionState {
@@ -213,5 +211,4 @@ void cappedDeleteUntilBelowConfiguredMaximum(OperationContext* opCtx,
     });
 }

-} // namespace collection_internal
-} // namespace mongo
+} // namespace mongo::collection_internal
@@ -32,9 +32,9 @@
 #include "mongo/db/operation_context.h"
 #include "mongo/db/record_id.h"
 #include "mongo/db/shard_role/shard_catalog/collection.h"
 #include "mongo/util/modules.h"

-namespace mongo {
-namespace collection_internal {
+namespace mongo::collection_internal {

 bool shouldDeferCappedDeletesToOplogApplication(OperationContext* opCtx,
                                                 const CollectionPtr& collection);
@@ -51,5 +51,4 @@ void cappedDeleteUntilBelowConfiguredMaximum(OperationContext* opCtx,
                                              const RecordId& justInserted,
                                              OpDebug* opDebug);

-} // namespace collection_internal
-} // namespace mongo
+} // namespace mongo::collection_internal
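Note: across these hunks the change is purely syntactic; the separately nested namespace blocks are collapsed into a single C++17 nested namespace definition. A minimal standalone sketch of the before/after pattern (the helper name is a placeholder, not taken from the diff):

// Before: two nested namespace blocks, each with its own closer.
namespace mongo {
namespace collection_internal {
void exampleHelper();  // placeholder declaration
}  // namespace collection_internal
}  // namespace mongo

// After: equivalent C++17 nested namespace definition with a single opener
// and closer; the enclosed declarations are unchanged, only the syntax differs.
namespace mongo::collection_internal {
void exampleHelper();  // redeclaration of the same placeholder, still legal
}  // namespace mongo::collection_internal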
@@ -30,14 +30,14 @@
 #pragma once

 #include "mongo/db/shard_role/shard_role.h"
+#include "mongo/util/modules.h"
 #include "mongo/util/uuid.h"

 #include <boost/optional/optional.hpp>

+MONGO_MOD_PUBLIC;

 namespace mongo {
 class Database;
 class NamespaceString;
 class OperationContext;

 /**
  * Clones the collection "shortFrom" to the capped collection "shortTo" with a size of "size".
@@ -32,22 +32,18 @@
 #include "mongo/base/error_codes.h"
 #include "mongo/base/string_data.h"
 #include "mongo/bson/bsonelement.h"
 #include "mongo/bson/bsonmisc.h"
 #include "mongo/bson/bsonobjbuilder.h"
 #include "mongo/bson/bsontypes.h"
 #include "mongo/bson/simple_bsonelement_comparator.h"
 #include "mongo/bson/timestamp.h"
 #include "mongo/crypto/fle_crypto_types.h"
 #include "mongo/db/collection_crud/capped_collection_maintenance.h"
 #include "mongo/db/exec/document_value/document.h"
 #include "mongo/db/exec/write_stage_common.h"
 #include "mongo/db/feature_flag.h"
 #include "mongo/db/op_observer/op_observer.h"
 #include "mongo/db/op_observer/op_observer_util.h"
 #include "mongo/db/record_id_helpers.h"
 #include "mongo/db/repl/local_oplog_info.h"
 #include "mongo/db/repl/replication_coordinator.h"
 #include "mongo/db/server_options.h"
 #include "mongo/db/service_context.h"
 #include "mongo/db/shard_role/lock_manager/d_concurrency.h"
 #include "mongo/db/shard_role/lock_manager/lock_manager_defs.h"
@@ -65,7 +61,6 @@
 #include "mongo/db/storage/record_data.h"
 #include "mongo/db/storage/record_store.h"
 #include "mongo/db/storage/recovery_unit.h"
 #include "mongo/db/storage/storage_parameters_gen.h"
 #include "mongo/logv2/log.h"
 #include "mongo/platform/compiler.h"
 #include "mongo/util/assert_util.h"
@@ -77,18 +72,15 @@
 #include <cstddef>
 #include <cstdint>
 #include <iterator>
 #include <memory>
 #include <string>
 #include <type_traits>
 #include <utility>

 #include <boost/move/utility_core.hpp>
 #include <boost/optional/optional.hpp>

 #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage

 namespace mongo {
-namespace collection_internal {
 namespace {

 // This failpoint throws a WriteConflictException after a successful call to
@@ -216,7 +208,7 @@ Status allowedToInsertDocuments(OperationContext* opCtx,
     // We require that inserts to indexed capped collections be done one-at-a-time to avoid the
     // possibility that a later document causes an earlier document to be deleted before it can
     // be indexed.
-    if (shouldDeferCappedDeletesToOplogApplication(opCtx, collection)) {
+    if (collection_internal::shouldDeferCappedDeletesToOplogApplication(opCtx, collection)) {
         // However, the logic to do these deletes only runs when the inserts are originally
         // performed (i.e. on the primary). When doing oplog application, the secondary will
         // later apply those delete oplogs that were originally generated by the primary, so
@@ -396,13 +388,35 @@ Status insertDocumentsImpl(OperationContext* opCtx,
                                /*defaultFromMigrate=*/fromMigrate);
     }

-    cappedDeleteUntilBelowConfiguredMaximum(opCtx, collection, records.begin()->id, opDebug);
+    collection_internal::cappedDeleteUntilBelowConfiguredMaximum(
+        opCtx, collection, records.begin()->id, opDebug);

     return Status::OK();
 }

+Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSONObj& firstDoc) {
+    Status s = Status::OK();
+    failCollectionInserts.executeIf(
+        [&](const BSONObj& data) {
+            const std::string msg = str::stream()
+                << "Failpoint (failCollectionInserts) has been enabled (" << data
+                << "), so rejecting insert (first doc): " << firstDoc;
+            LOGV2(20287,
+                  "Failpoint (failCollectionInserts) has been enabled, so rejecting insert",
+                  "data"_attr = data,
+                  "document"_attr = firstDoc);
+            s = {ErrorCodes::FailPointEnabled, msg};
+        },
+        [&](const BSONObj& data) {
+            // If the failpoint specifies no collection or matches the existing one, fail.
+            const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "collectionNS");
+            return fpNss.isEmpty() || ns == fpNss;
+        });
+    return s;
+}
 } // namespace

+namespace collection_internal {
 Status insertDocumentForBulkLoader(OperationContext* opCtx,
                                    const CollectionPtr& collection,
                                    const BSONObj& doc,
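Note: the collection_internal:: qualifiers added in this hunk and the one above are consistent with the restructuring: once the anonymous-namespace helpers sit directly under namespace mongo rather than inside collection_internal, unqualified calls no longer resolve to collection_internal functions. A minimal sketch of that lookup behavior, with illustrative names that are not the real ones:

namespace mongo::collection_internal {
inline void doWrite() {}  // illustrative stand-in for a collection_internal function
}  // namespace mongo::collection_internal

namespace mongo {
namespace {
void helper() {
    // doWrite();                    // would not compile: not visible from mongo::(anonymous)
    collection_internal::doWrite();  // explicit qualification is required from here
}
}  // namespace
}  // namespace mongo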
@@ -598,27 +612,6 @@ Status insertDocument(OperationContext* opCtx,
     return insertDocuments(opCtx, collection, docs.begin(), docs.end(), opDebug, fromMigrate);
 }

-Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSONObj& firstDoc) {
-    Status s = Status::OK();
-    failCollectionInserts.executeIf(
-        [&](const BSONObj& data) {
-            const std::string msg = str::stream()
-                << "Failpoint (failCollectionInserts) has been enabled (" << data
-                << "), so rejecting insert (first doc): " << firstDoc;
-            LOGV2(20287,
-                  "Failpoint (failCollectionInserts) has been enabled, so rejecting insert",
-                  "data"_attr = data,
-                  "document"_attr = firstDoc);
-            s = {ErrorCodes::FailPointEnabled, msg};
-        },
-        [&](const BSONObj& data) {
-            // If the failpoint specifies no collection or matches the existing one, fail.
-            const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "collectionNS");
-            return fpNss.isEmpty() || ns == fpNss;
-        });
-    return s;
-}
-
 void updateDocument(OperationContext* opCtx,
                     const CollectionPtr& collection,
                     const RecordId& oldLocation,
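Note: failCollectionInserts is a test-only failpoint, and executeIf runs the first lambda only when the predicate lambda returns true, i.e. when the failpoint data names no collection or names the one being written. A hedged sketch of how a C++ test might exercise it, assuming the FailPointEnableBlock RAII helper from mongo/util/fail_point.h; the "test.coll" namespace string is a placeholder:

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/util/fail_point.h"

namespace mongo {
void exampleRejectedInsert() {
    // Enable the failpoint for the duration of this scope, scoped to one namespace.
    // With this data payload, checkFailCollectionInsertsFailPoint() should return
    // ErrorCodes::FailPointEnabled for inserts into "test.coll" and OK for other namespaces.
    FailPointEnableBlock fp("failCollectionInserts", BSON("collectionNS" << "test.coll"));

    // ... run the insert under test here and assert it fails with FailPointEnabled ...
}
}  // namespace mongo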
@@ -33,7 +33,6 @@
 #include "mongo/base/status_with.h"
 #include "mongo/bson/bsonobj.h"
 #include "mongo/db/curop.h"
 #include "mongo/db/namespace_string.h"
 #include "mongo/db/operation_context.h"
 #include "mongo/db/record_id.h"
 #include "mongo/db/repl/oplog.h"
@@ -42,23 +41,31 @@
 #include "mongo/db/shard_role/shard_catalog/index_catalog.h"
 #include "mongo/db/storage/damage_vector.h"
 #include "mongo/db/storage/snapshot.h"
 #include "mongo/util/modules.h"

 #include <functional>
 #include <vector>

-namespace mongo {
-namespace collection_internal {
+/**
+ * These functions are intended for low-level operations which need precise control over how a write
+ * is replicated, but are temporarily public because they currently are widely used in tests for
+ * simple CRUD operations. If possible, prefer to use DBHelpers over calling these functions.
+ */
+namespace mongo::collection_internal {

 using OnRecordInsertedFn = std::function<Status(const RecordId& loc)>;

-enum class StoreDeletedDoc { Off, On };
-
-enum class RetryableWrite { kYes, kNo };
+enum class MONGO_MOD_NEEDS_REPLACEMENT StoreDeletedDoc { Off, On };
+
+enum class MONGO_MOD_NEEDS_REPLACEMENT RetryableWrite { kYes, kNo };

 /**
  * Constants used for the opDiff argument in updateDocument and updateDocumentWithDamages.
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 constexpr const BSONObj* kUpdateAllIndexes = nullptr;
+MONGO_MOD_NEEDS_REPLACEMENT
 constexpr const BSONObj* kUpdateNoIndexes = &BSONObj::kEmptyObject;

 /**
@@ -71,6 +78,7 @@ constexpr const BSONObj* kUpdateNoIndexes = &BSONObj::kEmptyObject;
  *
  * NOTE: It is up to caller to commit the indexes.
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 Status insertDocumentForBulkLoader(OperationContext* opCtx,
                                    const CollectionPtr& collection,
                                    const BSONObj& doc,
@@ -84,6 +92,7 @@ Status insertDocumentForBulkLoader(OperationContext* opCtx,
  *
  * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 Status insertDocuments(OperationContext* opCtx,
                        const CollectionPtr& collection,
                        std::vector<InsertStatement>::const_iterator begin,
@@ -97,19 +106,13 @@ Status insertDocuments(OperationContext* opCtx,
  *
  * 'opDebug' Optional argument. When not null, will be used to record operation statistics.
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 Status insertDocument(OperationContext* opCtx,
                       const CollectionPtr& collection,
                       const InsertStatement& doc,
                       OpDebug* opDebug,
                       bool fromMigrate = false);

-/**
- * Checks the 'failCollectionInserts' fail point at the beginning of an insert operation to see if
- * the insert should fail. Returns Status::OK if The function should proceed with the insertion.
- * Otherwise, the function should fail and return early with the error Status.
- */
-Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSONObj& firstDoc);
-
 /**
  * Updates the document @ oldLocation with newDoc.
  *
@@ -122,6 +125,7 @@ Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSONObj& firstDoc);
  * 'indexesAffected' is optional. When not null, will be set to whether any indexes were updated
  * 'opDebug' is argument. When not null, will be used to record operation statistics.
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 void updateDocument(OperationContext* opCtx,
                     const CollectionPtr& collection,
                     const RecordId& oldLocation,
@@ -137,6 +141,7 @@ void updateDocument(OperationContext* opCtx,
  * Sets 'args.updatedDoc' to the updated version of the document with damages applied, on success.
  * Returns the contents of the updated document.
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 StatusWith<BSONObj> updateDocumentWithDamages(OperationContext* opCtx,
                                               const CollectionPtr& collection,
                                               const RecordId& loc,
@@ -153,6 +158,7 @@ StatusWith<BSONObj> updateDocumentWithDamages(OperationContext* opCtx,
  * Deletes the document with the given RecordId from the collection. For a description of the
  * parameters, see the overloaded function below.
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 void deleteDocument(OperationContext* opCtx,
                     const CollectionPtr& collection,
                     StmtId stmtId,
@@ -179,6 +185,7 @@ void deleteDocument(OperationContext* opCtx,
  * unindexing.
  * @param retryableWrite: whether it's a retryable write, @see write_stage_common::isRetryableWrite
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 void deleteDocument(OperationContext* opCtx,
                     const CollectionPtr& collection,
                     Snapshotted<BSONObj> doc,
@@ -208,11 +215,11 @@ void deleteDocument(OperationContext* opCtx,
  * Returns the optime of the oplog entry created for the truncate operation.
  * Returns a null optime if oplog was not modified.
  */
+MONGO_MOD_NEEDS_REPLACEMENT
 repl::OpTime truncateRange(OperationContext* opCtx,
                            const CollectionPtr& collection,
                            const RecordId& minRecordId,
                            const RecordId& maxRecordId,
                            int64_t bytesDeleted,
                            int64_t docsDeleted);
-} // namespace collection_internal
-} // namespace mongo
+} // namespace mongo::collection_internal
@@ -34,9 +34,12 @@
 #include "mongo/db/shard_role/shard_catalog/collection.h"
 #include "mongo/db/storage/container.h"
 #include "mongo/db/storage/recovery_unit.h"
+#include "mongo/util/modules.h"

 #include <span>

+MONGO_MOD_PUBLIC;

 namespace mongo::container_write {

 /**
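Note: taken together, the header hunks use the module-annotation macros in three positions; the macro definitions come from mongo/util/modules.h and are not part of this diff. A condensed sketch of the placement pattern as it appears above, with placeholder declarations, assuming the in-tree macros are available:

#include "mongo/util/modules.h"

// File-level marker, written as a standalone statement near the top of a header
// (as in the collection_to_capped and container_write headers in this commit).
MONGO_MOD_PUBLIC;

namespace mongo::collection_internal {

// Per-declaration marker on the line before a function whose public visibility
// is, per the new header comment, only temporary.
MONGO_MOD_NEEDS_REPLACEMENT
void examplePlaceholderFunction();

// The same marker applied to an enum, placed between 'class' and the enum name.
enum class MONGO_MOD_NEEDS_REPLACEMENT ExamplePlaceholderEnum { kOff, kOn };

}  // namespace mongo::collection_internal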