mirror of https://github.com/mongodb/mongo
Revert "SERVER-113220 Improve observability for connections via the maintenance port (#44499)" (#45304)
Co-authored-by: auto-revert-processor <devprod-si-team@mongodb.com>
GitOrigin-RevId: 71166e6efcf4e19897808e85f2f18f27d1543902
parent dc1f11645d, commit 1afd1b3547
@@ -1,86 +0,0 @@
/**
 * Tests metrics and logging for the maintenance port.
 *
 * @tags: [
 *   featureFlagDedicatedPortForMaintenanceOperations,
 * ]
 */
import {describe, before, after, it} from "jstests/libs/mochalite.js";
import {checkLog} from "src/mongo/shell/check_log.js";

describe("Tests metrics and logging for connections via the maintenance port", function () {
    const logID = 22943;

    function assertDoesNotContain(conn, id, msg) {
        const logs = checkLog.getGlobalLog(conn);
        let containsLog = logs.some((log) => {
            if (log.search(`"id":${id},`) != -1) {
                return log.search(msg) != -1;
            } else {
                return false;
            }
        });
        assert.neq(containsLog, true, "Found log line when none should exist" + tojson(msg));
    }

    function assertMaintenancePortConnCountServerStatusMetricMatches(conn, expectedCount) {
        let currentCount;
        assert.soon(
            () => {
                let connectionMetrics = assert.commandWorked(conn.adminCommand({serverStatus: 1})).connections;
                currentCount = connectionMetrics.maintenance;
                return currentCount == expectedCount;
            },
            () => {
                return (
                    "Incorrect number of maintenance port connections: expected " +
                    expectedCount +
                    ", but serverStatus() reports " +
                    currentCount
                );
            },
        );
    }

    before(() => {
        this.conn = MongoRunner.runMongod({maintenancePort: allocatePort(), bind_ip: "127.0.0.1", useHostname: false});
        this.host = this.conn.hostNoPort;
        this.mainPort = this.conn.port;
        this.maintenancePort = this.conn.maintenancePort;
    });

    after(() => {
        MongoRunner.stopMongod(this.conn);
    });

    it("Check that normal connections don't log or increment the maintenance metrics", () => {
        let conn = new Mongo(this.host + ":" + this.mainPort);

        const msg = /"isMaintenance":"true"/;
        assertDoesNotContain(conn, logID, msg);
        assertMaintenancePortConnCountServerStatusMetricMatches(conn, 0);

        conn.close();
    });

    it("Check that maintenance port connections log and increment stats", () => {
        let mainConn = new Mongo(this.host + ":" + this.mainPort);
        let conns = [];
        for (let i = 1; i < 5; i++) {
            let newConn = new Mongo(this.host + ":" + this.maintenancePort);

            const msg = /"isMaintenance":"true"/;
            assert(
                checkLog.checkContainsWithCountJson(mainConn, logID, {"isMaintenance": true}, i),
                "Expecting to see " + i + " instances of log " + logID + " with " + msg,
            );
            assertMaintenancePortConnCountServerStatusMetricMatches(mainConn, i);

            conns.push(newConn);
        }
        conns.forEach((conn) => {
            conn.close();
        });
        assertMaintenancePortConnCountServerStatusMetricMatches(mainConn, 0);
    });
});

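For context: the deleted test above relied on the two observability signals this revert removes, the connections.maintenance counter in serverStatus and the connection-accepted log line (id 22943) tagged with "isMaintenance":"true". The shell-style sketch below (not part of the diff) condenses that check; it assumes a mongod already started with a maintenancePort and the feature flag enabled, and the host, mainPort, and maintenancePort variables are placeholders for the values the test derived from MongoRunner.

import {checkLog} from "src/mongo/shell/check_log.js";

// Placeholders: host/mainPort/maintenancePort stand in for the values produced by
// MongoRunner.runMongod({maintenancePort: allocatePort(), ...}) in the test above.
const adminConn = new Mongo(host + ":" + mainPort);
const maintConn = new Mongo(host + ":" + maintenancePort);

// serverStatus().connections.maintenance counted open maintenance-port connections.
assert.soon(() => {
    const connections = assert.commandWorked(adminConn.adminCommand({serverStatus: 1})).connections;
    return connections.maintenance == 1;
}, "expected one connection in serverStatus().connections.maintenance");

// Log id 22943 (connection accepted) carried "isMaintenance":"true" for maintenance-port sessions.
assert(checkLog.checkContainsWithCountJson(adminConn, 22943, {"isMaintenance": true}, 1));

maintConn.close();
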
@@ -2,7 +2,6 @@
 * Confirms that the log output for find and getMore are in the expected format.
 * @tags: [requires_profiling]
 */
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
import {getLatestProfilerEntry} from "jstests/libs/profiler.js";

function assertLogLineContains(conn, parts) {

@@ -58,16 +57,7 @@ cursor.next(); // Perform initial query and retrieve first document in batch.

let cursorid = getLatestProfilerEntry(testDB).cursorid;

let maintenancePortFFEnabled = FeatureFlagUtil.isPresentAndEnabled(testDB, "DedicatedPortForMaintenanceOperations");

let logLine = maintenancePortFFEnabled
    ? [
          '"msg":"Slow query","attr":{"type":"command",',
          '"isFromUserConnection":true,"isFromMaintenancePortConnection":false,"ns":"log_getmore.test","collectionType":"normal","appName":"MongoDB Shell",',
          '"command":{"find":"test","filter":{"a":{"$gt":0}},"skip":1,"batchSize":5,"limit":10,"singleBatch":false,"sort":{"a":1},"hint":{"a":1}',
          '"planCacheShapeHash":',
      ]
    : [
let logLine = [
    '"msg":"Slow query","attr":{"type":"command",',
    '"isFromUserConnection":true,"ns":"log_getmore.test","collectionType":"normal","appName":"MongoDB Shell",',
    '"command":{"find":"test","filter":{"a":{"$gt":0}},"skip":1,"batchSize":5,"limit":10,"singleBatch":false,"sort":{"a":1},"hint":{"a":1}',

@@ -93,15 +83,7 @@ function cursorIdToString(cursorId) {
    return cursorIdString.substring('NumberLong("'.length, cursorIdString.length - '")'.length);
}

logLine = maintenancePortFFEnabled
    ? [
          '"msg":"Slow query"',
          '"attr":{"type":"command","isFromUserConnection":true,"isFromMaintenancePortConnection":false,"ns":"log_getmore.test","collectionType":"normal","appName":"MongoDB Shell"',
          `"command":{"getMore":${cursorIdToString(cursorid)},"collection":"test","batchSize":5,`,
          '"originatingCommand":{"find":"test","filter":{"a":{"$gt":0}},"skip":1,"batchSize":5,"limit":10,"singleBatch":false,"sort":{"a":1},"hint":{"a":1}',
          '"planCacheShapeHash":',
      ]
    : [
logLine = [
    '"msg":"Slow query"',
    '"attr":{"type":"command","isFromUserConnection":true,"ns":"log_getmore.test","collectionType":"normal","appName":"MongoDB Shell"',
    `"command":{"getMore":${cursorIdToString(cursorid)},"collection":"test","batchSize":5,`,

@@ -117,14 +99,7 @@ cursorid = getLatestProfilerEntry(testDB).cursorid;

assert.eq(cursor.itcount(), 10);

logLine = maintenancePortFFEnabled
    ? [
          '"msg":"Slow query"',
          '"attr":{"type":"command","isFromUserConnection":true,"isFromMaintenancePortConnection":false,"ns":"log_getmore.test","collectionType":"normal","appName":"MongoDB Shell",',
          `"command":{"getMore":${cursorIdToString(cursorid)},"collection":"test"`,
          '"originatingCommand":{"aggregate":"test","pipeline":[{"$match":{"a":{"$gt":0}}}],"cursor":{"batchSize":0},"hint":{"a":1}',
      ]
    : [
logLine = [
    '"msg":"Slow query"',
    '"attr":{"type":"command","isFromUserConnection":true,"ns":"log_getmore.test","collectionType":"normal","appName":"MongoDB Shell",',
    `"command":{"getMore":${cursorIdToString(cursorid)},"collection":"test"`,

@@ -337,11 +337,6 @@ void CurOp::reportCurrentOpForClient(const boost::intrusive_ptr<ExpressionContex
    }

    infoBuilder->appendBool("isFromUserConnection", client->isFromUserConnection());
    if (gFeatureFlagDedicatedPortForMaintenanceOperations.isEnabled()) {
        infoBuilder->appendBool("isFromMaintenancePortConnection",
                                client->session() &&
                                    client->session()->isConnectedToMaintenancePort());
    }

    if (transport::ServiceExecutorContext::get(client)) {
        infoBuilder->append("threaded"_sd, true);

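The removed block above is what surfaced the new field in currentOp reporting: next to isFromUserConnection, each operation gained an isFromMaintenancePortConnection boolean while the feature flag was enabled. As a rough shell-side illustration (an assumption about where the field shows up, not part of the diff), a $currentOp aggregation would have exposed both fields per operation:

// Sketch: list the connection-provenance fields reported for each in-progress op.
// isFromMaintenancePortConnection is the field removed by this revert.
db.getSiblingDB("admin")
    .aggregate([{$currentOp: {allUsers: true}}])
    .forEach((op) => {
        jsTest.log(tojson({
            desc: op.desc,
            isFromUserConnection: op.isFromUserConnection,
            isFromMaintenancePortConnection: op.isFromMaintenancePortConnection,
        }));
    });
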
@@ -36,9 +36,7 @@
#include "mongo/db/operation_context_options_gen.h"
#include "mongo/db/pipeline/expression_context_for_test.h"
#include "mongo/db/query/query_test_service_context.h"
#include "mongo/db/server_feature_flags_gen.h"
#include "mongo/idl/server_parameter_test_controller.h"
#include "mongo/transport/mock_session.h"
#include "mongo/transport/transport_layer_mock.h"
#include "mongo/unittest/death_test.h"
#include "mongo/unittest/unittest.h"

@@ -873,99 +871,6 @@ TEST(CurOpTest, ShouldReportIsFromUserConnection) {
    ASSERT_TRUE(bsonObjUserConn.getField("isFromUserConnection").Bool());
}

class MockMaintenanceSession : public transport::MockSession {
public:
    explicit MockMaintenanceSession(transport::TransportLayer* tl) : MockSession(tl) {}

    bool isConnectedToMaintenancePort() const override {
        return true;
    }
};

TEST(CurOpTest, ShouldNotReportIsFromMaintenancePortConnectionWhenFFDisabled) {
    gFeatureFlagDedicatedPortForMaintenanceOperations.setForServerParameter(false);

    QueryTestServiceContext serviceContext;
    auto opCtx = serviceContext.makeOperationContext();
    auto client = serviceContext.getClient();

    // Mock a client with a user connection.
    transport::TransportLayerMock transportLayer;
    transportLayer.createSessionHook = [](transport::TransportLayer* tl) {
        return std::make_shared<MockMaintenanceSession>(tl);
    };
    auto clientMaintenanceConn = serviceContext.getServiceContext()->getService()->makeClient(
        "maintenanceConn", transportLayer.createSession());

    auto curop = CurOp::get(*opCtx);

    BSONObjBuilder curOpObj;
    BSONObjBuilder curOpObjMaintenanceConn;
    {
        stdx::lock_guard<Client> lk(*opCtx->getClient());
        auto nss = NamespaceString::createNamespaceString_forTest("db", "coll");

        // Serialization Context on expression context should be non-empty in
        // reportCurrentOpForClient.
        auto sc = SerializationContext(SerializationContext::Source::Command,
                                       SerializationContext::CallerType::Reply,
                                       SerializationContext::Prefix::ExcludePrefix);
        auto expCtx = make_intrusive<ExpressionContextForTest>(opCtx.get(), nss, sc);

        curop->reportCurrentOpForClient(expCtx, client, false, &curOpObj);
        curop->reportCurrentOpForClient(
            expCtx, clientMaintenanceConn.get(), false, &curOpObjMaintenanceConn);
    }
    auto bsonObj = curOpObj.done();
    auto bsonObjMaintenanceConn = curOpObjMaintenanceConn.done();

    ASSERT_FALSE(bsonObj.hasField("isFromMaintenancePortConnection"));
    ASSERT_FALSE(bsonObjMaintenanceConn.hasField("isFromMaintenancePortConnection"));
}

TEST(CurOpTest, ShouldReportIsFromMaintenancePortConnection) {
    gFeatureFlagDedicatedPortForMaintenanceOperations.setForServerParameter(true);

    QueryTestServiceContext serviceContext;
    auto opCtx = serviceContext.makeOperationContext();
    auto client = serviceContext.getClient();

    // Mock a client with a user connection.
    transport::TransportLayerMock transportLayer;
    transportLayer.createSessionHook = [](transport::TransportLayer* tl) {
        return std::make_shared<MockMaintenanceSession>(tl);
    };
    auto clientMaintenanceConn = serviceContext.getServiceContext()->getService()->makeClient(
        "maintenanceConn", transportLayer.createSession());

    auto curop = CurOp::get(*opCtx);

    BSONObjBuilder curOpObj;
    BSONObjBuilder curOpObjMaintenanceConn;
    {
        stdx::lock_guard<Client> lk(*opCtx->getClient());
        auto nss = NamespaceString::createNamespaceString_forTest("db", "coll");

        // Serialization Context on expression context should be non-empty in
        // reportCurrentOpForClient.
        auto sc = SerializationContext(SerializationContext::Source::Command,
                                       SerializationContext::CallerType::Reply,
                                       SerializationContext::Prefix::ExcludePrefix);
        auto expCtx = make_intrusive<ExpressionContextForTest>(opCtx.get(), nss, sc);

        curop->reportCurrentOpForClient(expCtx, client, false, &curOpObj);
        curop->reportCurrentOpForClient(
            expCtx, clientMaintenanceConn.get(), false, &curOpObjMaintenanceConn);
    }
    auto bsonObj = curOpObj.done();
    auto bsonObjMaintenanceConn = curOpObjMaintenanceConn.done();

    ASSERT_TRUE(bsonObj.hasField("isFromMaintenancePortConnection"));
    ASSERT_TRUE(bsonObjMaintenanceConn.hasField("isFromMaintenancePortConnection"));
    ASSERT_FALSE(bsonObj.getField("isFromMaintenancePortConnection").Bool());
    ASSERT_TRUE(bsonObjMaintenanceConn.getField("isFromMaintenancePortConnection").Bool());
}

TEST(CurOpTest, ElapsedTimeReflectsTickSource) {
    QueryTestServiceContext serviceContext;

@@ -40,7 +40,6 @@
#include "mongo/db/query/plan_summary_stats.h"
#include "mongo/db/repl/local_oplog_info.h"
#include "mongo/db/repl/read_concern_args.h"
#include "mongo/db/server_feature_flags_gen.h"
#include "mongo/db/shard_role/shard_catalog/raw_data_operation.h"
#include "mongo/logv2/log.h"
#include "mongo/rpc/metadata/client_metadata.h"

@@ -198,11 +197,6 @@ void OpDebug::report(OperationContext* opCtx,
    }

    pAttrs->add("isFromUserConnection", client && client->isFromUserConnection());
    if (gFeatureFlagDedicatedPortForMaintenanceOperations.isEnabled()) {
        pAttrs->add("isFromMaintenancePortConnection",
                    client && client->session() &&
                        client->session()->isConnectedToMaintenancePort());
    }
    pAttrs->addDeepCopy("ns", toStringForLogging(curop.getNSS()));
    pAttrs->addDeepCopy("collectionType", getCollectionType(opCtx, curop.getNSS()));

@@ -567,12 +561,6 @@ void OpDebug::append(OperationContext* opCtx,

    b.append("ns", curop.getNS());

    if (gFeatureFlagDedicatedPortForMaintenanceOperations.isEnabled()) {
        b.append("isFromMaintenancePortConnection",
                 opCtx->getClient() && opCtx->getClient()->session() &&
                     opCtx->getClient()->session()->isConnectedToMaintenancePort());
    }

    if (!omitCommand) {
        curop_bson_helpers::appendObjectTruncatingAsNecessary(
            "command",

@@ -30,7 +30,6 @@
#include "mongo/transport/asio/asio_session_manager.h"

#include "mongo/db/commands/server_status/server_status.h"
#include "mongo/db/server_feature_flags_gen.h"
#include "mongo/transport/hello_metrics.h"
#include "mongo/transport/service_executor.h"
#include "mongo/transport/service_executor_reserved.h"

@@ -115,9 +114,6 @@ void AsioSessionManager::appendStats(BSONObjBuilder* bob) const {
    }

    bob->append("loadBalanced", _loadBalancedConnections.get());
    if (gFeatureFlagDedicatedPortForMaintenanceOperations.isEnabled()) {
        bob->append("maintenance", _maintenancePortConnections.get());
    }
}

void AsioSessionManager::incrementLBConnections() {

@@ -128,14 +124,6 @@ void AsioSessionManager::decrementLBConnections() {
    _loadBalancedConnections.decrement();
}

void AsioSessionManager::incrementMaintenanceConnections() {
    _maintenancePortConnections.increment();
}

void AsioSessionManager::decrementMaintenanceConnections() {
    _maintenancePortConnections.decrement();
}

/**
 * In practice, we will never pass "isLoadBalancerPeer" on connect,
 * because the client hasn't performed a "hello: {loadBalancer: true} yet.

@@ -155,9 +143,6 @@ void AsioSessionManager::onClientConnect(Client* client) {
    if (session && session->isLoadBalancerPeer()) {
        incrementLBConnections();
    }
    if (session && session->isConnectedToMaintenancePort()) {
        incrementMaintenanceConnections();
    }
}

void AsioSessionManager::onClientDisconnect(Client* client) {

@@ -165,9 +150,6 @@ void AsioSessionManager::onClientDisconnect(Client* client) {
    if (session && session->isLoadBalancerPeer()) {
        decrementLBConnections();
    }
    if (session && session->isConnectedToMaintenancePort()) {
        decrementMaintenanceConnections();
    }
}

} // namespace mongo::transport

@@ -50,13 +50,6 @@ public:
    void incrementLBConnections();
    void decrementLBConnections();

    /**
     * Increments and decrements the count of total maintenance port connections.
     * Currently only implemented in asio_session_manager.
     */
    void incrementMaintenanceConnections();
    void decrementMaintenanceConnections();

protected:
    std::string getClientThreadName(const Session&) const override;
    void configureServiceExecutorContext(Client* client, bool isPrivilegedSession) const override;

@@ -65,7 +58,6 @@ protected:

private:
    Counter64 _loadBalancedConnections;
    Counter64 _maintenancePortConnections;
};

} // namespace mongo::transport

@@ -37,7 +37,6 @@

#include "mongo/db/auth/restriction_environment.h"
#include "mongo/db/multitenancy_gen.h"
#include "mongo/db/server_feature_flags_gen.h"
#include "mongo/db/server_options.h"
#include "mongo/logv2/log.h"
#include "mongo/platform/atomic_word.h"

@@ -68,8 +67,7 @@ struct ClientSummary {
      remote(c->session()->remote()),
      sourceClient(c->session()->getSourceRemoteEndpoint()),
      id(c->session()->id()),
      isLoadBalanced(c->session()->isConnectedToLoadBalancerPort()),
      isMaintenance(c->session()->isConnectedToMaintenancePort()) {}
      isLoadBalanced(c->session()->isConnectedToLoadBalancerPort()) {}

    friend logv2::DynamicAttributes logAttrs(const ClientSummary& m) {
        logv2::DynamicAttributes attrs;

@@ -78,9 +76,6 @@ struct ClientSummary {
        if (m.isLoadBalanced) {
            attrs.add("sourceClient", m.sourceClient);
        }
        if (gFeatureFlagDedicatedPortForMaintenanceOperations.isEnabled()) {
            attrs.add("isMaintenance", m.isMaintenance);
        }
        attrs.add("uuid", m.uuid);
        attrs.add("connectionId", m.id);

@@ -92,7 +87,6 @@ struct ClientSummary {
    HostAndPort sourceClient;
    SessionId id;
    bool isLoadBalanced;
    bool isMaintenance;
};

bool quiet() {