Josh Soref 2025-12-16 12:34:50 +08:00 committed by GitHub
commit 515e74b5ed
23 changed files with 34 additions and 34 deletions

View File

@@ -242,7 +242,7 @@ sentinel failover-timeout mymaster 180000
# If script exits with "1" the execution is retried later (up to a maximum
# number of times currently set to 10).
#
-# If script exits with "2" (or an higher value) the script execution is
+# If script exits with "2" (or a higher value) the script execution is
# not retried.
#
# If script terminates because it receives a signal the behavior is the same

View File

@@ -773,7 +773,7 @@ void signalFlushedDb(int dbid, int async) {
/* Return the set of flags to use for the emptyData() call for FLUSHALL
* and FLUSHDB commands.
*
-* sync: flushes the database in an sync manner.
+* sync: flushes the database in a sync manner.
* async: flushes the database in an async manner.
* no option: determine sync or async according to the value of lazyfree-lazy-user-flush.
*

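Since the hunk's comment spells out the selection rule (explicit async, explicit sync, or fall back to lazyfree-lazy-user-flush), here is a minimal, self-contained sketch of that rule in C. It is illustrative only: the type and function names below are invented for the example and are not the emptyData()/db.c implementation.

typedef enum { FLUSH_SYNC, FLUSH_ASYNC } flush_mode;

/* Pick a flush mode per the rule documented above (sketch, not db.c code). */
static flush_mode flushModeFor(int asked_sync, int asked_async,
                               int lazyfree_lazy_user_flush) {
    if (asked_async) return FLUSH_ASYNC;   /* explicit ASYNC option */
    if (asked_sync) return FLUSH_SYNC;     /* explicit SYNC option */
    /* No option given: follow the lazyfree-lazy-user-flush configuration. */
    return lazyfree_lazy_user_flush ? FLUSH_ASYNC : FLUSH_SYNC;
}
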
View File

@@ -794,7 +794,7 @@ void debugCommand(client *c) {
addReplyStatus(c, d);
sdsfree(d);
} else if (!strcasecmp(c->argv[1]->ptr, "digest-value") && c->argc >= 2) {
-/* DEBUG DIGEST-VALUE key key key ... key. */
+/* DEBUG DIGEST-VALUE key key ... key. */
addReplyArrayLen(c, c->argc - 2);
for (int j = 2; j < c->argc; j++) {
unsigned char digest[20];
@@ -975,7 +975,7 @@ void debugCommand(client *c) {
addReplyVerbatim(c, buf, strlen(buf), "txt");
} else {
addReplyError(c, "The value stored at the specified key is not "
"represented using an hash table");
"represented using a hash table");
}
} else if (!strcasecmp(c->argv[1]->ptr, "change-repl-id") && c->argc == 2) {
serverLog(LL_NOTICE, "Changing replication IDs after receiving DEBUG change-repl-id");

View File

@@ -474,7 +474,7 @@ static void evalGenericCommand(client *c, int evalsha) {
dictEntry *entry = dictFind(evalCtx.scripts, sha);
if (evalsha && entry == NULL) {
-/* Calling EVALSHA using an hash that was never added to the scripts
+/* Calling EVALSHA using a hash that was never added to the scripts
* cache. */
addReplyErrorObject(c, shared.noscripterr);
return;

View File

@@ -52,7 +52,7 @@
#define _XOPEN_SOURCE 700
/*
* On NetBSD, _XOPEN_SOURCE undefines _NETBSD_SOURCE and
-* thus hides inet_aton etc.
+* thus hides inet_aton, etc.
*/
#elif !defined(__NetBSD__)
#define _XOPEN_SOURCE

View File

@@ -1660,7 +1660,7 @@ invalid:
return C_ERR;
}
-/* PFADD var ele ele ele ... ele => :0 or :1 */
+/* PFADD var ele ele ... ele => :0 or :1 */
void pfaddCommand(client *c) {
robj *o = lookupKeyWrite(c->db, c->argv[1]);
struct hllhdr *hdr;

View File

@@ -104,7 +104,7 @@ static int IOJobQueue_isEmpty(const IOJobQueue *jq) {
/* Removes the next job from the given job queue by advancing the tail index.
* Called by the IO thread.
* The caller must ensure that the queue is not empty before calling this function.
-* This function uses relaxed memory order, so the caller need to use an release memory fence
+* This function uses relaxed memory order, so the caller need to use a release memory fence
* after calling this function to make sure the updated tail is visible to the producer (main thread). */
static void IOJobQueue_removeJob(IOJobQueue *jq) {
debugServerAssertWithInfo(NULL, NULL, !inMainThread());

View File

@@ -678,7 +678,7 @@ sds latencyCommandGenSparkeline(char *event, struct latencyTimeSeries *ts) {
* LATENCY DOCTOR: returns a human readable analysis of instance latency.
* LATENCY GRAPH: provide an ASCII graph of the latency of the specified event.
* LATENCY RESET: reset data of a specified event or all the data if no event provided.
-* LATENCY HISTOGRAM: return a cumulative distribution of latencies in the format of an histogram for the specified
+* LATENCY HISTOGRAM: return a cumulative distribution of latencies in the format of a histogram for the specified
* command names.
*/
void latencyCommand(client *c) {

View File

@@ -495,7 +495,7 @@ unsigned long lpLength(unsigned char *lp) {
* If the function is called against a badly encoded ziplist, so that there
* is no valid way to parse it, the function returns like if there was an
* integer encoded with value 12345678900000000 + <unrecognized byte>, this may
-* be an hint to understand that something is wrong. To crash in this case is
+* be a hint to understand that something is wrong. To crash in this case is
* not sensible because of the different requirements of the application using
* this lib.
*

View File

@@ -11257,7 +11257,7 @@ size_t VM_MallocSizeDict(ValkeyModuleDict *dict) {
return size;
}
-/* Return the a number between 0 to 1 indicating the amount of memory
+/* Return a number between 0 to 1 indicating the amount of memory
* currently used, relative to the server "maxmemory" configuration.
*
* * 0 - No memory limit configured.

View File

@@ -70,7 +70,7 @@ int HelloPushNative_ValkeyCommand(ValkeyModuleCtx *ctx, ValkeyModuleString **arg
return VALKEYMODULE_OK;
}
-/* HELLO.PUSH.CALL implements RPUSH using an higher level approach, calling
+/* HELLO.PUSH.CALL implements RPUSH using a higher level approach, calling
* a command instead of working with the key in a low level way. This
* approach is useful when you need to call commands that are not
* available as low level APIs, or when you don't need the maximum speed

View File

@@ -142,7 +142,7 @@ typedef struct rax {
* field for space concerns, so we use the auxiliary stack when needed. */
#define RAX_STACK_STATIC_ITEMS 32
typedef struct raxStack {
-void **stack; /* Points to static_items or an heap allocated array. */
+void **stack; /* Points to static_items or a heap allocated array. */
size_t items, maxitems; /* Number of items contained and total space. */
/* Up to RAXSTACK_STACK_ITEMS items we avoid to allocate on the heap
* and use this static array of pointers instead. */

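The struct comment describes the usual small-buffer pattern: the stack pointer starts out aimed at the inline static_items array and only moves to a heap allocation once more than RAX_STACK_STATIC_ITEMS entries are needed. A self-contained sketch of that pattern follows; the names (exampleStack, exampleStackPush) are invented for illustration and this is not the rax.c implementation.

#include <stdlib.h>
#include <string.h>

#define EXAMPLE_STATIC_ITEMS 32

typedef struct exampleStack {
    void **stack;            /* points to static_items or a heap array */
    size_t items, maxitems;  /* used slots and allocated slots */
    void *static_items[EXAMPLE_STATIC_ITEMS];
} exampleStack;

static void exampleStackInit(exampleStack *s) {
    s->stack = s->static_items;
    s->items = 0;
    s->maxitems = EXAMPLE_STATIC_ITEMS;
}

static int exampleStackPush(exampleStack *s, void *ptr) {
    if (s->items == s->maxitems) {
        size_t newmax = s->maxitems * 2;
        if (s->stack == s->static_items) {
            /* First overflow: migrate from the static array to the heap. */
            void **heap = malloc(sizeof(void *) * newmax);
            if (heap == NULL) return 0;
            memcpy(heap, s->static_items, sizeof(void *) * s->items);
            s->stack = heap;
        } else {
            void **heap = realloc(s->stack, sizeof(void *) * newmax);
            if (heap == NULL) return 0;
            s->stack = heap;
        }
        s->maxitems = newmax;
    }
    s->stack[s->items++] = ptr;
    return 1;
}
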
View File

@@ -1347,7 +1347,7 @@ void freeClientReplicationData(client *c) {
* The replica reports its version.
*
* - rdb-channel <1|0>
-* Used to identify the client as a replica's rdb connection in an dual channel
+* Used to identify the client as a replica's rdb connection in a dual channel
* sync session.
*
* - set-rdb-client-id <client-id>
@@ -4125,7 +4125,7 @@ void syncWithPrimary(connection *conn) {
}
}
-/* If the primary is in an transient error, we should try to PSYNC
+/* If the primary is in a transient error, we should try to PSYNC
* from scratch later, so go to the error path. This happens when
* the server is loading the dataset or is not connected with its
* primary and so forth. */
@@ -5207,7 +5207,7 @@ void replicationCron(void) {
/* Second, send a newline to all the replicas in pre-synchronization
* stage, that is, replicas waiting for the primary to create the RDB file.
*
-* Also send the a newline to all the chained replicas we have, if we lost
+* Also send a newline to all the chained replicas we have, if we lost
* connection from our primary, to keep the replicas aware that their
* primary is online. This is needed since sub-replicas only receive proxied
* data from top-level primaries, so there is no explicit pinging in order

View File

@@ -1481,7 +1481,7 @@ sentinelValkeyInstance *getSentinelValkeyInstanceByAddrAndRunID(dict *instances,
serverAssert(addr || runid); /* User must pass at least one search param. */
if (addr != NULL) {
/* Try to resolve addr. If hostnames are used, we're accepting an ri_addr
-* that contains an hostname only and can still be matched based on that.
+* that contains a hostname only and can still be matched based on that.
*/
ri_addr = createSentinelAddr(addr, port, 1);
if (!ri_addr) return NULL;
@@ -3746,7 +3746,7 @@ void sentinelCommand(client *c) {
" failover.",
"CONFIG SET param value [param value ...]",
" Set a global Sentinel configuration parameter.",
"CONFIG GET <param> [param param param ...]",
"CONFIG GET param [param ...]",
" Get global Sentinel configuration parameter.",
"DEBUG [<param> <value> ...]",
" Show a list of configurable time parameters and their values (milliseconds).",

View File

@@ -407,7 +407,7 @@ sds setTypeNextObject(setTypeIterator *si) {
/* Return random element from a non empty set.
* The returned element can be an int64_t value if the set is encoded
* as an "intset" blob of integers, or an string.
* as an "intset" blob of integers, or a string.
*
* The caller provides three pointers to be populated with the right
* object. The return value of the function is the object->encoding

View File

@@ -45,7 +45,7 @@ rax *TrackingTable = NULL;
rax *PrefixTable = NULL;
uint64_t TrackingTableTotalItems = 0; /* Total number of IDs stored across
the whole tracking table. This gives
-an hint about the total memory we
+a hint about the total memory we
are using server side for CSC. */
robj *TrackingChannelName;

View File

@@ -348,7 +348,7 @@ int fuzzTestCluster(size_t count, double addprob, double remprob) {
/* Generate a random key by altering our template key. */
/* With a given probability, let's use a common prefix so that there
-* is a subset of keys that have an higher percentage of probability
+* is a subset of keys that have a higher percentage of probability
* of being hit again and again. */
size_t commonprefix = genrand64_int64() & 0xf;
if (commonprefix == 0) memcpy(key + 10, "2e68e5", 6);

View File

@@ -436,9 +436,9 @@ err:
#define MULTIPLIER_10E16 10000000000000000ULL
/**
-* Convert a string into an signed 64-bit integer using AVX-512 instructions.
+* Convert a string into a signed 64-bit integer using AVX-512 instructions.
*
-* This function parses a string of digits and converts it into an signed
+* This function parses a string of digits and converts it into a signed
* 64-bit integer. It leverages AVX-512 SIMD instructions for optimized
* processing and performs strict validation to ensure the input string
* represents a valid signed integer.

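As a reference for what parsing a string of digits into a signed 64-bit integer with strict validation amounts to, a plain scalar parser is sketched below. It is illustrative only: the routine documented above does the same job with AVX-512, and details such as handling of a leading '+' or of leading zeros may differ in the real code.

#include <stddef.h>
#include <stdint.h>

/* Parse s[0..len) as a signed base-10 64-bit integer.
 * Returns 1 and stores the value in *out on success, 0 on invalid input
 * or overflow. Accumulates in an unsigned variable so that INT64_MIN can
 * be reached without signed overflow. */
static int parseInt64(const char *s, size_t len, int64_t *out) {
    size_t i = 0;
    int neg = 0;
    uint64_t v = 0;

    if (len == 0) return 0;
    if (s[0] == '-') {
        neg = 1;
        i = 1;
        if (len == 1) return 0;                      /* "-" alone is invalid */
    }
    for (; i < len; i++) {
        if (s[i] < '0' || s[i] > '9') return 0;      /* strict: digits only */
        uint64_t d = (uint64_t)(s[i] - '0');
        if (v > (UINT64_MAX - d) / 10) return 0;     /* uint64 overflow */
        v = v * 10 + d;
    }
    if (neg) {
        if (v > (uint64_t)INT64_MAX + 1) return 0;   /* below INT64_MIN */
        *out = (v == (uint64_t)INT64_MAX + 1) ? INT64_MIN : -(int64_t)v;
    } else {
        if (v > (uint64_t)INT64_MAX) return 0;       /* above INT64_MAX */
        *out = (int64_t)v;
    }
    return 1;
}
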
View File

@@ -3756,7 +3756,7 @@ clusterManagerCommandDef clusterManagerCommands[] = {
{"add-node", clusterManagerCommandAddNode, 2, "new_host:new_port existing_host:existing_port",
"replica,primaries-id <arg>"},
{"del-node", clusterManagerCommandDeleteNode, 2, "host:port node_id", NULL},
{"call", clusterManagerCommandCall, -2, "host:port command arg arg .. arg", "only-primaries,only-replicas"},
{"call", clusterManagerCommandCall, -2, "host:port command arg arg ... arg", "only-primaries,only-replicas"},
{"set-timeout", clusterManagerCommandSetTimeout, 2, "host:port milliseconds", NULL},
{"import", clusterManagerCommandImport, 1, "host:port",
"from <arg>,from-user <arg>,from-pass <arg>,from-askpass,copy,replace"},

View File

@@ -534,7 +534,7 @@ proc end_tests {} {
# The "S" command is used to interact with the N-th Sentinel.
# The general form is:
#
-# S <sentinel-id> command arg arg arg ...
+# S <sentinel-id> command arg [arg ...]
#
# Example to ping the Sentinel 0 (first instance): S 0 PING
proc S {n args} {

View File

@@ -51,9 +51,9 @@ proc valkey_cluster {nodes {tls -1}} {
# Totally reset the slots / nodes state for the client, calls
# CLUSTER NODES in the first startup node available, populates the
-# list of nodes ::valkey_cluster::nodes($id) with an hash mapping node
+# list of nodes ::valkey_cluster::nodes($id) with a hash mapping node
# ip:port to a representation of the node (another hash), and finally
-# maps ::valkey_cluster::slots($id) with an hash mapping slot numbers
+# maps ::valkey_cluster::slots($id) with a hash mapping slot numbers
# to node IDs.
#
# This function is called when a new Cluster client is initialized
@@ -116,7 +116,7 @@ proc ::valkey_cluster::__method__refresh_nodes_map {id} {
set tls $::valkey_cluster::tls($id)
catch {set link [valkey $host $port 0 $tls]}
-# Build this node description as an hash.
+# Build this node description as a hash.
set node [dict create \
id $nodeid \
internal_id $id \

View File

@@ -12,7 +12,7 @@ start_server {tags {"modules"}} {
}
r hello $proto
test "RESP$proto: RM_ReplyWithString: an string reply" {
test "RESP$proto: RM_ReplyWithString: a string reply" {
# RedisString
set string [r rw.string "Redis"]
assert_equal "Redis" $string
@@ -21,7 +21,7 @@ start_server {tags {"modules"}} {
assert_equal "A simple string" $string
}
test "RESP$proto: RM_ReplyWithBigNumber: an string reply" {
test "RESP$proto: RM_ReplyWithBigNumber: a string reply" {
assert_equal "123456778901234567890" [r rw.bignumber "123456778901234567890"]
}
@@ -82,7 +82,7 @@ start_server {tags {"modules"}} {
assert_equal {0 1 2 3 4} [r rw.array 5]
}
test "RESP$proto: RM_ReplyWithMap: an map reply" {
test "RESP$proto: RM_ReplyWithMap: a map reply" {
set res [r rw.map 3]
if {$proto == 2} {
assert_equal {0 0 1 1.5 2 3} $res
@@ -91,11 +91,11 @@ start_server {tags {"modules"}} {
}
}
test "RESP$proto: RM_ReplyWithSet: an set reply" {
test "RESP$proto: RM_ReplyWithSet: a set reply" {
assert_equal {0 1 2} [r rw.set 3]
}
test "RESP$proto: RM_ReplyWithAttribute: an set reply" {
test "RESP$proto: RM_ReplyWithAttribute: a set reply" {
if {$proto == 2} {
catch {[r rw.attribute 3]} e
assert_match "Attributes aren't supported by RESP 2" $e

View File

@@ -243,7 +243,7 @@ start_server {tags {"hash"}} {
list [r hlen bighash]
} {1024}
-test {Is the big hash encoded with an hash table?} {
+test {Is the big hash encoded with a hash table?} {
assert_encoding hashtable bighash
}