Josh Soref 2025-12-16 12:35:06 +08:00 committed by GitHub
commit 9fea925f5a
19 changed files with 25 additions and 25 deletions

View File

@@ -24,7 +24,7 @@
* - **zmalloc**: An abstraction layer over the memory allocator, providing
* a uniform allocation interface to the application code. It can delegate
* to various underlying allocators (e.g., libc, tcmalloc, jemalloc, or others).
- * It is not dependant on defrag implementation logic and it's possible to use jemalloc
+ * It is not dependent on defrag implementation logic and it's possible to use jemalloc
* version that does not support defrag.
* - **allocator_defrag**: This file contains allocator-specific logic for
* defragmentation, invoked from `defrag.c` when memory defragmentation is needed.

View File

@@ -810,7 +810,7 @@ int anetPipe(int fds[2], int read_flags, int write_flags) {
* There is no harm to set O_CLOEXEC to prevent fd leaks. */
pipe_flags = O_CLOEXEC | (read_flags & write_flags);
if (pipe2(fds, pipe_flags)) {
- /* Fail on real failures, and fallback to simple pipe if pipe2 is unsupported. */
+ /* Fail on real failures, and fall back to simple pipe if pipe2 is unsupported. */
if (errno != ENOSYS && errno != EINVAL) return -1;
pipe_flags = 0;
} else {
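
For reference, a minimal sketch of the fall-back pattern this comment describes, assuming only POSIX/Linux pipe2(), pipe() and fcntl(); the helper name is illustrative, not the function in the diff:

    #define _GNU_SOURCE
    #include <unistd.h>
    #include <fcntl.h>
    #include <errno.h>

    /* Try pipe2() first; if the kernel or libc lacks it (ENOSYS) or rejects
     * the flags (EINVAL), fall back to pipe() and set FD_CLOEXEC afterwards.
     * Any other errno is a real failure and is reported to the caller. */
    static int make_pipe_cloexec(int fds[2]) {
        if (pipe2(fds, O_CLOEXEC) == 0) return 0;
        if (errno != ENOSYS && errno != EINVAL) return -1;
        if (pipe(fds) == -1) return -1;
        /* Non-atomic fallback: a fork() between pipe() and fcntl() could
         * still leak the descriptors, which is why pipe2() is preferred. */
        fcntl(fds[0], F_SETFD, FD_CLOEXEC);
        fcntl(fds[1], F_SETFD, FD_CLOEXEC);
        return 0;
    }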

View File

@@ -1120,7 +1120,7 @@ clusterNode *getNodeByQuery(client *c, int *error_code) {
*
* 1. Go over all the keys to count existing keys and missing keys that we
* need for TRYAGAIN and ASK redirects.
- * 2. Check for some commands that are forbiddedn during slot migration.
+ * 2. Check for some commands that are forbidden during slot migration.
*
* Skip this if we're not importing or migrating this slot. */
if (!migrating_slot && !importing_slot) goto after_checking_each_key;

View File

@@ -234,7 +234,7 @@ uint64_t crcspeed64little(uint64_t little_table[8][256], uint64_t crc1,
MERGE_END(next2, 2);
}
/* We fall through here to handle our <CRC64_DUAL_CUTOFF inputs, and for any trailing
- * bytes that wasn't evenly divisble by 16 or 24 above. */
+ * bytes that wasn't evenly divisible by 16 or 24 above. */
/* fast processing, 8 bytes (aligned!) per loop */
while (len >= 8) {

View File

@@ -243,7 +243,7 @@ int getKeySlot(sds key) {
*
* This optimization is only used when current_client flag `CLIENT_EXECUTING_COMMAND` is set.
* It only gets set during the execution of command under `call` method. Other flows requesting
- * the key slot would fallback to keyHashSlot.
+ * the key slot would fall back to keyHashSlot.
*
* Modules and scripts executed on the primary may get replicated as multi-execs that operate on multiple slots,
* so we must always recompute the slot for commands coming from the primary.
@@ -2410,7 +2410,7 @@ int getKeysFromCommandWithSpecs(struct serverCommand *cmd,
int ret = getKeysUsingKeySpecs(cmd, argv, argc, search_flags, result);
if (ret >= 0) return ret;
/* If the specs returned with an error (probably an INVALID or INCOMPLETE spec),
- * fallback to the callback method. */
+ * fall back to the callback method. */
}
/* Resort to getkeys callback methods. */
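
The keyHashSlot fall back mentioned in the first hunk above is the hashtag-aware slot computation from the cluster specification. A condensed sketch, assuming a crc16() (the CCITT CRC-16 the cluster uses) declared elsewhere; the helper name is illustrative:

    /* Hash only the {...} section if a non-empty one exists, otherwise the
     * whole key; the cluster has 16384 slots, hence the & 16383 mask. */
    static unsigned int key_hash_slot(const char *key, size_t keylen) {
        size_t s, e;
        for (s = 0; s < keylen; s++)
            if (key[s] == '{') break;
        if (s == keylen) return crc16(key, keylen) & 16383;
        for (e = s + 1; e < keylen; e++)
            if (key[e] == '}') break;
        if (e == keylen || e == s + 1) return crc16(key, keylen) & 16383;
        return crc16(key + s + 1, e - s - 1) & 16383;
    }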

View File

@@ -6201,7 +6201,7 @@ void VM_SetContextUser(ValkeyModuleCtx *ctx, const ValkeyModuleUser *user) {
/* Returns an array of robj pointers, by parsing the format specifier "fmt" as described for
* the VM_Call(), VM_Replicate() and other module APIs. Populates *argcp with the number of
- * items (which equals to the length of the allocated argv).
+ * items (which equals the length of the allocated argv).
*
* The integer pointed by 'flags' is populated with flags according
* to special modifiers in "fmt".
@@ -11334,7 +11334,7 @@ void VM_ScanCursorDestroy(ValkeyModuleScanCursor *cursor) {
* void scan_callback(ValkeyModuleCtx *ctx, ValkeyModuleString *keyname,
* ValkeyModuleKey *key, void *privdata);
*
- * - `ctx`: the module context provided to for the scan.
+ * - `ctx`: the module context provided for the scan.
* - `keyname`: owned by the caller and need to be retained if used after this
* function.
* - `key`: holds info on the key and value, it is provided as best effort, in
@@ -11441,7 +11441,7 @@ static void moduleScanKeyHashtableCallback(void *privdata, void *entry) {
*
* void scan_callback(ValkeyModuleKey *key, ValkeyModuleString* field, ValkeyModuleString* value, void *privdata);
*
- * - key - the key context provided to for the scan.
+ * - key - the key context provided for the scan.
* - field - field name, owned by the caller and need to be retained if used
* after this function.
* - value - value string or NULL for set type, owned by the caller and need to
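
A hedged usage sketch built around the keyspace-scan callback signature shown above, assuming the usual ScanCursorCreate/Scan/ScanCursorDestroy entry points of the module API; the counting logic itself is illustrative:

    #include "valkeymodule.h"

    /* Called once per key visited by the scan; privdata carries the counter. */
    static void count_cb(ValkeyModuleCtx *ctx, ValkeyModuleString *keyname,
                         ValkeyModuleKey *key, void *privdata) {
        VALKEYMODULE_NOT_USED(ctx);
        VALKEYMODULE_NOT_USED(keyname);
        VALKEYMODULE_NOT_USED(key); /* best effort, may be NULL */
        (*(long long *)privdata)++;
    }

    static long long count_keys(ValkeyModuleCtx *ctx) {
        long long n = 0;
        ValkeyModuleScanCursor *cursor = ValkeyModule_ScanCursorCreate();
        /* Scan returns nonzero while there is more keyspace left to visit. */
        while (ValkeyModule_Scan(ctx, cursor, count_cb, &n)) {
        }
        ValkeyModule_ScanCursorDestroy(cursor);
        return n;
    }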

View File

@@ -765,8 +765,8 @@ void addReplySds(client *c, sds s) {
* client buffer, trying the static buffer initially, and using the string
* of objects if not possible.
*
- * It is efficient because does not create an SDS object nor an Object
- * if not needed. The object will only be created by calling
+ * It is efficient because it does not needlessly create an SDS object
+ * or an Object. The object will only be created by calling
* _addReplyProtoToList() if we fail to extend the existing tail object
* in the list of objects. */
void addReplyProto(client *c, const char *s, size_t len) {
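
A brief usage note, not taken from this diff: the function is typically handed raw protocol bytes, e.g.:

    addReplyProto(c, "+OK\r\n", 5); /* append 5 raw protocol bytes to the client's reply buffers */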

View File

@@ -2126,7 +2126,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb, sds key, int dbid, int *error) {
else if (deep_integrity_validation) {
/* In this mode, we need to guarantee that the server won't crash
* later when the ziplist is converted to a hashtable.
- * Create a set (hashtable with no values) to for a dup search.
+ * Create a set (hashtable with no values) for a dup search.
* We can dismiss it as soon as we convert the ziplist to a hash. */
dupSearchHashtable = hashtableCreate(&setHashtableType);
}

View File

@@ -754,7 +754,7 @@ dictType migrateCacheDictType = {
NULL /* allow to expand */
};
- /* Dict for for case-insensitive search using null terminated C strings.
+ /* Dict for case-insensitive search using null terminated C strings.
* The keys stored in dict are sds though. */
dictType stringSetDictType = {
dictCStrCaseHash, /* hash function */
@@ -765,7 +765,7 @@ dictType stringSetDictType = {
NULL /* allow to expand */
};
- /* Dict for for case-insensitive search using null terminated C strings.
+ /* Dict for case-insensitive search using null terminated C strings.
* The key and value do not have a destructor. */
dictType externalStringType = {
dictCStrCaseHash, /* hash function */

View File

@@ -995,7 +995,7 @@ static int connTLSWritev(connection *conn_, const struct iovec *iov, int iovcnt)
* However, in case when last write failed we still have to repeat sending last_failed_write_data_len
* bytes. Because of openssl implementation we cannot repeat sending writes with length smaller than
* the last failed write (https://docs.openssl.org/master/man3/SSL_write) so in case the first io buffer
- * does not provide at least the same amount of bytes as previous failed write, we will have to fallback to
+ * does not provide at least the same amount of bytes as previous failed write, we will have to fall back to
* memory copy to a static buffer before calling SSL_write. */
if (iov_bytes_len > NET_MAX_WRITES_PER_EVENT && iovcnt > 0 && iov[0].iov_len >= conn->last_failed_write_data_len) {
ssize_t tot_sent = 0;
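
The OpenSSL constraint the comment refers to, sketched with a plain SSL_write() call (not the actual connTLSWritev logic; SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER would relax the same-buffer rule):

    #include <openssl/ssl.h>

    /* After SSL_write() fails with SSL_ERROR_WANT_WRITE, the retry must cover
     * at least the same bytes with the same length; shrinking the request
     * below the failed length is not allowed, hence the copy-to-static-buffer
     * fallback described above when the first iov is too short. */
    static int tls_write_once(SSL *ssl, const void *buf, int len) {
        int ret = SSL_write(ssl, buf, len);
        if (ret > 0) return ret;
        if (SSL_get_error(ssl, ret) == SSL_ERROR_WANT_WRITE)
            return 0;   /* caller must retry later with identical buf/len */
        return -1;      /* hard error */
    }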

View File

@@ -865,7 +865,7 @@ int d2string(char *buf, size_t len, double value) {
*/
int fixedpoint_d2string(char *dst, size_t dstlen, double dvalue, int fractional_digits) {
if (fractional_digits < 1 || fractional_digits > 17) goto err;
- /* min size of 2 ( due to 0. ) + n fractional_digitits + \0 */
+ /* min size of 2 ( due to 0. ) + n fractional_digits + \0 */
if ((int)dstlen < (fractional_digits + 3)) goto err;
if (dvalue == 0) {
dst[0] = '0';
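
A worked instance of the size check above: with fractional_digits = 4 the smallest valid output is "0.0000", i.e. 2 bytes for "0.", 4 fractional digits and a terminating NUL, which is exactly fractional_digits + 3. The return-value handling below is an assumption, not taken from this diff:

    char buf[4 + 3];
    /* Assuming the function returns the number of characters written and 0
     * on error, as its sibling d2string() does. */
    if (fixedpoint_d2string(buf, sizeof(buf), 0.1234, 4) > 0) {
        /* buf now holds "0.1234" */
    }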

View File

@@ -54,7 +54,7 @@
* This should be the size of the buffer for sprintf with %f */
#define MAX_DOUBLE_CHARS 400
- /* The maximum number of characters needed to for d2string/fpconv_dtoa call.
+ /* The maximum number of characters needed for d2string/fpconv_dtoa call.
* Since it uses %g and not %f, some 40 chars should be enough. */
#define MAX_D2STRING_CHARS 128

View File

@@ -47,7 +47,7 @@ foreach_valkey_id id {
}
# Now test the migration to a master which used to be a slave, after
- # a failver.
+ # a failover.
source "../tests/includes/init-tests.tcl"

View File

@@ -148,7 +148,7 @@ int FragDefrag(ValkeyModuleDefragCtx *ctx, ValkeyModuleString *key, void **value
int dbid = ValkeyModule_GetDbIdFromDefragCtx(ctx);
ValkeyModule_Assert(dbid != -1);
- /* Attempt to get cursor, validate it's what we're exepcting */
+ /* Attempt to get cursor, validate it's what we're expecting */
if (ValkeyModule_DefragCursorGet(ctx, &i) == VALKEYMODULE_OK) {
if (i > 0) datatype_resumes++;
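
A hedged sketch of the resume pattern this test exercises, assuming the usual DefragCursorGet/DefragCursorSet/DefragShouldStop entry points; the two item helpers are hypothetical:

    static int my_defrag_cb(ValkeyModuleDefragCtx *ctx, ValkeyModuleString *key, void **value) {
        unsigned long cursor = 0;
        VALKEYMODULE_NOT_USED(key);
        /* Restore the saved position if a previous pass was interrupted. */
        if (ValkeyModule_DefragCursorGet(ctx, &cursor) != VALKEYMODULE_OK) cursor = 0;
        while (has_more_items(*value, cursor)) {             /* hypothetical helper */
            defrag_one_item(ctx, value, cursor++);           /* hypothetical helper */
            if (ValkeyModule_DefragShouldStop(ctx)) {
                ValkeyModule_DefragCursorSet(ctx, cursor);
                return 1;   /* more work left; we will be called again */
            }
        }
        return 0;           /* fully defragmented */
    }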

View File

@@ -414,7 +414,7 @@ proc run_external_server_test {code overrides} {
dict set saved_config $param [lindex [r config get $param] 1]
r config set $param $val
- # If we enable appendonly, wait for for rewrite to complete. This is
+ # If we enable appendonly, wait for rewrite to complete. This is
# required for tests that begin with a bg* command which will fail if
# the rewriteaof operation is not completed at this point.
if {$param == "appendonly" && $val == "yes"} {

View File

@@ -257,7 +257,7 @@ start_cluster 3 1 {tags {external:skip cluster} overrides {cluster-ping-interval
[s 0 role] eq {slave} &&
[s -3 role] eq {master}
} else {
fail "The third falover does not happen"
fail "The third failover does not happen"
}
wait_for_cluster_propagation
}

View File

@@ -119,7 +119,7 @@ test "Node #10 should eventually replicate node #5" {
} ;# start_cluster
# Create a cluster with 3 master and 15 slaves, so that we have 5
- # slaves for eatch master.
+ # slaves for each master.
start_cluster 3 15 {tags {external:skip cluster}} {
test "Cluster is up" {

View File

@@ -488,7 +488,7 @@ start_server {
r XADD "\{lestream\}2" 2-0 k2 v5
r XADD "\{lestream\}2" 3-0 k3 v6
- # read last element from 3 streams (2 with enetries, 1 non-existent)
+ # read last element from 3 streams (2 with entries, 1 non-existent)
# verify the last element from the two existing streams were returned
set res [r XREAD STREAMS "\{lestream\}1" "\{lestream\}2" "\{lestream\}3" + + +]
assert_equal $res {{{{lestream}1} {{3-0 {k3 v3}}}} {{{lestream}2} {{3-0 {k3 v6}}}}}

View File

@@ -22,11 +22,11 @@ start_server {} {
test {WAIT out of range timeout (milliseconds)} {
# Timeout is parsed as milliseconds by getLongLongFromObjectOrReply().
# Verify we get out of range message if value is behind LLONG_MAX
- # (decimal value equals to 0x8000000000000000)
+ # (decimal value equals 0x8000000000000000)
assert_error "*or out of range*" {$master wait 2 9223372036854775808}
# expected to fail by later overflow condition after addition
- # of mstime(). (decimal value equals to 0x7FFFFFFFFFFFFFFF)
+ # of mstime(). (decimal value equals 0x7FFFFFFFFFFFFFFF)
assert_error "*timeout is out of range*" {$master wait 2 9223372036854775807}
assert_error "*timeout is negative*" {$master wait 2 -1}