mirror of https://github.com/valkey-io/valkey
Re-enable hash downsize rehashing during fork (partial #12276)
This regression was introduced by the recent change in #11692, which prevented
down-sizing rehashing while a fork is in progress.
## Solution
1. Fix the rehashing code so that, just as up-sizing rehashing is allowed during a fork
when the ratio is extreme, down-sizing rehashing is allowed under the same condition.
Co-authored-by: Oran Agra <oran@redislabs.com>
This is a partial cherry pick of:
(cherry picked from commit b00a235186)
(cherry picked from commit d4c37320382edb342292a3e30250d46896a12016)
This commit is contained in:
parent
4fe8a0af3f
commit
a1589c3769
|
|
@ -187,9 +187,12 @@ int dictExpand(dict *d, unsigned long size)
|
|||
* work it does would be unbound and the function may block for a long time. */
|
||||
int dictRehash(dict *d, int n) {
|
||||
int empty_visits = n*10; /* Max number of empty buckets to visit. */
|
||||
unsigned long s0 = d->ht[0].size;
|
||||
unsigned long s1 = d->ht[1].size;
|
||||
if (dict_can_resize == DICT_RESIZE_FORBID || !dictIsRehashing(d)) return 0;
|
||||
if (dict_can_resize == DICT_RESIZE_AVOID &&
|
||||
(d->ht[1].size / d->ht[0].size < dict_force_resize_ratio))
|
||||
((s1 > s0 && s1 / s0 < dict_force_resize_ratio) ||
|
||||
(s1 < s0 && s0 / s1 < dict_force_resize_ratio)))
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
|
@ -1073,7 +1076,9 @@ size_t _dictGetStatsHt(char *buf, size_t bufsize, dictht *ht, int tableid) {
|
|||
|
||||
if (ht->used == 0) {
|
||||
return snprintf(buf,bufsize,
|
||||
"No stats available for empty dictionaries\n");
|
||||
"Hash table %d stats (%s):\n"
|
||||
"No stats available for empty dictionaries\n",
|
||||
tableid, (tableid == 0) ? "main hash table" : "rehashing target");
|
||||
}
|
||||
|
||||
/* Compute stats. */
|
||||
|
|
|
|||
|
|
@ -541,3 +541,34 @@ proc get_child_pid {idx} {
|
|||
|
||||
return $child_pid
|
||||
}
|
||||
|
||||
# Apply CONFIG SET for a parameter. Recognized options:
#   "mayfail" - tolerate a CONFIG SET failure (logged when $::verbose is on)
# Any other option name raises an error.
proc config_set {param value {options {}}} {
    set tolerate_failure 0
    foreach option $options {
        if {$option eq "mayfail"} {
            set tolerate_failure 1
        } else {
            error "Unknown option $option"
        }
    }

    # Nothing more to do when the CONFIG SET succeeds.
    if {![catch {r config set $param $value} err]} {
        return
    }
    if {!$tolerate_failure} {
        error $err
    }
    if {$::verbose} {
        puts "Ignoring CONFIG SET $param $value failure: $err"
    }
}
|
||||
|
||||
# Set a config parameter to a new value and return its previous value,
# so the caller can restore it after the test. Options are forwarded
# to config_set (e.g. "mayfail").
proc config_get_set {param value {options {}}} {
    set previous [lindex [r config get $param] 1]
    config_set $param $value $options
    return $previous
}
|
||||
|
||||
|
|
|
|||
|
|
@ -667,6 +667,85 @@ start_server {
|
|||
}
|
||||
}
|
||||
|
||||
# Return 1 while the key's hash table is mid-rehash, 0 otherwise.
# Detection relies on DEBUG HTSTATS-KEY printing a "rehashing target"
# table only when a rehash is in progress.
proc is_rehashing {myset} {
    set stats [r debug HTSTATS-KEY $myset]
    return [string match {*rehashing target*} $stats]
}
|
||||
|
||||
# Remove the first n members returned by SSCAN from the given set.
# Members are collected by iterating the SSCAN cursor until either n
# members have been gathered or the cursor wraps back to 0 (full scan),
# then removed with a single SREM.
proc rem_hash_set_top_N {myset n} {
    set cursor 0
    set members {}
    set enough 0
    while 1 {
        set res [r sscan $myset $cursor]
        set cursor [lindex $res 0]
        set k [lindex $res 1]
        foreach m $k {
            lappend members $m
            if {[llength $members] >= $n} {
                set enough 1
                break
            }
        }
        # Stop once we have enough members or the scan completed.
        if {$enough || $cursor == 0} {
            break
        }
    }
    # Guard: SREM with zero members is an arity error; skip when nothing
    # was collected (n <= 0 or the set is empty).
    if {[llength $members] > 0} {
        r srem $myset {*}$members
    }
}
|
||||
|
||||
test "SRANDMEMBER with a dict containing long chain" {
    set origin_save [config_get_set save ""]
    set origin_max_is [config_get_set set-max-intset-entries 0]
    set origin_save_delay [config_get_set rdb-key-save-delay 2147483647]

    # 1) Build a 100000-member set so it is backed by a real hash table.
    set members {}
    for {set idx 0} {$idx < 100000} {incr idx} {
        lappend members [format "m:%d" $idx]
    }
    create_set myset $members

    # 2) Drive any initial rehashing to completion before the test proper.
    while {[is_rehashing myset]} {
        r srandmember myset 100
    }

    # 3) Start a (stalled) BGSAVE to inhibit rehashing, then shrink the
    #    set down to exactly 500 members.
    r bgsave
    rem_hash_set_top_N myset [expr {[r scard myset] - 500}]
    assert_equal [r scard myset] 500

    # 4) Kill the RDB child so rehashing can be triggered again.
    set child_pid [get_child_pid 0]
    catch {exec kill -9 $child_pid}
    waitForBgsave r

    # 5) Touch the set so the down-sizing rehash actually starts.
    r spop myset 1
    assert [is_rehashing myset]

    # 6) With another BGSAVE in progress, rehashing must still make
    #    progress (the size ratio is extreme); wait for it to finish
    #    while the save is active.
    r bgsave

    while {[is_rehashing myset]} {
        r srandmember myset 1
    }
    if {$::verbose} {
        puts [r debug HTSTATS-KEY myset]
    }

    set child_pid [get_child_pid 0]
    catch {exec kill -9 $child_pid}
    waitForBgsave r

    # Restore the configuration we changed at the top.
    r config set save $origin_save
    r config set set-max-intset-entries $origin_max_is
    r config set rdb-key-save-delay $origin_save_delay
    r save
} {OK}
|
||||
|
||||
proc setup_move {} {
|
||||
r del myset3 myset4
|
||||
create_set myset1 {1 a b}
|
||||
|
|
|
|||
Loading…
Reference in New Issue