st.c: skip all deleted entries [Bug #17779]
Update the start entry by skipping all already-deleted entries. This fixes a performance issue with `Hash#first` in a certain case.
commit 9f9045123e
parent 60bdf03b6d
2 changed files with 18 additions and 2 deletions
benchmark/hash_first.yml (new file, +11)
@@ -0,0 +1,11 @@
+prelude: |
+  hash1 = 1_000_000.times.to_h { [rand, true]}
+  hash2 = hash1.dup
+  hash2.keys[1..100_000].each { hash2.delete _1 }
+  hash2.delete hash2.first[0]
+
+benchmark:
+  hash1: hash1.first
+  hash2: hash2.first
+
+loop_count: 100_000
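(Usage note, not part of the commit: YAML files under benchmark/ are normally run with the benchmark-driver gem, e.g. `benchmark-driver benchmark/hash_first.yml`; the exact invocation depends on your setup.)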
st.c (+7, -2)
@@ -1244,8 +1244,13 @@ update_range_for_deleted(st_table *tab, st_index_t n)
 {
     /* Do not update entries_bound here. Otherwise, we can fill all
        bins by deleted entry value before rebuilding the table. */
-    if (tab->entries_start == n)
-        tab->entries_start = n + 1;
+    if (tab->entries_start == n) {
+        st_index_t start = n + 1;
+        st_index_t bound = tab->entries_bound;
+        st_table_entry *entries = tab->entries;
+        while (start < bound && DELETED_ENTRY_P(&entries[start])) start++;
+        tab->entries_start = start;
+    }
 }
 
 /* Delete entry with KEY from table TAB, set up *VALUE (unless
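Why the old code was slow for `Hash#first`: traversal of an st table begins at `entries_start` and has to step over deleted slots, so leaving `entries_start` pointing at a long run of deleted entries makes every traversal pay for that run again. Below is a minimal, self-contained sketch of that scan. It reuses the names from the diff above (`st_index_t`, `st_table_entry`, `entries_start`, `entries_bound`, `DELETED_ENTRY_P`), but it is a simplified model for illustration, not the real st.c.

/* Simplified model (assumption, not the real st.c): an entries array whose
 * "start" index may point at a run of deleted slots. */
#include <stdio.h>
#include <stddef.h>

typedef size_t st_index_t;

typedef struct {
    int deleted;              /* stands in for st.c's deleted-entry marker */
} st_table_entry;

#define DELETED_ENTRY_P(e) ((e)->deleted)

typedef struct {
    st_table_entry *entries;
    st_index_t entries_start; /* first slot that may hold a live entry */
    st_index_t entries_bound; /* one past the last used slot */
} st_table;

/* Cost of finding the first live entry (what Hash#first effectively pays):
 * proportional to the number of deleted slots after entries_start. */
static st_index_t
first_live_entry(const st_table *tab)
{
    st_index_t i = tab->entries_start;
    while (i < tab->entries_bound && DELETED_ENTRY_P(&tab->entries[i]))
        i++;
    return i;
}

int
main(void)
{
    st_table_entry slots[6] = {
        { 1 }, { 1 }, { 1 },  /* a run of deleted slots at the front */
        { 0 }, { 0 }, { 1 },
    };
    st_table tab = { slots, 0, 6 };

    /* Before this commit, entries_start could stay stuck before the deleted
     * run, so every call repeats this walk; the patch advances it past the
     * whole run once, at deletion time. */
    printf("first live slot: %zu\n", (size_t)first_live_entry(&tab));
    return 0;
}

With the patch, `entries_start` is advanced past the entire deleted run when an entry is deleted, so subsequent traversals start at a live entry instead of rescanning the run, which is exactly the case the hash2 benchmark above exercises.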