/* This is a public domain general purpose hash table package
originally written by Peter Moore @ UCB.
The hash table data structures were redesigned and the package was
rewritten by Vladimir Makarov <vmakarov@redhat.com>. */
/* The original package implemented classic bucket-based hash tables
with entries doubly linked for access in their insertion order.
To decrease pointer chasing, and as a consequence to improve data
locality, the current implementation stores the entries in an array
and uses hash tables with open addressing. The current entries are
more compact than the original ones, which also improves data
locality.
The hash table has two arrays called *bins* and *entries*.

     bins:
    -------
   |       |                  entries array:
   |-------|            --------------------------------
   | index |           |      | entry:  |        |      |
   |-------|           |      |         |        |      |
   | ...   |           | ...  | hash    |  ...   | ...  |
   |-------|           |      | key     |        |      |
   | empty |           |      | record  |        |      |
   |-------|            --------------------------------
   | ...   |                   ^                  ^
   |-------|                   |_ entries start   |_ entries bound
   |deleted|
    -------
o The entry array contains the table entries in the same order as
they were inserted.
When the first entry is deleted, a variable containing the index
of the current first entry (*entries start*) is changed. In all
other cases of deletion, we just mark the entry as deleted, using
a reserved hash value.
Such organization of the entry storage makes table shift
operations and entry traversal very fast.
o The bins provide access to the entries by their keys. The
key hash is mapped to a bin containing the *index* of the
corresponding entry in the entry array.
The bin array size is always a power of two, which makes the
mapping very fast: we just use the corresponding lower bits of
the hash.
Generally it is not a good idea to ignore part of the hash, but
the alternative approach is worse. For example, we could use a
modulo operation for the mapping and a prime number for the size
of the bin array. Unfortunately, the modulo operation on big
64-bit numbers is extremely slow (it takes more than 100 cycles
on modern Intel CPUs).
The other bits of the hash value are still used when the mapping
results in a collision. In this case we use a secondary hash
value which is a function of the collision bin index and the
original hash value. The choice of function guarantees that we
can traverse all bins and finally find the corresponding bin:
after several iterations the function becomes a full cycle
linear congruential generator, because it satisfies the
requirements of the Hull-Dobell theorem.
When an entry is removed from the table, besides marking the
hash in the corresponding entry as described above, we also mark
the bin with a special value in order to find entries which had
a collision with the removed entries.
There are two reserved values for the bins. One denotes an
empty bin, the other denotes a bin for a deleted entry.
o The length of the bin array is always at least twice the entry
array length. This keeps the table load factor healthy.
The table is always rebuilt when we can no longer insert an
entry at the entries bound. We could also move the entries
bound on deletion, but then we would need special code to count
bins corresponding to deleted entries and to reset the bin
values when there are too many such bins.
Table rebuilding is done by creating a new entry array and bins
of an appropriate size. In some cases we also reuse the arrays
by compacting the entry array and removing deleted entries.
o To save memory, very small tables have no allocated bins array.
We use a linear search for access by key instead.
o To save more memory, we use 8-, 16-, 32- or 64-bit indexes in
the bins depending on the current hash table size.
o The implementation takes into account that the table can be
rebuilt during hashing or comparison functions. This can happen
if those functions are implemented in Ruby and a thread switch
occurs during their execution.
This implementation speeds up the Ruby hash table benchmarks on
average by more than 40% on an Intel Haswell CPU.
*/
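/* A minimal usage sketch of the public API implemented below.  It is
   kept under "#if 0" because it is illustration only; it assumes the
   usual declarations from st.h (st_init_numtable, st_insert, st_lookup,
   st_foreach, st_free_table, ST_CONTINUE). */
#if 0
#include <stdio.h>
#include "st.h"

static int
print_pair(st_data_t key, st_data_t value, st_data_t arg)
{
    /* Called once for each live entry, in insertion order. */
    printf("%lu -> %lu\n", (unsigned long) key, (unsigned long) value);
    return ST_CONTINUE;
}

static void
example(void)
{
    st_table *tab = st_init_numtable();
    st_data_t found;

    st_insert(tab, (st_data_t) 1, (st_data_t) 100); /* returns 0: new key */
    st_insert(tab, (st_data_t) 1, (st_data_t) 200); /* returns 1: value updated */
    if (st_lookup(tab, (st_data_t) 1, &found))
        printf("found %lu\n", (unsigned long) found); /* prints 200 */
    st_foreach(tab, print_pair, (st_data_t) 0);
    st_free_table(tab);
}
#endif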
#ifdef NOT_RUBY
#include "regint.h"
#include "st.h"
#else
#include "internal.h"
#include "internal/bits.h"
#include "internal/hash.h"
#include "internal/sanitizers.h"
#endif
#include <stdio.h>
#ifdef HAVE_STDLIB_H
#include <stdlib.h>
#endif
#include <string.h>
#include <assert.h>
#ifdef __GNUC__
#define PREFETCH(addr, write_p) __builtin_prefetch(addr, write_p)
#define EXPECT(expr, val) __builtin_expect(expr, val)
#define ATTRIBUTE_UNUSED __attribute__((unused))
#else
#define PREFETCH(addr, write_p)
#define EXPECT(expr, val) (expr)
#define ATTRIBUTE_UNUSED
#endif
/* The type of hashes. */
typedef st_index_t st_hash_t;
struct st_table_entry {
st_hash_t hash;
st_data_t key;
st_data_t record;
};
#define type_numhash st_hashtype_num
static const struct st_hash_type st_hashtype_num = {
st_numcmp,
st_numhash,
};
static int st_strcmp(st_data_t, st_data_t);
static st_index_t strhash(st_data_t);
static const struct st_hash_type type_strhash = {
st_strcmp,
strhash,
};
static int st_locale_insensitive_strcasecmp_i(st_data_t lhs, st_data_t rhs);
static st_index_t strcasehash(st_data_t);
static const struct st_hash_type type_strcasehash = {
st_locale_insensitive_strcasecmp_i,
strcasehash,
};
/* Value used to catch uninitialized entries/bins during debugging.
There is a possibility for a false alarm, but its probability is
extremely small. */
#define ST_INIT_VAL 0xafafafafafafafaf
#define ST_INIT_VAL_BYTE 0xafa
#ifdef RUBY
#undef malloc
#undef realloc
#undef calloc
#undef free
#define malloc ruby_xmalloc
#define calloc ruby_xcalloc
#define realloc ruby_xrealloc
#define free ruby_xfree
#endif
#define EQUAL(tab,x,y) ((x) == (y) || (*(tab)->type->compare)((x),(y)) == 0)
#define PTR_EQUAL(tab, ptr, hash_val, key_) \
((ptr)->hash == (hash_val) && EQUAL((tab), (key_), (ptr)->key))
/* Like PTR_EQUAL, but the result is returned in RES. REBUILT_P is set
to TRUE if the table is rebuilt during the comparison. */
#define DO_PTR_EQUAL_CHECK(tab, ptr, hash_val, key, res, rebuilt_p) \
do { \
unsigned int _old_rebuilds_num = (tab)->rebuilds_num; \
res = PTR_EQUAL(tab, ptr, hash_val, key); \
rebuilt_p = _old_rebuilds_num != (tab)->rebuilds_num; \
} while (FALSE)
/* Features of a table. */
struct st_features {
/* Power of 2 used for number of allocated entries. */
unsigned char entry_power;
/* Power of 2 used for number of allocated bins. Depending on the
table size, the number of bins is 2-4 times more than the
number of entries. */
unsigned char bin_power;
/* Enumeration of sizes of bins (8-bit, 16-bit etc). */
unsigned char size_ind;
/* Bins are packed in words of type st_index_t. The following is
a size of bins counted by words. */
st_index_t bins_words;
};
/* Features of all possible size tables. */
#if SIZEOF_ST_INDEX_T == 8
#define MAX_POWER2 62
static const struct st_features features[] = {
{0, 1, 0, 0x0},
{1, 2, 0, 0x1},
{2, 3, 0, 0x1},
{3, 4, 0, 0x2},
{4, 5, 0, 0x4},
{5, 6, 0, 0x8},
{6, 7, 0, 0x10},
{7, 8, 0, 0x20},
{8, 9, 1, 0x80},
{9, 10, 1, 0x100},
{10, 11, 1, 0x200},
{11, 12, 1, 0x400},
{12, 13, 1, 0x800},
{13, 14, 1, 0x1000},
{14, 15, 1, 0x2000},
{15, 16, 1, 0x4000},
{16, 17, 2, 0x10000},
{17, 18, 2, 0x20000},
{18, 19, 2, 0x40000},
{19, 20, 2, 0x80000},
{20, 21, 2, 0x100000},
{21, 22, 2, 0x200000},
{22, 23, 2, 0x400000},
{23, 24, 2, 0x800000},
{24, 25, 2, 0x1000000},
{25, 26, 2, 0x2000000},
{26, 27, 2, 0x4000000},
{27, 28, 2, 0x8000000},
{28, 29, 2, 0x10000000},
{29, 30, 2, 0x20000000},
{30, 31, 2, 0x40000000},
{31, 32, 2, 0x80000000},
{32, 33, 3, 0x200000000},
{33, 34, 3, 0x400000000},
{34, 35, 3, 0x800000000},
{35, 36, 3, 0x1000000000},
{36, 37, 3, 0x2000000000},
{37, 38, 3, 0x4000000000},
{38, 39, 3, 0x8000000000},
{39, 40, 3, 0x10000000000},
{40, 41, 3, 0x20000000000},
{41, 42, 3, 0x40000000000},
{42, 43, 3, 0x80000000000},
{43, 44, 3, 0x100000000000},
{44, 45, 3, 0x200000000000},
{45, 46, 3, 0x400000000000},
{46, 47, 3, 0x800000000000},
{47, 48, 3, 0x1000000000000},
{48, 49, 3, 0x2000000000000},
{49, 50, 3, 0x4000000000000},
{50, 51, 3, 0x8000000000000},
{51, 52, 3, 0x10000000000000},
{52, 53, 3, 0x20000000000000},
{53, 54, 3, 0x40000000000000},
{54, 55, 3, 0x80000000000000},
{55, 56, 3, 0x100000000000000},
{56, 57, 3, 0x200000000000000},
{57, 58, 3, 0x400000000000000},
{58, 59, 3, 0x800000000000000},
{59, 60, 3, 0x1000000000000000},
{60, 61, 3, 0x2000000000000000},
{61, 62, 3, 0x4000000000000000},
{62, 63, 3, 0x8000000000000000},
};
#else
#define MAX_POWER2 30
static const struct st_features features[] = {
{0, 1, 0, 0x1},
{1, 2, 0, 0x1},
{2, 3, 0, 0x2},
{3, 4, 0, 0x4},
{4, 5, 0, 0x8},
{5, 6, 0, 0x10},
{6, 7, 0, 0x20},
{7, 8, 0, 0x40},
{8, 9, 1, 0x100},
{9, 10, 1, 0x200},
{10, 11, 1, 0x400},
{11, 12, 1, 0x800},
{12, 13, 1, 0x1000},
{13, 14, 1, 0x2000},
{14, 15, 1, 0x4000},
{15, 16, 1, 0x8000},
{16, 17, 2, 0x20000},
{17, 18, 2, 0x40000},
{18, 19, 2, 0x80000},
{19, 20, 2, 0x100000},
{20, 21, 2, 0x200000},
{21, 22, 2, 0x400000},
{22, 23, 2, 0x800000},
{23, 24, 2, 0x1000000},
{24, 25, 2, 0x2000000},
{25, 26, 2, 0x4000000},
{26, 27, 2, 0x8000000},
{27, 28, 2, 0x10000000},
{28, 29, 2, 0x20000000},
{29, 30, 2, 0x40000000},
{30, 31, 2, 0x80000000},
};
#endif
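/* For example, on a 64-bit build the row {8, 9, 1, 0x80} above says that
   a table with 2^8 = 256 allocated entries gets 2^9 = 512 bins, each bin
   holding a 16-bit index (size_ind 1), so the bins occupy 512 * 2 = 1024
   bytes, i.e. 0x80 words of 8-byte st_index_t. */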
/* The reserved hash value and its substitution. */
#define RESERVED_HASH_VAL (~(st_hash_t) 0)
#define RESERVED_HASH_SUBSTITUTION_VAL ((st_hash_t) 0)
/* Return hash value of KEY for table TAB. */
static inline st_hash_t
do_hash(st_data_t key, st_table *tab)
{
st_hash_t hash = (st_hash_t)(tab->type->hash)(key);
/* RESERVED_HASH_VAL is used for a deleted entry. Map it into
another value. Such mapping should be extremely rare. */
return hash == RESERVED_HASH_VAL ? RESERVED_HASH_SUBSTITUTION_VAL : hash;
}
/* Power of 2 defining the minimal number of allocated entries. */
#define MINIMAL_POWER2 2
#if MINIMAL_POWER2 < 2
#error "MINIMAL_POWER2 should be >= 2"
#endif
/* If the power2 of the allocated `entries` is less than the following
value, don't allocate bins and use a linear search. */
#define MAX_POWER2_FOR_TABLES_WITHOUT_BINS 4
/* Return the smallest n >= MINIMAL_POWER2 such that 2^n > SIZE. */
static int
get_power2(st_index_t size)
{
unsigned int n = ST_INDEX_BITS - nlz_intptr(size);
if (n <= MAX_POWER2)
return n < MINIMAL_POWER2 ? MINIMAL_POWER2 : n;
#ifndef NOT_RUBY
/* Ran out of the table entries */
rb_raise(rb_eRuntimeError, "st_table too big");
#endif
/* should raise exception */
return -1;
}
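/* For example, assuming a 64-bit st_index_t: get_power2(3) returns
   MINIMAL_POWER2 == 2 (4 allocated entries), get_power2(4) returns 3
   (8 entries), and get_power2(100) returns 7 (128 entries), since
   2^7 = 128 is the smallest power of two strictly greater than 100. */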
/* Return value of N-th bin in array BINS of table with bins size
index S. */
static inline st_index_t
get_bin(st_index_t *bins, int s, st_index_t n)
{
return (s == 0 ? ((unsigned char *) bins)[n]
: s == 1 ? ((unsigned short *) bins)[n]
: s == 2 ? ((unsigned int *) bins)[n]
: ((st_index_t *) bins)[n]);
}
/* Set up N-th bin in array BINS of table with bins size index S to
value V. */
static inline void
set_bin(st_index_t *bins, int s, st_index_t n, st_index_t v)
{
if (s == 0) ((unsigned char *) bins)[n] = (unsigned char) v;
else if (s == 1) ((unsigned short *) bins)[n] = (unsigned short) v;
else if (s == 2) ((unsigned int *) bins)[n] = (unsigned int) v;
else ((st_index_t *) bins)[n] = v;
}
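/* For example, with size index 1 the bins array is viewed as an array of
   16-bit slots: set_bin(bins, 1, 10, 9) stores 9 into
   ((unsigned short *) bins)[10], and get_bin(bins, 1, 10) reads the same
   value back. */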
/* These macros define reserved values for an empty table bin and a
table bin which contains a deleted entry. We never use such values
for an entry index in the bins. */
#define EMPTY_BIN 0
#define DELETED_BIN 1
/* Base of a real entry index in the bins. */
#define ENTRY_BASE 2
/* Mark I-th bin of table TAB as empty, in other words not
corresponding to any entry. */
#define MARK_BIN_EMPTY(tab, i) (set_bin((tab)->bins, get_size_ind(tab), i, EMPTY_BIN))
/* Values used to report that an entry or a bin with the given
characteristics was not found. */
#define UNDEFINED_ENTRY_IND (~(st_index_t) 0)
#define UNDEFINED_BIN_IND (~(st_index_t) 0)
/* Entry and bin values returned when we found a table rebuild during
the search. */
#define REBUILT_TABLE_ENTRY_IND (~(st_index_t) 1)
#define REBUILT_TABLE_BIN_IND (~(st_index_t) 1)
/* Mark the I-th bin of table TAB as corresponding to a deleted table
entry. */
#define MARK_BIN_DELETED(tab, i) \
do { \
set_bin((tab)->bins, get_size_ind(tab), i, DELETED_BIN); \
} while (0)
/* Macros to check whether value B denotes an empty bin or a bin
corresponding to a deleted entry. */
#define EMPTY_BIN_P(b) ((b) == EMPTY_BIN)
#define DELETED_BIN_P(b) ((b) == DELETED_BIN)
#define EMPTY_OR_DELETED_BIN_P(b) ((b) <= DELETED_BIN)
/* Macros to check empty bins and bins corresponding to deleted
entries. Bins are given by their index I in table TAB. */
#define IND_EMPTY_BIN_P(tab, i) (EMPTY_BIN_P(get_bin((tab)->bins, get_size_ind(tab), i)))
#define IND_DELETED_BIN_P(tab, i) (DELETED_BIN_P(get_bin((tab)->bins, get_size_ind(tab), i)))
#define IND_EMPTY_OR_DELETED_BIN_P(tab, i) (EMPTY_OR_DELETED_BIN_P(get_bin((tab)->bins, get_size_ind(tab), i)))
/* Macros for marking and checking deleted entries given by their
pointer E_PTR. */
#define MARK_ENTRY_DELETED(e_ptr) ((e_ptr)->hash = RESERVED_HASH_VAL)
#define DELETED_ENTRY_P(e_ptr) ((e_ptr)->hash == RESERVED_HASH_VAL)
/* Return bin size index of table TAB. */
static inline unsigned int
get_size_ind(const st_table *tab)
{
return tab->size_ind;
}
/* Return the number of allocated bins of table TAB. */
static inline st_index_t
get_bins_num(const st_table *tab)
{
return ((st_index_t) 1)<<tab->bin_power;
}
/* Return mask for a bin index in table TAB. */
static inline st_index_t
bins_mask(const st_table *tab)
{
return get_bins_num(tab) - 1;
}
/* Return the index of table TAB bin corresponding to
HASH_VALUE. */
static inline st_index_t
hash_bin(st_hash_t hash_value, st_table *tab)
{
return hash_value & bins_mask(tab);
}
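/* For example, for a table with bin_power == 4 there are 16 bins,
   bins_mask() returns 15 (0xf), and hash_bin(h, tab) == (h & 0xf), i.e.
   only the 4 lowest bits of the hash select the primary bin. */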
/* Return the number of allocated entries of table TAB. */
static inline st_index_t
get_allocated_entries(const st_table *tab)
{
return ((st_index_t) 1)<<tab->entry_power;
}
/* Return the size in bytes of the allocated bins of table TAB. */
static inline st_index_t
bins_size(const st_table *tab)
{
return features[tab->entry_power].bins_words * sizeof (st_index_t);
}
/* Mark all bins of table TAB as empty. */
static void
initialize_bins(st_table *tab)
{
memset(tab->bins, 0, bins_size(tab));
}
/* Make table TAB empty. */
static void
make_tab_empty(st_table *tab)
{
tab->num_entries = 0;
tab->entries_start = tab->entries_bound = 0;
if (tab->bins != NULL)
initialize_bins(tab);
}
#ifdef HASH_LOG
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
static struct {
int all, total, num, str, strcase;
} collision;
/* Flag switching off output of package statistics at the end of
program. */
static int init_st = 0;
/* Output overall number of table searches and collisions into a
temporary file. */
static void
stat_col(void)
{
char fname[10+sizeof(long)*3];
FILE *f;
if (!collision.total) return;
f = fopen((snprintf(fname, sizeof(fname), "/tmp/col%ld", (long)getpid()), fname), "w");
if (f == NULL)
return;
fprintf(f, "collision: %d / %d (%6.2f)\n", collision.all, collision.total,
((double)collision.all / (collision.total)) * 100);
fprintf(f, "num: %d, str: %d, strcase: %d\n", collision.num, collision.str, collision.strcase);
fclose(f);
}
#endif
/* Create and return a table with TYPE which can hold at least SIZE
entries. The real number of entries which the table can hold is
the smallest power of two greater than SIZE. */
st_table *
st_init_table_with_size(const struct st_hash_type *type, st_index_t size)
{
st_table *tab;
int n;
#ifdef HASH_LOG
#if HASH_LOG+0 < 0
{
const char *e = getenv("ST_HASH_LOG");
if (!e || !*e) init_st = 1;
}
#endif
if (init_st == 0) {
init_st = 1;
atexit(stat_col);
}
#endif
n = get_power2(size);
#ifndef RUBY
if (n < 0)
return NULL;
#endif
tab = (st_table *) malloc(sizeof (st_table));
#ifndef RUBY
if (tab == NULL)
return NULL;
#endif
tab->type = type;
tab->entry_power = n;
tab->bin_power = features[n].bin_power;
tab->size_ind = features[n].size_ind;
if (n <= MAX_POWER2_FOR_TABLES_WITHOUT_BINS)
tab->bins = NULL;
else {
tab->bins = (st_index_t *) malloc(bins_size(tab));
#ifndef RUBY
if (tab->bins == NULL) {
free(tab);
return NULL;
}
#endif
}
tab->entries = (st_table_entry *) malloc(get_allocated_entries(tab)
* sizeof(st_table_entry));
#ifndef RUBY
if (tab->entries == NULL) {
st_free_table(tab);
return NULL;
}
#endif
make_tab_empty(tab);
tab->rebuilds_num = 0;
return tab;
}
/* Create and return table with TYPE which can hold a minimal number
of entries (see comments for get_power2). */
st_table *
st_init_table(const struct st_hash_type *type)
{
return st_init_table_with_size(type, 0);
}
/* Create and return table which can hold a minimal number of
numbers. */
st_table *
st_init_numtable(void)
{
return st_init_table(&type_numhash);
}
/* Create and return table which can hold SIZE numbers. */
st_table *
st_init_numtable_with_size(st_index_t size)
{
return st_init_table_with_size(&type_numhash, size);
}
/* Create and return table which can hold a minimal number of
strings. */
st_table *
st_init_strtable(void)
{
return st_init_table(&type_strhash);
}
/* Create and return table which can hold SIZE strings. */
st_table *
st_init_strtable_with_size(st_index_t size)
{
return st_init_table_with_size(&type_strhash, size);
}
/* Create and return table which can hold a minimal number of strings
whose character case is ignored. */
st_table *
st_init_strcasetable(void)
{
return st_init_table(&type_strcasehash);
}
/* Create and return table which can hold SIZE strings whose character
case is ignored. */
st_table *
st_init_strcasetable_with_size(st_index_t size)
{
return st_init_table_with_size(&type_strcasehash, size);
}
/* Make table TAB empty. */
void
st_clear(st_table *tab)
{
make_tab_empty(tab);
tab->rebuilds_num++;
}
/* Free table TAB space. */
void
st_free_table(st_table *tab)
{
if (tab->bins != NULL)
free(tab->bins);
free(tab->entries);
free(tab);
}
/* Return byte size of memory allocated for table TAB. */
size_t
st_memsize(const st_table *tab)
{
return(sizeof(st_table)
+ (tab->bins == NULL ? 0 : bins_size(tab))
+ get_allocated_entries(tab) * sizeof(st_table_entry));
}
static st_index_t
find_table_entry_ind(st_table *tab, st_hash_t hash_value, st_data_t key);
static st_index_t
find_table_bin_ind(st_table *tab, st_hash_t hash_value, st_data_t key);
static st_index_t
find_table_bin_ind_direct(st_table *table, st_hash_t hash_value, st_data_t key);
static st_index_t
find_table_bin_ptr_and_reserve(st_table *tab, st_hash_t *hash_value,
st_data_t key, st_index_t *bin_ind);
#ifdef HASH_LOG
static void
count_collision(const struct st_hash_type *type)
{
collision.all++;
if (type == &type_numhash) {
collision.num++;
}
else if (type == &type_strhash) {
collision.strcase++;
}
else if (type == &type_strcasehash) {
collision.str++;
}
}
#define COLLISION (collision_check ? count_collision(tab->type) : (void)0)
#define FOUND_BIN (collision_check ? collision.total++ : (void)0)
#define collision_check 0
#else
#define COLLISION
#define FOUND_BIN
#endif
/* If the entry array is at least REBUILD_THRESHOLD times longer than
the number of entries in the table, decrease the table size. */
#define REBUILD_THRESHOLD 4
#if REBUILD_THRESHOLD < 2
#error "REBUILD_THRESHOLD should be >= 2"
#endif
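/* A worked example for rebuild_table below, with 256 allocated entries
   and REBUILD_THRESHOLD == 4: a table holding 60 live entries
   (4 * 60 <= 256) is rebuilt into a smaller table sized for
   2 * 60 - 1 = 119 entries (128 allocated), while a table holding 100
   live entries (4 * 100 > 256) is only compacted in place. */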
/* Rebuild table TAB. Rebuilding removes all deleted bins and entries
and can change the size of the table's entries and bins arrays.
Rebuilding is implemented either by creating a new table or by
compacting the existing one. */
static void
rebuild_table(st_table *tab)
{
st_index_t i, ni, bound;
unsigned int size_ind;
st_table *new_tab;
st_table_entry *entries, *new_entries;
st_table_entry *curr_entry_ptr;
st_index_t *bins;
st_index_t bin_ind;
bound = tab->entries_bound;
entries = tab->entries;
if ((2 * tab->num_entries <= get_allocated_entries(tab)
&& REBUILD_THRESHOLD * tab->num_entries > get_allocated_entries(tab))
|| tab->num_entries < (1 << MINIMAL_POWER2)) {
/* Compaction: */
tab->num_entries = 0;
if (tab->bins != NULL)
initialize_bins(tab);
new_tab = tab;
new_entries = entries;
}
else {
new_tab = st_init_table_with_size(tab->type,
2 * tab->num_entries - 1);
new_entries = new_tab->entries;
}
ni = 0;
bins = new_tab->bins;
size_ind = get_size_ind(new_tab);
for (i = tab->entries_start; i < bound; i++) {
curr_entry_ptr = &entries[i];
PREFETCH(entries + i + 1, 0);
if (EXPECT(DELETED_ENTRY_P(curr_entry_ptr), 0))
continue;
if (&new_entries[ni] != curr_entry_ptr)
new_entries[ni] = *curr_entry_ptr;
if (EXPECT(bins != NULL, 1)) {
bin_ind = find_table_bin_ind_direct(new_tab, curr_entry_ptr->hash,
curr_entry_ptr->key);
set_bin(bins, size_ind, bin_ind, ni + ENTRY_BASE);
}
new_tab->num_entries++;
ni++;
}
if (new_tab != tab) {
tab->entry_power = new_tab->entry_power;
tab->bin_power = new_tab->bin_power;
tab->size_ind = new_tab->size_ind;
if (tab->bins != NULL)
free(tab->bins);
tab->bins = new_tab->bins;
free(tab->entries);
tab->entries = new_tab->entries;
free(new_tab);
}
tab->entries_start = 0;
tab->entries_bound = tab->num_entries;
tab->rebuilds_num++;
}
/* Return the next secondary hash index for table TAB using the
previous index IND and PERTERB. Eventually the function reduces to
a full *cycle linear congruential generator*, in other words it
guarantees traversing all table bins in the extreme case.
According to the Hull-Dobell theorem, a generator
"Xnext = (a*Xprev + c) mod m" is a full cycle generator if and only if
o m and c are relatively prime
o a-1 is divisible by all prime factors of m
o a-1 is divisible by 4 if m is divisible by 4.
In our case a is 5, c is 1, and m is a power of two. */
static inline st_index_t
secondary_hash(st_index_t ind, st_table *tab, st_index_t *perterb)
{
*perterb >>= 11;
ind = (ind << 2) + ind + *perterb + 1;
return hash_bin(ind, tab);
}
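/* A worked example: once *perterb has decayed to zero, the recurrence is
   ind = (5*ind + 1) mod 2^bin_power. For bin_power == 3 (8 bins) and a
   starting index of 2 it visits 2, 3, 0, 1, 6, 7, 4, 5 and only then
   returns to 2, i.e. every bin is probed exactly once per cycle, as the
   Hull-Dobell conditions above guarantee (a-1 == 4 is divisible by 2 and
   by 4, and c == 1 is relatively prime to m). */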
/* Find an entry with HASH_VALUE and KEY in TABLE using a linear
search. Return the index of the found entry in array `entries`.
If it is not found, return UNDEFINED_ENTRY_IND. If the table was
rebuilt during the search, return REBUILT_TABLE_ENTRY_IND. */
static inline st_index_t
find_entry(st_table *tab, st_hash_t hash_value, st_data_t key)
{
int eq_p, rebuilt_p;
st_index_t i, bound;
st_table_entry *entries;
bound = tab->entries_bound;
entries = tab->entries;
for (i = tab->entries_start; i < bound; i++) {
DO_PTR_EQUAL_CHECK(tab, &entries[i], hash_value, key, eq_p, rebuilt_p);
if (EXPECT(rebuilt_p, 0))
return REBUILT_TABLE_ENTRY_IND;
if (eq_p)
return i;
}
return UNDEFINED_ENTRY_IND;
}
/* Define this to use quadratic probing. That method has better data
locality but more collisions than the current approach. On average
it results in a slightly slower search. */
/*#define QUADRATIC_PROBE*/
/* Return index of entry with HASH_VALUE and KEY in table TAB. If
there is no such entry, return UNDEFINED_ENTRY_IND. If the table
was rebuilt during the search, return REBUILT_TABLE_ENTRY_IND. */
static st_index_t
find_table_entry_ind(st_table *tab, st_hash_t hash_value, st_data_t key)
{
int eq_p, rebuilt_p;
st_index_t ind;
#ifdef QUADRATIC_PROBE
st_index_t d;
#else
st_index_t peterb;
#endif
st_index_t bin;
st_table_entry *entries = tab->entries;
ind = hash_bin(hash_value, tab);
#ifdef QUADRATIC_PROBE
d = 1;
#else
peterb = hash_value;
#endif
FOUND_BIN;
for (;;) {
bin = get_bin(tab->bins, get_size_ind(tab), ind);
if (! EMPTY_OR_DELETED_BIN_P(bin)) {
DO_PTR_EQUAL_CHECK(tab, &entries[bin - ENTRY_BASE], hash_value, key, eq_p, rebuilt_p);
if (EXPECT(rebuilt_p, 0))
return REBUILT_TABLE_ENTRY_IND;
if (eq_p)
break;
} else if (EMPTY_BIN_P(bin))
return UNDEFINED_ENTRY_IND;
#ifdef QUADRATIC_PROBE
ind = hash_bin(ind + d, tab);
d++;
#else
ind = secondary_hash(ind, tab, &peterb);
#endif
COLLISION;
}
return bin;
}
/* Find and return index of table TAB bin corresponding to an entry
with HASH_VALUE and KEY. If there is no such bin, return
UNDEFINED_BIN_IND. If the table was rebuilt during the search,
return REBUILT_TABLE_BIN_IND. */
static st_index_t
find_table_bin_ind(st_table *tab, st_hash_t hash_value, st_data_t key)
{
int eq_p, rebuilt_p;
st_index_t ind;
#ifdef QUADRATIC_PROBE
st_index_t d;
#else
st_index_t peterb;
#endif
st_index_t bin;
st_table_entry *entries = tab->entries;
ind = hash_bin(hash_value, tab);
#ifdef QUADRATIC_PROBE
d = 1;
#else
peterb = hash_value;
#endif
FOUND_BIN;
for (;;) {
bin = get_bin(tab->bins, get_size_ind(tab), ind);
if (! EMPTY_OR_DELETED_BIN_P(bin)) {
DO_PTR_EQUAL_CHECK(tab, &entries[bin - ENTRY_BASE], hash_value, key, eq_p, rebuilt_p);
if (EXPECT(rebuilt_p, 0))
return REBUILT_TABLE_BIN_IND;
if (eq_p)
break;
} else if (EMPTY_BIN_P(bin))
return UNDEFINED_BIN_IND;
#ifdef QUADRATIC_PROBE
ind = hash_bin(ind + d, tab);
d++;
#else
ind = secondary_hash(ind, tab, &peterb);
#endif
COLLISION;
}
return ind;
}
/* Find and return index of table TAB bin corresponding to an entry
with HASH_VALUE and KEY. The entry should be in the table
already. */
static st_index_t
find_table_bin_ind_direct(st_table *tab, st_hash_t hash_value, st_data_t key)
{
st_index_t ind;
#ifdef QUADRATIC_PROBE
st_index_t d;
#else
st_index_t peterb;
#endif
st_index_t bin;
ind = hash_bin(hash_value, tab);
#ifdef QUADRATIC_PROBE
d = 1;
#else
peterb = hash_value;
#endif
FOUND_BIN;
for (;;) {
bin = get_bin(tab->bins, get_size_ind(tab), ind);
if (EMPTY_OR_DELETED_BIN_P(bin))
return ind;
#ifdef QUADRATIC_PROBE
ind = hash_bin(ind + d, tab);
d++;
#else
ind = secondary_hash(ind, tab, &peterb);
#endif
COLLISION;
}
}
/* Return the index of the table TAB bin for HASH_VALUE and KEY through
BIN_IND and the pointed-to bin value as the function result. Reserve
the bin for inclusion of the corresponding entry into the table if it
is not there yet. We always find such a bin because the bins array
is longer than the entries array. Although we can reuse a deleted
bin, the resulting bin value is always empty if the table has no
entry with KEY. Return the entries array index of the found entry
or UNDEFINED_ENTRY_IND if it is not found. If the table was rebuilt
during the search, return REBUILT_TABLE_ENTRY_IND. */
static st_index_t
find_table_bin_ptr_and_reserve(st_table *tab, st_hash_t *hash_value,
st_data_t key, st_index_t *bin_ind)
{
int eq_p, rebuilt_p;
st_index_t ind;
st_hash_t curr_hash_value = *hash_value;
#ifdef QUADRATIC_PROBE
st_index_t d;
#else
st_index_t peterb;
#endif
st_index_t entry_index;
st_index_t first_deleted_bin_ind;
st_table_entry *entries;
ind = hash_bin(curr_hash_value, tab);
#ifdef QUADRATIC_PROBE
d = 1;
#else
peterb = curr_hash_value;
#endif
FOUND_BIN;
first_deleted_bin_ind = UNDEFINED_BIN_IND;
entries = tab->entries;
for (;;) {
entry_index = get_bin(tab->bins, get_size_ind(tab), ind);
if (EMPTY_BIN_P(entry_index)) {
tab->num_entries++;
entry_index = UNDEFINED_ENTRY_IND;
if (first_deleted_bin_ind != UNDEFINED_BIN_IND) {
/* We can reuse bin of a deleted entry. */
ind = first_deleted_bin_ind;
MARK_BIN_EMPTY(tab, ind);
}
break;
}
else if (! DELETED_BIN_P(entry_index)) {
DO_PTR_EQUAL_CHECK(tab, &entries[entry_index - ENTRY_BASE], curr_hash_value, key, eq_p, rebuilt_p);
if (EXPECT(rebuilt_p, 0))
return REBUILT_TABLE_ENTRY_IND;
if (eq_p)
break;
}
else if (first_deleted_bin_ind == UNDEFINED_BIN_IND)
first_deleted_bin_ind = ind;
#ifdef QUADRATIC_PROBE
ind = hash_bin(ind + d, tab);
d++;
#else
ind = secondary_hash(ind, tab, &peterb);
#endif
COLLISION;
}
*bin_ind = ind;
return entry_index;
}
/* Find an entry with KEY in table TAB. Return non-zero if we found
it. Set up *VALUE to the found entry record. */
int
st_lookup(st_table *tab, st_data_t key, st_data_t *value)
{
st_index_t bin;
st_hash_t hash = do_hash(key, tab);
retry:
if (tab->bins == NULL) {
bin = find_entry(tab, hash, key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
if (bin == UNDEFINED_ENTRY_IND)
return 0;
}
else {
bin = find_table_entry_ind(tab, hash, key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
if (bin == UNDEFINED_ENTRY_IND)
return 0;
bin -= ENTRY_BASE;
}
if (value != 0)
*value = tab->entries[bin].record;
return 1;
}
/* Find an entry with KEY in table TAB. Return non-zero if we found
it. Set up *RESULT to the found table entry key. */
int
st_get_key(st_table *tab, st_data_t key, st_data_t *result)
{
st_index_t bin;
st_hash_t hash = do_hash(key, tab);
retry:
if (tab->bins == NULL) {
bin = find_entry(tab, hash, key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
if (bin == UNDEFINED_ENTRY_IND)
return 0;
}
else {
bin = find_table_entry_ind(tab, hash, key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
if (bin == UNDEFINED_ENTRY_IND)
return 0;
bin -= ENTRY_BASE;
}
if (result != 0)
*result = tab->entries[bin].key;
return 1;
}
/* Check the table and rebuild it if it is necessary. */
static inline void
rebuild_table_if_necessary (st_table *tab)
{
st_index_t bound = tab->entries_bound;
if (bound == get_allocated_entries(tab))
rebuild_table(tab);
}
/* Insert (KEY, VALUE) into table TAB and return zero. If there is
already an entry with KEY in the table, return nonzero and update
the value of the found entry. */
int
st_insert(st_table *tab, st_data_t key, st_data_t value)
{
st_table_entry *entry;
st_index_t bin;
st_index_t ind;
st_hash_t hash_value;
st_index_t bin_ind;
int new_p;
hash_value = do_hash(key, tab);
retry:
rebuild_table_if_necessary(tab);
if (tab->bins == NULL) {
bin = find_entry(tab, hash_value, key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
new_p = bin == UNDEFINED_ENTRY_IND;
if (new_p)
tab->num_entries++;
bin_ind = UNDEFINED_BIN_IND;
}
else {
bin = find_table_bin_ptr_and_reserve(tab, &hash_value,
key, &bin_ind);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
new_p = bin == UNDEFINED_ENTRY_IND;
bin -= ENTRY_BASE;
}
if (new_p) {
ind = tab->entries_bound++;
entry = &tab->entries[ind];
entry->hash = hash_value;
entry->key = key;
entry->record = value;
if (bin_ind != UNDEFINED_BIN_IND)
set_bin(tab->bins, get_size_ind(tab), bin_ind, ind + ENTRY_BASE);
return 0;
}
tab->entries[bin].record = value;
return 1;
}
/* Insert (KEY, VALUE, HASH) into table TAB. The table should not have
an entry with KEY before the insertion. */
static inline void
st_add_direct_with_hash(st_table *tab,
st_data_t key, st_data_t value, st_hash_t hash)
{
st_table_entry *entry;
st_index_t ind;
st_index_t bin_ind;
rebuild_table_if_necessary(tab);
ind = tab->entries_bound++;
entry = &tab->entries[ind];
entry->hash = hash;
entry->key = key;
entry->record = value;
tab->num_entries++;
if (tab->bins != NULL) {
bin_ind = find_table_bin_ind_direct(tab, hash, key);
set_bin(tab->bins, get_size_ind(tab), bin_ind, ind + ENTRY_BASE);
}
}
/* Insert (KEY, VALUE) into table TAB. The table should not have an
entry with KEY before the insertion. */
void
st_add_direct(st_table *tab, st_data_t key, st_data_t value)
{
st_hash_t hash_value;
hash_value = do_hash(key, tab);
st_add_direct_with_hash(tab, key, value, hash_value);
}
/* Insert (FUNC(KEY), VALUE) into table TAB and return zero. If
there is already an entry with KEY in the table, return nonzero and
update the value of the found entry. */
int
st_insert2(st_table *tab, st_data_t key, st_data_t value,
st_data_t (*func)(st_data_t))
{
st_table_entry *entry;
st_index_t bin;
st_index_t ind;
st_hash_t hash_value;
st_index_t bin_ind;
int new_p;
hash_value = do_hash(key, tab);
retry:
rebuild_table_if_necessary (tab);
if (tab->bins == NULL) {
bin = find_entry(tab, hash_value, key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
new_p = bin == UNDEFINED_ENTRY_IND;
if (new_p)
tab->num_entries++;
bin_ind = UNDEFINED_BIN_IND;
}
else {
bin = find_table_bin_ptr_and_reserve(tab, &hash_value,
key, &bin_ind);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
new_p = bin == UNDEFINED_ENTRY_IND;
bin -= ENTRY_BASE;
}
if (new_p) {
key = (*func)(key);
ind = tab->entries_bound++;
entry = &tab->entries[ind];
entry->hash = hash_value;
entry->key = key;
entry->record = value;
if (bin_ind != UNDEFINED_BIN_IND)
set_bin(tab->bins, get_size_ind(tab), bin_ind, ind + ENTRY_BASE);
return 0;
}
tab->entries[bin].record = value;
return 1;
}
/* Create and return a copy of table OLD_TAB. */
st_table *
st_copy(st_table *old_tab)
{
st_table *new_tab;
new_tab = (st_table *) malloc(sizeof(st_table));
#ifndef RUBY
if (new_tab == NULL)
return NULL;
#endif
*new_tab = *old_tab;
if (old_tab->bins == NULL)
new_tab->bins = NULL;
else {
new_tab->bins = (st_index_t *) malloc(bins_size(old_tab));
#ifndef RUBY
if (new_tab->bins == NULL) {
free(new_tab);
return NULL;
}
#endif
}
new_tab->entries = (st_table_entry *) malloc(get_allocated_entries(old_tab)
* sizeof(st_table_entry));
#ifndef RUBY
if (new_tab->entries == NULL) {
st_free_table(new_tab);
return NULL;
}
#endif
MEMCPY(new_tab->entries, old_tab->entries, st_table_entry,
get_allocated_entries(old_tab));
if (old_tab->bins != NULL)
MEMCPY(new_tab->bins, old_tab->bins, char, bins_size(old_tab));
return new_tab;
}
/* Update the entries start of table TAB after removing an entry
with index N in the array entries. */
static inline void
update_range_for_deleted(st_table *tab, st_index_t n)
{
/* Do not update entries_bound here. Otherwise, we can fill all
bins by deleted entry value before rebuilding the table. */
if (tab->entries_start == n)
tab->entries_start = n + 1;
}
/* Delete entry with KEY from table TAB, set up *VALUE (unless
VALUE is zero) from deleted table entry, and return non-zero. If
there is no entry with KEY in the table, clear *VALUE (unless VALUE
is zero), and return zero. */
static int
st_general_delete(st_table *tab, st_data_t *key, st_data_t *value)
{
st_table_entry *entry;
st_index_t bin;
st_index_t bin_ind;
st_hash_t hash;
hash = do_hash(*key, tab);
retry:
if (tab->bins == NULL) {
bin = find_entry(tab, hash, *key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
if (bin == UNDEFINED_ENTRY_IND) {
if (value != 0) *value = 0;
return 0;
}
}
else {
bin_ind = find_table_bin_ind(tab, hash, *key);
if (EXPECT(bin_ind == REBUILT_TABLE_BIN_IND, 0))
goto retry;
if (bin_ind == UNDEFINED_BIN_IND) {
if (value != 0) *value = 0;
return 0;
}
bin = get_bin(tab->bins, get_size_ind(tab), bin_ind) - ENTRY_BASE;
MARK_BIN_DELETED(tab, bin_ind);
}
entry = &tab->entries[bin];
*key = entry->key;
if (value != 0) *value = entry->record;
MARK_ENTRY_DELETED(entry);
tab->num_entries--;
update_range_for_deleted(tab, bin);
return 1;
}
int
st_delete(st_table *tab, st_data_t *key, st_data_t *value)
{
return st_general_delete(tab, key, value);
}
/* This function and other functions with the suffix '_safe' or
'_check' originate from the previous implementation of the hash
tables. They were needed to delete entries correctly while
traversing tables. The current implementation permits deletion
during traversal without any special mechanism. */
int
st_delete_safe(st_table *tab, st_data_t *key, st_data_t *value,
st_data_t never ATTRIBUTE_UNUSED)
{
return st_general_delete(tab, key, value);
}
/* If table TAB is empty, clear *VALUE (unless VALUE is zero), and
return zero. Otherwise, remove the first entry in the table.
Return its key through KEY and its record through VALUE (unless
VALUE is zero). */
int
st_shift(st_table *tab, st_data_t *key, st_data_t *value)
{
st_index_t i, bound;
st_index_t bin;
st_table_entry *entries, *curr_entry_ptr;
st_index_t bin_ind;
entries = tab->entries;
bound = tab->entries_bound;
for (i = tab->entries_start; i < bound; i++) {
curr_entry_ptr = &entries[i];
if (! DELETED_ENTRY_P(curr_entry_ptr)) {
st_hash_t entry_hash = curr_entry_ptr->hash;
st_data_t entry_key = curr_entry_ptr->key;
if (value != 0) *value = curr_entry_ptr->record;
*key = entry_key;
retry:
if (tab->bins == NULL) {
bin = find_entry(tab, entry_hash, entry_key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0)) {
entries = tab->entries;
goto retry;
}
curr_entry_ptr = &entries[bin];
}
else {
bin_ind = find_table_bin_ind(tab, entry_hash, entry_key);
if (EXPECT(bin_ind == REBUILT_TABLE_BIN_IND, 0)) {
entries = tab->entries;
goto retry;
}
curr_entry_ptr = &entries[get_bin(tab->bins, get_size_ind(tab), bin_ind)
- ENTRY_BASE];
MARK_BIN_DELETED(tab, bin_ind);
}
MARK_ENTRY_DELETED(curr_entry_ptr);
tab->num_entries--;
update_range_for_deleted(tab, i);
return 1;
}
}
tab->entries_start = tab->entries_bound = 0;
if (value != 0) *value = 0;
return 0;
}
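/* A small sketch (illustration only, kept under "#if 0") of draining a
   table in insertion order with st_shift; it assumes <stdio.h>. */
#if 0
static void
drain(st_table *tab)
{
    st_data_t key, value;

    /* Each call removes and returns the oldest remaining entry. */
    while (st_shift(tab, &key, &value))
        printf("%lu -> %lu\n", (unsigned long) key, (unsigned long) value);
}
#endif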
/* See comments for function st_delete_safe. */
void
st_cleanup_safe(st_table *tab ATTRIBUTE_UNUSED,
st_data_t never ATTRIBUTE_UNUSED)
{
}
/* Find the entry with KEY in table TAB, call FUNC with pointers to
copies of the key and the value of the found entry, and non-zero as
the 3rd argument. If the entry is not found, call FUNC with a
pointer to KEY, a pointer to zero, and a zero 3rd argument. If the
call returns ST_CONTINUE, the table will have an entry with the key
and value returned by FUNC through the 1st and 2nd parameters. If
the call of FUNC returns ST_DELETE, the table will not have an
entry with KEY. The function returns a flag indicating whether an
entry with KEY was in the table before the call. */
int
st_update(st_table *tab, st_data_t key,
st_update_callback_func *func, st_data_t arg)
{
st_table_entry *entry = NULL; /* to avoid uninitialized value warning */
st_index_t bin = 0; /* Ditto */
st_table_entry *entries;
st_index_t bin_ind;
st_data_t value = 0, old_key;
int retval, existing;
st_hash_t hash = do_hash(key, tab);
retry:
entries = tab->entries;
if (tab->bins == NULL) {
bin = find_entry(tab, hash, key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
existing = bin != UNDEFINED_ENTRY_IND;
entry = &entries[bin];
bin_ind = UNDEFINED_BIN_IND;
}
else {
bin_ind = find_table_bin_ind(tab, hash, key);
if (EXPECT(bin_ind == REBUILT_TABLE_BIN_IND, 0))
goto retry;
existing = bin_ind != UNDEFINED_BIN_IND;
if (existing) {
bin = get_bin(tab->bins, get_size_ind(tab), bin_ind) - ENTRY_BASE;
entry = &entries[bin];
}
}
if (existing) {
key = entry->key;
value = entry->record;
}
old_key = key;
retval = (*func)(&key, &value, arg, existing);
switch (retval) {
case ST_CONTINUE:
if (! existing) {
st_add_direct_with_hash(tab, key, value, hash);
break;
}
if (old_key != key) {
entry->key = key;
}
entry->record = value;
break;
case ST_DELETE:
if (existing) {
if (bin_ind != UNDEFINED_BIN_IND)
MARK_BIN_DELETED(tab, bin_ind);
MARK_ENTRY_DELETED(entry);
tab->num_entries--;
update_range_for_deleted(tab, bin);
}
break;
}
return existing;
}
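/* A sketch of the typical st_update usage pattern (illustration only,
   kept under "#if 0"): insert-or-increment a counter for a key with a
   single table search. */
#if 0
static int
bump(st_data_t *key, st_data_t *value, st_data_t arg, int existing)
{
    /* For an existing key increment its counter, otherwise start at 1. */
    *value = existing ? *value + 1 : 1;
    return ST_CONTINUE;       /* keep (or create) the entry */
}

/* st_update(tab, (st_data_t) some_key, bump, 0) then returns non-zero
   iff some_key was already present. */
#endif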
/* Traverse all entries in table TAB calling FUNC with the current
entry key and value and zero. If the call returns ST_STOP, stop
traversing. If the call returns ST_DELETE, delete the current
entry from the table. In the case of ST_CHECK or ST_CONTINUE,
continue traversing. The function returns zero unless an error is
found. CHECK_P is a flag marking an st_foreach_check call. The
behavior is a bit different for ST_CHECK and when the current
element is removed during traversal. */
static inline int
st_general_foreach(st_table *tab, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg,
int check_p)
{
st_index_t bin;
st_index_t bin_ind;
st_table_entry *entries, *curr_entry_ptr;
enum st_retval retval;
st_index_t i, rebuilds_num;
st_hash_t hash;
st_data_t key;
int error_p, packed_p = tab->bins == NULL;
entries = tab->entries;
/* The bound can change inside the loop even without rebuilding
the table, e.g. by an entry insertion. */
for (i = tab->entries_start; i < tab->entries_bound; i++) {
curr_entry_ptr = &entries[i];
if (EXPECT(DELETED_ENTRY_P(curr_entry_ptr), 0))
continue;
key = curr_entry_ptr->key;
rebuilds_num = tab->rebuilds_num;
hash = curr_entry_ptr->hash;
retval = (*func)(key, curr_entry_ptr->record, arg, 0);
if (retval == ST_REPLACE && replace) {
st_data_t value;
value = curr_entry_ptr->record;
retval = (*replace)(&key, &value, arg, TRUE);
curr_entry_ptr->key = key;
curr_entry_ptr->record = value;
}
if (rebuilds_num != tab->rebuilds_num) {
retry:
entries = tab->entries;
packed_p = tab->bins == NULL;
if (packed_p) {
i = find_entry(tab, hash, key);
if (EXPECT(i == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
error_p = i == UNDEFINED_ENTRY_IND;
}
else {
i = find_table_entry_ind(tab, hash, key);
if (EXPECT(i == REBUILT_TABLE_ENTRY_IND, 0))
goto retry;
error_p = i == UNDEFINED_ENTRY_IND;
i -= ENTRY_BASE;
}
if (error_p && check_p) {
/* call func with error notice */
retval = (*func)(0, 0, arg, 1);
return 1;
}
curr_entry_ptr = &entries[i];
}
switch (retval) {
case ST_REPLACE:
break;
case ST_CONTINUE:
break;
case ST_CHECK:
if (check_p)
break;
case ST_STOP:
return 0;
case ST_DELETE: {
st_data_t key = curr_entry_ptr->key;
again:
if (packed_p) {
bin = find_entry(tab, hash, key);
if (EXPECT(bin == REBUILT_TABLE_ENTRY_IND, 0))
goto again;
if (bin == UNDEFINED_ENTRY_IND)
break;
}
else {
bin_ind = find_table_bin_ind(tab, hash, key);
if (EXPECT(bin_ind == REBUILT_TABLE_BIN_IND, 0))
goto again;
if (bin_ind == UNDEFINED_BIN_IND)
break;
bin = get_bin(tab->bins, get_size_ind(tab), bin_ind) - ENTRY_BASE;
MARK_BIN_DELETED(tab, bin_ind);
}
curr_entry_ptr = &entries[bin];
MARK_ENTRY_DELETED(curr_entry_ptr);
tab->num_entries--;
update_range_for_deleted(tab, bin);
break;
}
}
}
return 0;
}
int
st_foreach_with_replace(st_table *tab, st_foreach_check_callback_func *func, st_update_callback_func *replace, st_data_t arg)
{
return st_general_foreach(tab, func, replace, arg, TRUE);
}
struct functor {
st_foreach_callback_func *func;
st_data_t arg;
};
static int
apply_functor(st_data_t k, st_data_t v, st_data_t d, int _)
{
const struct functor *f = (void *)d;
return f->func(k, v, f->arg);
}
int
st_foreach(st_table *tab, st_foreach_callback_func *func, st_data_t arg)
{
const struct functor f = { func, arg };
return st_general_foreach(tab, apply_functor, 0, (st_data_t)&f, FALSE);
}
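/* A sketch (illustration only, kept under "#if 0") of deleting entries
   during traversal, which st_general_foreach above supports directly:
   returning ST_DELETE from the callback removes the current entry. */
#if 0
static int
drop_odd_values(st_data_t key, st_data_t value, st_data_t arg)
{
    return (value & 1) ? ST_DELETE : ST_CONTINUE;
}

/* st_foreach(tab, drop_odd_values, 0) removes every entry whose value
   is odd and returns 0. */
#endif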
/* See comments for function st_delete_safe. */
int
st_foreach_check(st_table *tab, st_foreach_check_callback_func *func, st_data_t arg,
st_data_t never ATTRIBUTE_UNUSED)
{
return st_general_foreach(tab, func, 0, arg, TRUE);
}
/* Fill array KEYS with at most SIZE keys of the first entries of
table TAB. Return the number of keys stored in array KEYS. */
static inline st_index_t
st_general_keys(st_table *tab, st_data_t *keys, st_index_t size)
{
st_index_t i, bound;
st_data_t key, *keys_start, *keys_end;
st_table_entry *curr_entry_ptr, *entries = tab->entries;
bound = tab->entries_bound;
keys_start = keys;
keys_end = keys + size;
for (i = tab->entries_start; i < bound; i++) {
if (keys == keys_end)
break;
curr_entry_ptr = &entries[i];
key = curr_entry_ptr->key;
if (! DELETED_ENTRY_P(curr_entry_ptr))
*keys++ = key;
}
return keys - keys_start;
}
st_index_t
st_keys(st_table *tab, st_data_t *keys, st_index_t size)
{
return st_general_keys(tab, keys, size);
}
/* See comments for function st_delete_safe. */
st_index_t
st_keys_check(st_table *tab, st_data_t *keys, st_index_t size,
st_data_t never ATTRIBUTE_UNUSED)
{
return st_general_keys(tab, keys, size);
}
/* Fill array VALUES with at most SIZE values of the first entries of
table TAB. Return the number of values stored in array VALUES. */
static inline st_index_t
st_general_values(st_table *tab, st_data_t *values, st_index_t size)
{
st_index_t i, bound;
st_data_t *values_start, *values_end;
st_table_entry *curr_entry_ptr, *entries = tab->entries;
values_start = values;
values_end = values + size;
bound = tab->entries_bound;
for (i = tab->entries_start; i < bound; i++) {
if (values == values_end)
break;
curr_entry_ptr = &entries[i];
if (! DELETED_ENTRY_P(curr_entry_ptr))
*values++ = curr_entry_ptr->record;
}
return values - values_start;
}
st_index_t
st_values(st_table *tab, st_data_t *values, st_index_t size)
{
return st_general_values(tab, values, size);
}
/* See comments for function st_delete_safe. */
st_index_t
st_values_check(st_table *tab, st_data_t *values, st_index_t size,
st_data_t never ATTRIBUTE_UNUSED)
{
return st_general_values(tab, values, size);
}
#define FNV1_32A_INIT 0x811c9dc5
/*
* 32 bit magic FNV-1a prime
*/
#define FNV_32_PRIME 0x01000193
#ifndef UNALIGNED_WORD_ACCESS
# if defined(__i386) || defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64) || defined(__x86_64__) || defined(_M_AMD64) || \
defined(__powerpc64__) || defined(__aarch64__) || \
defined(__mc68020__)
# define UNALIGNED_WORD_ACCESS 1
# endif
#endif
#ifndef UNALIGNED_WORD_ACCESS
# define UNALIGNED_WORD_ACCESS 0
#endif
/* This hash function is a much-simplified MurmurHash3.
 * The simplification is legitimate because most of the mixing still happens
 * in the finalizer, and the finalizer is almost the same as in MurmurHash3. */
#define BIG_CONSTANT(x,y) ((st_index_t)(x)<<32|(st_index_t)(y))
#define ROTL(x,n) ((x)<<(n)|(x)>>(SIZEOF_ST_INDEX_T*CHAR_BIT-(n)))
#if ST_INDEX_BITS <= 32
#define C1 (st_index_t)0xcc9e2d51
#define C2 (st_index_t)0x1b873593
#else
#define C1 BIG_CONSTANT(0x87c37b91,0x114253d5)
#define C2 BIG_CONSTANT(0x4cf5ad43,0x2745937f)
#endif
NO_SANITIZE("unsigned-integer-overflow", static inline st_index_t murmur_step(st_index_t h, st_index_t k));
NO_SANITIZE("unsigned-integer-overflow", static inline st_index_t murmur_finish(st_index_t h));
NO_SANITIZE("unsigned-integer-overflow", extern st_index_t st_hash(const void *ptr, size_t len, st_index_t h));
static inline st_index_t
murmur_step(st_index_t h, st_index_t k)
{
#if ST_INDEX_BITS <= 32
#define r1 (17)
#define r2 (11)
#else
#define r1 (33)
#define r2 (24)
#endif
k *= C1;
h ^= ROTL(k, r1);
h *= C2;
h = ROTL(h, r2);
return h;
}
#undef r1
#undef r2
static inline st_index_t
murmur_finish(st_index_t h)
{
#if ST_INDEX_BITS <= 32
#define r1 (16)
#define r2 (13)
#define r3 (16)
const st_index_t c1 = 0x85ebca6b;
const st_index_t c2 = 0xc2b2ae35;
#else
/* values are taken from Mix13 on http://zimbry.blogspot.ru/2011/09/better-bit-mixing-improving-on.html */
#define r1 (30)
#define r2 (27)
#define r3 (31)
const st_index_t c1 = BIG_CONSTANT(0xbf58476d,0x1ce4e5b9);
const st_index_t c2 = BIG_CONSTANT(0x94d049bb,0x133111eb);
#endif
#if ST_INDEX_BITS > 64
h ^= h >> 64;
h *= c2;
h ^= h >> 65;
#endif
h ^= h >> r1;
h *= c1;
h ^= h >> r2;
h *= c2;
h ^= h >> r3;
return h;
}
#undef r1
#undef r2
#undef r3
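/* Illustrative sketch (not compiled): the shape of a hash built from the two
 * helpers above.  Each input word is mixed in with murmur_step() and the
 * final avalanche comes from murmur_finish(); st_hash() below follows the
 * same pattern while additionally handling unaligned data, tail bytes and
 * the length.  The name hash_words_example is made up for this sketch. */
#if 0
static st_index_t
hash_words_example(const st_index_t *words, size_t n, st_index_t seed)
{
    st_index_t h = seed;
    size_t i;

    for (i = 0; i < n; i++)
        h = murmur_step(h, words[i]);  /* mix one full word into the state */
    return murmur_finish(h);           /* finalizer supplies the avalanche */
}
#endif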
st_index_t
st_hash(const void *ptr, size_t len, st_index_t h)
{
const char *data = ptr;
st_index_t t = 0;
size_t l = len;
#define data_at(n) (st_index_t)((unsigned char)data[(n)])
#define UNALIGNED_ADD_4 UNALIGNED_ADD(2); UNALIGNED_ADD(1); UNALIGNED_ADD(0)
#if SIZEOF_ST_INDEX_T > 4
#define UNALIGNED_ADD_8 UNALIGNED_ADD(6); UNALIGNED_ADD(5); UNALIGNED_ADD(4); UNALIGNED_ADD(3); UNALIGNED_ADD_4
#if SIZEOF_ST_INDEX_T > 8
#define UNALIGNED_ADD_16 UNALIGNED_ADD(14); UNALIGNED_ADD(13); UNALIGNED_ADD(12); UNALIGNED_ADD(11); \
UNALIGNED_ADD(10); UNALIGNED_ADD(9); UNALIGNED_ADD(8); UNALIGNED_ADD(7); UNALIGNED_ADD_8
#define UNALIGNED_ADD_ALL UNALIGNED_ADD_16
#else
#define UNALIGNED_ADD_ALL UNALIGNED_ADD_8
#endif
#else
#define UNALIGNED_ADD_ALL UNALIGNED_ADD_4
#endif
#undef SKIP_TAIL
if (len >= sizeof(st_index_t)) {
#if !UNALIGNED_WORD_ACCESS
int align = (int)((st_data_t)data % sizeof(st_index_t));
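        /* data is not word-aligned: consume the leading bytes one at a time,
         * then read whole aligned words and splice neighbouring words
         * together with shifts so that murmur_step() always gets a full
         * word. */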
if (align) {
st_index_t d = 0;
int sl, sr, pack;
switch (align) {
#ifdef WORDS_BIGENDIAN
# define UNALIGNED_ADD(n) case SIZEOF_ST_INDEX_T - (n) - 1: \
t |= data_at(n) << CHAR_BIT*(SIZEOF_ST_INDEX_T - (n) - 2)
#else
# define UNALIGNED_ADD(n) case SIZEOF_ST_INDEX_T - (n) - 1: \
t |= data_at(n) << CHAR_BIT*(n)
#endif
UNALIGNED_ADD_ALL;
#undef UNALIGNED_ADD
}
#ifdef WORDS_BIGENDIAN
t >>= (CHAR_BIT * align) - CHAR_BIT;
#else
t <<= (CHAR_BIT * align);
#endif
data += sizeof(st_index_t)-align;
len -= sizeof(st_index_t)-align;
sl = CHAR_BIT * (SIZEOF_ST_INDEX_T-align);
sr = CHAR_BIT * align;
while (len >= sizeof(st_index_t)) {
d = *(st_index_t *)data;
#ifdef WORDS_BIGENDIAN
t = (t << sr) | (d >> sl);
#else
t = (t >> sr) | (d << sl);
#endif
h = murmur_step(h, t);
t = d;
data += sizeof(st_index_t);
len -= sizeof(st_index_t);
}
pack = len < (size_t)align ? (int)len : align;
d = 0;
switch (pack) {
#ifdef WORDS_BIGENDIAN
# define UNALIGNED_ADD(n) case (n) + 1: \
d |= data_at(n) << CHAR_BIT*(SIZEOF_ST_INDEX_T - (n) - 1)
#else
# define UNALIGNED_ADD(n) case (n) + 1: \
d |= data_at(n) << CHAR_BIT*(n)
#endif
UNALIGNED_ADD_ALL;
#undef UNALIGNED_ADD
}
#ifdef WORDS_BIGENDIAN
t = (t << sr) | (d >> sl);
#else
t = (t >> sr) | (d << sl);
#endif
if (len < (size_t)align) goto skip_tail;
# define SKIP_TAIL 1
h = murmur_step(h, t);
data += pack;
len -= pack;
}
else
#endif
#ifdef HAVE_BUILTIN___BUILTIN_ASSUME_ALIGNED
#define aligned_data __builtin_assume_aligned(data, sizeof(st_index_t))
#else
#define aligned_data data
#endif
{
do {
h = murmur_step(h, *(st_index_t *)aligned_data);
data += sizeof(st_index_t);
len -= sizeof(st_index_t);
} while (len >= sizeof(st_index_t));
}
}
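    /* mix in the remaining tail bytes (fewer than one full word) */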
t = 0;
switch (len) {
#if UNALIGNED_WORD_ACCESS && SIZEOF_ST_INDEX_T <= 8 && CHAR_BIT == 8
    /* in this case byte order doesn't really matter */
#if SIZEOF_ST_INDEX_T > 4
case 7: t |= data_at(6) << 48;
case 6: t |= data_at(5) << 40;
case 5: t |= data_at(4) << 32;
case 4:
t |= (st_index_t)*(uint32_t*)aligned_data;
goto skip_tail;
# define SKIP_TAIL 1
#endif
case 3: t |= data_at(2) << 16;
case 2: t |= data_at(1) << 8;
case 1: t |= data_at(0);
#else
#ifdef WORDS_BIGENDIAN
# define UNALIGNED_ADD(n) case (n) + 1: \
t |= data_at(n) << CHAR_BIT*(SIZEOF_ST_INDEX_T - (n) - 1)
#else
# define UNALIGNED_ADD(n) case (n) + 1: \
t |= data_at(n) << CHAR_BIT*(n)
#endif
UNALIGNED_ADD_ALL;
#undef UNALIGNED_ADD
#endif
#ifdef SKIP_TAIL
skip_tail:
#endif
h ^= t; h -= ROTL(t, 7);
h *= C2;
}
h ^= l;
#undef aligned_data
return murmur_finish(h);
}
st_index_t
st_hash_uint32(st_index_t h, uint32_t i)
{
return murmur_step(h, i);
}
NO_SANITIZE("unsigned-integer-overflow", extern st_index_t st_hash_uint(st_index_t h, st_index_t i));
st_index_t
st_hash_uint(st_index_t h, st_index_t i)
{
i += h;
    /* Byte order does not matter here: we hash the integer value itself,
     * not its in-memory representation. */
#if SIZEOF_ST_INDEX_T*CHAR_BIT > 8*8
h = murmur_step(h, i >> 8*8);
#endif
h = murmur_step(h, i);
return h;
}
st_index_t
st_hash_end(st_index_t h)
{
h = murmur_finish(h);
return h;
}
#undef st_hash_start
st_index_t
rb_st_hash_start(st_index_t h)
{
return h;
}
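/* Illustrative sketch (not compiled): the intended calling protocol for the
 * incremental hashing API above.  Seed the state with rb_st_hash_start(),
 * feed data through st_hash()/st_hash_uint32(), and finish with
 * st_hash_end().  The composite key and the function name are made up for
 * this sketch. */
#if 0
static st_index_t
hash_composite_key_example(const char *name, uint32_t version)
{
    st_index_t h = rb_st_hash_start(0);     /* seed the running state */
    h = st_hash(name, strlen(name), h);     /* mix in a byte buffer */
    h = st_hash_uint32(h, version);         /* mix in a 32-bit integer */
    return st_hash_end(h);                  /* final avalanche */
}
#endif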
static st_index_t
strhash(st_data_t arg)
{
register const char *string = (const char *)arg;
return st_hash(string, strlen(string), FNV1_32A_INIT);
}
int
st_locale_insensitive_strcasecmp(const char *s1, const char *s2)
{
char c1, c2;
while (1) {
c1 = *s1++;
c2 = *s2++;
if (c1 == '\0' || c2 == '\0') {
if (c1 != '\0') return 1;
if (c2 != '\0') return -1;
return 0;
}
if (('A' <= c1) && (c1 <= 'Z')) c1 += 'a' - 'A';
if (('A' <= c2) && (c2 <= 'Z')) c2 += 'a' - 'A';
if (c1 != c2) {
if (c1 > c2)
return 1;
else
return -1;
}
}
}
int
st_locale_insensitive_strncasecmp(const char *s1, const char *s2, size_t n)
{
char c1, c2;
size_t i;
for (i = 0; i < n; i++) {
c1 = *s1++;
c2 = *s2++;
if (c1 == '\0' || c2 == '\0') {
if (c1 != '\0') return 1;
if (c2 != '\0') return -1;
return 0;
}
if (('A' <= c1) && (c1 <= 'Z')) c1 += 'a' - 'A';
if (('A' <= c2) && (c2 <= 'Z')) c2 += 'a' - 'A';
if (c1 != c2) {
if (c1 > c2)
return 1;
else
return -1;
}
}
return 0;
}
static int
st_strcmp(st_data_t lhs, st_data_t rhs)
{
const char *s1 = (char *)lhs;
const char *s2 = (char *)rhs;
return strcmp(s1, s2);
}
static int
st_locale_insensitive_strcasecmp_i(st_data_t lhs, st_data_t rhs)
{
const char *s1 = (char *)lhs;
const char *s2 = (char *)rhs;
return st_locale_insensitive_strcasecmp(s1, s2);
}
NO_SANITIZE("unsigned-integer-overflow", PUREFUNC(static st_index_t strcasehash(st_data_t)));
static st_index_t
strcasehash(st_data_t arg)
{
register const char *string = (const char *)arg;
register st_index_t hval = FNV1_32A_INIT;
/*
* FNV-1a hash each octet in the buffer
*/
while (*string) {
unsigned int c = (unsigned char)*string++;
if ((unsigned int)(c - 'A') <= ('Z' - 'A')) c += 'a' - 'A';
hval ^= c;
/* multiply by the 32 bit FNV magic prime mod 2^32 */
hval *= FNV_32_PRIME;
}
return hval;
}
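/* Illustrative sketch (not compiled): strcasehash() above pairs with
 * st_locale_insensitive_strcasecmp_i() to form a case-insensitive table
 * type, which is what st_init_strcasetable() sets up.  The names ending in
 * _example are made up for this sketch. */
#if 0
static const struct st_hash_type case_insensitive_type_example = {
    st_locale_insensitive_strcasecmp_i, /* compare: returns 0 on a match */
    strcasehash,                        /* hash: case-folding FNV-1a */
};

static st_table *
make_case_insensitive_table_example(void)
{
    return st_init_table(&case_insensitive_type_example);
}
#endif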
int
st_numcmp(st_data_t x, st_data_t y)
{
return x != y;
}
st_index_t
st_numhash(st_data_t n)
{
enum {s1 = 11, s2 = 3};
return (st_index_t)((n>>s1|(n<<s2)) ^ (n>>s2));
}
/* Expand TAB so that it can hold SIZ entries in total.  Pre-existing
   entries in TAB are preserved, but its bins are dropped in anticipation
   of a later reconstruction.  See st_rehash below. */
static void
st_expand_table(st_table *tab, st_index_t siz)
{
st_table *tmp;
st_index_t n;
if (siz <= get_allocated_entries(tab))
return; /* enough room already */
tmp = st_init_table_with_size(tab->type, siz);
n = get_allocated_entries(tab);
MEMCPY(tmp->entries, tab->entries, st_table_entry, n);
free(tab->entries);
if (tab->bins != NULL)
free(tab->bins);
if (tmp->bins != NULL)
free(tmp->bins);
tab->entry_power = tmp->entry_power;
tab->bin_power = tmp->bin_power;
tab->size_ind = tmp->size_ind;
tab->entries = tmp->entries;
tab->bins = NULL;
tab->rebuilds_num++;
free(tmp);
}
/* Rehash using linear search. Return TRUE if we found that the table
was rebuilt. */
static int
st_rehash_linear(st_table *tab)
{
int eq_p, rebuilt_p;
st_index_t i, j;
st_table_entry *p, *q;
if (tab->bins) {
free(tab->bins);
tab->bins = NULL;
}
for (i = tab->entries_start; i < tab->entries_bound; i++) {
p = &tab->entries[i];
if (DELETED_ENTRY_P(p))
continue;
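        /* fold later duplicates of this key into this slot; the newest
         * record wins and the duplicate entry is marked deleted */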
for (j = i + 1; j < tab->entries_bound; j++) {
q = &tab->entries[j];
if (DELETED_ENTRY_P(q))
continue;
DO_PTR_EQUAL_CHECK(tab, p, q->hash, q->key, eq_p, rebuilt_p);
if (EXPECT(rebuilt_p, 0))
return TRUE;
if (eq_p) {
*p = *q;
MARK_ENTRY_DELETED(q);
tab->num_entries--;
update_range_for_deleted(tab, j);
}
}
}
return FALSE;
}
/* Rehash using index. Return TRUE if we found that the table was
rebuilt. */
static int
st_rehash_indexed(st_table *tab)
{
int eq_p, rebuilt_p;
st_index_t i;
st_index_t const n = bins_size(tab);
unsigned int const size_ind = get_size_ind(tab);
st_index_t *bins = realloc(tab->bins, n);
tab->bins = bins;
initialize_bins(tab);
for (i = tab->entries_start; i < tab->entries_bound; i++) {
st_table_entry *p = &tab->entries[i];
st_index_t ind;
#ifdef QUADRATIC_PROBE
st_index_t d = 1;
#else
st_index_t peterb = p->hash;
#endif
if (DELETED_ENTRY_P(p))
continue;
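        /* (re)insert this entry into the bin index, probing until a free
         * bin or a duplicate key is found */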
ind = hash_bin(p->hash, tab);
for(;;) {
st_index_t bin = get_bin(bins, size_ind, ind);
if (EMPTY_OR_DELETED_BIN_P(bin)) {
/* ok, new room */
set_bin(bins, size_ind, ind, i + ENTRY_BASE);
break;
}
else {
st_table_entry *q = &tab->entries[bin - ENTRY_BASE];
DO_PTR_EQUAL_CHECK(tab, q, p->hash, p->key, eq_p, rebuilt_p);
if (EXPECT(rebuilt_p, 0))
return TRUE;
if (eq_p) {
/* duplicated key; delete it */
q->record = p->record;
MARK_ENTRY_DELETED(p);
tab->num_entries--;
update_range_for_deleted(tab, bin);
break;
}
else {
/* hash collision; skip it */
#ifdef QUADRATIC_PROBE
ind = hash_bin(ind + d, tab);
d++;
#else
ind = secondary_hash(ind, tab, &peterb);
#endif
}
}
}
}
return FALSE;
}
/* Reconstruct TAB's bins from TAB's entries.  Conflicting keys among the
   entries are permitted; no error is reported and all but one of the
   duplicates are silently discarded. */
static void
st_rehash(st_table *tab)
{
int rebuilt_p;
do {
if (tab->bin_power <= MAX_POWER2_FOR_TABLES_WITHOUT_BINS)
rebuilt_p = st_rehash_linear(tab);
else
rebuilt_p = st_rehash_indexed(tab);
} while (rebuilt_p);
}
#ifdef RUBY
static st_data_t
st_stringify(VALUE key)
{
return (rb_obj_class(key) == rb_cString && !RB_OBJ_FROZEN(key)) ?
rb_hash_key_str(key) : key;
}
static void
st_insert_single(st_table *tab, VALUE hash, VALUE key, VALUE val)
{
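    /* Appends to the entries array only; the bin index, if any, is not
     * updated here (callers rebuild it with st_rehash() when required). */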
st_data_t k = st_stringify(key);
st_table_entry e;
e.hash = do_hash(k, tab);
e.key = k;
e.record = val;
tab->entries[tab->entries_bound++] = e;
tab->num_entries++;
RB_OBJ_WRITTEN(hash, Qundef, k);
RB_OBJ_WRITTEN(hash, Qundef, val);
}
static void
st_insert_linear(st_table *tab, long argc, const VALUE *argv, VALUE hash)
{
long i;
for (i = 0; i < argc; /* */) {
st_data_t k = st_stringify(argv[i++]);
st_data_t v = argv[i++];
st_insert(tab, k, v);
RB_OBJ_WRITTEN(hash, Qundef, k);
RB_OBJ_WRITTEN(hash, Qundef, v);
}
}
static void
st_insert_generic(st_table *tab, long argc, const VALUE *argv, VALUE hash)
{
long i;
/* push elems */
for (i = 0; i < argc; /* */) {
VALUE key = argv[i++];
VALUE val = argv[i++];
st_insert_single(tab, hash, key, val);
}
/* reindex */
st_rehash(tab);
}
/* Mimics ruby's { foo => bar } syntax.  This function is part of the
   implementation of rb_hash_bulk_insert. */
void
rb_hash_bulk_insert_into_st_table(long argc, const VALUE *argv, VALUE hash)
{
st_index_t n, size = argc / 2;
st_table *tab = RHASH_ST_TABLE(hash);
tab = RHASH_TBL_RAW(hash);
n = tab->entries_bound + size;
st_expand_table(tab, n);
if (UNLIKELY(tab->num_entries))
st_insert_generic(tab, argc, argv, hash);
else if (argc <= 2)
st_insert_single(tab, hash, argv[0], argv[1]);
else if (tab->bin_power <= MAX_POWER2_FOR_TABLES_WITHOUT_BINS)
st_insert_linear(tab, argc, argv, hash);
else
st_insert_generic(tab, argc, argv, hash);
}
// used to iterate over iv_index_tbl
st_data_t
rb_st_nth_key(st_table *tab, st_index_t index)
{
if (LIKELY(tab->entries_start == 0 &&
tab->num_entries == tab->entries_bound &&
index < tab->num_entries)) {
return tab->entries[index].key;
}
else {
rb_bug("unreachable");
}
}
#endif