/* ``The contents of this file are subject to the Erlang Public License,
 * Version 1.1, (the "License"); you may not use this file except in
 * compliance with the License. You should have received a copy of the
 * Erlang Public License along with this software. If not, it can be
 * retrieved via the world wide web at http://www.erlang.org/.
 *
 * Software distributed under the License is distributed on an "AS IS"
 * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
 * the License for the specific language governing rights and limitations
 * under the License.
 *
 * The Initial Developer of the Original Code is Ericsson Utvecklings AB.
 * Portions created by Ericsson are Copyright 2008, Ericsson Utvecklings
 * AB. All Rights Reserved.''
 */

/*
** General thread safe hash table. Similar interface as hash.h
**
** Author: Sverker Eriksson
*/
#include "safe_hash.h"

/* Currently only used by erl_check_io on Windows */
#ifndef ERTS_SYS_CONTINOUS_FD_NUMBERS
/*
** Set table size (number of buckets) and the derived mask/grow limit.
** 'size' must be an even power of 2 and a multiple of SAFE_HASH_LOCK_CNT.
*/
static ERTS_INLINE void set_size(SafeHash* h, int size)
{
    ASSERT(size % SAFE_HASH_LOCK_CNT == 0);
    /* This important property allows us to lock the right mutex
    ** without reading the table size (that can change without the lock) */

    h->size_mask = size - 1;
    ASSERT((size & h->size_mask) == 0);
    /* An even power of 2 is just for fast bit masking */

    h->grow_limit = size; /* grow table at 100% load */
}
/*
** Round 'val' up to the nearest power of 2 (returns 1 for val <= 0).
** NOTE(review): SOURCE was truncated here; the round-up loop below is
** reconstructed — it repeatedly clears the lowest set bit to find the
** highest one, then shifts left once to exceed the original value.
*/
static ERTS_INLINE int align_up_pow2(int val)
{
    int x = val & (val-1);
    if (x==0) return val ? val : 1; /* already a power of 2 (or zero) */
    do {
        val = x;
        x &= x - 1; /* clear lowest set bit */
    } while (x);
    return val << 1; /* highest set bit, doubled */
}
/*
** Grow the table to double the number of buckets.  Only one thread at a
** time performs the rehash; the 'is_rehashing' flag is claimed with an
** atomic exchange, and all mutexes in the lock vector are taken to stop
** traffic while buckets are moved.  'grow_limit' is the limit observed by
** the caller; if it no longer matches, another thread already grew the
** table and we do nothing.
*/
static void rehash(SafeHash* h, int grow_limit)
{
    if (erts_smp_atomic_xchg(&h->is_rehashing, 1) != 0) {
        return; /* already in progress */
    }
    if (h->grow_limit == grow_limit) {
        int i, size, bytes;
        SafeHashBucket** new_tab;
        SafeHashBucket** old_tab = h->tab;
        int old_size = h->size_mask + 1;

        size = old_size * 2; /* double table size */
        bytes = size * sizeof(SafeHashBucket*);
        new_tab = (SafeHashBucket **) erts_alloc(h->type, bytes);
        sys_memzero(new_tab, bytes);

        for (i=0; i<SAFE_HASH_LOCK_CNT; i++) { /* stop all traffic */
            erts_smp_mtx_lock(&h->lock_vec[i].mtx);
        }

        h->tab = new_tab;
        set_size(h, size);

        /* Relink every bucket into its slot in the new table,
        ** using the new (larger) size mask. */
        for (i = 0; i < old_size; i++) {
            SafeHashBucket* b = old_tab[i];
            while (b != NULL) {
                SafeHashBucket* b_next = b->next;
                int ix = b->hvalue & h->size_mask;
                b->next = new_tab[ix];
                new_tab[ix] = b;
                b = b_next;
            }
        }

        for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
            erts_smp_mtx_unlock(&h->lock_vec[i].mtx);
        }
        erts_free(h->type, (void *) old_tab);
    }
    /*else already done */
    erts_smp_atomic_set(&h->is_rehashing, 0);
}
** Get info about hash
107
void safe_hash_get_info(SafeHashInfo *hi, SafeHash *h)
114
for (lock_ix=0; lock_ix<SAFE_HASH_LOCK_CNT; lock_ix++) {
115
erts_smp_mtx_lock(&h->lock_vec[lock_ix].mtx);
116
size = h->size_mask + 1;
117
for (i = lock_ix; i < size; i += SAFE_HASH_LOCK_CNT) {
119
SafeHashBucket* b = h->tab[i];
125
if (depth > max_depth)
128
erts_smp_mtx_unlock(&h->lock_vec[lock_ix].mtx);
134
hi->depth = max_depth;
138
** Returns size of table in bytes. Stored objects not included.
140
int safe_hash_table_sz(SafeHash *h)
143
for(i=0; h->name[i]; i++);
145
erts_smp_mtx_lock(&h->lock_vec[0].mtx); /* any lock will do to read size */
146
size = h->size_mask + 1;
147
erts_smp_mtx_unlock(&h->lock_vec[0].mtx);
148
return sizeof(SafeHash) + size*sizeof(SafeHashBucket*) + i;
152
** Init a pre allocated or static hash structure
153
** and allocate buckets. NOT SAFE
155
SafeHash* safe_hash_init(ErtsAlcType_t type, SafeHash* h, char* name, int size, SafeHashFunctions fun)
159
size = align_up_pow2(size);
160
bytes = size * sizeof(SafeHashBucket*);
162
h->tab = (SafeHashBucket**) erts_alloc(h->type, bytes);
163
sys_memzero(h->tab, bytes);
167
erts_smp_atomic_init(&h->is_rehashing, 0);
168
erts_smp_atomic_init(&h->nitems, 0);
169
for (i=0; i<SAFE_HASH_LOCK_CNT; i++) {
170
erts_smp_mtx_init(&h->lock_vec[i].mtx,"safe_hash");
177
** Find an object in the hash table
179
void* safe_hash_get(SafeHash* h, void* tmpl)
181
SafeHashValue hval = h->fun.hash(tmpl);
183
erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
184
erts_smp_mtx_lock(lock);
185
b = h->tab[hval & h->size_mask];
188
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0))
192
erts_smp_mtx_unlock(lock);
197
** Find or insert an object in the hash table
199
void* safe_hash_put(SafeHash* h, void* tmpl)
202
SafeHashValue hval = h->fun.hash(tmpl);
204
SafeHashBucket** head;
205
erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
206
erts_smp_mtx_lock(lock);
207
head = &h->tab[hval & h->size_mask];
210
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
211
erts_smp_mtx_unlock(lock);
217
b = (SafeHashBucket*) h->fun.alloc(tmpl);
221
grow_limit = h->grow_limit;
222
erts_smp_mtx_unlock(lock);
223
if (erts_smp_atomic_inctest(&h->nitems) > grow_limit) {
224
rehash(h, grow_limit);
230
** Erase hash entry return template if erased
231
** return 0 if not erased
233
void* safe_hash_erase(SafeHash* h, void* tmpl)
235
SafeHashValue hval = h->fun.hash(tmpl);
237
SafeHashBucket** prevp;
238
erts_smp_mtx_t* lock = &h->lock_vec[hval % SAFE_HASH_LOCK_CNT].mtx;
239
erts_smp_mtx_lock(lock);
240
prevp = &h->tab[hval & h->size_mask];
243
if ((b->hvalue == hval) && (h->fun.cmp(tmpl, (void*)b) == 0)) {
245
erts_smp_mtx_unlock(lock);
246
erts_smp_atomic_dec(&h->nitems);
247
h->fun.free((void*)b);
253
erts_smp_mtx_unlock(lock);
258
** Call 'func(obj,func_arg2)' for all objects in table. NOT SAFE!!!
260
void safe_hash_for_each(SafeHash* h, void (*func)(void *, void *), void *func_arg2)
264
for (i = 0; i <= h->size_mask; i++) {
265
SafeHashBucket* b = h->tab[i];
267
(*func)((void *) b, func_arg2);
273
#endif /* !ERTS_SYS_CONTINOUS_FD_NUMBERS */