 ** The main meta table, containing all ets tables.

-#define META_MAIN_TAB_LOCK_CNT 16
-    erts_smp_spinlock_t lck;
-    byte _cache_line_alignment[64];
-}meta_main_tab_locks[META_MAIN_TAB_LOCK_CNT];
+#define ERTS_META_MAIN_TAB_LOCK_TAB_BITS 8
+#define ERTS_META_MAIN_TAB_LOCK_TAB_SIZE (1 << ERTS_META_MAIN_TAB_LOCK_TAB_BITS)
+#define ERTS_META_MAIN_TAB_LOCK_TAB_MASK (ERTS_META_MAIN_TAB_LOCK_TAB_SIZE - 1)
+    erts_smp_rwmtx_t rwmtx;
+    byte cache_line_align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(
+                                sizeof(erts_smp_rwmtx_t))];
+} erts_meta_main_tab_lock_t;
+static erts_meta_main_tab_lock_t *meta_main_tab_locks;
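The fixed array of 16 spinlocks becomes a dynamically allocated, cache-line
aligned table of 256 rwlocks. For reference, a minimal sketch of how a table
slot plausibly maps onto one of these locks; the masking scheme is an
assumption based on ERTS_META_MAIN_TAB_LOCK_TAB_MASK above, and the helper
name matches the get_meta_main_tab_lock() calls later in this patch:

static ERTS_INLINE erts_smp_rwmtx_t *
get_meta_main_tab_lock(unsigned slot)
{
    /* hash the slot into the power-of-two lock table */
    return &meta_main_tab_locks[slot & ERTS_META_MAIN_TAB_LOCK_TAB_MASK].rwmtx;
}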
     DbTable *tb; /* Only directly readable if slot is ALIVE */
-    Uint next_free;  /* (index<<2)|1 if slot is FREE */
+    UWord next_free; /* (index<<2)|1 if slot is FREE */
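next_free widens from Uint to UWord so the tagged value stays pointer-sized
inside the union. A sketch of the tagging the comment describes; the macro
names are illustrative, not from the patch:

#define SLOT_FREE_WORD(index)  (((UWord)(index) << 2) | 1)  /* tag free slot */
#define SLOT_IS_FREE(word)     ((word) & 1)                 /* low bit = tag */
#define SLOT_FREE_INDEX(word)  ((word) >> 2)                /* next free idx */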
 static Export ets_delete_continue_exp;

-static ERTS_INLINE DbTable* db_ref(DbTable* tb)
-    erts_refc_inc(&tb->common.ref, 2);
-static ERTS_INLINE DbTable* db_unref(DbTable* tb)
-    if (!erts_refc_dectest(&tb->common.ref, 0)) {
+free_dbtable(DbTable* tb)
     if (erts_smp_atomic_read(&tb->common.memory_size) != sizeof(DbTable)) {
-        erts_fprintf(stderr, "ets: db_unref memory remain=%ld fix=%x\n",
-                     erts_smp_atomic_read(&tb->common.memory_size)-sizeof(DbTable),
+        erts_fprintf(stderr, "ets: free_dbtable memory remain=%ld fix=%x\n",
+                     erts_smp_atomic_read(&tb->common.memory_size)-sizeof(DbTable),
                      tb->common.fixations);
-        erts_fprintf(stderr, "ets: db_unref(%T) deleted!!!\r\n",
+        erts_fprintf(stderr, "ets: free_dbtable(%T) deleted!!!\r\n",
-        erts_fprintf(stderr, "ets: db_unref: meta_pid_to_tab common.memory_size = %ld\n",
+        erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_tab common.memory_size = %ld\n",
                      erts_smp_atomic_read(&meta_pid_to_tab->common.memory_size));
         print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_tab);
-        erts_fprintf(stderr, "ets: db_unref: meta_pid_to_fixed_tab common.memory_size = %ld\n",
+        erts_fprintf(stderr, "ets: free_dbtable: meta_pid_to_fixed_tab common.memory_size = %ld\n",
                      erts_smp_atomic_read(&meta_pid_to_fixed_tab->common.memory_size));
         print_table(ERTS_PRINT_STDOUT, NULL, 1, meta_pid_to_fixed_tab);
         erts_smp_rwmtx_destroy(&tb->common.rwlock);
         erts_smp_mtx_destroy(&tb->common.fixlock);
         ASSERT(is_immed(tb->common.heir_data));
-        erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
+        erts_db_free(ERTS_ALC_T_DB_TABLE, tb, (void *) tb, sizeof(DbTable));
         ERTS_ETS_MISC_MEM_ADD(-sizeof(DbTable));
-static ERTS_INLINE void db_init_lock(DbTable* tb, char *rwname, char* fixname)
-    erts_refc_init(&tb->common.ref, 1);
-    erts_refc_init(&tb->common.fixref, 0);
-#ifdef ERTS_ENABLE_LOCK_COUNT
-    erts_smp_rwmtx_init_x(&tb->common.rwlock, rwname, tb->common.the_name);
+chk_free_dbtable(void *vtb)
+    DbTable * tb = (DbTable *) vtb;
+    ERTS_THR_MEMORY_BARRIER;
+    if (erts_refc_dectest(&tb->common.ref, 0) == 0)
+static void schedule_free_dbtable(DbTable* tb)
+     * NON-SMP case: Caller is *not* allowed to access the *tb
+     *               structure after this function has returned!
+     * SMP case:     Caller is allowed to access the *tb structure
+     *               until the bif has returned (we typically
+     *               need to unlock the table lock after this
+     *               function has returned).
+    int scheds = erts_get_max_no_executing_schedulers();
+    ASSERT(erts_refc_read(&tb->common.ref, 0) == 0);
+    erts_refc_init(&tb->common.ref, scheds);
+    ERTS_THR_MEMORY_BARRIER;
+    erts_smp_schedule_misc_aux_work(0, scheds, chk_free_dbtable, tb);
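The per-table reference count no longer guards every lookup; it is reused
only to defer the final free until every scheduler has passed a point where
it can no longer hold a dangling *tb. A standalone sketch of the same idea,
assuming C11 atomics in place of erts_refc and plain worker threads in place
of schedulers:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct { atomic_int ref; /* ... table payload ... */ } table_t;

/* Runs once on each worker, e.g. from its aux work queue. */
static void chk_free_table(void *vtb)
{
    table_t *tb = vtb;
    if (atomic_fetch_sub(&tb->ref, 1) == 1)
        free(tb);                      /* last worker performs the free */
}

static void schedule_free_table(table_t *tb, int nworkers)
{
    atomic_store(&tb->ref, nworkers);  /* preset to the worker count */
    /* ... enqueue chk_free_table(tb) on all nworkers workers ... */
}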
+static ERTS_INLINE void db_init_lock(DbTable* tb, int use_frequent_read_lock,
+                                     char *rwname, char* fixname)
+    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+    if (use_frequent_read_lock)
+        rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+    erts_smp_rwmtx_init_opt_x(&tb->common.rwlock, &rwmtx_opt,
+                              rwname, tb->common.the_name);
     erts_smp_mtx_init_x(&tb->common.fixlock, fixname, tb->common.the_name);
-    erts_smp_rwmtx_init(&tb->common.rwlock, rwname);
-    erts_smp_mtx_init(&tb->common.fixlock, fixname);
     tb->common.is_thread_safe = !(tb->common.status & DB_FINE_LOCKED);

-static ERTS_INLINE void db_lock_take_over_ref(DbTable* tb, db_lock_kind_t kind)
+static ERTS_INLINE void db_lock(DbTable* tb, db_lock_kind_t kind)
     ASSERT(tb != meta_pid_to_tab && tb != meta_pid_to_fixed_tab);
 static ERTS_INLINE
-DbTable* db_get_table(Process *p,
+DbTable* db_get_table_aux(Process *p,
+                          int meta_already_locked)
     DbTable *tb = NULL;
+    erts_smp_rwmtx_t *mtl = NULL;
+     * IMPORTANT: Only scheduler threads are allowed
+     *            to access tables. Memory management
+    ASSERT(erts_get_scheduler_data());
     if (is_small(id)) {
         Uint slot = unsigned_val(id) & meta_main_tab_slot_mask;
-        meta_main_tab_lock(slot);
-        if (slot < db_max_tabs && IS_SLOT_ALIVE(slot)) {
-            /* SMP: inc to prevent race, between unlock of meta_main_tab_lock
-             * and the table locking outside the meta_main_tab_lock */
-            tb = db_ref(meta_main_tab[slot].u.tb);
-        meta_main_tab_unlock(slot);
+        if (!meta_already_locked) {
+            mtl = get_meta_main_tab_lock(slot);
+            erts_smp_rwmtx_rlock(mtl);
+#if defined(ERTS_SMP) && defined(ERTS_ENABLE_LOCK_CHECK)
+            erts_smp_rwmtx_t *test_mtl = get_meta_main_tab_lock(slot);
+            ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(test_mtl)
+                               || erts_lc_rwmtx_is_rwlocked(test_mtl));
+        if (slot < db_max_tabs && IS_SLOT_ALIVE(slot))
+            tb = meta_main_tab[slot].u.tb;
     else if (is_atom(id)) {
-        erts_smp_rwmtx_t* rwlock;
-        struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&rwlock);
-        erts_smp_rwmtx_rlock(rwlock);
+        struct meta_name_tab_entry* bucket = meta_name_tab_bucket(id,&mtl);
+        if (!meta_already_locked)
+            erts_smp_rwmtx_rlock(mtl);
+        ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rlocked(mtl)
+                           || erts_lc_rwmtx_is_rwlocked(mtl));
         if (bucket->pu.tb != NULL) {
             if (is_atom(bucket->u.name_atom)) { /* single */
-                if (bucket->u.name_atom == id) {
-                    tb = db_ref(bucket->pu.tb);
+                if (bucket->u.name_atom == id)
             else { /* multi */
                 Uint cnt = unsigned_val(bucket->u.mcnt);
                 for (i=0; i<cnt; i++) {
                     if (bucket->pu.mvec[i].u.name_atom == id) {
-                        tb = db_ref(bucket->pu.mvec[i].pu.tb);
+                        tb = bucket->pu.mvec[i].pu.tb;
-        erts_smp_rwmtx_runlock(rwlock);
-    db_lock_take_over_ref(tb, kind);
-    if (tb->common.id == id && ((tb->common.status & what) != 0 ||
-                                p->id == tb->common.owner)) {
+    if (tb->common.id != id
+        || ((tb->common.status & what) == 0 && p->id != tb->common.owner)) {
+        erts_smp_rwmtx_runlock(mtl);

+DbTable* db_get_table(Process *p,
+    return db_get_table_aux(p, id, what, kind, 0);
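db_get_table() becomes a thin wrapper, and the race that db_ref() used to
plug is closed by holding the meta lock across the table-lock acquisition.
An outline of the resulting protocol, condensed from the hunk above (error
handling and the meta_already_locked case omitted):

erts_smp_rwmtx_rlock(mtl);        /* 1. read-lock the meta slot/bucket */
tb = meta_main_tab[slot].u.tb;    /* 2. resolve id to the DbTable      */
db_lock(tb, kind);                /* 3. lock the table itself          */
erts_smp_rwmtx_runlock(mtl);      /* 4. only now release the meta lock */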
 /* Requires meta_main_tab_locks[slot] locked.

         ret = 1; /* Ok */
-    erts_smp_rwmtx_rwunlock(rwlock);
+    erts_smp_rwmtx_rwunlock(rwlock);

-static int remove_named_tab(Eterm name_atom)
+static int remove_named_tab(DbTable *tb, int have_lock)
     erts_smp_rwmtx_t* rwlock;
+    Eterm name_atom = tb->common.id;
     struct meta_name_tab_entry* bucket = meta_name_tab_bucket(name_atom,
-    erts_smp_rwmtx_rwlock(rwlock);
+    if (!have_lock && erts_smp_rwmtx_tryrwlock(rwlock) == EBUSY) {
+         * We keep our increased refc over this op in order to
+         * prevent the table from disappearing.
+        erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+        erts_smp_rwmtx_rwlock(rwlock);
+        erts_smp_rwmtx_rwlock(&tb->common.rwlock);
+    ERTS_SMP_LC_ASSERT(erts_lc_rwmtx_is_rwlocked(rwlock));
     if (bucket->pu.tb == NULL) {
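The tryrwlock/EBUSY dance exists because the established lock order is meta
name lock before table lock, while remove_named_tab() is entered with the
table lock already held. A condensed sketch of the pattern, with generic
names; "meta_lock" stands for the rwlock fetched from meta_name_tab_bucket():

if (erts_smp_rwmtx_tryrwlock(meta_lock) == EBUSY) {
    /* Cannot take the meta lock out of order; back out and retry in the
     * correct order. The table cannot vanish meanwhile because the caller
     * keeps its increased refc over the operation. */
    erts_smp_rwmtx_rwunlock(&tb->common.rwlock); /* drop table lock    */
    erts_smp_rwmtx_rwlock(meta_lock);            /* meta lock first    */
    erts_smp_rwmtx_rwlock(&tb->common.rwlock);   /* re-take table lock */
}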
                     BIF_P->initial[0], BIF_P->initial[1], BIF_P->initial[2]);
-    if ((tb = db_get_table(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE)) == NULL) {
-        BIF_ERROR(BIF_P, BADARG);
     if (is_not_atom(BIF_ARG_2)) {
+        BIF_ERROR(BIF_P, BADARG);
+    (void) meta_name_tab_bucket(BIF_ARG_2, &lck1);
+    if (is_small(BIF_ARG_1)) {
+        Uint slot = unsigned_val(BIF_ARG_1) & meta_main_tab_slot_mask;
+        lck2 = get_meta_main_tab_lock(slot);
+    else if (is_atom(BIF_ARG_1)) {
+        (void) meta_name_tab_bucket(BIF_ARG_1, &lck2);
+    else if (lck1 > lck2) {
+        erts_smp_rwmtx_t *tmp = lck1;
+        BIF_ERROR(BIF_P, BADARG);
+    erts_smp_rwmtx_rwlock(lck1);
+    erts_smp_rwmtx_rwlock(lck2);
+    tb = db_get_table_aux(BIF_P, BIF_ARG_1, DB_WRITE, LCK_WRITE, 1);
     if (is_not_atom(tb->common.id)) { /* Not a named table */
         tb->common.the_name = BIF_ARG_2;
-    if (!insert_named_tab(BIF_ARG_2,tb)) {
+    if (!insert_named_tab(BIF_ARG_2, tb, 1))
-    if (!remove_named_tab(tb->common.id)) {
+    if (!remove_named_tab(tb, 1))
         erl_exit(1,"Could not find named tab %s", tb->common.id);
     tb->common.id = tb->common.the_name = BIF_ARG_2;
     ret = tb->common.id;
     db_unlock(tb, LCK_WRITE);
+    erts_smp_rwmtx_rwunlock(lck1);
+    erts_smp_rwmtx_rwunlock(lck2);
-    db_unlock(tb, LCK_WRITE);
+    db_unlock(tb, LCK_WRITE);
+    erts_smp_rwmtx_rwunlock(lck1);
+    erts_smp_rwmtx_rwunlock(lck2);
     BIF_ERROR(BIF_P, BADARG);
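Renaming touches two meta name buckets (old and new name), so both locks are
taken up front in a fixed order to rule out deadlock between concurrent
renames, and db_get_table_aux() is then called with meta_already_locked = 1.
A condensed sketch of the ordering logic; the lck1 == lck2 collapse is an
assumption for the case where both names hash to the same bucket:

erts_smp_rwmtx_t *lck1, *lck2;
(void) meta_name_tab_bucket(BIF_ARG_2, &lck1);  /* lock for the new name */
(void) meta_name_tab_bucket(BIF_ARG_1, &lck2);  /* lock for the old name */
if (lck1 == lck2)
    lck2 = NULL;                /* same bucket: one lock suffices */
else if (lck1 > lck2) {         /* always lock in ascending address order */
    erts_smp_rwmtx_t *tmp = lck1;
    lck1 = lck2;
    lck2 = tmp;
}
erts_smp_rwmtx_rwlock(lck1);
if (lck2)
    erts_smp_rwmtx_rwlock(lck2);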
         tb->common.id = ret;
         tb->common.slot = slot;           /* store slot for erase */
-        meta_main_tab_lock(slot);
+        mmtl = get_meta_main_tab_lock(slot);
+        erts_smp_rwmtx_rwlock(mmtl);
         meta_main_tab[slot].u.tb = tb;
         ASSERT(IS_SLOT_ALIVE(slot));
-        meta_main_tab_unlock(slot);
+        erts_smp_rwmtx_rwunlock(mmtl);

-    if (is_named && !insert_named_tab(BIF_ARG_1, tb)) {
-        meta_main_tab_lock(slot);
+    if (is_named && !insert_named_tab(BIF_ARG_1, tb, 0)) {
+        mmtl = get_meta_main_tab_lock(slot);
+        erts_smp_rwmtx_rwlock(mmtl);
         free_slot(slot);
-        meta_main_tab_unlock(slot);
+        erts_smp_rwmtx_rwunlock(mmtl);
-        db_lock_take_over_ref(tb,LCK_WRITE);
+        db_lock(tb,LCK_WRITE);
         free_heir_data(tb);
         tb->common.meth->db_free_table(tb);
+        schedule_free_dbtable(tb);
         db_unlock(tb,LCK_WRITE);
         BIF_ERROR(BIF_P, BADARG);
     tb->common.status &= ~(DB_PROTECTED|DB_PUBLIC|DB_PRIVATE);
     tb->common.status |= DB_DELETE;

-    meta_main_tab_lock(tb->common.slot);
+    mmtl = get_meta_main_tab_lock(tb->common.slot);
+    if (erts_smp_rwmtx_tryrwlock(mmtl) == EBUSY) {
+         * We keep our increased refc over this op in order to
+         * prevent the table from disappearing.
+        erts_smp_rwmtx_rwunlock(&tb->common.rwlock);
+        erts_smp_rwmtx_rwlock(mmtl);
+        erts_smp_rwmtx_rwlock(&tb->common.rwlock);
     /* We must keep the slot, to be found by db_proc_dead() if process dies */
     MARK_SLOT_DEAD(tb->common.slot);
-    meta_main_tab_unlock(tb->common.slot);
-    if (is_atom(tb->common.id)) {
-        remove_named_tab(tb->common.id);
+    erts_smp_rwmtx_rwunlock(mmtl);
+    if (is_atom(tb->common.id))
+        remove_named_tab(tb, 0);

     if (tb->common.owner != BIF_P->id) {
-        Eterm meta_tuple[3];
+        DeclareTmpHeap(meta_tuple,3,BIF_P);
      * The table is being deleted by a process other than its owner.
 BIF_RETTYPE ets_match_2(BIF_ALIST_2)
+    DeclareTmpHeap(buff,8,BIF_P);
     Eterm *hp = buff;
-    /*hp = HAlloc(BIF_P, 8);*/
+    UseTmpHeap(8,BIF_P);
     ms = CONS(hp, am_DollarDollar, NIL);
     ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
     ms = CONS(hp, ms, NIL);
-    return ets_select_2(BIF_P, BIF_ARG_1, ms);
+    res = ets_select_2(BIF_P, BIF_ARG_1, ms);
+    UnUseTmpHeap(8,BIF_P);
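The stale /*hp = HAlloc(BIF_P, 8);*/ remnant gives way to the
DeclareTmpHeap/UseTmpHeap/UnUseTmpHeap discipline; the 8 temporary words hold
the match spec [{Pattern, [], ['$$']}] that turns ets:match/2 into a select.
A sketch of the word accounting, assuming the usual hp advancement between
the three builds (elided from the hunk above):

ms = CONS(hp, am_DollarDollar, NIL);  /* ['$$']              : 2 words */
hp += 2;
ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);  /* {Pattern,[],['$$']} : 4 words */
hp += 4;
ms = CONS(hp, ms, NIL);               /* [{...}]             : 2 words */
                                      /* total               : 8 words */

The same conversion repeats below for ets_match_3, ets_match_object_2 and
ets_match_object_3, with '$_' in place of '$$' for the object variants.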
 BIF_RETTYPE ets_match_3(BIF_ALIST_3)
+    DeclareTmpHeap(buff,8,BIF_P);
     Eterm *hp = buff;
-    /*hp = HAlloc(BIF_P, 8);*/
+    UseTmpHeap(8,BIF_P);
     ms = CONS(hp, am_DollarDollar, NIL);
     ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
     ms = CONS(hp, ms, NIL);
-    return ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+    res = ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+    UnUseTmpHeap(8,BIF_P);
 BIF_RETTYPE ets_match_object_2(BIF_ALIST_2)
+    DeclareTmpHeap(buff,8,BIF_P);
     Eterm *hp = buff;
-    /*hp = HAlloc(BIF_P, 8);*/
+    UseTmpHeap(8,BIF_P);
     ms = CONS(hp, am_DollarUnderscore, NIL);
     ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
     ms = CONS(hp, ms, NIL);
-    return ets_select_2(BIF_P, BIF_ARG_1, ms);
+    res = ets_select_2(BIF_P, BIF_ARG_1, ms);
+    UnUseTmpHeap(8,BIF_P);
 BIF_RETTYPE ets_match_object_3(BIF_ALIST_3)
+    DeclareTmpHeap(buff,8,BIF_P);
     Eterm *hp = buff;
-    /*hp = HAlloc(BIF_P, 8);*/
+    UseTmpHeap(8,BIF_P);
     ms = CONS(hp, am_DollarUnderscore, NIL);
     ms = TUPLE3(hp, BIF_ARG_2, NIL, ms);
     ms = CONS(hp, ms, NIL);
-    return ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+    res = ets_select_3(BIF_P, BIF_ARG_1, ms, BIF_ARG_3);
+    UnUseTmpHeap(8,BIF_P);
         BIF_TRAP3(bif_export[BIF_ets_match_spec_run_r_3],
                   BIF_P,lst,BIF_ARG_2,ret);
-        res = db_prog_match(BIF_P, mp, CAR(list_val(lst)), 0, &dummy);
+        res = db_prog_match(BIF_P, mp, CAR(list_val(lst)), NULL, NULL, 0,
+                            ERTS_PAM_COPY_RESULT, &dummy);
         if (is_value(res)) {
-            sz = size_object(res);
-            hp = HAlloc(BIF_P, sz + 2);
-            res = copy_struct(res, sz, &hp, &MSO(BIF_P));
+            hp = HAlloc(BIF_P, 2);
             ret = CONS(hp,res,ret);
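With ERTS_PAM_COPY_RESULT, db_prog_match() itself leaves the result on the
process heap, so the caller only allocates the 2-word cons cell instead of
sizing and copying the whole term. For comparison, the caller-side copy this
replaces (reassembled from the removed lines above):

sz  = size_object(res);                        /* measure the result     */
hp  = HAlloc(BIF_P, sz + 2);                   /* result + one cons cell */
res = copy_struct(res, sz, &hp, &MSO(BIF_P));  /* copy to process heap   */
ret = CONS(hp, res, ret);                      /* hp left at the 2 words */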
     DbTable init_tb;

-extern Eterm* em_apply_bif;
+extern BeamInstr* em_apply_bif;

 #ifdef ERTS_SMP
-    for (i=0; i<META_MAIN_TAB_LOCK_CNT; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
-        erts_smp_spinlock_init_x(&meta_main_tab_locks[i].lck, "meta_main_tab_slot", make_small(i));
-        erts_smp_spinlock_init(&meta_main_tab_locks[i].lck, "meta_main_tab_slot");
+    erts_smp_rwmtx_opt_t rwmtx_opt = ERTS_SMP_RWMTX_OPT_DEFAULT_INITER;
+    rwmtx_opt.type = ERTS_SMP_RWMTX_TYPE_FREQUENT_READ;
+    rwmtx_opt.lived = ERTS_SMP_RWMTX_LONG_LIVED;
+    meta_main_tab_locks =
+        erts_alloc_permanent_cache_aligned(ERTS_ALC_T_DB_TABLES,
+                                           sizeof(erts_meta_main_tab_lock_t)
+                                           * ERTS_META_MAIN_TAB_LOCK_TAB_SIZE);
+    for (i = 0; i < ERTS_META_MAIN_TAB_LOCK_TAB_SIZE; i++) {
+        erts_smp_rwmtx_init_opt_x(&meta_main_tab_locks[i].rwmtx, &rwmtx_opt,
+                                  "meta_main_tab_slot", make_small(i));
     erts_smp_spinlock_init(&meta_main_tab_main_lock, "meta_main_tab_main");
     for (i=0; i<META_NAME_TAB_LOCK_CNT; i++) {
-#ifdef ERTS_ENABLE_LOCK_COUNT
-        erts_smp_rwmtx_init_x(&meta_name_tab_rwlocks[i].lck, "meta_name_tab", make_small(i));
-        erts_smp_rwmtx_init(&meta_name_tab_rwlocks[i].lck, "meta_name_tab");
+        erts_smp_rwmtx_init_opt_x(&meta_name_tab_rwlocks[i].lck, &rwmtx_opt,
+                                  "meta_name_tab", make_small(i));
     if (!is_immed(heir_data)) {
-        /* Make a dummy 1-tuple around data to use db_get_term() */
-        heir_data = (Eterm) db_get_term(&tb->common, NULL, 0,
-                                        TUPLE1(tmp,heir_data));
+        DeclareTmpHeap(tmp,2,me);
+        ErlOffHeap tmp_offheap;
+        /* Make a dummy 1-tuple around data to use DbTerm */
+        wrap_tpl = TUPLE1(tmp,heir_data);
+        size = size_object(wrap_tpl);
+        dbterm = erts_db_alloc(ERTS_ALC_T_DB_HEIR_DATA, (DbTable *)tb,
+                               (sizeof(DbTerm) + sizeof(Eterm)*(size-1)));
+        dbterm->size = size;
+        tmp_offheap.first = NULL;
+        copy_struct(wrap_tpl, size, &top, &tmp_offheap);
+        dbterm->first_oh = tmp_offheap.first;
+        heir_data = (UWord)dbterm;
         ASSERT(!is_immed(heir_data));
     tb->common.heir_data = heir_data;
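The heir data is now flat-copied into a self-contained DbTerm instead of
going through db_get_term(); wrapping it in a 1-tuple gives the copy a
single root. The sizeof(DbTerm) + sizeof(Eterm)*(size-1) allocation implies
a header ending in one inline term word that the copy grows past; a sketch
of the assumed layout:

typedef struct db_term {
    struct erl_off_heap_header *first_oh; /* off-heap chain of the copy   */
    Uint  size;                           /* term size in words           */
    Eterm tpl[1];                         /* term data: really size words */
} DbTerm;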