    CallCounter clear_cache;
    CallCounter check_cache;

static cache_desc_t cache_descs[MAX_CACHE_SIZE];
static cache_desc_t *free_cache_descs;
static cache_desc_t *cache;
static cache_desc_t *cache_end;

static Uint cache_hits;
static Uint cache_size;
static Uint min_cached_seg_size;
static Uint max_cached_seg_size;

static Uint max_cache_size;
static Uint abs_max_cache_bad_fit;
static Uint rel_max_cache_bad_fit;
typedef struct ErtsMsegAllctr_t_ ErtsMsegAllctr_t;

    cache_desc_t cache_descs[MAX_CACHE_SIZE];
    cache_desc_t *free_cache_descs;
    cache_desc_t *cache_end;

    Uint min_cached_seg_size;
    Uint max_cached_seg_size;

    ErtsMsegAllctr_t *ma;

struct ErtsMsegAllctr_t_ {
    int is_cache_check_scheduled;
    Uint abs_max_cache_bad_fit;
    Uint rel_max_cache_bad_fit;
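/* A minimal sketch (not the ERTS code itself) of the data-structure split
 * above: every memory kind carries its own fixed descriptor pool and its
 * own doubly-linked segment cache, so two kinds never contend for
 * descriptors. All names here (mk_cache_t, MK_MAX_CACHE) are hypothetical. */

#include <stddef.h>

#define MK_MAX_CACHE 10

typedef struct mk_cache_desc {
    void *seg;
    size_t size;
    struct mk_cache_desc *next;
    struct mk_cache_desc *prev;
} mk_cache_desc_t;

typedef struct {
    mk_cache_desc_t descs[MK_MAX_CACHE]; /* fixed pool, no heap usage  */
    mk_cache_desc_t *free_descs;         /* singly-linked free list    */
    mk_cache_desc_t *cache;              /* MRU end of the cache       */
    mk_cache_desc_t *cache_end;          /* LRU end of the cache       */
    size_t cache_size;                   /* number of cached segments  */
} mk_cache_t;

/* Thread the fixed pool onto the free list once at start-up. */
static void mk_cache_init(mk_cache_t *mk)
{
    int i;
    mk->free_descs = NULL;
    mk->cache = mk->cache_end = NULL;
    mk->cache_size = 0;
    for (i = 0; i < MK_MAX_CACHE; i++) {
        mk->descs[i].next = mk->free_descs;
        mk->free_descs = &mk->descs[i];
    }
}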
#if CAN_PARTLY_DESTROY
static Uint min_seg_size;
#define ERTS_MSEG_ALLOC_STAT(SZ)                                        \
do {                                                                    \
    segments.current.no++;                                              \
    if (segments.max.no < segments.current.no)                          \
        segments.max.no = segments.current.no;                          \
    if (segments.current.watermark < segments.current.no)               \
        segments.current.watermark = segments.current.no;               \
    segments.current.sz += (SZ);                                        \
    if (segments.max.sz < segments.current.sz)                          \
        segments.max.sz = segments.current.sz;                          \
} while (0)

#define ERTS_MSEG_DEALLOC_STAT(SZ)                                      \
do {                                                                    \
    ASSERT(segments.current.no > 0);                                    \
    segments.current.no--;                                              \
    ASSERT(segments.current.sz >= (SZ));                                \
    segments.current.sz -= (SZ);                                        \
} while (0)
typedef union {
    ErtsMsegAllctr_t mseg_alloc;
    char align__[ERTS_ALC_CACHE_LINE_ALIGN_SIZE(sizeof(ErtsMsegAllctr_t))];
} ErtsAlgndMsegAllctr_t;

static int no_mseg_allocators;
static ErtsAlgndMsegAllctr_t *aligned_mseg_allctr;

#ifdef ERTS_SMP

#define ERTS_MSEG_ALLCTR_IX(IX) \
    (&aligned_mseg_allctr[(IX)].mseg_alloc)

#define ERTS_MSEG_ALLCTR_SS() \
    ERTS_MSEG_ALLCTR_IX((int) erts_get_scheduler_id())

#define ERTS_MSEG_ALLCTR_OPT(OPT) \
    ((OPT)->sched_spec ? ERTS_MSEG_ALLCTR_SS() : ERTS_MSEG_ALLCTR_IX(0))

#else

#define ERTS_MSEG_ALLCTR_IX(IX) \
    (&aligned_mseg_allctr[0].mseg_alloc)

#define ERTS_MSEG_ALLCTR_SS() \
    (&aligned_mseg_allctr[0].mseg_alloc)

#define ERTS_MSEG_ALLCTR_OPT(OPT) \
    (&aligned_mseg_allctr[0].mseg_alloc)

#endif
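/* Sketch of the selection scheme the macros above implement: one allocator
 * instance per scheduler, each padded out to a cache-line multiple so two
 * instances never share a line. erts_get_scheduler_id() is the real ERTS
 * call; the rest (my_allctr_t, CACHE_LINE) is illustrative only. */

#define CACHE_LINE 64

typedef struct {
    long dummy_state;                /* stand-in for ErtsMsegAllctr_t */
} my_allctr_t;

typedef union {
    my_allctr_t a;
    char pad[((sizeof(my_allctr_t) + CACHE_LINE - 1) / CACHE_LINE) * CACHE_LINE];
} my_algnd_allctr_t;

static my_algnd_allctr_t *allctrs;   /* sized as number of schedulers + 1 */

/* Index 0 is the shared fallback instance; 1..N belong to schedulers 1..N. */
static my_allctr_t *allctr_ix(int ix)
{
    return &allctrs[ix].a;
}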
#define ERTS_MSEG_LOCK(MA)              \
do {                                    \
    if ((MA)->is_thread_safe)           \
        erts_mtx_lock(&(MA)->mtx);      \
} while (0)

#define ERTS_MSEG_UNLOCK(MA)            \
do {                                    \
    if ((MA)->is_thread_safe)           \
        erts_mtx_unlock(&(MA)->mtx);    \
} while (0)
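/* The lock macros above take the mutex only when the instance is shared
 * (is_thread_safe set); a scheduler-private instance skips locking
 * entirely. A sketch with pthreads standing in for erts_mtx_*; the
 * lk_allctr_t type and lk_* names are hypothetical. */

#include <pthread.h>

typedef struct {
    int is_thread_safe;
    pthread_mutex_t mtx;
} lk_allctr_t;

static void lk_lock(lk_allctr_t *ma)
{
    if (ma->is_thread_safe)
        pthread_mutex_lock(&ma->mtx);
}

static void lk_unlock(lk_allctr_t *ma)
{
    if (ma->is_thread_safe)
        pthread_mutex_unlock(&ma->mtx);
}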
#define ERTS_MSEG_ALLOC_STAT(C,SZ)                                      \
do {                                                                    \
    C->segments.current.no++;                                           \
    if (C->segments.max.no < C->segments.current.no)                    \
        C->segments.max.no = C->segments.current.no;                    \
    if (C->segments.current.watermark < C->segments.current.no)         \
        C->segments.current.watermark = C->segments.current.no;         \
    C->segments.current.sz += (SZ);                                     \
    if (C->segments.max.sz < C->segments.current.sz)                    \
        C->segments.max.sz = C->segments.current.sz;                    \
} while (0)

#define ERTS_MSEG_DEALLOC_STAT(C,SZ)                                    \
do {                                                                    \
    ASSERT(C->segments.current.no > 0);                                 \
    C->segments.current.no--;                                           \
    ASSERT(C->segments.current.sz >= (SZ));                             \
    C->segments.current.sz -= (SZ);                                     \
} while (0)

#define ERTS_MSEG_REALLOC_STAT(OSZ, NSZ)                                \
do {                                                                    \
    ASSERT(segments.current.sz >= (OSZ));                               \
    segments.current.sz -= (OSZ);                                       \
    segments.current.sz += (NSZ);                                       \
} while (0)

#define ERTS_MSEG_REALLOC_STAT(C, OSZ, NSZ)                             \
do {                                                                    \
    ASSERT(C->segments.current.sz >= (OSZ));                            \
    C->segments.current.sz -= (OSZ);                                    \
    C->segments.current.sz += (NSZ);                                    \
} while (0)
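/* What the stat macros maintain, written out as a function: current and
 * peak counts plus a watermark that only ratchets upward between cache
 * checks. The struct mirrors the segments field used above; all names
 * here are illustrative. */

typedef struct {
    struct { unsigned long no, sz, watermark; } current;
    struct { unsigned long no, sz; } max;
} seg_stat_t;

static void seg_stat_alloc(seg_stat_t *s, unsigned long sz)
{
    s->current.no++;
    if (s->max.no < s->current.no)
        s->max.no = s->current.no;
    if (s->current.watermark < s->current.no)
        s->current.watermark = s->current.no;
    s->current.sz += sz;
    if (s->max.sz < s->current.sz)
        s->max.sz = s->current.sz;
}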
#define ONE_GIGA (1000000000)

#define ZERO_CC(CC) (calls.CC.no = 0, calls.CC.giga_no = 0)

#define INC_CC(CC) (calls.CC.no == ONE_GIGA - 1                         \
                    ? (calls.CC.giga_no++, calls.CC.no = 0)             \
                    : calls.CC.no++)

#define DEC_CC(CC) (calls.CC.no == 0                                    \
                    ? (calls.CC.giga_no--,                              \
                       calls.CC.no = ONE_GIGA - 1)                      \
                    : calls.CC.no--)

static erts_mtx_t mseg_mutex; /* Also needed when !USE_THREADS */

#define ZERO_CC(MA, CC) ((MA)->calls.CC.no = 0,                         \
                         (MA)->calls.CC.giga_no = 0)

#define INC_CC(MA, CC) ((MA)->calls.CC.no == ONE_GIGA - 1               \
                        ? ((MA)->calls.CC.giga_no++,                    \
                           (MA)->calls.CC.no = 0)                       \
                        : (MA)->calls.CC.no++)

#define DEC_CC(MA, CC) ((MA)->calls.CC.no == 0                          \
                        ? ((MA)->calls.CC.giga_no--,                    \
                           (MA)->calls.CC.no = ONE_GIGA - 1)            \
                        : (MA)->calls.CC.no--)
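/* The call counters split a count into (giga_no, no) with no < 10^9, so
 * two machine words can count far past 2^32 even on 32-bit platforms and
 * each half still fits comfortably when reported. A standalone sketch: */

#include <stdio.h>

typedef struct { unsigned long giga_no; unsigned long no; } call_counter;

static void cc_inc(call_counter *cc)
{
    if (cc->no == 1000000000UL - 1) {
        cc->giga_no++;
        cc->no = 0;
    }
    else
        cc->no++;
}

int main(void)
{
    call_counter cc = {0, 999999999UL};
    cc_inc(&cc);                                     /* rolls over into giga_no */
    printf("%lu * 10^9 + %lu\n", cc.giga_no, cc.no); /* prints: 1 * 10^9 + 0 */
    return 0;
}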
static erts_mtx_t init_atoms_mutex; /* Also needed when !USE_THREADS */

#ifdef ERTS_THREADS_NO_SMP
static erts_tid_t main_tid;
static int async_handle = -1;

static void thread_safe_init(void)
    erts_mtx_init(&init_atoms_mutex, "mseg_init_atoms");
    erts_mtx_init(&mseg_mutex, "mseg");
#ifdef ERTS_THREADS_NO_SMP
    main_tid = erts_thr_self();

static ERTS_INLINE void
schedule_cache_check(ErtsMsegAllctr_t *ma)
    if (!ma->is_cache_check_scheduled && ma->is_init_done) {
        erts_set_aux_work_timeout(ma->ix,
                                  ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
        ma->is_cache_check_scheduled = 1;
static ErlTimer cache_check_timer;

static ERTS_INLINE void
schedule_cache_check(void)
    if (!is_cache_check_scheduled && is_init_done) {
#ifdef ERTS_THREADS_NO_SMP
        if (!erts_equal_tids(erts_thr_self(), main_tid)) {
            if (!is_cache_check_requested) {
                is_cache_check_requested = 1;
                sys_async_ready(async_handle);
            cache_check_timer.active = 0;
            erl_set_timer(&cache_check_timer,
                          cache_check_interval);
            is_cache_check_scheduled = 1;
#ifdef ERTS_THREADS_NO_SMP
            is_cache_check_requested = 0;

#ifdef ERTS_THREADS_NO_SMP
check_schedule_cache_check(void)
    erts_mtx_lock(&mseg_mutex);
    if (is_cache_check_requested
        && !is_cache_check_scheduled) {
        schedule_cache_check();
    erts_mtx_unlock(&mseg_mutex);

    erts_mtx_lock(&mseg_mutex);
    erts_mtx_unlock(&mseg_mutex);

static ERTS_INLINE void *
mseg_create(Uint size)
    ASSERT(size % page_size == 0);
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
    seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
    seg = (void *) mmap((void *) 0, (size_t) size,
                        MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
    if (seg == (void *) MAP_FAILED)
    if ((unsigned long) seg & CHECK_POINTER_MASK) {
        erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
#error "Missing mseg_create() implementation"

static ERTS_INLINE void *
mseg_create(ErtsMsegAllctr_t *ma, MemKind* mk, Uint size)
    ASSERT(size % page_size == 0);
    if (mk == &ma->low_mem) {
        if ((unsigned long) seg & CHECK_POINTER_MASK) {
            erts_fprintf(stderr,"Pointer mask failure (0x%08lx)\n",(unsigned long) seg);
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
    seg = erts_sys_alloc(ERTS_ALC_N_INVALID, NULL, size);
    seg = (void *) mmap((void *) 0, (size_t) size,
                        MMAP_PROT, MMAP_FLAGS, MMAP_FD, 0);
    if (seg == (void *) MAP_FAILED)
# error "Missing mseg_create() implementation"
static ERTS_INLINE void
mseg_destroy(void *seg, Uint size)
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
    erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
    pmunmap((void *) seg, size);
    munmap((void *) seg, size);
#error "Missing mseg_destroy() implementation"

static ERTS_INLINE void
mseg_destroy(ErtsMsegAllctr_t *ma, MemKind* mk, void *seg, Uint size)
    if (mk == &ma->low_mem) {
        pmunmap((void *) seg, size);
#ifdef ERTS_MSEG_FAKE_SEGMENTS
    erts_sys_free(ERTS_ALC_N_INVALID, NULL, seg);
    munmap((void *) seg, size);
# error "Missing mseg_destroy() implementation"
    ASSERT(size % page_size == 0);
    ASSERT(res == 0);
#if HAVE_MSEG_RECREATE

static ERTS_INLINE void *
mseg_recreate(void *old_seg, Uint old_size, Uint new_size)
    ASSERT(old_size % page_size == 0);
    ASSERT(new_size % page_size == 0);
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
    new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
#elif HAVE_MREMAP
    new_seg = (void *) pmremap((void *) old_seg,
#elif defined(__NetBSD__)
    new_seg = (void *) mremap((void *) old_seg,
    if (new_seg == (void *) MAP_FAILED)
    new_seg = (void *) mremap((void *) old_seg,
    if (new_seg == (void *) MAP_FAILED)

static ERTS_INLINE void *
mseg_recreate(ErtsMsegAllctr_t *ma, MemKind* mk, void *old_seg, Uint old_size, Uint new_size)
    ASSERT(old_size % page_size == 0);
    ASSERT(new_size % page_size == 0);
    if (mk == &ma->low_mem) {
        new_seg = (void *) pmremap((void *) old_seg,
#if defined(ERTS_MSEG_FAKE_SEGMENTS)
    new_seg = erts_sys_realloc(ERTS_ALC_N_INVALID, NULL, old_seg, new_size);
#elif HAVE_MREMAP
#if defined(__NetBSD__)
    new_seg = (void *) mremap((void *) old_seg,
    if (new_seg == (void *) MAP_FAILED)
    new_seg = (void *) mremap((void *) old_seg,
    if (new_seg == (void *) MAP_FAILED)
#error "Missing mseg_recreate() implementation"
    INC_CC(ma, recreate);

#endif /* #if HAVE_MSEG_RECREATE */
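/* mseg_recreate() grows or shrinks a segment in place where the OS allows
 * it. On Linux this is mremap(2); the call below uses the real glibc
 * signature (MREMAP_MAYMOVE lets the kernel relocate the mapping if it
 * cannot be resized in place). On systems without mremap the fallback is
 * allocate + copy + free. A Linux-only sketch: */

#define _GNU_SOURCE
#include <stddef.h>
#include <sys/mman.h>

static void *seg_recreate(void *old_seg, size_t old_size, size_t new_size)
{
    void *new_seg = mremap(old_seg, old_size, new_size, MREMAP_MAYMOVE);
    return new_seg == MAP_FAILED ? NULL : new_seg;
}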
#define ERTS_DBG_MA_CHK_THR_ACCESS(MA)                                  \
do {                                                                    \
    if ((MA)->is_thread_safe)                                           \
        ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&(MA)->mtx)                \
                       || erts_smp_thr_progress_is_blocking()           \
                       || ERTS_IS_CRASH_DUMPING);                       \
    else                                                                \
        ERTS_LC_ASSERT((MA)->ix == (int) erts_get_scheduler_id()        \
                       || erts_smp_thr_progress_is_blocking()           \
                       || ERTS_IS_CRASH_DUMPING);                       \
} while (0)

#define ERTS_DBG_MK_CHK_THR_ACCESS(MK) \
    ERTS_DBG_MA_CHK_THR_ACCESS((MK)->ma)

#define ERTS_DBG_MA_CHK_THR_ACCESS(MA)
#define ERTS_DBG_MK_CHK_THR_ACCESS(MK)
static ERTS_INLINE cache_desc_t *
alloc_cd(MemKind* mk)
    cache_desc_t *cd = free_cache_descs;
    ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
    free_cache_descs = cd->next;

    cache_desc_t *cd = mk->free_cache_descs;
    ERTS_DBG_MK_CHK_THR_ACCESS(mk);
    mk->free_cache_descs = cd->next;

static ERTS_INLINE void
free_cd(cache_desc_t *cd)
    ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
    cd->next = free_cache_descs;
    free_cache_descs = cd;

free_cd(MemKind* mk, cache_desc_t *cd)
    ERTS_DBG_MK_CHK_THR_ACCESS(mk);
    cd->next = mk->free_cache_descs;
    mk->free_cache_descs = cd;
static ERTS_INLINE void
link_cd(cache_desc_t *cd)
    ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));

link_cd(MemKind* mk, cache_desc_t *cd)
    ERTS_DBG_MK_CHK_THR_ACCESS(mk);
    mk->cache->prev = cd;
    cd->next = mk->cache;
    if (!mk->cache_end) {
        ASSERT(!cd->next);
#if CAN_PARTLY_DESTROY
static ERTS_INLINE void
end_link_cd(cache_desc_t *cd)
    ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
    cache_end->next = cd;
    cd->prev = cache_end;

end_link_cd(MemKind* mk, cache_desc_t *cd)
    ERTS_DBG_MK_CHK_THR_ACCESS(mk);
    mk->cache_end->next = cd;
    cd->prev = mk->cache_end;
    ASSERT(!cd->prev);
static ERTS_INLINE void
unlink_cd(cache_desc_t *cd)
    ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
    cd->next->prev = cd->prev;
    cache_end = cd->prev;
    cd->prev->next = cd->next;
    ASSERT(cache_size > 0);

unlink_cd(MemKind* mk, cache_desc_t *cd)
    ERTS_DBG_MK_CHK_THR_ACCESS(mk);
    cd->next->prev = cd->prev;
    mk->cache_end = cd->prev;
    cd->prev->next = cd->next;
    mk->cache = cd->next;
    ASSERT(mk->cache_size > 0);
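/* The cache is an intrusive doubly-linked list, so a descriptor found
 * mid-list can be unlinked in O(1), as unlink_cd() above does. A sketch
 * using the hypothetical mk_cache_t types from the earlier sketch: */

static void mk_unlink(mk_cache_t *mk, mk_cache_desc_t *cd)
{
    if (cd->next)
        cd->next->prev = cd->prev;
    else
        mk->cache_end = cd->prev;     /* cd was the LRU end */
    if (cd->prev)
        cd->prev->next = cd->next;
    else
        mk->cache = cd->next;         /* cd was the MRU end */
    mk->cache_size--;
}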
static ERTS_INLINE void
check_cache_limits(void)
    cache_desc_t *cd;
    ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
    max_cached_seg_size = 0;
    min_cached_seg_size = ~((Uint) 0);
    for (cd = cache; cd; cd = cd->next) {
        if (cd->size < min_cached_seg_size)
            min_cached_seg_size = cd->size;
        if (cd->size > max_cached_seg_size)
            max_cached_seg_size = cd->size;

check_cache_limits(MemKind* mk)
    cache_desc_t *cd;
    ERTS_DBG_MK_CHK_THR_ACCESS(mk);
    mk->max_cached_seg_size = 0;
    mk->min_cached_seg_size = ~((Uint) 0);
    for (cd = mk->cache; cd; cd = cd->next) {
        if (cd->size < mk->min_cached_seg_size)
            mk->min_cached_seg_size = cd->size;
        if (cd->size > mk->max_cached_seg_size)
            mk->max_cached_seg_size = cd->size;
static ERTS_INLINE void
adjust_cache_size(int force_check_limits)
    cache_desc_t *cd;
    int check_limits = force_check_limits;
    Sint max_cached = ((Sint) segments.current.watermark
                       - (Sint) segments.current.no);
    ERTS_LC_ASSERT(erts_lc_mtx_is_locked(&mseg_mutex));
    while (((Sint) cache_size) > max_cached && ((Sint) cache_size) > 0) {
        if (!check_limits &&
            !(min_cached_seg_size < cd->size
              && cd->size < max_cached_seg_size)) {
            check_limits = 1;
        if (erts_mtrace_enabled)
            erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
        mseg_destroy(cd->seg, cd->size);
    if (check_limits)
        check_cache_limits();

adjust_cache_size(MemKind* mk, int force_check_limits)
    cache_desc_t *cd;
    int check_limits = force_check_limits;
    Sint max_cached = ((Sint) mk->segments.current.watermark
                       - (Sint) mk->segments.current.no);
    ERTS_DBG_MK_CHK_THR_ACCESS(mk);
    while (((Sint) mk->cache_size) > max_cached && ((Sint) mk->cache_size) > 0) {
        ASSERT(mk->cache_end);
        if (!check_limits &&
            !(mk->min_cached_seg_size < cd->size
              && cd->size < mk->max_cached_seg_size)) {
            check_limits = 1;
        if (erts_mtrace_enabled)
            erts_mtrace_crr_free(SEGTYPE, SEGTYPE, cd->seg);
        mseg_destroy(mk->ma, mk, cd->seg, cd->size);
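/* adjust_cache_size() shrinks the cache from the LRU end until it fits
 * under (watermark - live segments): the cache may only hold as many
 * segments as have recently been in simultaneous use. A sketch over the
 * hypothetical mk_cache_t, reusing mk_unlink() and seg_destroy() from the
 * earlier sketches: */

static void mk_shrink_cache(mk_cache_t *mk, long max_cached)
{
    while ((long) mk->cache_size > max_cached && mk->cache_size > 0) {
        mk_cache_desc_t *cd = mk->cache_end;  /* evict least recently cached */
        mk_unlink(mk, cd);
        seg_destroy(cd->seg, cd->size);
        cd->next = mk->free_descs;            /* return descriptor to pool */
        mk->free_descs = cd;
    }
}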
    if (check_limits)
        check_cache_limits(mk);

check_cache(void *unused)
    erts_mtx_lock(&mseg_mutex);
    is_cache_check_scheduled = 0;
    if (segments.current.watermark > segments.current.no)
        segments.current.watermark--;
    adjust_cache_size(0);
    schedule_cache_check();
    erts_mtx_unlock(&mseg_mutex);

mseg_clear_cache(void)
    segments.current.watermark = 0;
    adjust_cache_size(1);
    segments.current.watermark = segments.current.no;

check_one_cache(MemKind* mk)
    if (mk->segments.current.watermark > mk->segments.current.no)
        mk->segments.current.watermark--;
    adjust_cache_size(mk, 0);
    schedule_cache_check(mk->ma);
    return mk->cache_size;
static void do_cache_check(ErtsMsegAllctr_t *ma)
    for (mk=ma->mk_list; mk; mk=mk->next) {
        if (check_one_cache(mk))
    ma->is_cache_check_scheduled = 0;
    erts_set_aux_work_timeout(ma->ix,
                              ERTS_SSI_AUX_WORK_MSEG_CACHE_CHECK,
    INC_CC(ma, check_cache);
    ERTS_MSEG_UNLOCK(ma);

void erts_mseg_cache_check(void)
    do_cache_check(ERTS_MSEG_ALLCTR_SS());
mseg_clear_cache(MemKind* mk)
    mk->segments.current.watermark = 0;
    adjust_cache_size(mk, 1);
    ASSERT(!mk->cache_end);
    ASSERT(!mk->cache_size);
    mk->segments.current.watermark = mk->segments.current.no;
    INC_CC(mk->ma, clear_cache);

static ERTS_INLINE MemKind* memkind(ErtsMsegAllctr_t *ma,
                                    const ErtsMsegOpt_t *opt)
    return opt->low_mem ? &ma->low_mem : &ma->hi_mem;
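/* memkind() above routes a request to the low or high memory kind; only
 * halfword builds actually need the low range. A sketch of the same
 * two-way choice over the hypothetical mk_cache_t (field names invented): */

typedef struct { mk_cache_t low_mem, hi_mem; } two_kind_allctr_t;

static mk_cache_t *pick_kind(two_kind_allctr_t *ma, int want_low_mem)
{
    return want_low_mem ? &ma->low_mem : &ma->hi_mem;
}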
mseg_alloc(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
    Uint max, min, diff_size, size;
    cache_desc_t *cd, *cand_cd;
    size = PAGE_CEILING(*size_p);
#if CAN_PARTLY_DESTROY
    if (size < min_seg_size)
        min_seg_size = size;
    if (!opt->cache) {
        adjust_cache_size(0);
        seg = mseg_create(size);
        seg = mseg_create(size);

mseg_alloc(ErtsMsegAllctr_t *ma, ErtsAlcType_t atype, Uint *size_p,
           const ErtsMsegOpt_t *opt)
    Uint max, min, diff_size, size;
    cache_desc_t *cd, *cand_cd;
    MemKind* mk = memkind(ma, opt);
    size = PAGE_CEILING(*size_p);
#if CAN_PARTLY_DESTROY
    if (size < ma->min_seg_size)
        ma->min_seg_size = size;
    if (!opt->cache) {
        adjust_cache_size(mk,0);
        seg = mseg_create(ma, mk, size);
        mseg_clear_cache(mk);
        seg = mseg_create(ma, mk, size);
    add_3tup(hpp, szp, &res,
             am.mseg_check_cache,
             bld_unstable_uint(hpp, szp, calls.check_cache.giga_no),
             bld_unstable_uint(hpp, szp, calls.check_cache.no));
    add_3tup(hpp, szp, &res,
             am.mseg_check_cache,
             bld_unstable_uint(hpp, szp, ma->calls.check_cache.giga_no),
             bld_unstable_uint(hpp, szp, ma->calls.check_cache.no));

    add_3tup(hpp, szp, &res,
             am.mseg_clear_cache,
             bld_unstable_uint(hpp, szp, calls.clear_cache.giga_no),
             bld_unstable_uint(hpp, szp, calls.clear_cache.no));
    add_3tup(hpp, szp, &res,
             am.mseg_clear_cache,
             bld_unstable_uint(hpp, szp, ma->calls.clear_cache.giga_no),
             bld_unstable_uint(hpp, szp, ma->calls.clear_cache.no));

#if HAVE_MSEG_RECREATE
    add_3tup(hpp, szp, &res,
             am.mseg_recreate,
             bld_unstable_uint(hpp, szp, calls.recreate.giga_no),
             bld_unstable_uint(hpp, szp, calls.recreate.no));
    add_3tup(hpp, szp, &res,
             am.mseg_recreate,
             bld_unstable_uint(hpp, szp, ma->calls.recreate.giga_no),
             bld_unstable_uint(hpp, szp, ma->calls.recreate.no));

    add_3tup(hpp, szp, &res,
             am.mseg_destroy,
             bld_unstable_uint(hpp, szp, calls.destroy.giga_no),
             bld_unstable_uint(hpp, szp, calls.destroy.no));
    add_3tup(hpp, szp, &res,
             am.mseg_destroy,
             bld_unstable_uint(hpp, szp, ma->calls.destroy.giga_no),
             bld_unstable_uint(hpp, szp, ma->calls.destroy.no));

    add_3tup(hpp, szp, &res,
             am.mseg_create,
             bld_unstable_uint(hpp, szp, calls.create.giga_no),
             bld_unstable_uint(hpp, szp, calls.create.no));
    add_3tup(hpp, szp, &res,
             am.mseg_create,
             bld_unstable_uint(hpp, szp, ma->calls.create.giga_no),
             bld_unstable_uint(hpp, szp, ma->calls.create.no));

    add_3tup(hpp, szp, &res,
             am.mseg_realloc,
             bld_unstable_uint(hpp, szp, calls.realloc.giga_no),
             bld_unstable_uint(hpp, szp, calls.realloc.no));
    add_3tup(hpp, szp, &res,
             am.mseg_realloc,
             bld_unstable_uint(hpp, szp, ma->calls.realloc.giga_no),
             bld_unstable_uint(hpp, szp, ma->calls.realloc.no));

    add_3tup(hpp, szp, &res,
             am.mseg_dealloc,
             bld_unstable_uint(hpp, szp, calls.dealloc.giga_no),
             bld_unstable_uint(hpp, szp, calls.dealloc.no));
    add_3tup(hpp, szp, &res,
             am.mseg_dealloc,
             bld_unstable_uint(hpp, szp, ma->calls.dealloc.giga_no),
             bld_unstable_uint(hpp, szp, ma->calls.dealloc.no));

    add_3tup(hpp, szp, &res,
             am.mseg_alloc,
             bld_unstable_uint(hpp, szp, calls.alloc.giga_no),
             bld_unstable_uint(hpp, szp, calls.alloc.no));
    add_3tup(hpp, szp, &res,
             am.mseg_alloc,
             bld_unstable_uint(hpp, szp, ma->calls.alloc.giga_no),
             bld_unstable_uint(hpp, szp, ma->calls.alloc.no));
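/* Each counter is reported as a {giga_no, no} pair; a consumer rebuilds
 * the full count as giga_no * 10^9 + no. A sketch of that reconstruction
 * (a 64-bit type is needed on 32-bit platforms): */

#include <stdint.h>

static uint64_t cc_value(unsigned long giga_no, unsigned long no)
{
    return (uint64_t) giga_no * 1000000000u + no;
}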
info_status(int *print_to_p, void *print_to_arg,
            int begin_new_max_period, Uint **hpp, Uint *szp)
    Eterm res = THE_NON_VALUE;

    if (segments.max_ever.no < segments.max.no)
        segments.max_ever.no = segments.max.no;
    if (segments.max_ever.sz < segments.max.sz)
        segments.max_ever.sz = segments.max.sz;

    if (print_to_p) {
        int to = *print_to_p;
        void *arg = print_to_arg;
        erts_print(to, arg, "cached_segments: %bpu\n", cache_size);
        erts_print(to, arg, "cache_hits: %bpu\n", cache_hits);
        erts_print(to, arg, "segments: %bpu %bpu %bpu\n",
                   segments.current.no, segments.max.no, segments.max_ever.no);
        erts_print(to, arg, "segments_size: %bpu %bpu %bpu\n",
                   segments.current.sz, segments.max.sz, segments.max_ever.sz);
        erts_print(to, arg, "segments_watermark: %bpu\n",
                   segments.current.watermark);

    if (hpp || szp) {
        add_2tup(hpp, szp, &res,
                 am.segments_watermark,
                 bld_unstable_uint(hpp, szp, segments.current.watermark));
        add_4tup(hpp, szp, &res,
                 am.segments_size,
                 bld_unstable_uint(hpp, szp, segments.current.sz),
                 bld_unstable_uint(hpp, szp, segments.max.sz),
                 bld_unstable_uint(hpp, szp, segments.max_ever.sz));
        add_4tup(hpp, szp, &res,
                 am.segments,
                 bld_unstable_uint(hpp, szp, segments.current.no),
                 bld_unstable_uint(hpp, szp, segments.max.no),
                 bld_unstable_uint(hpp, szp, segments.max_ever.no));
        add_2tup(hpp, szp, &res,
                 am.cache_hits,
                 bld_unstable_uint(hpp, szp, cache_hits));
        add_2tup(hpp, szp, &res,
                 am.cached_segments,
                 bld_unstable_uint(hpp, szp, cache_size));

    if (begin_new_max_period) {
        segments.max.no = segments.current.no;
        segments.max.sz = segments.current.sz;

info_status(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
            int begin_new_max_period, Uint **hpp, Uint *szp)
    Eterm res = THE_NON_VALUE;

    if (mk->segments.max_ever.no < mk->segments.max.no)
        mk->segments.max_ever.no = mk->segments.max.no;
    if (mk->segments.max_ever.sz < mk->segments.max.sz)
        mk->segments.max_ever.sz = mk->segments.max.sz;

    if (print_to_p) {
        int to = *print_to_p;
        void *arg = print_to_arg;
        erts_print(to, arg, "cached_segments: %beu\n", mk->cache_size);
        erts_print(to, arg, "cache_hits: %beu\n", mk->cache_hits);
        erts_print(to, arg, "segments: %beu %beu %beu\n",
                   mk->segments.current.no, mk->segments.max.no, mk->segments.max_ever.no);
        erts_print(to, arg, "segments_size: %beu %beu %beu\n",
                   mk->segments.current.sz, mk->segments.max.sz, mk->segments.max_ever.sz);
        erts_print(to, arg, "segments_watermark: %beu\n",
                   mk->segments.current.watermark);

    if (hpp || szp) {
        add_2tup(hpp, szp, &res,
                 am.segments_watermark,
                 bld_unstable_uint(hpp, szp, mk->segments.current.watermark));
        add_4tup(hpp, szp, &res,
                 am.segments_size,
                 bld_unstable_uint(hpp, szp, mk->segments.current.sz),
                 bld_unstable_uint(hpp, szp, mk->segments.max.sz),
                 bld_unstable_uint(hpp, szp, mk->segments.max_ever.sz));
        add_4tup(hpp, szp, &res,
                 am.segments,
                 bld_unstable_uint(hpp, szp, mk->segments.current.no),
                 bld_unstable_uint(hpp, szp, mk->segments.max.no),
                 bld_unstable_uint(hpp, szp, mk->segments.max_ever.no));
        add_2tup(hpp, szp, &res,
                 am.cache_hits,
                 bld_unstable_uint(hpp, szp, mk->cache_hits));
        add_2tup(hpp, szp, &res,
                 am.cached_segments,
                 bld_unstable_uint(hpp, szp, mk->cache_size));

    if (begin_new_max_period) {
        mk->segments.max.no = mk->segments.current.no;
        mk->segments.max.sz = mk->segments.current.sz;
static Eterm info_memkind(ErtsMsegAllctr_t *ma, MemKind* mk, int *print_to_p, void *print_to_arg,
1294
int begin_max_per, Uint **hpp, Uint *szp)
1296
Eterm res = THE_NON_VALUE;
1301
erts_print(*print_to_p, print_to_arg, "memory kind: %s\n", mk->name);
1305
atoms[1] = am.status;
1306
atoms[2] = am.calls;
1307
values[0] = erts_bld_string(hpp, szp, mk->name);
1309
values[1] = info_status(ma, mk, print_to_p, print_to_arg, begin_max_per, hpp, szp);
1310
values[2] = info_calls(ma, print_to_p, print_to_arg, hpp, szp);
1313
res = bld_2tup_list(hpp, szp, 3, atoms, values);
info_version(int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)

info_version(ErtsMsegAllctr_t *ma, int *print_to_p, void *print_to_arg, Uint **hpp, Uint *szp)
    Eterm res = THE_NON_VALUE;
erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
    erts_mtx_lock(&mseg_mutex);
    seg = mseg_alloc(atype, size_p, opt);
    erts_mtx_unlock(&mseg_mutex);

erts_mseg_alloc_opt(ErtsAlcType_t atype, Uint *size_p, const ErtsMsegOpt_t *opt)
    ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
    ERTS_MSEG_LOCK(ma);
    ERTS_DBG_MA_CHK_THR_ACCESS(ma);
    seg = mseg_alloc(ma, atype, size_p, opt);
    ERTS_MSEG_UNLOCK(ma);

erts_mseg_alloc(ErtsAlcType_t atype, Uint *size_p)
    return erts_mseg_alloc_opt(atype, size_p, &default_opt);
    return erts_mseg_alloc_opt(atype, size_p, &erts_mseg_default_opt);

erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg, Uint size,
                      const ErtsMsegOpt_t *opt)
    erts_mtx_lock(&mseg_mutex);
    mseg_dealloc(atype, seg, size, opt);
    erts_mtx_unlock(&mseg_mutex);

erts_mseg_dealloc_opt(ErtsAlcType_t atype, void *seg,
                      Uint size, const ErtsMsegOpt_t *opt)
    ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
    ERTS_MSEG_LOCK(ma);
    ERTS_DBG_MA_CHK_THR_ACCESS(ma);
    mseg_dealloc(ma, atype, seg, size, opt);
    ERTS_MSEG_UNLOCK(ma);

erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
    erts_mseg_dealloc_opt(atype, seg, size, &default_opt);

erts_mseg_dealloc(ErtsAlcType_t atype, void *seg, Uint size)
    erts_mseg_dealloc_opt(atype, seg, size, &erts_mseg_default_opt);

erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg, Uint old_size,
                      Uint *new_size_p, const ErtsMsegOpt_t *opt)
    erts_mtx_lock(&mseg_mutex);
    new_seg = mseg_realloc(atype, seg, old_size, new_size_p, opt);
    erts_mtx_unlock(&mseg_mutex);
    return new_seg;

erts_mseg_realloc_opt(ErtsAlcType_t atype, void *seg,
                      Uint old_size, Uint *new_size_p,
                      const ErtsMsegOpt_t *opt)
    ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
    ERTS_MSEG_LOCK(ma);
    ERTS_DBG_MA_CHK_THR_ACCESS(ma);
    new_seg = mseg_realloc(ma, atype, seg, old_size, new_size_p, opt);
    ERTS_MSEG_UNLOCK(ma);
    return new_seg;

erts_mseg_realloc(ErtsAlcType_t atype, void *seg, Uint old_size,
                  Uint *new_size_p)
    return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p, &default_opt);

erts_mseg_realloc(ErtsAlcType_t atype, void *seg,
                  Uint old_size, Uint *new_size_p)
    return erts_mseg_realloc_opt(atype, seg, old_size, new_size_p,
                                 &erts_mseg_default_opt);
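/* Each public entry point follows the same shape as the *_opt functions
 * above: pick the allocator instance for this scheduler (or the shared
 * one), lock it only if it is shared, do the work, unlock. A sketch over
 * the hypothetical lk_allctr_t from the locking sketch earlier: */

#include <stddef.h>

static void *wrapped_alloc(lk_allctr_t *ma, size_t *size_p,
                           void *(*do_alloc)(lk_allctr_t *, size_t *))
{
    void *seg;
    lk_lock(ma);                  /* no-op for a scheduler-private instance */
    seg = do_alloc(ma, size_p);
    lk_unlock(ma);
    return seg;
}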
erts_mseg_clear_cache(void)
    erts_mtx_lock(&mseg_mutex);
    erts_mtx_unlock(&mseg_mutex);

    ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_SS();
    ERTS_MSEG_LOCK(ma);
    ERTS_DBG_MA_CHK_THR_ACCESS(ma);
    for (mk=ma->mk_list; mk; mk=mk->next) {
        mseg_clear_cache(mk);
    ERTS_MSEG_UNLOCK(ma);

    ma = ERTS_MSEG_ALLCTR_IX(0);
erts_mseg_no(const ErtsMsegOpt_t *opt)
    erts_mtx_lock(&mseg_mutex);
    n = segments.current.no;
    erts_mtx_unlock(&mseg_mutex);

    ErtsMsegAllctr_t *ma = ERTS_MSEG_ALLCTR_OPT(opt);
    ERTS_MSEG_LOCK(ma);
    ERTS_DBG_MA_CHK_THR_ACCESS(ma);
    for (mk=ma->mk_list; mk; mk=mk->next) {
        n += mk->segments.current.no;
    ERTS_MSEG_UNLOCK(ma);