298
298
p->sl_total = new_size;
300
300
p->slots[p->sl_curr++] = ptr;
301
p->requested -= size;
305
/*
 * Compare a counted (not necessarily NUL-terminated) byte string `nz` of
 * length `nzlength` against the NUL-terminated string `z`.
 * Returns 0 on an exact match (same length, same bytes), -1 otherwise.
 * Unlike strcmp(), this only distinguishes match from no-match.
 */
static int nz_strcmp(int nzlength, const char *nz, const char *z) {
    int zlength = strlen(z);
    /* lengths must agree before comparing bytes, since nz has no terminator */
    return (zlength == nzlength) && (strncmp(nz, z, zlength) == 0) ? 0 : -1;
}
/*
 * Dispatch a "stats" request.  With a NULL stat_type, appends the general
 * engine-level counters via add_stats; otherwise routes "items"/"slabs"/
 * "sizes" to the matching detailed stats dumper.  Returns false for an
 * unknown stat_type or a NULL add_stats callback so the caller can report
 * an error.
 *
 * NOTE(review): the incoming version chained the nz_strcmp() dispatch as
 * `else if` directly off the `add_stats != NULL` test, so the detailed
 * branches could only run when the callback was NULL, and the bool function
 * never returned a value.  Restored the `if (!stat_type)` guard and the
 * return path.
 */
bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
    bool ret = true;

    if (add_stats != NULL) {
        if (!stat_type) {
            /* prepare general statistics for the engine */
            APPEND_STAT("bytes", "%llu", (unsigned long long)stats.curr_bytes);
            APPEND_STAT("curr_items", "%u", stats.curr_items);
            APPEND_STAT("total_items", "%u", stats.total_items);
            APPEND_STAT("evictions", "%llu",
                        (unsigned long long)stats.evictions);
        } else if (nz_strcmp(nkey, stat_type, "items") == 0) {
            item_stats(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "slabs") == 0) {
            slabs_stats(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "sizes") == 0) {
            item_stats_sizes(add_stats, c);
        } else {
            /* unknown stat request */
            ret = false;
        }
    } else {
        ret = false;
    }

    return ret;
}
char* do_slabs_stats(int *buflen) {
338
static void do_slabs_stats(ADD_STAT add_stats, void *c) {
307
char *buf = (char *)malloc(power_largest * 200 + 100);
311
if (buf == NULL) return NULL;
340
/* Get the per-thread stats which contain some interesting aggregates */
341
struct thread_stats thread_stats;
342
threadlocal_stats_aggregate(&thread_stats);
314
345
for(i = POWER_SMALLEST; i <= power_largest; i++) {
315
346
slabclass_t *p = &slabclass[i];
316
347
if (p->slabs != 0) {
317
unsigned int perslab, slabs;
348
uint32_t perslab, slabs;
319
349
slabs = p->slabs;
320
350
perslab = p->perslab;
322
bufcurr += sprintf(bufcurr, "STAT %d:chunk_size %u\r\n", i, p->size);
323
bufcurr += sprintf(bufcurr, "STAT %d:chunks_per_page %u\r\n", i, perslab);
324
bufcurr += sprintf(bufcurr, "STAT %d:total_pages %u\r\n", i, slabs);
325
bufcurr += sprintf(bufcurr, "STAT %d:total_chunks %u\r\n", i, slabs*perslab);
326
bufcurr += sprintf(bufcurr, "STAT %d:used_chunks %u\r\n", i, slabs*perslab - p->sl_curr - p->end_page_free);
327
bufcurr += sprintf(bufcurr, "STAT %d:free_chunks %u\r\n", i, p->sl_curr);
328
bufcurr += sprintf(bufcurr, "STAT %d:free_chunks_end %u\r\n", i, p->end_page_free);
352
char key_str[STAT_KEY_LEN];
353
char val_str[STAT_VAL_LEN];
354
int klen = 0, vlen = 0;
356
APPEND_NUM_STAT(i, "chunk_size", "%u", p->size);
357
APPEND_NUM_STAT(i, "chunks_per_page", "%u", perslab);
358
APPEND_NUM_STAT(i, "total_pages", "%u", slabs);
359
APPEND_NUM_STAT(i, "total_chunks", "%u", slabs * perslab);
360
APPEND_NUM_STAT(i, "used_chunks", "%u",
361
slabs*perslab - p->sl_curr - p->end_page_free);
362
APPEND_NUM_STAT(i, "free_chunks", "%u", p->sl_curr);
363
APPEND_NUM_STAT(i, "free_chunks_end", "%u", p->end_page_free);
364
APPEND_NUM_STAT(i, "mem_requested", "%llu",
365
(unsigned long long)p->requested);
366
APPEND_NUM_STAT(i, "get_hits", "%llu",
367
(unsigned long long)thread_stats.slab_stats[i].get_hits);
368
APPEND_NUM_STAT(i, "cmd_set", "%llu",
369
(unsigned long long)thread_stats.slab_stats[i].set_cmds);
370
APPEND_NUM_STAT(i, "delete_hits", "%llu",
371
(unsigned long long)thread_stats.slab_stats[i].delete_hits);
372
APPEND_NUM_STAT(i, "incr_hits", "%llu",
373
(unsigned long long)thread_stats.slab_stats[i].incr_hits);
374
APPEND_NUM_STAT(i, "decr_hits", "%llu",
375
(unsigned long long)thread_stats.slab_stats[i].decr_hits);
376
APPEND_NUM_STAT(i, "cas_hits", "%llu",
377
(unsigned long long)thread_stats.slab_stats[i].cas_hits);
378
APPEND_NUM_STAT(i, "cas_badval", "%llu",
379
(unsigned long long)thread_stats.slab_stats[i].cas_badval);
332
bufcurr += sprintf(bufcurr, "STAT active_slabs %d\r\nSTAT total_malloced %llu\r\n", total, (unsigned long long)mem_malloced);
333
bufcurr += sprintf(bufcurr, "END\r\n");
334
*buflen = bufcurr - buf;
338
#ifdef ALLOW_SLABS_REASSIGN
/* Blows away all the items in a slab class and moves its slabs to another
   class. This is only used by the "slabs reassign" command, for manual tweaking
   of memory allocation. It's disabled by default since it requires that all
   slabs be the same size (which can waste space for chunk size mantissas of
   other than 2.0).
   Returns:  1 = success
             0 = fail
            -1 = tried. busy. send again shortly.
   NOTE(review): the incoming fragment was missing the early `return 0`s on
   the validation checks, the `p->sl_curr--` in the free-list compaction
   (so it read one past the last valid slot), the `p->slabs--` /
   `p->killing = 0` bookkeeping, and used the non-standard cast-as-lvalue
   `(char*)iter += size`.  Reconstructed; verify against upstream. */
int do_slabs_reassign(unsigned char srcid, unsigned char dstid) {
    void *slab, *slab_end;
    slabclass_t *p, *dp;
    void *iter;
    bool was_busy = false;
    int fi;

    if (srcid < POWER_SMALLEST || srcid > power_largest ||
        dstid < POWER_SMALLEST || dstid > power_largest)
        return 0;

    p = &slabclass[srcid];
    dp = &slabclass[dstid];

    /* fail if src still populating, or no slab to give up in src */
    if (p->end_page_ptr || ! p->slabs)
        return 0;

    /* fail if dst is still growing or we can't make room to hold its new one */
    if (dp->end_page_ptr || ! grow_slab_list(dstid))
        return 0;

    if (p->killing == 0) p->killing = 1;

    slab = p->slab_list[p->killing - 1];
    slab_end = (char*)slab + POWER_BLOCK;

    /* unlink every live item in the victim slab; remember (but don't abort
       on) items that are still referenced */
    for (iter = slab; iter < slab_end; iter = (char*)iter + p->size) {
        item *it = (item *)iter;
        if (it->slabs_clsid) {
            if (it->refcount) was_busy = true;
            item_unlink(it);
        }
    }

    /* go through free list and discard items that are no longer part of this slab */
    for (fi = p->sl_curr - 1; fi >= 0; fi--) {
        if (p->slots[fi] >= slab && p->slots[fi] < slab_end) {
            p->sl_curr--;
            /* fill the hole with the (former) last entry */
            if (p->sl_curr > fi) p->slots[fi] = p->slots[p->sl_curr];
        }
    }

    if (was_busy) return -1;

    /* if good, now move it to the dst slab class */
    p->slab_list[p->killing - 1] = p->slab_list[p->slabs - 1];
    p->slabs--;
    p->killing = 0;
    dp->slab_list[dp->slabs++] = slab;
    dp->end_page_ptr = slab;
    dp->end_page_free = dp->perslab;
    /* this isn't too critical, but other parts of the code do asserts to
       make sure this field is always 0. */
    for (iter = slab; iter < slab_end; iter = (char*)iter + dp->size) {
        ((item *)iter)->slabs_clsid = 0;
    }
    return 1;
}
385
/* add overall slab stats and append terminator */
/* NOTE(review): this fragment (and the bare numeric lines around it, which
   are not valid C) appears to be diff/merge residue detached from
   do_slabs_stats() above — `total`, `mem_malloced` and `c` are only in
   scope inside that function.  Verify against upstream and remove or
   re-home before building. */
387
APPEND_STAT("active_slabs", "%d", total);
388
APPEND_STAT("total_malloced", "%llu", (unsigned long long)mem_malloced);
389
add_stats(NULL, 0, NULL, 0, c);
410
392
static void *memory_allocate(size_t size) {