/* -*- Mode: C; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
 * Slabs memory allocation, based on powers-of-N. Slabs are up to 1MB in size
 * and are divided into chunks. The chunk sizes start off at the size of the
 * "item" structure plus space for a small key and value. They increase by
 * a multiplier factor from there, up to half the maximum slab size. The last
 * slab size is always 1MB, since that's the maximum item size allowed by the
 * memcached protocol.
 */
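
/*
 * Worked example (an editorial sketch; the exact numbers depend on
 * sizeof(item), settings.chunk_size and the growth factor): with a 48-byte
 * minimum payload, a factor of 1.25 and a 1MB page, chunk sizes grow
 * roughly as 96, 120, 152, 192, ... and each class holds
 * item_size_max / size chunks per page.
 */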
#include "memcached.h"
#include <sys/stat.h>
#include <sys/socket.h>
#include <sys/signal.h>
#include <sys/resource.h>
#include <fcntl.h>
#include <netinet/in.h>
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <assert.h>
#include <pthread.h>
/* powers-of-N allocation structures */

typedef struct {
    unsigned int size;      /* sizes of items */
    unsigned int perslab;   /* how many items per slab */

    void *slots;            /* list of item ptrs */
    unsigned int sl_curr;   /* total free items in list */

    void *end_page_ptr;         /* pointer to next free item at end of page, or 0 */
    unsigned int end_page_free; /* number of items remaining at end of last alloced page */

    unsigned int slabs;     /* how many slabs were allocated for this class */

    void **slab_list;       /* array of slab pointers */
    unsigned int list_size; /* size of prev array */

    unsigned int killing;   /* index+1 of dying slab, or zero if none */
    size_t requested;       /* The number of requested bytes */
} slabclass_t;

static slabclass_t slabclass[MAX_NUMBER_OF_SLAB_CLASSES];
static size_t mem_limit = 0;
static size_t mem_malloced = 0;
static int power_largest;

static void *mem_base = NULL;
static void *mem_current = NULL;
static size_t mem_avail = 0;
/**
 * Access to the slab allocator is protected by this lock
 */
static pthread_mutex_t slabs_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * Forward Declarations
 */
static int do_slabs_newslab(const unsigned int id);
static void *memory_allocate(size_t size);
#ifndef DONT_PREALLOC_SLABS
/* Preallocate as many slab pages as possible (called from slabs_init)
   on start-up, so users don't get confused out-of-memory errors when
   they do have free (in-slab) space, but no space to make new slabs.
   if maxslabs is 18 (POWER_LARGEST - POWER_SMALLEST + 1), then all
   slab types can be made. if max memory is less than 18 MB, only the
   smaller ones will be made. */
static void slabs_preallocate (const unsigned int maxslabs);
#endif
/*
 * Figures out which slab class (chunk size) is required to store an item of
 * a given size.
 *
 * Given object size, return id to use when allocating/freeing memory for object
 * 0 means error: can't store such a large object
 */

unsigned int slabs_clsid(const size_t size) {
    int res = POWER_SMALLEST;

    if (size == 0)
        return 0;
    while (size > slabclass[res].size)
        if (res++ == power_largest)     /* won't fit in the biggest slab */
            return 0;
    return res;
}
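
/*
 * Usage sketch (values are illustrative, not from a real build): if class 1
 * holds 96-byte chunks and class 2 holds 120-byte chunks, slabs_clsid(100)
 * returns 2, the smallest class whose chunks fit the request, while a
 * request larger than the biggest chunk returns 0.
 */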
/**
 * Determines the chunk sizes and initializes the slab class descriptors
 * accordingly.
 */
void slabs_init(const size_t limit, const double factor, const bool prealloc) {
    int i = POWER_SMALLEST - 1;
    unsigned int size = sizeof(item) + settings.chunk_size;

    mem_limit = limit;

    if (prealloc) {
        /* Allocate everything in a big chunk with malloc */
        mem_base = malloc(mem_limit);
        if (mem_base != NULL) {
            mem_current = mem_base;
            mem_avail = mem_limit;
        } else {
            fprintf(stderr, "Warning: Failed to allocate requested memory in"
                    " one large chunk.\nWill allocate in smaller chunks\n");
        }
    }
    memset(slabclass, 0, sizeof(slabclass));
    while (++i < POWER_LARGEST && size <= settings.item_size_max / factor) {
        /* Make sure items are always n-byte aligned */
        if (size % CHUNK_ALIGN_BYTES)
            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);

        slabclass[i].size = size;
        slabclass[i].perslab = settings.item_size_max / slabclass[i].size;
        size *= factor;
        if (settings.verbose > 1) {
            fprintf(stderr, "slab class %3d: chunk size %9u perslab %7u\n",
                    i, slabclass[i].size, slabclass[i].perslab);
        }
    }
    power_largest = i;
    slabclass[power_largest].size = settings.item_size_max;
    slabclass[power_largest].perslab = 1;
    if (settings.verbose > 1) {
        fprintf(stderr, "slab class %3d: chunk size %9u perslab %7u\n",
                i, slabclass[i].size, slabclass[i].perslab);
    }
    /* for the test suite: faking of how much we've already malloc'd */
    {
        char *t_initial_malloc = getenv("T_MEMD_INITIAL_MALLOC");
        if (t_initial_malloc) {
            mem_malloced = (size_t)atol(t_initial_malloc);
        }
    }
#ifndef DONT_PREALLOC_SLABS
    {
        char *pre_alloc = getenv("T_MEMD_SLABS_ALLOC");

        if (pre_alloc == NULL || atoi(pre_alloc) != 0) {
            slabs_preallocate(power_largest);
        }
    }
#endif
}
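
/*
 * Call-site sketch (an assumption; the actual call lives outside this file):
 * slabs_init(settings.maxbytes, settings.factor, preallocate) runs once at
 * startup, before any worker thread touches the cache.
 */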
#ifndef DONT_PREALLOC_SLABS
static void slabs_preallocate (const unsigned int maxslabs) {
    int i;
    unsigned int prealloc = 0;

    /* pre-allocate a 1MB slab in every size class so people don't get
       confused by non-intuitive "SERVER_ERROR out of memory"
       messages. this is the most common question on the mailing
       list. if you really don't want this, you can rebuild without
       these three lines. */

    for (i = POWER_SMALLEST; i <= POWER_LARGEST; i++) {
        if (++prealloc > maxslabs)
            return;
        do_slabs_newslab(i);
    }
}
#endif
static int grow_slab_list (const unsigned int id) {
    slabclass_t *p = &slabclass[id];
    if (p->slabs == p->list_size) {
        size_t new_size = (p->list_size != 0) ? p->list_size * 2 : 16;
        void *new_list = realloc(p->slab_list, new_size * sizeof(void *));
        if (new_list == 0) return 0;
        p->list_size = new_size;
        p->slab_list = new_list;
    }
    return 1;
}
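
/* Editorial note: the slab_list array starts at 16 entries and doubles
   (16, 32, 64, ...), so appending a page pointer is amortized O(1); on
   realloc failure the old list is left intact and callers see 0. */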
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wsign-compare"
#endif
static int do_slabs_newslab(const unsigned int id) {
    slabclass_t *p = &slabclass[id];
    int len = settings.slab_reassign ? settings.item_size_max
        : p->size * p->perslab;
    char *ptr;

    if ((mem_limit && mem_malloced + len > mem_limit && p->slabs > 0) ||
        (grow_slab_list(id) == 0) ||
        ((ptr = memory_allocate((size_t)len)) == 0)) {

        MEMCACHED_SLABS_SLABCLASS_ALLOCATE_FAILED(id);
        return 0;
    }

    memset(ptr, 0, (size_t)len);
    p->end_page_ptr = ptr;
    p->end_page_free = p->perslab;

    p->slab_list[p->slabs++] = ptr;
    mem_malloced += len;
    MEMCACHED_SLABS_SLABCLASS_ALLOCATE(id);

    return 1;
}
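
/* Editorial note: with slab_reassign enabled every page is allocated at the
   full settings.item_size_max, so pages are uniform in size and the
   rebalancer below can move them between classes. */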
static void *do_slabs_alloc(const size_t size, unsigned int id) {
    slabclass_t *p;
    void *ret = NULL;
    item *it = NULL;

    if (id < POWER_SMALLEST || id > power_largest) {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, 0);
        return NULL;
    }

    p = &slabclass[id];
    assert(p->sl_curr == 0 || ((item *)p->slots)->slabs_clsid == 0);

#ifdef USE_SYSTEM_MALLOC
    if (mem_limit && mem_malloced + size > mem_limit) {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
        return NULL;
    }
    mem_malloced += size;
    ret = malloc(size);
    MEMCACHED_SLABS_ALLOCATE(size, id, 0, ret);
    return ret;
#endif
    /* fail unless we have space at the end of a recently allocated page,
       we have something on our freelist, or we could allocate a new page */
    if (! (p->end_page_ptr != 0 || p->sl_curr != 0 ||
           do_slabs_newslab(id) != 0)) {
        /* We don't have more memory available */
        ret = NULL;
    } else if (p->sl_curr != 0) {
        /* return off our freelist */
        it = (item *)p->slots;
        p->slots = it->next;
        if (it->next) it->next->prev = 0;
        p->sl_curr--;
        ret = (void *)it;
    } else {
        /* if we recently allocated a whole page, return from that */
        assert(p->end_page_ptr != NULL);
        ret = p->end_page_ptr;
        if (--p->end_page_free != 0) {
            p->end_page_ptr = ((caddr_t)p->end_page_ptr) + p->size;
        } else {
            p->end_page_ptr = 0;
        }
    }

    if (ret) {
        p->requested += size;
        MEMCACHED_SLABS_ALLOCATE(size, id, p->size, ret);
    } else {
        MEMCACHED_SLABS_ALLOCATE_FAILED(size, id);
    }

    return ret;
}
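
/* Editorial note on the paths above: the freelist is preferred, then the
   tail of the most recently allocated page, and only then a brand-new page.
   p->requested accumulates the bytes callers asked for rather than chunk
   size, which is what makes the mem_requested stat useful for spotting
   per-class overhead. */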
static void do_slabs_free(void *ptr, const size_t size, unsigned int id) {
    slabclass_t *p;
    item *it;

    assert(((item *)ptr)->slabs_clsid == 0);
    assert(id >= POWER_SMALLEST && id <= power_largest);
    if (id < POWER_SMALLEST || id > power_largest)
        return;

    MEMCACHED_SLABS_FREE(size, id, ptr);
    p = &slabclass[id];

#ifdef USE_SYSTEM_MALLOC
    mem_malloced -= size;
    free(ptr);
    return;
#endif

    it = (item *)ptr;
    it->it_flags |= ITEM_SLABBED;
    it->prev = 0;
    it->next = p->slots;
    if (it->next) it->next->prev = it;
    p->slots = it;

    p->sl_curr++;
    p->requested -= size;
    return;
}
static int nz_strcmp(int nzlength, const char *nz, const char *z) {
    int zlength = strlen(z);
    return (zlength == nzlength) && (strncmp(nz, z, zlength) == 0) ? 0 : -1;
}
bool get_stats(const char *stat_type, int nkey, ADD_STAT add_stats, void *c) {
    bool ret = true;

    if (add_stats != NULL) {
        if (!stat_type) {
            /* prepare general statistics for the engine */
            STATS_LOCK();
            APPEND_STAT("bytes", "%llu", (unsigned long long)stats.curr_bytes);
            APPEND_STAT("curr_items", "%u", stats.curr_items);
            APPEND_STAT("total_items", "%u", stats.total_items);
            APPEND_STAT("evictions", "%llu",
                        (unsigned long long)stats.evictions);
            APPEND_STAT("reclaimed", "%llu",
                        (unsigned long long)stats.reclaimed);
            STATS_UNLOCK();
        } else if (nz_strcmp(nkey, stat_type, "items") == 0) {
            item_stats(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "slabs") == 0) {
            slabs_stats(add_stats, c);
        } else if (nz_strcmp(nkey, stat_type, "sizes") == 0) {
            item_stats_sizes(add_stats, c);
        } else {
            ret = false;
        }
    } else {
        ret = false;
    }

    return ret;
}
static void do_slabs_stats(ADD_STAT add_stats, void *c) {
    int i, total;
    /* Get the per-thread stats which contain some interesting aggregates */
    struct thread_stats thread_stats;
    threadlocal_stats_aggregate(&thread_stats);

    total = 0;
    for (i = POWER_SMALLEST; i <= power_largest; i++) {
        slabclass_t *p = &slabclass[i];
        if (p->slabs != 0) {
            uint32_t perslab, slabs;
            slabs = p->slabs;
            perslab = p->perslab;

            char key_str[STAT_KEY_LEN];
            char val_str[STAT_VAL_LEN];
            int klen = 0, vlen = 0;
            APPEND_NUM_STAT(i, "chunk_size", "%u", p->size);
            APPEND_NUM_STAT(i, "chunks_per_page", "%u", perslab);
            APPEND_NUM_STAT(i, "total_pages", "%u", slabs);
            APPEND_NUM_STAT(i, "total_chunks", "%u", slabs * perslab);
            APPEND_NUM_STAT(i, "used_chunks", "%u",
                            slabs*perslab - p->sl_curr - p->end_page_free);
            APPEND_NUM_STAT(i, "free_chunks", "%u", p->sl_curr);
            APPEND_NUM_STAT(i, "free_chunks_end", "%u", p->end_page_free);
            APPEND_NUM_STAT(i, "mem_requested", "%llu",
                            (unsigned long long)p->requested);
            APPEND_NUM_STAT(i, "get_hits", "%llu",
                    (unsigned long long)thread_stats.slab_stats[i].get_hits);
            APPEND_NUM_STAT(i, "cmd_set", "%llu",
                    (unsigned long long)thread_stats.slab_stats[i].set_cmds);
            APPEND_NUM_STAT(i, "delete_hits", "%llu",
                    (unsigned long long)thread_stats.slab_stats[i].delete_hits);
            APPEND_NUM_STAT(i, "incr_hits", "%llu",
                    (unsigned long long)thread_stats.slab_stats[i].incr_hits);
            APPEND_NUM_STAT(i, "decr_hits", "%llu",
                    (unsigned long long)thread_stats.slab_stats[i].decr_hits);
            APPEND_NUM_STAT(i, "cas_hits", "%llu",
                    (unsigned long long)thread_stats.slab_stats[i].cas_hits);
            APPEND_NUM_STAT(i, "cas_badval", "%llu",
                    (unsigned long long)thread_stats.slab_stats[i].cas_badval);
            APPEND_NUM_STAT(i, "touch_hits", "%llu",
                    (unsigned long long)thread_stats.slab_stats[i].touch_hits);
            total++;
        }
    }

    /* add overall slab stats and append terminator */

    APPEND_STAT("active_slabs", "%d", total);
    APPEND_STAT("total_malloced", "%llu", (unsigned long long)mem_malloced);
    add_stats(NULL, 0, NULL, 0, c);
}
static void *memory_allocate(size_t size) {
    void *ret;

    if (mem_base == NULL) {
        /* We are not using a preallocated large memory chunk */
        ret = malloc(size);
    } else {
        ret = mem_current;

        if (size > mem_avail) {
            return NULL;
        }

        /* mem_current pointer _must_ be aligned!!! */
        if (size % CHUNK_ALIGN_BYTES) {
            size += CHUNK_ALIGN_BYTES - (size % CHUNK_ALIGN_BYTES);
        }

        mem_current = ((char*)mem_current) + size;
        if (size < mem_avail) {
            mem_avail -= size;
        } else {
            mem_avail = 0;
        }
    }

    return ret;
}
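
/* Alignment sketch (assumes CHUNK_ALIGN_BYTES == 8, its usual value): a
   request for 100 bytes is padded by 8 - (100 % 8) = 4 to 104 bytes, so the
   next carve from the preallocated region stays 8-byte aligned. */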
void *slabs_alloc(size_t size, unsigned int id) {
    void *ret;

    pthread_mutex_lock(&slabs_lock);
    ret = do_slabs_alloc(size, id);
    pthread_mutex_unlock(&slabs_lock);
    return ret;
}
void slabs_free(void *ptr, size_t size, unsigned int id) {
    pthread_mutex_lock(&slabs_lock);
    do_slabs_free(ptr, size, id);
    pthread_mutex_unlock(&slabs_lock);
}
void slabs_stats(ADD_STAT add_stats, void *c) {
    pthread_mutex_lock(&slabs_lock);
    do_slabs_stats(add_stats, c);
    pthread_mutex_unlock(&slabs_lock);
}
void slabs_adjust_mem_requested(unsigned int id, size_t old, size_t ntotal)
{
    pthread_mutex_lock(&slabs_lock);
    slabclass_t *p;
    if (id < POWER_SMALLEST || id > power_largest) {
        fprintf(stderr, "Internal error! Invalid slab class\n");
        abort();
    }

    p = &slabclass[id];
    p->requested = p->requested - old + ntotal;
    pthread_mutex_unlock(&slabs_lock);
}
static pthread_cond_t maintenance_cond = PTHREAD_COND_INITIALIZER;
static volatile int do_run_slab_thread = 1;

#define DEFAULT_SLAB_BULK_CHECK 1
int slab_bulk_check = DEFAULT_SLAB_BULK_CHECK;
static int slab_rebalance_start(void) {
    slabclass_t *s_cls;
    slabclass_t *d_cls;
    int no_go = 0;

    pthread_mutex_lock(&cache_lock);
    pthread_mutex_lock(&slabs_lock);

    if (slab_rebal.s_clsid < POWER_SMALLEST ||
        slab_rebal.s_clsid > power_largest  ||
        slab_rebal.d_clsid < POWER_SMALLEST ||
        slab_rebal.d_clsid > power_largest  ||
        slab_rebal.s_clsid == slab_rebal.d_clsid)
        no_go = -2;

    s_cls = &slabclass[slab_rebal.s_clsid];
    d_cls = &slabclass[slab_rebal.d_clsid];

    if (d_cls->end_page_ptr || s_cls->end_page_ptr ||
        !grow_slab_list(slab_rebal.d_clsid)) {
        no_go = -1;
    }

    if (s_cls->slabs < 2)
        no_go = -3;

    if (no_go != 0) {
        pthread_mutex_unlock(&slabs_lock);
        pthread_mutex_unlock(&cache_lock);
        return no_go; /* Should use a wrapper function... */
    }
    s_cls->killing = 1;

    slab_rebal.slab_start = s_cls->slab_list[s_cls->killing - 1];
    slab_rebal.slab_end   = (char *)slab_rebal.slab_start +
        (s_cls->size * s_cls->perslab);
    slab_rebal.slab_pos   = slab_rebal.slab_start;
    slab_rebal.done       = 0;

    /* Also tells do_item_get to search for items in this slab */
    slab_rebalance_signal = 2;

    if (settings.verbose > 1) {
        fprintf(stderr, "Started a slab rebalance\n");
    }

    pthread_mutex_unlock(&slabs_lock);
    pthread_mutex_unlock(&cache_lock);

    STATS_LOCK();
    stats.slab_reassign_running = true;
    STATS_UNLOCK();

    return 0;
}
enum move_status {
    MOVE_PASS=0, MOVE_DONE, MOVE_BUSY
};
/* refcount == 0 is safe since nobody can incr while cache_lock is held.
 * refcount != 0 is impossible since flags/etc can be modified in other
 * threads. instead, note we found a busy one and bail. logic in do_item_get
 * will prevent busy items from continuing to be busy
 */
static int slab_rebalance_move(void) {
    slabclass_t *s_cls;
    int x;
    int was_busy = 0;
    int refcount = 0;
    enum move_status status = MOVE_PASS;

    pthread_mutex_lock(&cache_lock);
    pthread_mutex_lock(&slabs_lock);

    s_cls = &slabclass[slab_rebal.s_clsid];

    for (x = 0; x < slab_bulk_check; x++) {
        item *it = slab_rebal.slab_pos;
        status = MOVE_PASS;
        if (it->slabs_clsid != 255) {
            refcount = refcount_incr(&it->refcount);
            if (refcount == 1) { /* item is unlinked, unused */
                if (it->it_flags & ITEM_SLABBED) {
                    /* remove from slab freelist */
                    if (s_cls->slots == it) {
                        s_cls->slots = it->next;
                    }
                    if (it->next) it->next->prev = it->prev;
                    if (it->prev) it->prev->next = it->next;
                    s_cls->sl_curr--;
                    status = MOVE_DONE;
                } else {
                    status = MOVE_BUSY;
                }
            } else if (refcount == 2) { /* item is linked but not busy */
                if ((it->it_flags & ITEM_LINKED) != 0) {
                    do_item_unlink_nolock(it, hash(ITEM_key(it), it->nkey, 0));
                    status = MOVE_DONE;
                } else {
                    /* refcount == 1 + !ITEM_LINKED means the item is being
                     * uploaded to, or was just unlinked but hasn't been freed
                     * yet. Let it bleed off on its own and try again later */
                    status = MOVE_BUSY;
                }
            } else {
                if (settings.verbose > 2) {
                    fprintf(stderr, "Slab reassign hit a busy item: refcount: %d (%d -> %d)\n",
                            it->refcount, slab_rebal.s_clsid, slab_rebal.d_clsid);
                }
                status = MOVE_BUSY;
            }
        }

        switch (status) {
            case MOVE_DONE:
                it->refcount = 0;
                it->it_flags = 0;
                it->slabs_clsid = 255;
                break;
            case MOVE_BUSY:
                slab_rebal.busy_items++;
                was_busy++;
                refcount_decr(&it->refcount);
                break;
            case MOVE_PASS:
                break;
        }

        slab_rebal.slab_pos = (char *)slab_rebal.slab_pos + s_cls->size;
        if (slab_rebal.slab_pos >= slab_rebal.slab_end)
            break;
    }

    if (slab_rebal.slab_pos >= slab_rebal.slab_end) {
        /* Some items were busy, start again from the top */
        if (slab_rebal.busy_items) {
            slab_rebal.slab_pos = slab_rebal.slab_start;
            slab_rebal.busy_items = 0;
        } else {
            slab_rebal.done++;
        }
    }

    pthread_mutex_unlock(&slabs_lock);
    pthread_mutex_unlock(&cache_lock);

    return was_busy;
}
static void slab_rebalance_finish(void) {
    slabclass_t *s_cls;
    slabclass_t *d_cls;

    pthread_mutex_lock(&cache_lock);
    pthread_mutex_lock(&slabs_lock);

    s_cls = &slabclass[slab_rebal.s_clsid];
    d_cls = &slabclass[slab_rebal.d_clsid];

    /* At this point the stolen slab is completely clear */
    s_cls->slab_list[s_cls->killing - 1] =
        s_cls->slab_list[s_cls->slabs - 1];
    s_cls->slabs--;
    s_cls->killing = 0;

    memset(slab_rebal.slab_start, 0, (size_t)settings.item_size_max);

    d_cls->slab_list[d_cls->slabs++] = slab_rebal.slab_start;
    d_cls->end_page_ptr = slab_rebal.slab_start;
    d_cls->end_page_free = d_cls->perslab;

    slab_rebal.done       = 0;
    slab_rebal.s_clsid    = 0;
    slab_rebal.d_clsid    = 0;
    slab_rebal.slab_start = NULL;
    slab_rebal.slab_end   = NULL;
    slab_rebal.slab_pos   = NULL;

    slab_rebalance_signal = 0;

    pthread_mutex_unlock(&slabs_lock);
    pthread_mutex_unlock(&cache_lock);

    STATS_LOCK();
    stats.slab_reassign_running = false;
    STATS_UNLOCK();

    if (settings.verbose > 1) {
        fprintf(stderr, "finished a slab move\n");
    }
}
/* Return 1 means a decision was reached.
 * Move to its own thread (created/destroyed as needed) once automover is more
 * complex.
 */
static int slab_automove_decision(int *src, int *dst) {
    static uint64_t evicted_old[POWER_LARGEST];
    static unsigned int slab_zeroes[POWER_LARGEST];
    static unsigned int slab_winner = 0;
    static unsigned int slab_wins   = 0;
    uint64_t evicted_new[POWER_LARGEST];
    uint64_t evicted_diff = 0;
    uint64_t evicted_max  = 0;
    unsigned int highest_slab = 0;
    unsigned int total_pages[POWER_LARGEST];
    int i;
    int source = 0;
    int dest = 0;
    static rel_time_t next_run;

    /* Run less frequently than the slabmove tester. */
    if (current_time >= next_run) {
        next_run = current_time + 10;
    } else {
        return 0;
    }

    item_stats_evictions(evicted_new);
    pthread_mutex_lock(&cache_lock);
    for (i = POWER_SMALLEST; i < power_largest; i++) {
        total_pages[i] = slabclass[i].slabs;
    }
    pthread_mutex_unlock(&cache_lock);

    /* Find a candidate source; something with zero evicts 3+ times */
    for (i = POWER_SMALLEST; i < power_largest; i++) {
        evicted_diff = evicted_new[i] - evicted_old[i];
        if (evicted_diff == 0 && total_pages[i] > 2) {
            slab_zeroes[i]++;
            if (source == 0 && slab_zeroes[i] >= 3)
                source = i;
        } else {
            slab_zeroes[i] = 0;
            if (evicted_diff > evicted_max) {
                evicted_max = evicted_diff;
                highest_slab = i;
            }
        }
        evicted_old[i] = evicted_new[i];
    }

    /* Pick a valid destination */
    if (slab_winner != 0 && slab_winner == highest_slab) {
        slab_wins++;
        if (slab_wins >= 3)
            dest = slab_winner;
    } else {
        slab_wins = 1;
        slab_winner = highest_slab;
    }

    if (source && dest) {
        *src = source;
        *dst = dest;
        return 1;
    }
    return 0;
}
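
/* Heuristic sketch: a class becomes the reassign source after three
   consecutive ~10-second windows with zero evictions while holding more
   than two pages; a class becomes the destination after leading the
   eviction count three windows in a row. A page moves only when both
   exist. */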
#ifndef __INTEL_COMPILER
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
/* Slab rebalancer thread.
 * Does not use spinlocks since it is not timing sensitive. Burn less CPU and
 * go to sleep if locks are contended
 */
static void *slab_maintenance_thread(void *arg) {
    int was_busy = 0;
    int src = -1;
    int dest = -1;

    while (do_run_slab_thread) {
        if (slab_rebalance_signal == 1) {
            if (slab_rebalance_start() < 0) {
                /* Handle errors with more specificity as required. */
                slab_rebalance_signal = 0;
            }
            was_busy = 0;
        } else if (slab_rebalance_signal && slab_rebal.slab_start != NULL) {
            /* If we have a decision to continue, continue it */
            was_busy = slab_rebalance_move();
        } else if (settings.slab_automove && slab_automove_decision(&src, &dest) == 1) {
            /* Blind to the return codes. It will retry on its own */
            slabs_reassign(src, dest);
        }

        if (slab_rebal.done) {
            slab_rebalance_finish();
        }

        /* Sleep a bit if no work to do, or waiting on busy objects */
        if (was_busy || !slab_rebalance_signal)
            usleep(50);
    }
    return NULL;
}
static enum reassign_result_type do_slabs_reassign(int src, int dst) {
    if (slab_rebalance_signal != 0)
        return REASSIGN_RUNNING;

    if (src == dst)
        return REASSIGN_SRC_DST_SAME;

    if (src < POWER_SMALLEST || src > power_largest ||
        dst < POWER_SMALLEST || dst > power_largest)
        return REASSIGN_BADCLASS;

    if (slabclass[src].slabs < 2)
        return REASSIGN_NOSPARE;

    if (slabclass[dst].end_page_ptr)
        return REASSIGN_DEST_NOT_FULL;

    if (slabclass[src].end_page_ptr)
        return REASSIGN_SRC_NOT_SAFE;

    slab_rebal.s_clsid = src;
    slab_rebal.d_clsid = dst;

    slab_rebalance_signal = 1;

    return REASSIGN_OK;
}
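
/* Editorial note: taken together, the checks above mean a reassign only
   starts when the source class has a spare page (slabs >= 2) and neither
   class has a partially consumed end page; REASSIGN_DEST_NOT_FULL and
   REASSIGN_SRC_NOT_SAFE report those cases to the caller. */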
enum reassign_result_type slabs_reassign(int src, int dst) {
    enum reassign_result_type ret;
    mutex_lock(&slabs_lock);
    ret = do_slabs_reassign(src, dst);
    pthread_mutex_unlock(&slabs_lock);
    return ret;
}
static pthread_t maintenance_tid;
int start_slab_maintenance_thread(void) {
    int ret;
    slab_rebalance_signal = 0;
    slab_rebal.slab_start = NULL;
    char *env = getenv("MEMCACHED_SLAB_BULK_CHECK");
    if (env != NULL) {
        slab_bulk_check = atoi(env);
        if (slab_bulk_check == 0) {
            slab_bulk_check = DEFAULT_SLAB_BULK_CHECK;
        }
    }

    if ((ret = pthread_create(&maintenance_tid, NULL,
                              slab_maintenance_thread, NULL)) != 0) {
        fprintf(stderr, "Can't create thread: %s\n", strerror(ret));
        return -1;
    }
    return 0;
}
void stop_slab_maintenance_thread(void) {
    mutex_lock(&cache_lock);
    do_run_slab_thread = 0;
    pthread_cond_signal(&maintenance_cond);
    pthread_mutex_unlock(&cache_lock);

    /* Wait for the maintenance thread to stop */
    pthread_join(maintenance_tid, NULL);
}