/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 *
 * This is the percpu allocator which can handle both static and dynamic
 * areas.  Percpu areas are allocated in chunks.  Each chunk consists
 * of a boot-time determined number of units and the first chunk is
 * used for static percpu variables in the kernel image (special boot
 * time alloc/init handling is necessary as these areas need to be
 * brought up before allocation services are running).  Units grow as
 * necessary and all units grow or shrink in unison.  When a chunk is
 * filled up, another chunk is allocated.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done in offset-size areas of single unit space.  That
 * is, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of
 * c1:u0, c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly
 * to cpus.  On NUMA, the mapping can be non-linear and even sparse.
 * Percpu access can be done by configuring percpu base registers
 * according to the cpu to unit mapping and pcpu_unit_size.
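 *
 * For example (illustrative numbers only, assuming the simple linear
 * mapping): if c1's base address is BASE and pcpu_unit_size is 64k,
 * its four units live at BASE + 0k, 64k, 128k and 192k, so that
 * 512-byte area at offset 6k shows up at BASE + 6k, BASE + 70k,
 * BASE + 134k and BASE + 198k respectively.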
 *
 * There are usually many small percpu allocations, many of them being
 * as small as 4 bytes.  The allocator organizes chunks into lists
 * according to free size and tries to allocate from the fullest one.
 * Each chunk keeps the maximum contiguous area size hint which is
 * guaranteed to be equal to or larger than the maximum contiguous
 * area in the chunk.  This helps the allocator not to iterate the
 * chunk maps unnecessarily.
 *
 * Allocation state in each chunk is kept using an array of integers
 * on chunk->map.  A positive value in the map represents a free
 * region and a negative value an allocated one.  Allocation inside a
 * chunk is done by scanning this map sequentially and serving the
 * first matching entry.  This is mostly copied from the
 * percpu_modalloc() allocator.  Chunks can be determined from the
 * address using the index field in the page struct.  The index field
 * contains a pointer to the chunk.
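 *
 * As an illustration (hypothetical values): for a 64k unit, a map of
 * { -768, 128, -64, 64576 } with map_used == 4 describes 768
 * allocated bytes, a 128 byte free area, a 64 byte allocation and
 * 64576 free bytes at the end; the offset of each area is the sum of
 * abs() of the preceding entries.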
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#include <linux/bitmap.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +				\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
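
/*
 * Illustrative note (not part of the original file): the two default
 * macros above are exact inverses of each other, so for any address
 * inside the percpu region:
 *
 *	void __percpu *ptr = __addr_to_pcpu_ptr(addr);
 *	BUG_ON(__pcpu_ptr_to_addr(ptr) != addr);
 */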

struct pcpu_chunk {
	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_size;	/* free bytes in the chunk */
	int			contig_hint;	/* max contiguous size hint */
	void			*base_addr;	/* base address of this chunk */
	int			map_used;	/* # of map entries used */
	int			map_alloc;	/* # of map entries allocated */
	int			*map;		/* allocation map */
	void			*data;		/* chunk data */
	bool			immutable;	/* no [de]population allowed */
	unsigned long		populated[];	/* populated bitmap */
};

static int pcpu_unit_pages __read_mostly;
static int pcpu_unit_size __read_mostly;
static int pcpu_nr_units __read_mostly;
static int pcpu_atom_size __read_mostly;
static int pcpu_nr_slots __read_mostly;
static size_t pcpu_chunk_struct_size __read_mostly;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __read_mostly;
static unsigned int pcpu_high_unit_cpu __read_mostly;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __read_mostly;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __read_mostly;
static const unsigned long *pcpu_group_offsets __read_mostly;
static const size_t *pcpu_group_sizes __read_mostly;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
static struct pcpu_chunk *pcpu_first_chunk;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  The amount of
 * reserved offset is in pcpu_reserved_chunk_limit.  When the reserved
 * area doesn't exist, the following variables contain NULL and 0
 * respectively.
 */
static struct pcpu_chunk *pcpu_reserved_chunk;
static int pcpu_reserved_chunk_limit;

/*
 * Synchronization rules.
 *
 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 * protects allocation/reclaim paths, chunks, populated bitmap and
 * vmalloc mapping.  The latter is a spinlock and protects the index
 * data structures - chunk slots, chunks and area maps in chunks.
 *
 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 * pcpu_lock is grabbed and released as necessary.  All actual memory
 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 * general, percpu memory can't be allocated with irq off but
 * irqsave/restore are still used in the alloc path so that it can be
 * used from the early init path - sched_init() specifically.
 *
 * The free path accesses and alters only the index data structures,
 * so it can be safely called from atomic context.  When memory needs
 * to be returned to the system, the free path schedules reclaim_work
 * which grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to
 * be reclaimed, releases both locks and frees the chunks.  Note that
 * it's necessary to grab both locks to remove a chunk from
 * circulation as the allocation path might be referencing the chunk
 * with only pcpu_alloc_mutex locked.
 */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */

static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */

/* reclaim work to release fully free chunks, scheduled from free path */
static void pcpu_reclaim(struct work_struct *work);
static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);

static bool pcpu_addr_in_first_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start && addr < first_start + pcpu_unit_size;
}

static bool pcpu_addr_in_reserved_chunk(void *addr)
{
	void *first_start = pcpu_first_chunk->base_addr;

	return addr >= first_start &&
	       addr < first_start + pcpu_reserved_chunk_limit;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
		return 0;

	return pcpu_size_to_slot(chunk->free_size);
}
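
/*
 * Worked example (illustrative): with PCPU_SLOT_BASE_SHIFT == 5,
 * fls(64) == 7, so a chunk with 64 free bytes lands in slot
 * max(7 - 5 + 2, 1) == 4, and small sizes bottom out at slot 1 via
 * the max().  A chunk whose free_size or contig_hint is smaller than
 * sizeof(int) is parked in slot 0, which allocation never scans; a
 * completely free chunk is special-cased into the last slot.
 */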

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
		(page_idx << PAGE_SHIFT);
}

static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
					   int *rs, int *re, int end)
{
	*rs = find_next_zero_bit(chunk->populated, end, *rs);
	*re = find_next_bit(chunk->populated, end, *rs + 1);
}

static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
					 int *rs, int *re, int end)
{
	*rs = find_next_bit(chunk->populated, end, *rs);
	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
}

/*
 * (Un)populated page region iterators.  Iterate over (un)populated
 * page regions between @start and @end in @chunk.  @rs and @re should
 * be integer variables and will be set to start and end page index of
 * the current region.
 */
#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
	     (rs) < (re);						    \
	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
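
/*
 * Minimal usage sketch (illustrative, assumed local variables):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pr_debug("pages [%d, %d) need populating\n", rs, re);
 */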

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 * memory is always zeroed.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);
	return vzalloc(size);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 * @size: size of the area
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr, size_t size)
{
	if (size <= PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
		if (oslot < nslot)
			list_move(&chunk->list, &pcpu_slot[nslot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
	}
}

/**
 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 * @chunk: chunk of interest
 *
 * Determine whether area map of @chunk needs to be extended to
 * accommodate a new allocation.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * New target map allocation length if extension is necessary, 0
 * otherwise.
 */
static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
{
	int new_alloc;

	if (chunk->map_alloc >= chunk->map_used + 2)
		return 0;

	new_alloc = PCPU_DFL_MAP_ALLOC;
	while (new_alloc < chunk->map_used + 2)
		new_alloc *= 2;

	return new_alloc;
}

/**
 * pcpu_extend_area_map - extend area map of a chunk
 * @chunk: chunk of interest
 * @new_alloc: new target allocation length of the area map
 *
 * Extend area map of @chunk to have @new_alloc entries.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
{
	int *old = NULL, *new = NULL;
	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
	unsigned long flags;

	new = pcpu_mem_zalloc(new_size);
	if (!new)
		return -ENOMEM;

	/* acquire pcpu_lock and switch to new area map */
	spin_lock_irqsave(&pcpu_lock, flags);

	if (new_alloc <= chunk->map_alloc)
		goto out_unlock;

	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
	old = chunk->map;

	memcpy(new, old, old_size);

	chunk->map_alloc = new_alloc;
	chunk->map = new;
	new = NULL;

out_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/*
	 * pcpu_mem_free() might end up calling vfree() which uses
	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
	 */
	pcpu_mem_free(old, old_size);
	pcpu_mem_free(new, new_size);

	return 0;
}

/**
 * pcpu_split_block - split a map block
 * @chunk: chunk of interest
 * @i: index of map block to split
 * @head: head size in bytes (can be 0)
 * @tail: tail size in bytes (can be 0)
 *
 * Split the @i'th map block into two or three blocks.  If @head is
 * non-zero, @head bytes block is inserted before block @i moving it
 * to @i+1 and reducing its size by @head bytes.
 *
 * If @tail is non-zero, the target block, which can be @i or @i+1
 * depending on @head, is reduced by @tail bytes and @tail byte block
 * is inserted after the target block.
 *
 * @chunk->map must have enough free slots to accommodate the split.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
			     int head, int tail)
{
	int nr_extra = !!head + !!tail;

	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);

	/* insert new subblocks */
	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
		sizeof(chunk->map[0]) * (chunk->map_used - i));
	chunk->map_used += nr_extra;

	if (head) {
		chunk->map[i + 1] = chunk->map[i] - head;
		chunk->map[i++] = head;
	}
	if (tail) {
		chunk->map[i++] -= tail;
		chunk->map[i] = tail;
	}
}
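
/*
 * Worked example (illustrative values): splitting a 512 byte free
 * block at index i with head == 64 and tail == 128 first shifts the
 * map to open two slots, then rewrites the run as { 64, 320, 128 } -
 * the 64 byte head, the 320 byte target area and the 128 byte tail,
 * all still marked free until the caller negates the target entry.
 */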

/**
 * pcpu_alloc_area - allocate area from a pcpu_chunk
 * @chunk: chunk of interest
 * @size: wanted size in bytes
 * @align: wanted align
 *
 * Try to allocate @size bytes area aligned at @align from @chunk.
 * Note that this function only allocates the offset.  It doesn't
 * populate or map the area.
 *
 * @chunk->map must have at least two free slots.
 *
 * CONTEXT:
 * pcpu_lock.
 *
 * RETURNS:
 * Allocated offset in @chunk on success, -1 if no matching area is
 * found.
 */
static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
{
	int oslot = pcpu_chunk_slot(chunk);
	int max_contig = 0;
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
		bool is_last = i + 1 == chunk->map_used;
		int head, tail;

		/* extra for alignment requirement */
		head = ALIGN(off, align) - off;
		BUG_ON(i == 0 && head != 0);

		if (chunk->map[i] < 0)
			continue;
		if (chunk->map[i] < head + size) {
			max_contig = max(chunk->map[i], max_contig);
			continue;
		}

		/*
		 * If head is small or the previous block is free,
		 * merge'em.  Note that 'small' is defined as smaller
		 * than sizeof(int), which is very small but isn't too
		 * uncommon for percpu allocations.
		 */
		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
			if (chunk->map[i - 1] > 0)
				chunk->map[i - 1] += head;
			else {
				chunk->map[i - 1] -= head;
				chunk->free_size -= head;
			}
			chunk->map[i] -= head;
			off += head;
			head = 0;
		}

		/* if tail is small, just keep it around */
		tail = chunk->map[i] - head - size;
		if (tail < sizeof(int))
			tail = 0;

		/* split if warranted */
		if (head || tail) {
			pcpu_split_block(chunk, i, head, tail);
			if (head) {
				i++;
				off += head;
				max_contig = max(chunk->map[i - 1], max_contig);
			}
			if (tail)
				max_contig = max(chunk->map[i + 1], max_contig);
		}

		/* update hint and mark allocated */
		if (is_last)
			chunk->contig_hint = max_contig; /* fully scanned */
		else
			chunk->contig_hint = max(chunk->contig_hint,
						 max_contig);

		chunk->free_size -= chunk->map[i];
		chunk->map[i] = -chunk->map[i];

		pcpu_chunk_relocate(chunk, oslot);
		return off;
	}

	chunk->contig_hint = max_contig;	/* fully scanned */
	pcpu_chunk_relocate(chunk, oslot);

	/* tell the upper layer that this chunk has no matching area */
	return -1;
}

/**
 * pcpu_free_area - free area to a pcpu_chunk
 * @chunk: chunk of interest
 * @freeme: offset of area to free
 *
 * Free the area starting at @freeme in @chunk.  Note that this
 * function only modifies the allocation map.  It doesn't depopulate
 * or unmap the area.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
{
	int oslot = pcpu_chunk_slot(chunk);
	int i, off;

	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
		if (off == freeme)
			break;
	BUG_ON(off != freeme);
	BUG_ON(chunk->map[i] > 0);

	chunk->map[i] = -chunk->map[i];
	chunk->free_size += chunk->map[i];

	/* merge with previous? */
	if (i > 0 && chunk->map[i - 1] >= 0) {
		chunk->map[i - 1] += chunk->map[i];
		chunk->map_used--;
		memmove(&chunk->map[i], &chunk->map[i + 1],
			(chunk->map_used - i) * sizeof(chunk->map[0]));
		i--;
	}
	/* merge with next? */
	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
		chunk->map[i] += chunk->map[i + 1];
		chunk->map_used--;
		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
	}

	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
	pcpu_chunk_relocate(chunk, oslot);
}
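
/*
 * Illustrative example (hypothetical values, continuing the map
 * sketch from the file header): freeing the -64 entry in
 * { -768, 128, -64, 64576 } flips it to 64 and both merges fire,
 * collapsing the run into { -768, 64768 } - a single free region -
 * and raising contig_hint accordingly.
 */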

static struct pcpu_chunk *pcpu_alloc_chunk(void)
{
	struct pcpu_chunk *chunk;

	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
	if (!chunk)
		return NULL;

	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
				     sizeof(chunk->map[0]));
	if (!chunk->map) {
		kfree(chunk);
		return NULL;
	}

	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
	chunk->map[chunk->map_used++] = pcpu_unit_size;

	INIT_LIST_HEAD(&chunk->list);
	chunk->free_size = pcpu_unit_size;
	chunk->contig_hint = pcpu_unit_size;

	return chunk;
}

static void pcpu_free_chunk(struct pcpu_chunk *chunk)
{
	if (!chunk)
		return;
	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
	kfree(chunk);
}

/*
 * Chunk management implementation.
 *
 * To allow different implementations, chunk alloc/free and
 * [de]population are implemented in a separate file which is pulled
 * into this file and compiled together.  The following functions
 * should be implemented.
 *
 * pcpu_populate_chunk		- populate the specified range of a chunk
 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 * pcpu_create_chunk		- create a new chunk
 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 * pcpu_addr_to_page		- translate address to the backing page struct
 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 */
static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
static struct pcpu_chunk *pcpu_create_chunk(void);
static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
static struct page *pcpu_addr_to_page(void *addr);
static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);

#ifdef CONFIG_NEED_PER_CPU_KM
#include "percpu-km.c"
#else
#include "percpu-vm.c"
#endif

/**
 * pcpu_chunk_addr_search - determine chunk containing specified address
 * @addr: address for which the chunk needs to be determined.
 *
 * RETURNS:
 * The address of the found chunk.
 */
static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
{
	/* is it in the first chunk? */
	if (pcpu_addr_in_first_chunk(addr)) {
		/* is it in the reserved area? */
		if (pcpu_addr_in_reserved_chunk(addr))
			return pcpu_reserved_chunk;
		return pcpu_first_chunk;
	}

	/*
	 * The address is relative to unit0 which might be unused and
	 * thus unmapped.  Offset the address to the unit space of the
	 * current processor before looking it up in the vmalloc
	 * space.  Note that any possible cpu id can be used here, so
	 * there's no need to worry about preemption or cpu hotplug.
	 */
	addr += pcpu_unit_offsets[raw_smp_processor_id()];
	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
}

/**
 * pcpu_alloc - the percpu allocator
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 * @reserved: allocate from the reserved chunk if available
 *
 * Allocate percpu area of @size bytes aligned at @align.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
{
	static int warn_limit = 10;
	struct pcpu_chunk *chunk;
	const char *err;
	int slot, off, new_alloc;
	unsigned long flags;

	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
		WARN(true, "illegal size (%zu) or align (%zu) for "
		     "percpu allocation\n", size, align);
		return NULL;
	}

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irqsave(&pcpu_lock, flags);

	/* serve reserved allocations from the reserved chunk if available */
	if (reserved && pcpu_reserved_chunk) {
		chunk = pcpu_reserved_chunk;

		if (size > chunk->contig_hint) {
			err = "alloc from reserved chunk failed";
			goto fail_unlock;
		}

		while ((new_alloc = pcpu_need_to_extend(chunk))) {
			spin_unlock_irqrestore(&pcpu_lock, flags);
			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
				err = "failed to extend area map of reserved chunk";
				goto fail_unlock_mutex;
			}
			spin_lock_irqsave(&pcpu_lock, flags);
		}

		off = pcpu_alloc_area(chunk, size, align);
		if (off >= 0)
			goto area_found;

		err = "alloc from reserved chunk failed";
		goto fail_unlock;
	}

restart:
	/* search through normal chunks */
	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
			if (size > chunk->contig_hint)
				continue;

			new_alloc = pcpu_need_to_extend(chunk);
			if (new_alloc) {
				spin_unlock_irqrestore(&pcpu_lock, flags);
				if (pcpu_extend_area_map(chunk,
							 new_alloc) < 0) {
					err = "failed to extend area map";
					goto fail_unlock_mutex;
				}
				spin_lock_irqsave(&pcpu_lock, flags);
				/*
				 * pcpu_lock has been dropped, need to
				 * restart cpu_slot list walking.
				 */
				goto restart;
			}

			off = pcpu_alloc_area(chunk, size, align);
			if (off >= 0)
				goto area_found;
		}
	}

	/* hmmm... no space left, create a new chunk */
	spin_unlock_irqrestore(&pcpu_lock, flags);

	chunk = pcpu_create_chunk();
	if (!chunk) {
		err = "failed to allocate new chunk";
		goto fail_unlock_mutex;
	}

	spin_lock_irqsave(&pcpu_lock, flags);
	pcpu_chunk_relocate(chunk, -1);
	goto restart;

area_found:
	spin_unlock_irqrestore(&pcpu_lock, flags);

	/* populate, map and clear the area */
	if (pcpu_populate_chunk(chunk, off, size)) {
		spin_lock_irqsave(&pcpu_lock, flags);
		pcpu_free_area(chunk, off);
		err = "failed to populate";
		goto fail_unlock;
	}

	mutex_unlock(&pcpu_alloc_mutex);

	/* return address relative to base address */
	return __addr_to_pcpu_ptr(chunk->base_addr + off);

fail_unlock:
	spin_unlock_irqrestore(&pcpu_lock, flags);
fail_unlock_mutex:
	mutex_unlock(&pcpu_alloc_mutex);
	if (warn_limit) {
		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
			   "%s\n", size, align, err);
		dump_stack();
		if (!--warn_limit)
			pr_info("PERCPU: limit reached, disable warning\n");
	}
	return NULL;
}

/**
 * __alloc_percpu - allocate dynamic percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 * Might sleep.  Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, false);
}
EXPORT_SYMBOL_GPL(__alloc_percpu);

/**
 * __alloc_reserved_percpu - allocate reserved percpu area
 * @size: size of area to allocate in bytes
 * @align: alignment of area (max PAGE_SIZE)
 *
 * Allocate zero-filled percpu area of @size bytes aligned at @align
 * from reserved percpu area if arch has set it up; otherwise,
 * allocation is served from the same dynamic area.  Might sleep.
 * Might trigger writeouts.
 *
 * CONTEXT:
 * Does GFP_KERNEL allocation.
 *
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
{
	return pcpu_alloc(size, align, true);
}
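
/*
 * Usage sketch (illustrative only; "ref" is a hypothetical variable):
 *
 *	int __percpu *ref = __alloc_percpu(sizeof(int), __alignof__(int));
 *
 *	if (ref) {
 *		get_cpu();
 *		(*this_cpu_ptr(ref))++;
 *		put_cpu();
 *		free_percpu(ref);
 *	}
 */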

/**
 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 * @work: unused
 *
 * Reclaim all fully free chunks except for the first one.
 *
 * CONTEXT:
 * workqueue context.
 */
static void pcpu_reclaim(struct work_struct *work)
{
	LIST_HEAD(todo);
	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
	struct pcpu_chunk *chunk, *next;

	mutex_lock(&pcpu_alloc_mutex);
	spin_lock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, head, list) {
		WARN_ON(chunk->immutable);

		/* spare the first one */
		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
			continue;

		list_move(&chunk->list, &todo);
	}

	spin_unlock_irq(&pcpu_lock);

	list_for_each_entry_safe(chunk, next, &todo, list) {
		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
		pcpu_destroy_chunk(chunk);
	}

	mutex_unlock(&pcpu_alloc_mutex);
}

/**
 * free_percpu - free percpu area
 * @ptr: pointer to area to free
 *
 * Free percpu area @ptr.
 *
 * CONTEXT:
 * Can be called from atomic context.
 */
void free_percpu(void __percpu *ptr)
{
	void *addr;
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int off;

	if (!ptr)
		return;

	addr = __pcpu_ptr_to_addr(ptr);

	spin_lock_irqsave(&pcpu_lock, flags);

	chunk = pcpu_chunk_addr_search(addr);
	off = addr - chunk->base_addr;

	pcpu_free_area(chunk, off);

	/* if there is more than one fully free chunk, wake up grim reaper */
	if (chunk->free_size == pcpu_unit_size) {
		struct pcpu_chunk *pos;

		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
			if (pos != chunk) {
				schedule_work(&pcpu_reclaim_work);
				break;
			}
	}

	spin_unlock_irqrestore(&pcpu_lock, flags);
}
EXPORT_SYMBOL_GPL(free_percpu);

/**
 * is_kernel_percpu_address - test whether address is from static percpu area
 * @addr: address to test
 *
 * Test whether @addr belongs to in-kernel static percpu area.  Module
 * static percpu areas are not considered.  For those, use
 * is_module_percpu_address().
 *
 * RETURNS:
 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 */
bool is_kernel_percpu_address(unsigned long addr)
{
#ifdef CONFIG_SMP
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		void *start = per_cpu_ptr(base, cpu);

		if ((void *)addr >= start && (void *)addr < start + static_size)
			return true;
	}
#endif
	/* on UP, can't distinguish from other static vars, always false */
	return false;
}

/**
 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 * @addr: the address to be converted to physical address
 *
 * Given @addr which is dereferenceable address obtained via one of
 * percpu access macros, this function translates it into its physical
 * address.  The caller is responsible for ensuring @addr stays valid
 * until this function finishes.
 *
 * The percpu allocator has special setup for the first chunk, which
 * currently supports either embedding in linear address space or
 * vmalloc mapping, and, from the second chunk on, the backing
 * allocator (currently either vm or km) provides translation.
 *
 * The addr can be translated simply without checking if it falls into
 * the first chunk.  But the current code reflects better how the
 * percpu allocator actually works, and the verification can discover
 * both bugs in the percpu allocator itself and per_cpu_ptr_to_phys()
 * callers.  So we keep the current code.
 *
 * RETURNS:
 * The physical address for @addr.
 */
phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
	bool in_first_chunk = false;
	unsigned long first_low, first_high;
	unsigned int cpu;

	/*
	 * The following test on unit_low/high isn't strictly
	 * necessary but will speed up lookups of addresses which
	 * aren't in the first chunk.
	 */
	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
				     pcpu_unit_pages);
	if ((unsigned long)addr >= first_low &&
	    (unsigned long)addr < first_high) {
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(base, cpu);

			if (addr >= start && addr < start + pcpu_unit_size) {
				in_first_chunk = true;
				break;
			}
		}
	}

	if (in_first_chunk) {
		if (!is_vmalloc_addr(addr))
			return __pa(addr);
		else
			return page_to_phys(vmalloc_to_page(addr)) +
			       offset_in_page(addr);
	} else
		return page_to_phys(pcpu_addr_to_page(addr)) +
		       offset_in_page(addr);
}
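
/*
 * Usage sketch (illustrative; "ref" is hypothetical): a driver that
 * must hand this cpu's copy of a percpu object to hardware can do
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(this_cpu_ptr(ref));
 *
 * provided the area stays allocated while the device uses it.
 */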

/**
 * pcpu_alloc_alloc_info - allocate percpu allocation info
 * @nr_groups: the number of groups
 * @nr_units: the number of units
 *
 * Allocate ai which is large enough for @nr_groups groups containing
 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
 * cpu_map array which is long enough for @nr_units and filled with
 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
 * pointer of other groups.
 *
 * RETURNS:
 * Pointer to the allocated pcpu_alloc_info on success, NULL on
 * failure.
 */
struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
						      int nr_units)
{
	struct pcpu_alloc_info *ai;
	size_t base_size, ai_size;
	void *ptr;
	int unit;

	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
			  __alignof__(ai->groups[0].cpu_map[0]));
	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);

	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
	if (!ptr)
		return NULL;
	ai = ptr;
	ptr += base_size;

	ai->groups[0].cpu_map = ptr;

	for (unit = 0; unit < nr_units; unit++)
		ai->groups[0].cpu_map[unit] = NR_CPUS;

	ai->nr_groups = nr_groups;
	ai->__ai_size = PFN_ALIGN(ai_size);

	return ai;
}

/**
 * pcpu_free_alloc_info - free percpu allocation info
 * @ai: pcpu_alloc_info to free
 *
 * Free @ai which was allocated by pcpu_alloc_alloc_info().
 */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{
	free_bootmem(__pa(ai), ai->__ai_size);
}

/**
 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
 * @lvl: loglevel
 * @ai: allocation info to dump
 *
 * Print out information about @ai using loglevel @lvl.
 */
static void pcpu_dump_alloc_info(const char *lvl,
				 const struct pcpu_alloc_info *ai)
{
	int group_width = 1, cpu_width = 1, width;
	char empty_str[] = "--------";
	int alloc = 0, alloc_end = 0;
	int group, v;
	int upa, apl;	/* units per alloc, allocs per line */

	v = ai->nr_groups;
	while (v /= 10)
		group_width++;

	v = num_possible_cpus();
	while (v /= 10)
		cpu_width++;
	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';

	upa = ai->alloc_size / ai->unit_size;
	width = upa * (cpu_width + 1) + group_width + 3;
	apl = rounddown_pow_of_two(max(60 / width, 1));

	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);

	for (group = 0; group < ai->nr_groups; group++) {
		const struct pcpu_group_info *gi = &ai->groups[group];
		int unit = 0, unit_end = 0;

		BUG_ON(gi->nr_units % upa);
		for (alloc_end += gi->nr_units / upa;
		     alloc < alloc_end; alloc++) {
			if (!(alloc % apl)) {
				printk("\n");
				printk("%spcpu-alloc: ", lvl);
			}
			printk("[%0*d] ", group_width, group);

			for (unit_end += upa; unit < unit_end; unit++)
				if (gi->cpu_map[unit] != NR_CPUS)
					printk("%0*d ", cpu_width,
					       gi->cpu_map[unit]);
				else
					printk("%s ", empty_str);
		}
	}
	printk("\n");
}

/**
 * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
 * @base_addr: mapped address
 *
 * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from arch percpu area
 * setup path.
 *
 * @ai contains all information necessary to initialize the first
 * chunk and prime the dynamic percpu allocator.
 *
 * @ai->static_size is the size of static percpu area.
 *
 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
 * reserve after the static area in the first chunk.  This reserves
 * the first chunk such that it's available only through reserved
 * percpu allocation.  This is primarily used to serve module percpu
 * static areas on architectures where the addressing model has
 * limited offset range for symbol relocations to guarantee module
 * percpu symbols fall inside the relocatable range.
 *
 * @ai->dyn_size determines the number of bytes available for dynamic
 * allocation in the first chunk.  The area between @ai->static_size +
 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
 *
 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
 * and equal to or larger than @ai->static_size + @ai->reserved_size +
 * @ai->dyn_size.
 *
 * @ai->atom_size is the allocation atom size and used as alignment
 * for vm areas.
 *
 * @ai->alloc_size is the allocation size and always a multiple of
 * @ai->atom_size.  This is larger than @ai->atom_size if
 * @ai->unit_size is larger than @ai->atom_size.
 *
 * @ai->nr_groups and @ai->groups describe virtual memory layout of
 * percpu areas.  Units which should be colocated are put into the
 * same group.  Dynamic VM areas will be allocated according to these
 * groupings.  If @ai->nr_groups is zero, a single group containing
 * all units is assumed.
 *
 * The caller should have mapped the first chunk at @base_addr and
 * copied static data to each unit.
 *
 * If the first chunk ends up with both reserved and dynamic areas, it
 * is served by two chunks - one to serve the core static and reserved
 * areas and the other for the dynamic area.  They share the same vm
 * and page map but use different area allocation maps to stay away
 * from each other.  The latter chunk is circulated in the chunk slots
 * and available for dynamic allocation like any other chunk.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
				  void *base_addr)
{
	static char cpus_buf[4096] __initdata;
	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
	size_t dyn_size = ai->dyn_size;
	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
	struct pcpu_chunk *schunk, *dchunk = NULL;
	unsigned long *group_offsets;
	size_t *group_sizes;
	unsigned long *unit_off;
	unsigned int cpu;
	int *unit_map;
	int group, unit, i;

	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);

#define PCPU_SETUP_BUG_ON(cond)	do {					\
	if (unlikely(cond)) {						\
		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
		BUG();							\
	}								\
} while (0)

	/* sanity checks */
	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
#ifdef CONFIG_SMP
	PCPU_SETUP_BUG_ON(!ai->static_size);
	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
#endif
	PCPU_SETUP_BUG_ON(!base_addr);
	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);

	/* process group information and build config tables accordingly */
	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));

	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
		unit_map[cpu] = UINT_MAX;

	pcpu_low_unit_cpu = NR_CPUS;
	pcpu_high_unit_cpu = NR_CPUS;

	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
		const struct pcpu_group_info *gi = &ai->groups[group];

		group_offsets[group] = gi->base_offset;
		group_sizes[group] = gi->nr_units * ai->unit_size;

		for (i = 0; i < gi->nr_units; i++) {
			cpu = gi->cpu_map[i];
			if (cpu == NR_CPUS)
				continue;

			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);

			unit_map[cpu] = unit + i;
			unit_off[cpu] = gi->base_offset + i * ai->unit_size;

			/* determine low/high unit_cpu */
			if (pcpu_low_unit_cpu == NR_CPUS ||
			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
				pcpu_low_unit_cpu = cpu;
			if (pcpu_high_unit_cpu == NR_CPUS ||
			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
				pcpu_high_unit_cpu = cpu;
		}
	}
	pcpu_nr_units = unit;

	for_each_possible_cpu(cpu)
		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);

	/* we're done parsing the input, undefine BUG macro and dump config */
#undef PCPU_SETUP_BUG_ON
	pcpu_dump_alloc_info(KERN_DEBUG, ai);

	pcpu_nr_groups = ai->nr_groups;
	pcpu_group_offsets = group_offsets;
	pcpu_group_sizes = group_sizes;
	pcpu_unit_map = unit_map;
	pcpu_unit_offsets = unit_off;

	/* determine basic parameters */
	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
	pcpu_atom_size = ai->atom_size;
	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);

	/*
	 * Allocate chunk slots.  The additional last slot is for
	 * empty chunks.
	 */
	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
	for (i = 0; i < pcpu_nr_slots; i++)
		INIT_LIST_HEAD(&pcpu_slot[i]);

	/*
	 * Initialize static chunk.  If reserved_size is zero, the
	 * static chunk covers static area + dynamic allocation area
	 * in the first chunk.  If reserved_size is not zero, it
	 * covers static area + reserved area (mostly used for module
	 * static percpu allocation).
	 */
	schunk = alloc_bootmem(pcpu_chunk_struct_size);
	INIT_LIST_HEAD(&schunk->list);
	schunk->base_addr = base_addr;
	schunk->map = smap;
	schunk->map_alloc = ARRAY_SIZE(smap);
	schunk->immutable = true;
	bitmap_fill(schunk->populated, pcpu_unit_pages);

	if (ai->reserved_size) {
		schunk->free_size = ai->reserved_size;
		pcpu_reserved_chunk = schunk;
		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
	} else {
		schunk->free_size = dyn_size;
		dyn_size = 0;			/* dynamic area covered */
	}
	schunk->contig_hint = schunk->free_size;

	schunk->map[schunk->map_used++] = -ai->static_size;
	if (schunk->free_size)
		schunk->map[schunk->map_used++] = schunk->free_size;

	/* init dynamic chunk if necessary */
	if (dyn_size) {
		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
		INIT_LIST_HEAD(&dchunk->list);
		dchunk->base_addr = base_addr;
		dchunk->map = dmap;
		dchunk->map_alloc = ARRAY_SIZE(dmap);
		dchunk->immutable = true;
		bitmap_fill(dchunk->populated, pcpu_unit_pages);

		dchunk->contig_hint = dchunk->free_size = dyn_size;
		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
		dchunk->map[dchunk->map_used++] = dchunk->free_size;
	}

	/* link the first chunk in */
	pcpu_first_chunk = dchunk ?: schunk;
	pcpu_chunk_relocate(pcpu_first_chunk, -1);

	/* we're done */
	pcpu_base_addr = base_addr;
	return 0;
}

#ifdef CONFIG_SMP

const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
	[PCPU_FC_AUTO]	= "auto",
	[PCPU_FC_EMBED]	= "embed",
	[PCPU_FC_PAGE]	= "page",
};

enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
	if (0)
		/* nada */;
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
	else if (!strcmp(str, "embed"))
		pcpu_chosen_fc = PCPU_FC_EMBED;
#endif
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
	else if (!strcmp(str, "page"))
		pcpu_chosen_fc = PCPU_FC_PAGE;
#endif
	else
		pr_warning("PERCPU: unknown allocator %s specified\n", str);

	return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
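
/*
 * Example (illustrative): booting with "percpu_alloc=page" selects
 * PCPU_FC_PAGE above on configurations that build the page first
 * chunk allocator; an unrecognized string only triggers the warning
 * and leaves pcpu_chosen_fc at PCPU_FC_AUTO.
 */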

/*
 * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if needed by the arch config or the generic setup is going
 * to be used.
 */
#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
#define BUILD_EMBED_FIRST_CHUNK
#endif

/* build pcpu_page_first_chunk() iff needed by the arch config */
#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
#define BUILD_PAGE_FIRST_CHUNK
#endif

/* pcpu_build_alloc_info() is used by both embed and page first chunk */
#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
/**
 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 *
 * This function determines grouping of units, their mappings to cpus
 * and other parameters considering needed percpu size, allocation
 * atom size and distances between CPUs.
 *
 * Groups are always multiples of atom size and CPUs which are of
 * LOCAL_DISTANCE both ways are grouped together and share space for
 * units in the same group.  The returned configuration is guaranteed
 * to have CPUs on different nodes on different groups and >=75% usage
 * of allocated virtual address space.
 *
 * RETURNS:
 * On success, pointer to the new allocation_info is returned.  On
 * failure, ERR_PTR value is returned.
 */
static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
				size_t reserved_size, size_t dyn_size,
				size_t atom_size,
				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
{
	static int group_map[NR_CPUS] __initdata;
	static int group_cnt[NR_CPUS] __initdata;
	const size_t static_size = __per_cpu_end - __per_cpu_start;
	int nr_groups = 1, nr_units = 0;
	size_t size_sum, min_unit_size, alloc_size;
	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
	int last_allocs, group, unit;
	unsigned int cpu, tcpu;
	struct pcpu_alloc_info *ai;
	unsigned int *cpu_map;

	/* this function may be called multiple times */
	memset(group_map, 0, sizeof(group_map));
	memset(group_cnt, 0, sizeof(group_cnt));

	/* calculate size_sum and ensure dyn_size is enough for early alloc */
	size_sum = PFN_ALIGN(static_size + reserved_size +
			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
	dyn_size = size_sum - static_size - reserved_size;

	/*
	 * Determine min_unit_size, alloc_size and max_upa such that
	 * alloc_size is multiple of atom_size and is the smallest
	 * which can accommodate 4k aligned segments which are equal to
	 * or larger than min_unit_size.
	 */
	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);

	alloc_size = roundup(min_unit_size, atom_size);
	upa = alloc_size / min_unit_size;
	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
		upa--;
	max_upa = upa;

	/* group cpus according to their proximity */
	for_each_possible_cpu(cpu) {
		group = 0;
	next_group:
		for_each_possible_cpu(tcpu) {
			if (cpu == tcpu)
				break;
			if (group_map[tcpu] == group && cpu_distance_fn &&
			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
				group++;
				nr_groups = max(nr_groups, group + 1);
				goto next_group;
			}
		}
		group_map[cpu] = group;
		group_cnt[group]++;
	}

	/*
	 * Expand unit size until address space usage goes over 75%
	 * and then as much as possible without using more address
	 * space.
	 */
	last_allocs = INT_MAX;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group_cnt[group]; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
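
/*
 * Worked example (illustrative numbers): for size_sum == 84k and
 * atom_size == 2M, alloc_size is rounded up to 2M and the initial
 * upa is 2M / 84k == 24; the while loop above then lowers upa until
 * the units divide evenly and stay page aligned, ending at upa == 16
 * and a 128k unit_size.
 */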
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up embedded first percpu chunk and
 * can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses larger page size.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than distances
 * between node memory addresses (ie. 32bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size, max_distance;
	int group, i, rc;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = alloc_bootmem_nopanic(areas_size);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		areas[group] = ptr;

		base = min(ptr, base);

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	max_distance = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
		max_distance = max_t(size_t, max_distance,
				     ai->groups[group].base_offset);
	}
	max_distance += ai->unit_size;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
			   "space 0x%lx\n", max_distance,
			   (unsigned long)(VMALLOC_END - VMALLOC_START));
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free;
#endif
	}

	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	rc = pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		free_fn(areas[group],
			ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		free_bootmem(__pa(areas), areas_size);
	return rc;
}
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  Static percpu area is allocated
 * page-by-page into vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = alloc_bootmem(pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++)
		for (i = 0; i < unit_pages; i++) {
			unsigned int cpu = ai->groups[0].cpu_map[unit];
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warning("PERCPU: failed to allocate %s page "
					   "for cpu%u\n", psize_str, cpu);
				goto enomem;
			}
			pages[j++] = virt_to_page(ptr);
		}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
		unit_pages, psize_str, vm.addr, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	rc = pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	free_bootmem(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can piggy
 * back on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
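
/*
 * Illustrative note: with the offsets computed above, the address of
 * a static percpu variable "var" for @cpu works out to
 * &var + __per_cpu_offset[cpu], which is what the per_cpu() accessor
 * family relies on in the SMP case.
 */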
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */
/*
 * UP percpu area setup.
 *
 * UP always uses km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	if (pcpu_setup_first_chunk(ai, fc) < 0)
		panic("Failed to initialize percpu areas.");
}

#endif	/* CONFIG_SMP */

/*
 * First and reserved chunks are initialized with temporary allocation
 * maps in initdata so that they can be used before slab is online.
 * This function is called after slab is brought up and replaces those
 * with properly allocated maps.
 */
void __init percpu_init_late(void)
{
	struct pcpu_chunk *target_chunks[] =
		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
	struct pcpu_chunk *chunk;
	unsigned long flags;
	int i;

	for (i = 0; (chunk = target_chunks[i]); i++) {
		int *map;
		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

		BUILD_BUG_ON(size > PAGE_SIZE);

		map = pcpu_mem_zalloc(size);
		BUG_ON(!map);

		spin_lock_irqsave(&pcpu_lock, flags);
		memcpy(map, chunk->map, size);
		chunk->map = map;
		spin_unlock_irqrestore(&pcpu_lock, flags);
	}
}