1/*
2 * mm/percpu.c - percpu memory allocator
3 *
4 * Copyright (C) 2009 SUSE Linux Products GmbH
5 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
6 *
7 * This file is released under the GPLv2.
8 *
9 * This is the percpu allocator which can handle both static and dynamic
10 * areas. Percpu areas are allocated in chunks. Each chunk consists
11 * of a boot-time determined number of units and the first chunk is
12 * used for static percpu variables in the kernel image (special boot
13 * time alloc/init handling is necessary as these areas need to be
14 * brought up before allocation services are running). Units grow as
15 * necessary and all units grow or shrink in unison. When a chunk is
16 * filled up, another chunk is allocated.
17 *
18 * c0 c1 c2
19 * ------------------- ------------------- ------------
20 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
21 * ------------------- ...... ------------------- .... ------------
22 *
23 * Allocation is done in offset-size areas of a single unit's space.
24 * That is, an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k
25 * of c1:u0, c1:u1, c1:u2 and c1:u3. On UMA, units correspond directly
26 * to cpus. On NUMA, the mapping can be non-linear and even sparse.
27 * Percpu access can be done by configuring percpu base registers
28 * according to the cpu to unit mapping and pcpu_unit_size.
29 *
30 * There are usually many small percpu allocations, many of them as
31 * small as 4 bytes. The allocator organizes chunks into lists
32 * according to free size and tries to allocate from the fullest one.
33 * Each chunk keeps the maximum contiguous area size hint which is
34 * guaranteed to be equal to or larger than the maximum contiguous
35 * area in the chunk. This helps the allocator not to iterate the
36 * chunk maps unnecessarily.
37 *
38 * Allocation state in each chunk is kept using an array of integers
39 * on chunk->map. Each entry is the byte offset of an area; its low
40 * bit is set while the area is allocated and clear while it is free.
41 * Allocation inside a chunk is done by scanning this map sequentially
42 * and serving the first matching entry, mostly copied from percpu_modalloc().
43 * Chunks can be determined from the address using the index field
44 * in the page struct. The index field contains a pointer to the chunk.
45 *
46 * To use this allocator, arch code should do the following:
47 *
48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
49 * regular address to percpu pointer and back if they need to be
50 * different from the default
51 *
52 * - use pcpu_setup_first_chunk() during percpu area initialization to
53 * setup the first chunk containing the kernel static percpu area
54 */
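
/*
 * Illustrative example (not used by the code, numbers are hypothetical):
 * assuming the offset encoding described above and a unit size of 16k,
 * a freshly created chunk starts out as
 *
 *	map[] = { 0, 16384 | 1 };		map_used = 1;
 *
 * i.e. one free area [0, 16384) followed by the end sentinel. After a
 * 512 byte allocation is served from offset 0 the map becomes
 *
 *	map[] = { 0 | 1, 512, 16384 | 1 };	map_used = 2;
 *
 * where entry 0 is the allocated area [0, 512) (low bit set) and entry
 * 1 is the remaining free area [512, 16384).
 */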
55
56#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
57
58#include <linux/bitmap.h>
59#include <linux/bootmem.h>
60#include <linux/err.h>
61#include <linux/list.h>
62#include <linux/log2.h>
63#include <linux/mm.h>
64#include <linux/module.h>
65#include <linux/mutex.h>
66#include <linux/percpu.h>
67#include <linux/pfn.h>
68#include <linux/slab.h>
69#include <linux/spinlock.h>
70#include <linux/vmalloc.h>
71#include <linux/workqueue.h>
72#include <linux/kmemleak.h>
73
74#include <asm/cacheflush.h>
75#include <asm/sections.h>
76#include <asm/tlbflush.h>
77#include <asm/io.h>
78
79#define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 share the same slot */
80#define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */
81#define PCPU_ATOMIC_MAP_MARGIN_LOW 32
82#define PCPU_ATOMIC_MAP_MARGIN_HIGH 64
83#define PCPU_EMPTY_POP_PAGES_LOW 2
84#define PCPU_EMPTY_POP_PAGES_HIGH 4
85
86#ifdef CONFIG_SMP
87/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
88#ifndef __addr_to_pcpu_ptr
89#define __addr_to_pcpu_ptr(addr) \
90 (void __percpu *)((unsigned long)(addr) - \
91 (unsigned long)pcpu_base_addr + \
92 (unsigned long)__per_cpu_start)
93#endif
94#ifndef __pcpu_ptr_to_addr
95#define __pcpu_ptr_to_addr(ptr) \
96 (void __force *)((unsigned long)(ptr) + \
97 (unsigned long)pcpu_base_addr - \
98 (unsigned long)__per_cpu_start)
99#endif
100#else /* CONFIG_SMP */
101/* on UP, it's always identity mapped */
102#define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
103#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
104#endif /* CONFIG_SMP */
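
/*
 * A minimal sketch of the translation (illustrative only): with the
 * default mapping above, converting back and forth is a constant shift
 * and loses no information.
 *
 *	void __percpu *p = __addr_to_pcpu_ptr(addr);
 *	void *back = __pcpu_ptr_to_addr(p);	// back == addr
 *
 * Accessors such as per_cpu_ptr() then add the per-cpu offset of the
 * target cpu to the __percpu cookie to reach that cpu's copy (on
 * architectures using the generic percpu accessors this offset is
 * __per_cpu_offset[cpu]).
 */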
105
106struct pcpu_chunk {
107 struct list_head list; /* linked to pcpu_slot lists */
108 int free_size; /* free bytes in the chunk */
109 int contig_hint; /* max contiguous size hint */
110 void *base_addr; /* base address of this chunk */
111
112 int map_used; /* # of map entries used before the sentinel */
113 int map_alloc; /* # of map entries allocated */
114 int *map; /* allocation map */
115 struct work_struct map_extend_work;/* async ->map[] extension */
116
117 void *data; /* chunk data */
118 int first_free; /* no free below this */
119 bool immutable; /* no [de]population allowed */
120 int nr_populated; /* # of populated pages */
121 unsigned long populated[]; /* populated bitmap */
122};
123
124static int pcpu_unit_pages __read_mostly;
125static int pcpu_unit_size __read_mostly;
126static int pcpu_nr_units __read_mostly;
127static int pcpu_atom_size __read_mostly;
128static int pcpu_nr_slots __read_mostly;
129static size_t pcpu_chunk_struct_size __read_mostly;
130
131/* cpus with the lowest and highest unit addresses */
132static unsigned int pcpu_low_unit_cpu __read_mostly;
133static unsigned int pcpu_high_unit_cpu __read_mostly;
134
135/* the address of the first chunk which starts with the kernel static area */
136void *pcpu_base_addr __read_mostly;
137EXPORT_SYMBOL_GPL(pcpu_base_addr);
138
139static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
140const unsigned long *pcpu_unit_offsets __read_mostly; /* cpu -> unit offset */
141
142/* group information, used for vm allocation */
143static int pcpu_nr_groups __read_mostly;
144static const unsigned long *pcpu_group_offsets __read_mostly;
145static const size_t *pcpu_group_sizes __read_mostly;
146
147/*
148 * The first chunk which always exists. Note that unlike other
149 * chunks, this one can be allocated and mapped in several different
150 * ways and thus often doesn't live in the vmalloc area.
151 */
152static struct pcpu_chunk *pcpu_first_chunk;
153
154/*
155 * Optional reserved chunk. This chunk reserves part of the first
156 * chunk and serves it for reserved allocations. The amount of
157 * reserved offset is in pcpu_reserved_chunk_limit. When reserved
158 * area doesn't exist, the following variables contain NULL and 0
159 * respectively.
160 */
161static struct pcpu_chunk *pcpu_reserved_chunk;
162static int pcpu_reserved_chunk_limit;
163
164static DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
165static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop */
166
167static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
168
169/*
170 * The number of empty populated pages, protected by pcpu_lock. The
171 * reserved chunk doesn't contribute to the count.
172 */
173static int pcpu_nr_empty_pop_pages;
174
175/*
176 * Balance work is used to populate or destroy chunks asynchronously. We
177 * try to keep the number of populated free pages between
178 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations, and to keep
179 * at most one fully free chunk around.
180 */
181static void pcpu_balance_workfn(struct work_struct *work);
182static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
183static bool pcpu_async_enabled __read_mostly;
184static bool pcpu_atomic_alloc_failed;
185
186static void pcpu_schedule_balance_work(void)
187{
188 if (pcpu_async_enabled)
189 schedule_work(&pcpu_balance_work);
190}
191
192static bool pcpu_addr_in_first_chunk(void *addr)
193{
194 void *first_start = pcpu_first_chunk->base_addr;
195
196 return addr >= first_start && addr < first_start + pcpu_unit_size;
197}
198
199static bool pcpu_addr_in_reserved_chunk(void *addr)
200{
201 void *first_start = pcpu_first_chunk->base_addr;
202
203 return addr >= first_start &&
204 addr < first_start + pcpu_reserved_chunk_limit;
205}
206
207static int __pcpu_size_to_slot(int size)
208{
209 int highbit = fls(size); /* size is in bytes */
210 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
211}
212
213static int pcpu_size_to_slot(int size)
214{
215 if (size == pcpu_unit_size)
216 return pcpu_nr_slots - 1;
217 return __pcpu_size_to_slot(size);
218}
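
/*
 * Worked examples (illustrative, not used by the code): with
 * PCPU_SLOT_BASE_SHIFT == 5,
 *
 *	__pcpu_size_to_slot(12)   = max(fls(12)   - 5 + 2, 1) = max(1, 1) = 1
 *	__pcpu_size_to_slot(96)   = max(fls(96)   - 5 + 2, 1) = max(4, 1) = 4
 *	__pcpu_size_to_slot(4096) = max(fls(4096) - 5 + 2, 1) = 10
 *
 * and a fully free chunk (free_size == pcpu_unit_size) always lands in
 * the last slot, pcpu_nr_slots - 1.
 */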
219
220static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
221{
222 if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
223 return 0;
224
225 return pcpu_size_to_slot(chunk->free_size);
226}
227
228/* set the pointer to a chunk in a page struct */
229static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
230{
231 page->index = (unsigned long)pcpu;
232}
233
234/* obtain pointer to a chunk from a page struct */
235static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
236{
237 return (struct pcpu_chunk *)page->index;
238}
239
240static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
241{
242 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
243}
244
245static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
246 unsigned int cpu, int page_idx)
247{
248 return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
249 (page_idx << PAGE_SHIFT);
250}
251
252static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
253 int *rs, int *re, int end)
254{
255 *rs = find_next_zero_bit(chunk->populated, end, *rs);
256 *re = find_next_bit(chunk->populated, end, *rs + 1);
257}
258
259static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
260 int *rs, int *re, int end)
261{
262 *rs = find_next_bit(chunk->populated, end, *rs);
263 *re = find_next_zero_bit(chunk->populated, end, *rs + 1);
264}
265
266/*
267 * (Un)populated page region iterators. Iterate over (un)populated
268 * page regions between @start and @end in @chunk. @rs and @re should
269 * be integer variables and will be set to the start and end page index of
270 * the current region.
271 */
272#define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
273 for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
274 (rs) < (re); \
275 (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
276
277#define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
278 for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
279 (rs) < (re); \
280 (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
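
/*
 * Example usage (a sketch mirroring the populate path further down):
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		pr_debug("pages [%d, %d) are not populated\n", rs, re);
 */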
281
282/**
283 * pcpu_mem_zalloc - allocate memory
284 * @size: bytes to allocate
285 *
286 * Allocate @size bytes. If @size is smaller than PAGE_SIZE,
287 * kzalloc() is used; otherwise, vzalloc() is used. The returned
288 * memory is always zeroed.
289 *
290 * CONTEXT:
291 * Does GFP_KERNEL allocation.
292 *
293 * RETURNS:
294 * Pointer to the allocated area on success, NULL on failure.
295 */
296static void *pcpu_mem_zalloc(size_t size)
297{
298 if (WARN_ON_ONCE(!slab_is_available()))
299 return NULL;
300
301 if (size <= PAGE_SIZE)
302 return kzalloc(size, GFP_KERNEL);
303 else
304 return vzalloc(size);
305}
306
307/**
308 * pcpu_mem_free - free memory
309 * @ptr: memory to free
310 *
311 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
312 */
313static void pcpu_mem_free(void *ptr)
314{
315 kvfree(ptr);
316}
317
318/**
319 * pcpu_count_occupied_pages - count the number of pages an area occupies
320 * @chunk: chunk of interest
321 * @i: index of the area in question
322 *
323 * Count the number of pages chunk's @i'th area occupies. When the area's
324 * start and/or end address isn't aligned to a page boundary, the straddled
325 * page is included in the count iff the rest of the page is free.
326 */
327static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
328{
329 int off = chunk->map[i] & ~1;
330 int end = chunk->map[i + 1] & ~1;
331
332 if (!PAGE_ALIGNED(off) && i > 0) {
333 int prev = chunk->map[i - 1];
334
335 if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
336 off = round_down(off, PAGE_SIZE);
337 }
338
339 if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
340 int next = chunk->map[i + 1];
341 int nend = chunk->map[i + 2] & ~1;
342
343 if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
344 end = round_up(end, PAGE_SIZE);
345 }
346
347 return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
348}
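
/*
 * Illustrative examples (assuming 4k pages, offsets are hypothetical):
 * an area [0, 8192) occupies 2 pages; an area [1024, 3072) surrounded
 * by allocated neighbours occupies 0 pages (no page becomes free when
 * it is freed); an area [1024, 8192) whose preceding area [0, 1024) is
 * free counts the straddled first page as well and occupies 2 pages.
 */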
349
350/**
351 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
352 * @chunk: chunk of interest
353 * @oslot: the previous slot it was on
354 *
355 * This function is called after an allocation or free changed @chunk.
356 * New slot according to the changed state is determined and @chunk is
357 * moved to the slot. Note that the reserved chunk is never put on
358 * chunk slots.
359 *
360 * CONTEXT:
361 * pcpu_lock.
362 */
363static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
364{
365 int nslot = pcpu_chunk_slot(chunk);
366
367 if (chunk != pcpu_reserved_chunk && oslot != nslot) {
368 if (oslot < nslot)
369 list_move(&chunk->list, &pcpu_slot[nslot]);
370 else
371 list_move_tail(&chunk->list, &pcpu_slot[nslot]);
372 }
373}
374
375/**
376 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
377 * @chunk: chunk of interest
378 * @is_atomic: the allocation context
379 *
380 * Determine whether area map of @chunk needs to be extended. If
381 * @is_atomic, only the amount necessary for a new allocation is
382 * considered; however, async extension is scheduled if the remaining
383 * amount is low. If !@is_atomic, it aims for more empty space. Combined,
384 * this ensures that the map is likely to have enough available space to
385 * accommodate atomic allocations which can't extend maps directly.
386 *
387 * CONTEXT:
388 * pcpu_lock.
389 *
390 * RETURNS:
391 * New target map allocation length if extension is necessary, 0
392 * otherwise.
393 */
394static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
395{
396 int margin, new_alloc;
397
398 if (is_atomic) {
399 margin = 3;
400
401 if (chunk->map_alloc <
402 chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
403 pcpu_async_enabled)
404 schedule_work(&chunk->map_extend_work);
405 } else {
406 margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
407 }
408
409 if (chunk->map_alloc >= chunk->map_used + margin)
410 return 0;
411
412 new_alloc = PCPU_DFL_MAP_ALLOC;
413 while (new_alloc < chunk->map_used + margin)
414 new_alloc *= 2;
415
416 return new_alloc;
417}
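
/*
 * Example (hypothetical numbers): an atomic allocation against a chunk
 * with map_used == 62 and map_alloc == 64 uses margin == 3, so
 * 64 < 62 + 3 and an extension is needed; new_alloc doubles from
 * PCPU_DFL_MAP_ALLOC (16 -> 32 -> 64 -> 128) until it covers
 * map_used + margin, giving 128. The async extension is also scheduled
 * (if async is enabled) because 64 < 62 + PCPU_ATOMIC_MAP_MARGIN_LOW.
 */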
418
419/**
420 * pcpu_extend_area_map - extend area map of a chunk
421 * @chunk: chunk of interest
422 * @new_alloc: new target allocation length of the area map
423 *
424 * Extend area map of @chunk to have @new_alloc entries.
425 *
426 * CONTEXT:
427 * Does GFP_KERNEL allocation. Grabs and releases pcpu_lock.
428 *
429 * RETURNS:
430 * 0 on success, -errno on failure.
431 */
432static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
433{
434 int *old = NULL, *new = NULL;
435 size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
436 unsigned long flags;
437
438 new = pcpu_mem_zalloc(new_size);
439 if (!new)
440 return -ENOMEM;
441
442 /* acquire pcpu_lock and switch to new area map */
443 spin_lock_irqsave(&pcpu_lock, flags);
444
445 if (new_alloc <= chunk->map_alloc)
446 goto out_unlock;
447
448 old_size = chunk->map_alloc * sizeof(chunk->map[0]);
449 old = chunk->map;
450
451 memcpy(new, old, old_size);
452
453 chunk->map_alloc = new_alloc;
454 chunk->map = new;
455 new = NULL;
456
457out_unlock:
458 spin_unlock_irqrestore(&pcpu_lock, flags);
459
460 /*
461 * pcpu_mem_free() might end up calling vfree() which uses
462 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
463 */
464 pcpu_mem_free(old);
465 pcpu_mem_free(new);
466
467 return 0;
468}
469
470static void pcpu_map_extend_workfn(struct work_struct *work)
471{
472 struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
473 map_extend_work);
474 int new_alloc;
475
476 spin_lock_irq(&pcpu_lock);
477 new_alloc = pcpu_need_to_extend(chunk, false);
478 spin_unlock_irq(&pcpu_lock);
479
480 if (new_alloc)
481 pcpu_extend_area_map(chunk, new_alloc);
482}
483
484/**
485 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
486 * @chunk: chunk the candidate area belongs to
487 * @off: the offset to the start of the candidate area
488 * @this_size: the size of the candidate area
489 * @size: the size of the target allocation
490 * @align: the alignment of the target allocation
491 * @pop_only: only allocate from already populated region
492 *
493 * We're trying to allocate @size bytes aligned at @align. @chunk's area
494 * at @off sized @this_size is a candidate. This function determines
495 * whether the target allocation fits in the candidate area and returns the
496 * number of bytes to pad after @off. If the target area doesn't fit, -1
497 * is returned.
498 *
499 * If @pop_only is %true, this function only considers the already
500 * populated part of the candidate area.
501 */
502static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
503 int size, int align, bool pop_only)
504{
505 int cand_off = off;
506
507 while (true) {
508 int head = ALIGN(cand_off, align) - off;
509 int page_start, page_end, rs, re;
510
511 if (this_size < head + size)
512 return -1;
513
514 if (!pop_only)
515 return head;
516
517 /*
518 * If the first unpopulated page is beyond the end of the
519 * allocation, the whole allocation is populated;
520 * otherwise, retry from the end of the unpopulated area.
521 */
522 page_start = PFN_DOWN(head + off);
523 page_end = PFN_UP(head + off + size);
524
525 rs = page_start;
526 pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
527 if (rs >= page_end)
528 return head;
529 cand_off = re * PAGE_SIZE;
530 }
531}
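
/*
 * Example (hypothetical values): for a candidate area at @off == 100 of
 * @this_size == 1000, a request of @size == 256 bytes aligned to 64
 * needs head = ALIGN(100, 64) - 100 = 28 bytes of padding; since
 * 1000 >= 28 + 256 the request fits and 28 is returned (assuming
 * @pop_only == false).
 */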
532
533/**
534 * pcpu_alloc_area - allocate area from a pcpu_chunk
535 * @chunk: chunk of interest
536 * @size: wanted size in bytes
537 * @align: wanted align
538 * @pop_only: allocate only from the populated area
539 * @occ_pages_p: out param for the number of pages the area occupies
540 *
541 * Try to allocate @size bytes area aligned at @align from @chunk.
542 * Note that this function only allocates the offset. It doesn't
543 * populate or map the area.
544 *
545 * @chunk->map must have at least two free slots.
546 *
547 * CONTEXT:
548 * pcpu_lock.
549 *
550 * RETURNS:
551 * Allocated offset in @chunk on success, -1 if no matching area is
552 * found.
553 */
554static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
555 bool pop_only, int *occ_pages_p)
556{
557 int oslot = pcpu_chunk_slot(chunk);
558 int max_contig = 0;
559 int i, off;
560 bool seen_free = false;
561 int *p;
562
563 for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
564 int head, tail;
565 int this_size;
566
567 off = *p;
568 if (off & 1)
569 continue;
570
571 this_size = (p[1] & ~1) - off;
572
573 head = pcpu_fit_in_area(chunk, off, this_size, size, align,
574 pop_only);
575 if (head < 0) {
576 if (!seen_free) {
577 chunk->first_free = i;
578 seen_free = true;
579 }
580 max_contig = max(this_size, max_contig);
581 continue;
582 }
583
584 /*
585 * If head is small or the previous block is free,
586 * merge'em. Note that 'small' is defined as smaller
587 * than sizeof(int), which is very small but isn't too
588 * uncommon for percpu allocations.
589 */
590 if (head && (head < sizeof(int) || !(p[-1] & 1))) {
591 *p = off += head;
592 if (p[-1] & 1)
593 chunk->free_size -= head;
594 else
595 max_contig = max(*p - p[-1], max_contig);
596 this_size -= head;
597 head = 0;
598 }
599
600 /* if tail is small, just keep it around */
601 tail = this_size - head - size;
602 if (tail < sizeof(int)) {
603 tail = 0;
604 size = this_size - head;
605 }
606
607 /* split if warranted */
608 if (head || tail) {
609 int nr_extra = !!head + !!tail;
610
611 /* insert new subblocks */
612 memmove(p + nr_extra + 1, p + 1,
613 sizeof(chunk->map[0]) * (chunk->map_used - i));
614 chunk->map_used += nr_extra;
615
616 if (head) {
617 if (!seen_free) {
618 chunk->first_free = i;
619 seen_free = true;
620 }
621 *++p = off += head;
622 ++i;
623 max_contig = max(head, max_contig);
624 }
625 if (tail) {
626 p[1] = off + size;
627 max_contig = max(tail, max_contig);
628 }
629 }
630
631 if (!seen_free)
632 chunk->first_free = i + 1;
633
634 /* update hint and mark allocated */
635 if (i + 1 == chunk->map_used)
636 chunk->contig_hint = max_contig; /* fully scanned */
637 else
638 chunk->contig_hint = max(chunk->contig_hint,
639 max_contig);
640
641 chunk->free_size -= size;
642 *p |= 1;
643
644 *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
645 pcpu_chunk_relocate(chunk, oslot);
646 return off;
647 }
648
649 chunk->contig_hint = max_contig; /* fully scanned */
650 pcpu_chunk_relocate(chunk, oslot);
651
652 /* tell the upper layer that this chunk has no matching area */
653 return -1;
654}
655
656/**
657 * pcpu_free_area - free area to a pcpu_chunk
658 * @chunk: chunk of interest
659 * @freeme: offset of area to free
660 * @occ_pages_p: out param for the number of pages the area occupies
661 *
662 * Free area starting from @freeme to @chunk. Note that this function
663 * only modifies the allocation map. It doesn't depopulate or unmap
664 * the area.
665 *
666 * CONTEXT:
667 * pcpu_lock.
668 */
669static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
670 int *occ_pages_p)
671{
672 int oslot = pcpu_chunk_slot(chunk);
673 int off = 0;
674 unsigned i, j;
675 int to_free = 0;
676 int *p;
677
678 freeme |= 1; /* we are searching for <given offset, in use> pair */
679
680 i = 0;
681 j = chunk->map_used;
682 while (i != j) {
683 unsigned k = (i + j) / 2;
684 off = chunk->map[k];
685 if (off < freeme)
686 i = k + 1;
687 else if (off > freeme)
688 j = k;
689 else
690 i = j = k;
691 }
692 BUG_ON(off != freeme);
693
694 if (i < chunk->first_free)
695 chunk->first_free = i;
696
697 p = chunk->map + i;
698 *p = off &= ~1;
699 chunk->free_size += (p[1] & ~1) - off;
700
701 *occ_pages_p = pcpu_count_occupied_pages(chunk, i);
702
703 /* merge with next? */
704 if (!(p[1] & 1))
705 to_free++;
706 /* merge with previous? */
707 if (i > 0 && !(p[-1] & 1)) {
708 to_free++;
709 i--;
710 p--;
711 }
712 if (to_free) {
713 chunk->map_used -= to_free;
714 memmove(p + 1, p + 1 + to_free,
715 (chunk->map_used - i) * sizeof(chunk->map[0]));
716 }
717
718 chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
719 pcpu_chunk_relocate(chunk, oslot);
720}
721
722static struct pcpu_chunk *pcpu_alloc_chunk(void)
723{
724 struct pcpu_chunk *chunk;
725
726 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
727 if (!chunk)
728 return NULL;
729
730 chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
731 sizeof(chunk->map[0]));
732 if (!chunk->map) {
733 pcpu_mem_free(chunk);
734 return NULL;
735 }
736
737 chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
738 chunk->map[0] = 0;
739 chunk->map[1] = pcpu_unit_size | 1;
740 chunk->map_used = 1;
741
742 INIT_LIST_HEAD(&chunk->list);
743 INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
744 chunk->free_size = pcpu_unit_size;
745 chunk->contig_hint = pcpu_unit_size;
746
747 return chunk;
748}
749
750static void pcpu_free_chunk(struct pcpu_chunk *chunk)
751{
752 if (!chunk)
753 return;
754 pcpu_mem_free(chunk->map);
755 pcpu_mem_free(chunk);
756}
757
758/**
759 * pcpu_chunk_populated - post-population bookkeeping
760 * @chunk: pcpu_chunk which got populated
761 * @page_start: the start page
762 * @page_end: the end page
763 *
764 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
765 * the bookkeeping information accordingly. Must be called after each
766 * successful population.
767 */
768static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
769 int page_start, int page_end)
770{
771 int nr = page_end - page_start;
772
773 lockdep_assert_held(&pcpu_lock);
774
775 bitmap_set(chunk->populated, page_start, nr);
776 chunk->nr_populated += nr;
777 pcpu_nr_empty_pop_pages += nr;
778}
779
780/**
781 * pcpu_chunk_depopulated - post-depopulation bookkeeping
782 * @chunk: pcpu_chunk which got depopulated
783 * @page_start: the start page
784 * @page_end: the end page
785 *
786 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
787 * Update the bookkeeping information accordingly. Must be called after
788 * each successful depopulation.
789 */
790static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
791 int page_start, int page_end)
792{
793 int nr = page_end - page_start;
794
795 lockdep_assert_held(&pcpu_lock);
796
797 bitmap_clear(chunk->populated, page_start, nr);
798 chunk->nr_populated -= nr;
799 pcpu_nr_empty_pop_pages -= nr;
800}
801
802/*
803 * Chunk management implementation.
804 *
805 * To allow different implementations, chunk alloc/free and
806 * [de]population are implemented in a separate file which is pulled
807 * into this file and compiled together. The following functions
808 * should be implemented.
809 *
810 * pcpu_populate_chunk - populate the specified range of a chunk
811 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
812 * pcpu_create_chunk - create a new chunk
813 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
814 * pcpu_addr_to_page - translate address to the struct page backing it
815 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
816 */
817static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
818static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
819static struct pcpu_chunk *pcpu_create_chunk(void);
820static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
821static struct page *pcpu_addr_to_page(void *addr);
822static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
823
824#ifdef CONFIG_NEED_PER_CPU_KM
825#include "percpu-km.c"
826#else
827#include "percpu-vm.c"
828#endif
829
830/**
831 * pcpu_chunk_addr_search - determine chunk containing specified address
832 * @addr: address for which the chunk needs to be determined.
833 *
834 * RETURNS:
835 * The address of the found chunk.
836 */
837static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
838{
839 /* is it in the first chunk? */
840 if (pcpu_addr_in_first_chunk(addr)) {
841 /* is it in the reserved area? */
842 if (pcpu_addr_in_reserved_chunk(addr))
843 return pcpu_reserved_chunk;
844 return pcpu_first_chunk;
845 }
846
847 /*
848 * The address is relative to unit0 which might be unused and
849 * thus unmapped. Offset the address to the unit space of the
850 * current processor before looking it up in the vmalloc
851 * space. Note that any possible cpu id can be used here, so
852 * there's no need to worry about preemption or cpu hotplug.
853 */
854 addr += pcpu_unit_offsets[raw_smp_processor_id()];
855 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
856}
857
858/**
859 * pcpu_alloc - the percpu allocator
860 * @size: size of area to allocate in bytes
861 * @align: alignment of area (max PAGE_SIZE)
862 * @reserved: allocate from the reserved chunk if available
863 * @gfp: allocation flags
864 *
865 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
866 * contain %GFP_KERNEL, the allocation is atomic.
867 *
868 * RETURNS:
869 * Percpu pointer to the allocated area on success, NULL on failure.
870 */
871static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
872 gfp_t gfp)
873{
874 static int warn_limit = 10;
875 struct pcpu_chunk *chunk;
876 const char *err;
877 bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
878 int occ_pages = 0;
879 int slot, off, new_alloc, cpu, ret;
880 unsigned long flags;
881 void __percpu *ptr;
882
883 /*
884 * We want the lowest bit of offset available for in-use/free
885 * indicator, so force at least 2 byte alignment and make size even.
886 */
887 if (unlikely(align < 2))
888 align = 2;
889
890 size = ALIGN(size, 2);
891
892 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
893 WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
894 size, align);
895 return NULL;
896 }
897
898 spin_lock_irqsave(&pcpu_lock, flags);
899
900 /* serve reserved allocations from the reserved chunk if available */
901 if (reserved && pcpu_reserved_chunk) {
902 chunk = pcpu_reserved_chunk;
903
904 if (size > chunk->contig_hint) {
905 err = "alloc from reserved chunk failed";
906 goto fail_unlock;
907 }
908
909 while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
910 spin_unlock_irqrestore(&pcpu_lock, flags);
911 if (is_atomic ||
912 pcpu_extend_area_map(chunk, new_alloc) < 0) {
913 err = "failed to extend area map of reserved chunk";
914 goto fail;
915 }
916 spin_lock_irqsave(&pcpu_lock, flags);
917 }
918
919 off = pcpu_alloc_area(chunk, size, align, is_atomic,
920 &occ_pages);
921 if (off >= 0)
922 goto area_found;
923
924 err = "alloc from reserved chunk failed";
925 goto fail_unlock;
926 }
927
928restart:
929 /* search through normal chunks */
930 for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
931 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
932 if (size > chunk->contig_hint)
933 continue;
934
935 new_alloc = pcpu_need_to_extend(chunk, is_atomic);
936 if (new_alloc) {
937 if (is_atomic)
938 continue;
939 spin_unlock_irqrestore(&pcpu_lock, flags);
940 if (pcpu_extend_area_map(chunk,
941 new_alloc) < 0) {
942 err = "failed to extend area map";
943 goto fail;
944 }
945 spin_lock_irqsave(&pcpu_lock, flags);
946 /*
947 * pcpu_lock has been dropped, need to
948 * restart cpu_slot list walking.
949 */
950 goto restart;
951 }
952
953 off = pcpu_alloc_area(chunk, size, align, is_atomic,
954 &occ_pages);
955 if (off >= 0)
956 goto area_found;
957 }
958 }
959
960 spin_unlock_irqrestore(&pcpu_lock, flags);
961
962 /*
963 * No space left. Create a new chunk. We don't want multiple
964 * tasks to create chunks simultaneously. Serialize and create iff
965 * there's still no empty chunk after grabbing the mutex.
966 */
967 if (is_atomic)
968 goto fail;
969
970 mutex_lock(&pcpu_alloc_mutex);
971
972 if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
973 chunk = pcpu_create_chunk();
974 if (!chunk) {
975 mutex_unlock(&pcpu_alloc_mutex);
976 err = "failed to allocate new chunk";
977 goto fail;
978 }
979
980 spin_lock_irqsave(&pcpu_lock, flags);
981 pcpu_chunk_relocate(chunk, -1);
982 } else {
983 spin_lock_irqsave(&pcpu_lock, flags);
984 }
985
986 mutex_unlock(&pcpu_alloc_mutex);
987 goto restart;
988
989area_found:
990 spin_unlock_irqrestore(&pcpu_lock, flags);
991
992 /* populate if not all pages are already there */
993 if (!is_atomic) {
994 int page_start, page_end, rs, re;
995
996 mutex_lock(&pcpu_alloc_mutex);
997
998 page_start = PFN_DOWN(off);
999 page_end = PFN_UP(off + size);
1000
1001 pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
1002 WARN_ON(chunk->immutable);
1003
1004 ret = pcpu_populate_chunk(chunk, rs, re);
1005
1006 spin_lock_irqsave(&pcpu_lock, flags);
1007 if (ret) {
1008 mutex_unlock(&pcpu_alloc_mutex);
1009 pcpu_free_area(chunk, off, &occ_pages);
1010 err = "failed to populate";
1011 goto fail_unlock;
1012 }
1013 pcpu_chunk_populated(chunk, rs, re);
1014 spin_unlock_irqrestore(&pcpu_lock, flags);
1015 }
1016
1017 mutex_unlock(&pcpu_alloc_mutex);
1018 }
1019
1020 if (chunk != pcpu_reserved_chunk)
1021 pcpu_nr_empty_pop_pages -= occ_pages;
1022
1023 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1024 pcpu_schedule_balance_work();
1025
1026 /* clear the areas and return address relative to base address */
1027 for_each_possible_cpu(cpu)
1028 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1029
1030 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1031 kmemleak_alloc_percpu(ptr, size, gfp);
1032 return ptr;
1033
1034fail_unlock:
1035 spin_unlock_irqrestore(&pcpu_lock, flags);
1036fail:
1037 if (!is_atomic && warn_limit) {
1038 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1039 size, align, is_atomic, err);
1040 dump_stack();
1041 if (!--warn_limit)
1042 pr_info("limit reached, disable warning\n");
1043 }
1044 if (is_atomic) {
1045 /* see the flag handling in pcpu_balance_workfn() */
1046 pcpu_atomic_alloc_failed = true;
1047 pcpu_schedule_balance_work();
1048 }
1049 return NULL;
1050}
1051
1052/**
1053 * __alloc_percpu_gfp - allocate dynamic percpu area
1054 * @size: size of area to allocate in bytes
1055 * @align: alignment of area (max PAGE_SIZE)
1056 * @gfp: allocation flags
1057 *
1058 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1059 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1060 * be called from any context but is a lot more likely to fail.
1061 *
1062 * RETURNS:
1063 * Percpu pointer to the allocated area on success, NULL on failure.
1064 */
1065void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1066{
1067 return pcpu_alloc(size, align, false, gfp);
1068}
1069EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1070
1071/**
1072 * __alloc_percpu - allocate dynamic percpu area
1073 * @size: size of area to allocate in bytes
1074 * @align: alignment of area (max PAGE_SIZE)
1075 *
1076 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1077 */
1078void __percpu *__alloc_percpu(size_t size, size_t align)
1079{
1080 return pcpu_alloc(size, align, false, GFP_KERNEL);
1081}
1082EXPORT_SYMBOL_GPL(__alloc_percpu);
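
/*
 * Example caller (illustrative only, not part of this file): a simple
 * per-cpu event counter using the dynamic allocator.
 *
 *	unsigned long __percpu *counters;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	counters = alloc_percpu(unsigned long);
 *	if (!counters)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(*counters);		// cheap, local fast path
 *
 *	for_each_possible_cpu(cpu)		// slow path: fold all cpus
 *		sum += *per_cpu_ptr(counters, cpu);
 *
 *	free_percpu(counters);
 */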
1083
1084/**
1085 * __alloc_reserved_percpu - allocate reserved percpu area
1086 * @size: size of area to allocate in bytes
1087 * @align: alignment of area (max PAGE_SIZE)
1088 *
1089 * Allocate zero-filled percpu area of @size bytes aligned at @align
1090 * from reserved percpu area if arch has set it up; otherwise,
1091 * allocation is served from the same dynamic area. Might sleep.
1092 * Might trigger writeouts.
1093 *
1094 * CONTEXT:
1095 * Does GFP_KERNEL allocation.
1096 *
1097 * RETURNS:
1098 * Percpu pointer to the allocated area on success, NULL on failure.
1099 */
1100void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1101{
1102 return pcpu_alloc(size, align, true, GFP_KERNEL);
1103}
1104
1105/**
1106 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1107 * @work: unused
1108 *
1109 * Reclaim fully free chunks except for the first one and replenish empty populated pages.
1110 */
1111static void pcpu_balance_workfn(struct work_struct *work)
1112{
1113 LIST_HEAD(to_free);
1114 struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1115 struct pcpu_chunk *chunk, *next;
1116 int slot, nr_to_pop, ret;
1117
1118 /*
1119 * There's no reason to keep around multiple unused chunks and VM
1120 * areas can be scarce. Destroy all free chunks except for one.
1121 */
1122 mutex_lock(&pcpu_alloc_mutex);
1123 spin_lock_irq(&pcpu_lock);
1124
1125 list_for_each_entry_safe(chunk, next, free_head, list) {
1126 WARN_ON(chunk->immutable);
1127
1128 /* spare the first one */
1129 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1130 continue;
1131
1132 list_move(&chunk->list, &to_free);
1133 }
1134
1135 spin_unlock_irq(&pcpu_lock);
1136
1137 list_for_each_entry_safe(chunk, next, &to_free, list) {
1138 int rs, re;
1139
1140 pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1141 pcpu_depopulate_chunk(chunk, rs, re);
1142 spin_lock_irq(&pcpu_lock);
1143 pcpu_chunk_depopulated(chunk, rs, re);
1144 spin_unlock_irq(&pcpu_lock);
1145 }
1146 pcpu_destroy_chunk(chunk);
1147 }
1148
1149 /*
1150 * Ensure there are certain number of free populated pages for
1151 * atomic allocs. Fill up from the most packed so that atomic
1152 * allocs don't increase fragmentation. If atomic allocation
1153 * failed previously, always populate the maximum amount. This
1154 * should prevent atomic allocs larger than PAGE_SIZE from keeping
1155 * failing indefinitely; however, large atomic allocs are not
1156 * something we support properly and can be highly unreliable and
1157 * inefficient.
1158 */
1159retry_pop:
1160 if (pcpu_atomic_alloc_failed) {
1161 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1162 /* best effort anyway, don't worry about synchronization */
1163 pcpu_atomic_alloc_failed = false;
1164 } else {
1165 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1166 pcpu_nr_empty_pop_pages,
1167 0, PCPU_EMPTY_POP_PAGES_HIGH);
1168 }
1169
1170 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1171 int nr_unpop = 0, rs, re;
1172
1173 if (!nr_to_pop)
1174 break;
1175
1176 spin_lock_irq(&pcpu_lock);
1177 list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1178 nr_unpop = pcpu_unit_pages - chunk->nr_populated;
1179 if (nr_unpop)
1180 break;
1181 }
1182 spin_unlock_irq(&pcpu_lock);
1183
1184 if (!nr_unpop)
1185 continue;
1186
1187 /* @chunk can't go away while pcpu_alloc_mutex is held */
1188 pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1189 int nr = min(re - rs, nr_to_pop);
1190
1191 ret = pcpu_populate_chunk(chunk, rs, rs + nr);
1192 if (!ret) {
1193 nr_to_pop -= nr;
1194 spin_lock_irq(&pcpu_lock);
1195 pcpu_chunk_populated(chunk, rs, rs + nr);
1196 spin_unlock_irq(&pcpu_lock);
1197 } else {
1198 nr_to_pop = 0;
1199 }
1200
1201 if (!nr_to_pop)
1202 break;
1203 }
1204 }
1205
1206 if (nr_to_pop) {
1207 /* ran out of chunks to populate, create a new one and retry */
1208 chunk = pcpu_create_chunk();
1209 if (chunk) {
1210 spin_lock_irq(&pcpu_lock);
1211 pcpu_chunk_relocate(chunk, -1);
1212 spin_unlock_irq(&pcpu_lock);
1213 goto retry_pop;
1214 }
1215 }
1216
1217 mutex_unlock(&pcpu_alloc_mutex);
1218}
1219
1220/**
1221 * free_percpu - free percpu area
1222 * @ptr: pointer to area to free
1223 *
1224 * Free percpu area @ptr.
1225 *
1226 * CONTEXT:
1227 * Can be called from atomic context.
1228 */
1229void free_percpu(void __percpu *ptr)
1230{
1231 void *addr;
1232 struct pcpu_chunk *chunk;
1233 unsigned long flags;
1234 int off, occ_pages;
1235
1236 if (!ptr)
1237 return;
1238
1239 kmemleak_free_percpu(ptr);
1240
1241 addr = __pcpu_ptr_to_addr(ptr);
1242
1243 spin_lock_irqsave(&pcpu_lock, flags);
1244
1245 chunk = pcpu_chunk_addr_search(addr);
1246 off = addr - chunk->base_addr;
1247
1248 pcpu_free_area(chunk, off, &occ_pages);
1249
1250 if (chunk != pcpu_reserved_chunk)
1251 pcpu_nr_empty_pop_pages += occ_pages;
1252
1253 /* if there is more than one fully free chunk, wake up the grim reaper */
1254 if (chunk->free_size == pcpu_unit_size) {
1255 struct pcpu_chunk *pos;
1256
1257 list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1258 if (pos != chunk) {
1259 pcpu_schedule_balance_work();
1260 break;
1261 }
1262 }
1263
1264 spin_unlock_irqrestore(&pcpu_lock, flags);
1265}
1266EXPORT_SYMBOL_GPL(free_percpu);
1267
1268/**
1269 * is_kernel_percpu_address - test whether address is from static percpu area
1270 * @addr: address to test
1271 *
1272 * Test whether @addr belongs to in-kernel static percpu area. Module
1273 * static percpu areas are not considered. For those, use
1274 * is_module_percpu_address().
1275 *
1276 * RETURNS:
1277 * %true if @addr is from in-kernel static percpu area, %false otherwise.
1278 */
1279bool is_kernel_percpu_address(unsigned long addr)
1280{
1281#ifdef CONFIG_SMP
1282 const size_t static_size = __per_cpu_end - __per_cpu_start;
1283 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1284 unsigned int cpu;
1285
1286 for_each_possible_cpu(cpu) {
1287 void *start = per_cpu_ptr(base, cpu);
1288
1289 if ((void *)addr >= start && (void *)addr < start + static_size)
1290 return true;
1291 }
1292#endif
1293 /* on UP, can't distinguish from other static vars, always false */
1294 return false;
1295}
1296
1297/**
1298 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1299 * @addr: the address to be converted to physical address
1300 *
1301 * Given @addr, which is a dereferenceable address obtained via one of
1302 * the percpu access macros, this function translates it into its physical
1303 * address. The caller is responsible for ensuring @addr stays valid
1304 * until this function finishes.
1305 *
1306 * percpu allocator has special setup for the first chunk, which currently
1307 * supports either embedding in linear address space or vmalloc mapping,
1308 * and, from the second chunk on, the backing allocator (currently either vm or
1309 * km) provides translation.
1310 *
1311 * The addr can be translated simply without checking if it falls into the
1312 * first chunk. But the current code reflects better how the percpu
1313 * allocator actually works, and the verification can discover bugs both
1314 * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
1315 * So we keep the current code.
1316 *
1317 * RETURNS:
1318 * The physical address for @addr.
1319 */
1320phys_addr_t per_cpu_ptr_to_phys(void *addr)
1321{
1322 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1323 bool in_first_chunk = false;
1324 unsigned long first_low, first_high;
1325 unsigned int cpu;
1326
1327 /*
1328 * The following test on unit_low/high isn't strictly
1329 * necessary but will speed up lookups of addresses which
1330 * aren't in the first chunk.
1331 */
1332 first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1333 first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1334 pcpu_unit_pages);
1335 if ((unsigned long)addr >= first_low &&
1336 (unsigned long)addr < first_high) {
1337 for_each_possible_cpu(cpu) {
1338 void *start = per_cpu_ptr(base, cpu);
1339
1340 if (addr >= start && addr < start + pcpu_unit_size) {
1341 in_first_chunk = true;
1342 break;
1343 }
1344 }
1345 }
1346
1347 if (in_first_chunk) {
1348 if (!is_vmalloc_addr(addr))
1349 return __pa(addr);
1350 else
1351 return page_to_phys(vmalloc_to_page(addr)) +
1352 offset_in_page(addr);
1353 } else
1354 return page_to_phys(pcpu_addr_to_page(addr)) +
1355 offset_in_page(addr);
1356}
1357
1358/**
1359 * pcpu_alloc_alloc_info - allocate percpu allocation info
1360 * @nr_groups: the number of groups
1361 * @nr_units: the number of units
1362 *
1363 * Allocate an ai which is large enough for @nr_groups groups containing
1364 * @nr_units units. The returned ai's groups[0].cpu_map points to the
1365 * cpu_map array which is long enough for @nr_units and filled with
1366 * NR_CPUS. It's the caller's responsibility to initialize the cpu_map
1367 * pointers of the other groups.
1368 *
1369 * RETURNS:
1370 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1371 * failure.
1372 */
1373struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1374 int nr_units)
1375{
1376 struct pcpu_alloc_info *ai;
1377 size_t base_size, ai_size;
1378 void *ptr;
1379 int unit;
1380
1381 base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1382 __alignof__(ai->groups[0].cpu_map[0]));
1383 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1384
1385 ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1386 if (!ptr)
1387 return NULL;
1388 ai = ptr;
1389 ptr += base_size;
1390
1391 ai->groups[0].cpu_map = ptr;
1392
1393 for (unit = 0; unit < nr_units; unit++)
1394 ai->groups[0].cpu_map[unit] = NR_CPUS;
1395
1396 ai->nr_groups = nr_groups;
1397 ai->__ai_size = PFN_ALIGN(ai_size);
1398
1399 return ai;
1400}
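
/*
 * Example (a sketch of what callers like pcpu_build_alloc_info() below
 * do to satisfy the "initialize the other groups' cpu_map" requirement):
 *
 *	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
 *	if (!ai)
 *		return ERR_PTR(-ENOMEM);
 *	cpu_map = ai->groups[0].cpu_map;
 *	for (group = 0; group < nr_groups; group++) {
 *		ai->groups[group].cpu_map = cpu_map;
 *		cpu_map += roundup(group_cnt[group], upa);
 *	}
 */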
1401
1402/**
1403 * pcpu_free_alloc_info - free percpu allocation info
1404 * @ai: pcpu_alloc_info to free
1405 *
1406 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1407 */
1408void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1409{
1410 memblock_free_early(__pa(ai), ai->__ai_size);
1411}
1412
1413/**
1414 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1415 * @lvl: loglevel
1416 * @ai: allocation info to dump
1417 *
1418 * Print out information about @ai using loglevel @lvl.
1419 */
1420static void pcpu_dump_alloc_info(const char *lvl,
1421 const struct pcpu_alloc_info *ai)
1422{
1423 int group_width = 1, cpu_width = 1, width;
1424 char empty_str[] = "--------";
1425 int alloc = 0, alloc_end = 0;
1426 int group, v;
1427 int upa, apl; /* units per alloc, allocs per line */
1428
1429 v = ai->nr_groups;
1430 while (v /= 10)
1431 group_width++;
1432
1433 v = num_possible_cpus();
1434 while (v /= 10)
1435 cpu_width++;
1436 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1437
1438 upa = ai->alloc_size / ai->unit_size;
1439 width = upa * (cpu_width + 1) + group_width + 3;
1440 apl = rounddown_pow_of_two(max(60 / width, 1));
1441
1442 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1443 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1444 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1445
1446 for (group = 0; group < ai->nr_groups; group++) {
1447 const struct pcpu_group_info *gi = &ai->groups[group];
1448 int unit = 0, unit_end = 0;
1449
1450 BUG_ON(gi->nr_units % upa);
1451 for (alloc_end += gi->nr_units / upa;
1452 alloc < alloc_end; alloc++) {
1453 if (!(alloc % apl)) {
1454 pr_cont("\n");
1455 printk("%spcpu-alloc: ", lvl);
1456 }
1457 pr_cont("[%0*d] ", group_width, group);
1458
1459 for (unit_end += upa; unit < unit_end; unit++)
1460 if (gi->cpu_map[unit] != NR_CPUS)
1461 pr_cont("%0*d ",
1462 cpu_width, gi->cpu_map[unit]);
1463 else
1464 pr_cont("%s ", empty_str);
1465 }
1466 }
1467 pr_cont("\n");
1468}
1469
1470/**
1471 * pcpu_setup_first_chunk - initialize the first percpu chunk
1472 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1473 * @base_addr: mapped address
1474 *
1475 * Initialize the first percpu chunk which contains the kernel static
1476 * percpu area. This function is to be called from arch percpu area
1477 * setup path.
1478 *
1479 * @ai contains all information necessary to initialize the first
1480 * chunk and prime the dynamic percpu allocator.
1481 *
1482 * @ai->static_size is the size of static percpu area.
1483 *
1484 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1485 * reserve after the static area in the first chunk. This reserves
1486 * the first chunk such that it's available only through reserved
1487 * percpu allocation. This is primarily used to serve module percpu
1488 * static areas on architectures where the addressing model has
1489 * limited offset range for symbol relocations to guarantee module
1490 * percpu symbols fall inside the relocatable range.
1491 *
1492 * @ai->dyn_size determines the number of bytes available for dynamic
1493 * allocation in the first chunk. The area between @ai->static_size +
1494 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1495 *
1496 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1497 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1498 * @ai->dyn_size.
1499 *
1500 * @ai->atom_size is the allocation atom size and used as alignment
1501 * for vm areas.
1502 *
1503 * @ai->alloc_size is the allocation size and always multiple of
1504 * @ai->atom_size. This is larger than @ai->atom_size if
1505 * @ai->unit_size is larger than @ai->atom_size.
1506 *
1507 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1508 * percpu areas. Units which should be colocated are put into the
1509 * same group. Dynamic VM areas will be allocated according to these
1510 * groupings. If @ai->nr_groups is zero, a single group containing
1511 * all units is assumed.
1512 *
1513 * The caller should have mapped the first chunk at @base_addr and
1514 * copied static data to each unit.
1515 *
1516 * If the first chunk ends up with both reserved and dynamic areas, it
1517 * is served by two chunks - one to serve the core static and reserved
1518 * areas and the other for the dynamic area. They share the same vm
1519 * and page map but uses different area allocation map to stay away
1520 * from each other. The latter chunk is circulated in the chunk slots
1521 * and available for dynamic allocation like any other chunks.
1522 *
1523 * RETURNS:
1524 * 0 on success, -errno on failure.
1525 */
1526int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1527 void *base_addr)
1528{
1529 static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1530 static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1531 size_t dyn_size = ai->dyn_size;
1532 size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1533 struct pcpu_chunk *schunk, *dchunk = NULL;
1534 unsigned long *group_offsets;
1535 size_t *group_sizes;
1536 unsigned long *unit_off;
1537 unsigned int cpu;
1538 int *unit_map;
1539 int group, unit, i;
1540
1541#define PCPU_SETUP_BUG_ON(cond) do { \
1542 if (unlikely(cond)) { \
1543 pr_emerg("failed to initialize, %s\n", #cond); \
1544 pr_emerg("cpu_possible_mask=%*pb\n", \
1545 cpumask_pr_args(cpu_possible_mask)); \
1546 pcpu_dump_alloc_info(KERN_EMERG, ai); \
1547 BUG(); \
1548 } \
1549} while (0)
1550
1551 /* sanity checks */
1552 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1553#ifdef CONFIG_SMP
1554 PCPU_SETUP_BUG_ON(!ai->static_size);
1555 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
1556#endif
1557 PCPU_SETUP_BUG_ON(!base_addr);
1558 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
1559 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1560 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
1561 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1562 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1563 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1564
1565 /* process group information and build config tables accordingly */
1566 group_offsets = memblock_virt_alloc(ai->nr_groups *
1567 sizeof(group_offsets[0]), 0);
1568 group_sizes = memblock_virt_alloc(ai->nr_groups *
1569 sizeof(group_sizes[0]), 0);
1570 unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1571 unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
1572
1573 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1574 unit_map[cpu] = UINT_MAX;
1575
1576 pcpu_low_unit_cpu = NR_CPUS;
1577 pcpu_high_unit_cpu = NR_CPUS;
1578
1579 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1580 const struct pcpu_group_info *gi = &ai->groups[group];
1581
1582 group_offsets[group] = gi->base_offset;
1583 group_sizes[group] = gi->nr_units * ai->unit_size;
1584
1585 for (i = 0; i < gi->nr_units; i++) {
1586 cpu = gi->cpu_map[i];
1587 if (cpu == NR_CPUS)
1588 continue;
1589
1590 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1591 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1592 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1593
1594 unit_map[cpu] = unit + i;
1595 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1596
1597 /* determine low/high unit_cpu */
1598 if (pcpu_low_unit_cpu == NR_CPUS ||
1599 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1600 pcpu_low_unit_cpu = cpu;
1601 if (pcpu_high_unit_cpu == NR_CPUS ||
1602 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1603 pcpu_high_unit_cpu = cpu;
1604 }
1605 }
1606 pcpu_nr_units = unit;
1607
1608 for_each_possible_cpu(cpu)
1609 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1610
1611 /* we're done parsing the input, undefine BUG macro and dump config */
1612#undef PCPU_SETUP_BUG_ON
1613 pcpu_dump_alloc_info(KERN_DEBUG, ai);
1614
1615 pcpu_nr_groups = ai->nr_groups;
1616 pcpu_group_offsets = group_offsets;
1617 pcpu_group_sizes = group_sizes;
1618 pcpu_unit_map = unit_map;
1619 pcpu_unit_offsets = unit_off;
1620
1621 /* determine basic parameters */
1622 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1623 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1624 pcpu_atom_size = ai->atom_size;
1625 pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1626 BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1627
1628 /*
1629 * Allocate chunk slots. The additional last slot is for
1630 * empty chunks.
1631 */
1632 pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1633 pcpu_slot = memblock_virt_alloc(
1634 pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1635 for (i = 0; i < pcpu_nr_slots; i++)
1636 INIT_LIST_HEAD(&pcpu_slot[i]);
1637
1638 /*
1639 * Initialize static chunk. If reserved_size is zero, the
1640 * static chunk covers static area + dynamic allocation area
1641 * in the first chunk. If reserved_size is not zero, it
1642 * covers static area + reserved area (mostly used for module
1643 * static percpu allocation).
1644 */
1645 schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1646 INIT_LIST_HEAD(&schunk->list);
1647 INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
1648 schunk->base_addr = base_addr;
1649 schunk->map = smap;
1650 schunk->map_alloc = ARRAY_SIZE(smap);
1651 schunk->immutable = true;
1652 bitmap_fill(schunk->populated, pcpu_unit_pages);
1653 schunk->nr_populated = pcpu_unit_pages;
1654
1655 if (ai->reserved_size) {
1656 schunk->free_size = ai->reserved_size;
1657 pcpu_reserved_chunk = schunk;
1658 pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1659 } else {
1660 schunk->free_size = dyn_size;
1661 dyn_size = 0; /* dynamic area covered */
1662 }
1663 schunk->contig_hint = schunk->free_size;
1664
1665 schunk->map[0] = 1;
1666 schunk->map[1] = ai->static_size;
1667 schunk->map_used = 1;
1668 if (schunk->free_size)
1669 schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
1670 schunk->map[schunk->map_used] |= 1;
1671
1672 /* init dynamic chunk if necessary */
1673 if (dyn_size) {
1674 dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1675 INIT_LIST_HEAD(&dchunk->list);
1676 INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
1677 dchunk->base_addr = base_addr;
1678 dchunk->map = dmap;
1679 dchunk->map_alloc = ARRAY_SIZE(dmap);
1680 dchunk->immutable = true;
1681 bitmap_fill(dchunk->populated, pcpu_unit_pages);
1682 dchunk->nr_populated = pcpu_unit_pages;
1683
1684 dchunk->contig_hint = dchunk->free_size = dyn_size;
1685 dchunk->map[0] = 1;
1686 dchunk->map[1] = pcpu_reserved_chunk_limit;
1687 dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
1688 dchunk->map_used = 2;
1689 }
1690
1691 /* link the first chunk in */
1692 pcpu_first_chunk = dchunk ?: schunk;
1693 pcpu_nr_empty_pop_pages +=
1694 pcpu_count_occupied_pages(pcpu_first_chunk, 1);
1695 pcpu_chunk_relocate(pcpu_first_chunk, -1);
1696
1697 /* we're done */
1698 pcpu_base_addr = base_addr;
1699 return 0;
1700}
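
/*
 * Greatly simplified, single-group sketch of a first chunk setup path
 * (illustrative only; real code lives in pcpu_embed_first_chunk(),
 * pcpu_page_first_chunk() and arch code, and also handles multiple
 * groups, holes in cpu_map and error cases):
 *
 *	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, NULL);
 *	base = memblock_virt_alloc(ai->groups[0].nr_units * ai->unit_size,
 *				   ai->atom_size);
 *	for (unit = 0; unit < ai->groups[0].nr_units; unit++)
 *		memcpy(base + unit * ai->unit_size, __per_cpu_load,
 *		       ai->static_size);
 *	rc = pcpu_setup_first_chunk(ai, base);
 *	pcpu_free_alloc_info(ai);
 */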
1701
1702#ifdef CONFIG_SMP
1703
1704const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1705 [PCPU_FC_AUTO] = "auto",
1706 [PCPU_FC_EMBED] = "embed",
1707 [PCPU_FC_PAGE] = "page",
1708};
1709
1710enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1711
1712static int __init percpu_alloc_setup(char *str)
1713{
1714 if (!str)
1715 return -EINVAL;
1716
1717 if (0)
1718 /* nada */;
1719#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1720 else if (!strcmp(str, "embed"))
1721 pcpu_chosen_fc = PCPU_FC_EMBED;
1722#endif
1723#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1724 else if (!strcmp(str, "page"))
1725 pcpu_chosen_fc = PCPU_FC_PAGE;
1726#endif
1727 else
1728 pr_warn("unknown allocator %s specified\n", str);
1729
1730 return 0;
1731}
1732early_param("percpu_alloc", percpu_alloc_setup);
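
/*
 * Example: booting with "percpu_alloc=page" forces the page mapped
 * first chunk allocator on architectures that provide it, while
 * "percpu_alloc=embed" selects the embedding allocator; any other
 * value triggers the warning above and keeps PCPU_FC_AUTO.
 */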
1733
1734/*
1735 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1736 * Build it if needed by the arch config or the generic setup is going
1737 * to be used.
1738 */
1739#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1740 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1741#define BUILD_EMBED_FIRST_CHUNK
1742#endif
1743
1744/* build pcpu_page_first_chunk() iff needed by the arch config */
1745#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1746#define BUILD_PAGE_FIRST_CHUNK
1747#endif
1748
1749/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1750#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1751/**
1752 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1753 * @reserved_size: the size of reserved percpu area in bytes
1754 * @dyn_size: minimum free size for dynamic allocation in bytes
1755 * @atom_size: allocation atom size
1756 * @cpu_distance_fn: callback to determine distance between cpus, optional
1757 *
1758 * This function determines grouping of units, their mappings to cpus
1759 * and other parameters considering needed percpu size, allocation
1760 * atom size and distances between CPUs.
1761 *
1762 * Groups are always multiples of atom size and CPUs which are of
1763 * LOCAL_DISTANCE both ways are grouped together and share space for
1764 * units in the same group. The returned configuration is guaranteed
1765 * to have CPUs on different nodes in different groups and >=75% usage
1766 * of allocated virtual address space.
1767 *
1768 * RETURNS:
1769 * On success, pointer to the new allocation_info is returned. On
1770 * failure, ERR_PTR value is returned.
1771 */
1772static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1773 size_t reserved_size, size_t dyn_size,
1774 size_t atom_size,
1775 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1776{
1777 static int group_map[NR_CPUS] __initdata;
1778 static int group_cnt[NR_CPUS] __initdata;
1779 const size_t static_size = __per_cpu_end - __per_cpu_start;
1780 int nr_groups = 1, nr_units = 0;
1781 size_t size_sum, min_unit_size, alloc_size;
1782 int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */
1783 int last_allocs, group, unit;
1784 unsigned int cpu, tcpu;
1785 struct pcpu_alloc_info *ai;
1786 unsigned int *cpu_map;
1787
1788 /* this function may be called multiple times */
1789 memset(group_map, 0, sizeof(group_map));
1790 memset(group_cnt, 0, sizeof(group_cnt));
1791
1792 /* calculate size_sum and ensure dyn_size is enough for early alloc */
1793 size_sum = PFN_ALIGN(static_size + reserved_size +
1794 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1795 dyn_size = size_sum - static_size - reserved_size;
1796
1797 /*
1798	 * Determine min_unit_size, alloc_size and max_upa such that
1799	 * alloc_size is a multiple of atom_size and is the smallest
1800	 * size which can accommodate 4k aligned segments which are equal
1801	 * to or larger than min_unit_size.
1802 */
1803 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1804
1805 alloc_size = roundup(min_unit_size, atom_size);
1806 upa = alloc_size / min_unit_size;
1807 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1808 upa--;
1809 max_upa = upa;
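
	/*
	 * Worked example (illustrative numbers, assuming 4k pages and a
	 * PCPU_MIN_UNIT_SIZE of 32k): if size_sum comes to 44k and
	 * atom_size is 2M, min_unit_size = 44k and alloc_size = 2M.
	 * upa starts at 2M / 44k = 46 and is walked down until 2M / upa
	 * is a page aligned exact divisor, which first happens at
	 * upa = 32 (unit size 64k), so each 2M atom can carry up to 32
	 * units here.
	 */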
1810
1811 /* group cpus according to their proximity */
1812 for_each_possible_cpu(cpu) {
1813 group = 0;
1814 next_group:
1815 for_each_possible_cpu(tcpu) {
1816 if (cpu == tcpu)
1817 break;
1818 if (group_map[tcpu] == group && cpu_distance_fn &&
1819 (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1820 cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1821 group++;
1822 nr_groups = max(nr_groups, group + 1);
1823 goto next_group;
1824 }
1825 }
1826 group_map[cpu] = group;
1827 group_cnt[group]++;
1828 }
1829
1830 /*
1831 * Expand unit size until address space usage goes over 75%
1832 * and then as much as possible without using more address
1833 * space.
1834 */
1835 last_allocs = INT_MAX;
1836 for (upa = max_upa; upa; upa--) {
1837 int allocs = 0, wasted = 0;
1838
1839 if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1840 continue;
1841
1842 for (group = 0; group < nr_groups; group++) {
1843 int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1844 allocs += this_allocs;
1845 wasted += this_allocs * upa - group_cnt[group];
1846 }
1847
1848 /*
1849 * Don't accept if wastage is over 1/3. The
1850 * greater-than comparison ensures upa==1 always
1851 * passes the following check.
1852 */
1853 if (wasted > num_possible_cpus() / 3)
1854 continue;
1855
1856 /* and then don't consume more memory */
1857 if (allocs > last_allocs)
1858 break;
1859 last_allocs = allocs;
1860 best_upa = upa;
1861 }
1862 upa = best_upa;
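
	/*
	 * Illustrative example: continuing with alloc_size = 2M and
	 * max_upa = 32, take two groups of 6 and 10 possible CPUs
	 * (16 total, so wastage above 16 / 3 = 5 units is rejected;
	 * upa values that don't divide alloc_size evenly are skipped):
	 *
	 *	upa = 32: allocs = 1 + 1, wasted = 26 + 22 -> rejected
	 *	upa = 16: allocs = 1 + 1, wasted = 10 +  6 -> rejected
	 *	upa =  8: allocs = 1 + 2, wasted =  2 +  6 -> rejected
	 *	upa =  4: allocs = 2 + 3, wasted =  2 +  2 -> accepted
	 *	upa =  2: allocs = 3 + 5 > last_allocs     -> stop
	 *
	 * best_upa ends up as 4, i.e. a unit size of 512k.
	 */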
1863
1864 /* allocate and fill alloc_info */
1865 for (group = 0; group < nr_groups; group++)
1866 nr_units += roundup(group_cnt[group], upa);
1867
1868 ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1869 if (!ai)
1870 return ERR_PTR(-ENOMEM);
1871 cpu_map = ai->groups[0].cpu_map;
1872
1873 for (group = 0; group < nr_groups; group++) {
1874 ai->groups[group].cpu_map = cpu_map;
1875 cpu_map += roundup(group_cnt[group], upa);
1876 }
1877
1878 ai->static_size = static_size;
1879 ai->reserved_size = reserved_size;
1880 ai->dyn_size = dyn_size;
1881 ai->unit_size = alloc_size / upa;
1882 ai->atom_size = atom_size;
1883 ai->alloc_size = alloc_size;
1884
1885 for (group = 0, unit = 0; group_cnt[group]; group++) {
1886 struct pcpu_group_info *gi = &ai->groups[group];
1887
1888 /*
1889 * Initialize base_offset as if all groups are located
1890 * back-to-back. The caller should update this to
1891 * reflect actual allocation.
1892 */
1893 gi->base_offset = unit * ai->unit_size;
1894
1895 for_each_possible_cpu(cpu)
1896 if (group_map[cpu] == group)
1897 gi->cpu_map[gi->nr_units++] = cpu;
1898 gi->nr_units = roundup(gi->nr_units, upa);
1899 unit += gi->nr_units;
1900 }
1901 BUG_ON(unit != nr_units);
1902
1903 return ai;
1904}
1905#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1906
1907#if defined(BUILD_EMBED_FIRST_CHUNK)
1908/**
1909 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1910 * @reserved_size: the size of reserved percpu area in bytes
1911 * @dyn_size: minimum free size for dynamic allocation in bytes
1912 * @atom_size: allocation atom size
1913 * @cpu_distance_fn: callback to determine distance between cpus, optional
1914 * @alloc_fn: function to allocate percpu page
1915 * @free_fn: function to free percpu page
1916 *
1917 * This is a helper to ease setting up embedded first percpu chunk and
1918 * can be called where pcpu_setup_first_chunk() is expected.
1919 *
1920 * If this function is used to setup the first chunk, it is allocated
1921 * by calling @alloc_fn and used as-is without being mapped into
1922 * vmalloc area. Allocations are always whole multiples of @atom_size
1923 * aligned to @atom_size.
1924 *
1925 * This enables the first chunk to piggy back on the linear physical
1926 * mapping which often uses larger page size. Please note that this
1927 * can result in very sparse cpu->unit mapping on NUMA machines thus
1928 * requiring large vmalloc address space. Don't use this allocator if
1929 * vmalloc space is not orders of magnitude larger than distances
1930 * between node memory addresses (ie. 32bit NUMA machines).
1931 *
1932 * @dyn_size specifies the minimum dynamic area size.
1933 *
1934 * If the needed size is smaller than the minimum or specified unit
1935 * size, the leftover is returned using @free_fn.
1936 *
1937 * RETURNS:
1938 * 0 on success, -errno on failure.
1939 */
1940int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1941 size_t atom_size,
1942 pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1943 pcpu_fc_alloc_fn_t alloc_fn,
1944 pcpu_fc_free_fn_t free_fn)
1945{
1946 void *base = (void *)ULONG_MAX;
1947 void **areas = NULL;
1948 struct pcpu_alloc_info *ai;
1949 size_t size_sum, areas_size, max_distance;
1950 int group, i, rc;
1951
1952 ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1953 cpu_distance_fn);
1954 if (IS_ERR(ai))
1955 return PTR_ERR(ai);
1956
1957 size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1958 areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1959
1960 areas = memblock_virt_alloc_nopanic(areas_size, 0);
1961 if (!areas) {
1962 rc = -ENOMEM;
1963 goto out_free;
1964 }
1965
1966 /* allocate, copy and determine base address */
1967 for (group = 0; group < ai->nr_groups; group++) {
1968 struct pcpu_group_info *gi = &ai->groups[group];
1969 unsigned int cpu = NR_CPUS;
1970 void *ptr;
1971
1972 for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1973 cpu = gi->cpu_map[i];
1974 BUG_ON(cpu == NR_CPUS);
1975
1976 /* allocate space for the whole group */
1977 ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1978 if (!ptr) {
1979 rc = -ENOMEM;
1980 goto out_free_areas;
1981 }
1982 /* kmemleak tracks the percpu allocations separately */
1983 kmemleak_free(ptr);
1984 areas[group] = ptr;
1985
1986 base = min(ptr, base);
1987 }
1988
1989 /*
1990 * Copy data and free unused parts. This should happen after all
1991 * allocations are complete; otherwise, we may end up with
1992 * overlapping groups.
1993 */
1994 for (group = 0; group < ai->nr_groups; group++) {
1995 struct pcpu_group_info *gi = &ai->groups[group];
1996 void *ptr = areas[group];
1997
1998 for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1999 if (gi->cpu_map[i] == NR_CPUS) {
2000 /* unused unit, free whole */
2001 free_fn(ptr, ai->unit_size);
2002 continue;
2003 }
2004 /* copy and return the unused part */
2005 memcpy(ptr, __per_cpu_load, ai->static_size);
2006 free_fn(ptr + size_sum, ai->unit_size - size_sum);
2007 }
2008 }
2009
2010 /* base address is now known, determine group base offsets */
2011 max_distance = 0;
2012 for (group = 0; group < ai->nr_groups; group++) {
2013 ai->groups[group].base_offset = areas[group] - base;
2014 max_distance = max_t(size_t, max_distance,
2015 ai->groups[group].base_offset);
2016 }
2017 max_distance += ai->unit_size;
2018
2019 /* warn if maximum distance is further than 75% of vmalloc space */
2020 if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2021 pr_warn("max_distance=0x%zx too large for vmalloc space 0x%lx\n",
2022 max_distance, VMALLOC_TOTAL);
2023#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2024 /* and fail if we have fallback */
2025 rc = -EINVAL;
2026 goto out_free;
2027#endif
2028 }
2029
2030 pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2031 PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2032 ai->dyn_size, ai->unit_size);
2033
2034 rc = pcpu_setup_first_chunk(ai, base);
2035 goto out_free;
2036
2037out_free_areas:
2038 for (group = 0; group < ai->nr_groups; group++)
2039 if (areas[group])
2040 free_fn(areas[group],
2041 ai->groups[group].nr_units * ai->unit_size);
2042out_free:
2043 pcpu_free_alloc_info(ai);
2044 if (areas)
2045 memblock_free_early(__pa(areas), areas_size);
2046 return rc;
2047}
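
/*
 * Illustrative sketch (not lifted from any particular arch): a NUMA aware
 * caller would typically pass a large atom_size so units can sit on huge
 * page mappings and a distance callback derived from its node topology.
 * my_cpu_distance(), my_alloc_fn() and my_free_fn() below are hypothetical:
 *
 *	static int __init my_cpu_distance(unsigned int from, unsigned int to)
 *	{
 *		return cpu_to_node(from) == cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    my_cpu_distance, my_alloc_fn, my_free_fn);
 *
 * On failure such a caller usually falls back to pcpu_page_first_chunk().
 */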
2048#endif /* BUILD_EMBED_FIRST_CHUNK */
2049
2050#ifdef BUILD_PAGE_FIRST_CHUNK
2051/**
2052 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2053 * @reserved_size: the size of reserved percpu area in bytes
2054 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2055 * @free_fn: function to free percpu page, always called with PAGE_SIZE
2056 * @populate_pte_fn: function to populate pte
2057 *
2058 * This is a helper to ease setting up page-remapped first percpu
2059 * chunk and can be called where pcpu_setup_first_chunk() is expected.
2060 *
2061 * This is the basic allocator. The static percpu area is allocated
2062 * page-by-page into the vmalloc area.
2063 *
2064 * RETURNS:
2065 * 0 on success, -errno on failure.
2066 */
2067int __init pcpu_page_first_chunk(size_t reserved_size,
2068 pcpu_fc_alloc_fn_t alloc_fn,
2069 pcpu_fc_free_fn_t free_fn,
2070 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2071{
2072 static struct vm_struct vm;
2073 struct pcpu_alloc_info *ai;
2074 char psize_str[16];
2075 int unit_pages;
2076 size_t pages_size;
2077 struct page **pages;
2078 int unit, i, j, rc;
2079
2080 snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2081
2082 ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2083 if (IS_ERR(ai))
2084 return PTR_ERR(ai);
2085 BUG_ON(ai->nr_groups != 1);
2086 BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
2087
2088 unit_pages = ai->unit_size >> PAGE_SHIFT;
2089
2090 /* unaligned allocations can't be freed, round up to page size */
2091 pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2092 sizeof(pages[0]));
2093 pages = memblock_virt_alloc(pages_size, 0);
2094
2095 /* allocate pages */
2096 j = 0;
2097 for (unit = 0; unit < num_possible_cpus(); unit++)
2098 for (i = 0; i < unit_pages; i++) {
2099 unsigned int cpu = ai->groups[0].cpu_map[unit];
2100 void *ptr;
2101
2102 ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2103 if (!ptr) {
2104 pr_warn("failed to allocate %s page for cpu%u\n",
2105 psize_str, cpu);
2106 goto enomem;
2107 }
2108 /* kmemleak tracks the percpu allocations separately */
2109 kmemleak_free(ptr);
2110 pages[j++] = virt_to_page(ptr);
2111 }
2112
2113 /* allocate vm area, map the pages and copy static data */
2114 vm.flags = VM_ALLOC;
2115 vm.size = num_possible_cpus() * ai->unit_size;
2116 vm_area_register_early(&vm, PAGE_SIZE);
2117
2118 for (unit = 0; unit < num_possible_cpus(); unit++) {
2119 unsigned long unit_addr =
2120 (unsigned long)vm.addr + unit * ai->unit_size;
2121
2122 for (i = 0; i < unit_pages; i++)
2123 populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2124
2125 /* pte already populated, the following shouldn't fail */
2126 rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2127 unit_pages);
2128 if (rc < 0)
2129 panic("failed to map percpu area, err=%d\n", rc);
2130
2131 /*
2132 * FIXME: Archs with virtual cache should flush local
2133 * cache for the linear mapping here - something
2134 * equivalent to flush_cache_vmap() on the local cpu.
2135 * flush_cache_vmap() can't be used as most supporting
2136 * data structures are not set up yet.
2137 */
2138
2139 /* copy static data */
2140 memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2141 }
2142
2143 /* we're ready, commit */
2144 pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2145 unit_pages, psize_str, vm.addr, ai->static_size,
2146 ai->reserved_size, ai->dyn_size);
2147
2148 rc = pcpu_setup_first_chunk(ai, vm.addr);
2149 goto out_free_ar;
2150
2151enomem:
2152 while (--j >= 0)
2153 free_fn(page_address(pages[j]), PAGE_SIZE);
2154 rc = -ENOMEM;
2155out_free_ar:
2156 memblock_free_early(__pa(pages), pages_size);
2157 pcpu_free_alloc_info(ai);
2158 return rc;
2159}
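
/*
 * Illustrative call sketch (hypothetical helpers): an arch that selects
 * CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK typically ends up with something
 * along the lines of
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_alloc_fn, my_free_fn,
 *				   my_populate_pte_fn);
 *
 * where my_populate_pte_fn(addr) only needs to make sure the page table
 * entries covering the single page at @addr exist; the actual mapping of
 * the allocated pages is done by __pcpu_map_pages().
 */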
2160#endif /* BUILD_PAGE_FIRST_CHUNK */
2161
2162#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
2163/*
2164 * Generic SMP percpu area setup.
2165 *
2166 * The embedding helper is used because its behavior closely resembles
2167 * the original non-dynamic generic percpu area setup. This is
2168 * important because many archs have addressing restrictions and might
2169 * fail if the percpu area is located far away from the previous
2170 * location. As an added bonus, in non-NUMA cases, embedding is
2171 * generally a good idea TLB-wise because percpu area can piggy back
2172 * on the physical linear memory mapping which uses large page
2173 * mappings on applicable archs.
2174 */
2175unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2176EXPORT_SYMBOL(__per_cpu_offset);
2177
2178static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2179 size_t align)
2180{
2181 return memblock_virt_alloc_from_nopanic(
2182 size, align, __pa(MAX_DMA_ADDRESS));
2183}
2184
2185static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2186{
2187 memblock_free_early(__pa(ptr), size);
2188}
2189
2190void __init setup_per_cpu_areas(void)
2191{
2192 unsigned long delta;
2193 unsigned int cpu;
2194 int rc;
2195
2196 /*
2197 * Always reserve area for module percpu variables. That's
2198 * what the legacy allocator did.
2199 */
2200 rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2201 PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2202 pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2203 if (rc < 0)
2204 panic("Failed to initialize percpu areas.");
2205
2206 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2207 for_each_possible_cpu(cpu)
2208 __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2209}
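
/*
 * With the offsets above in place, a static percpu access on an arch using
 * this generic setup effectively boils down to (illustrative sketch; the
 * real macros live in include/linux/percpu-defs.h):
 *
 *	ptr = (typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu]);
 *
 * i.e. the variable's link-time address plus the cpu's unit offset inside
 * the first chunk.
 */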
2210#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2211
2212#else /* CONFIG_SMP */
2213
2214/*
2215 * UP percpu area setup.
2216 *
2217 * UP always uses km-based percpu allocator with identity mapping.
2218 * Static percpu variables are indistinguishable from the usual static
2219 * variables and don't require any special preparation.
2220 */
2221void __init setup_per_cpu_areas(void)
2222{
2223 const size_t unit_size =
2224 roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2225 PERCPU_DYNAMIC_RESERVE));
2226 struct pcpu_alloc_info *ai;
2227 void *fc;
2228
2229 ai = pcpu_alloc_alloc_info(1, 1);
2230 fc = memblock_virt_alloc_from_nopanic(unit_size,
2231 PAGE_SIZE,
2232 __pa(MAX_DMA_ADDRESS));
2233 if (!ai || !fc)
2234 panic("Failed to allocate memory for percpu areas.");
2235 /* kmemleak tracks the percpu allocations separately */
2236 kmemleak_free(fc);
2237
2238 ai->dyn_size = unit_size;
2239 ai->unit_size = unit_size;
2240 ai->atom_size = unit_size;
2241 ai->alloc_size = unit_size;
2242 ai->groups[0].nr_units = 1;
2243 ai->groups[0].cpu_map[0] = 0;
2244
2245 if (pcpu_setup_first_chunk(ai, fc) < 0)
2246 panic("Failed to initialize percpu areas.");
2247}
2248
2249#endif /* CONFIG_SMP */
2250
2251/*
2252 * First and reserved chunks are initialized with temporary allocation
2253 * map in initdata so that they can be used before slab is online.
2254 * This function is called after slab is brought up and replaces those
2255 * with properly allocated maps.
2256 */
2257void __init percpu_init_late(void)
2258{
2259 struct pcpu_chunk *target_chunks[] =
2260 { pcpu_first_chunk, pcpu_reserved_chunk, NULL };
2261 struct pcpu_chunk *chunk;
2262 unsigned long flags;
2263 int i;
2264
2265 for (i = 0; (chunk = target_chunks[i]); i++) {
2266 int *map;
2267 const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
2268
2269 BUILD_BUG_ON(size > PAGE_SIZE);
2270
2271 map = pcpu_mem_zalloc(size);
2272 BUG_ON(!map);
2273
2274 spin_lock_irqsave(&pcpu_lock, flags);
2275 memcpy(map, chunk->map, size);
2276 chunk->map = map;
2277 spin_unlock_irqrestore(&pcpu_lock, flags);
2278 }
2279}
2280
2281/*
2282 * Percpu allocator is initialized early during boot when neither slab nor
2283 * workqueue is available. Plug async management until everything is up
2284 * and running.
2285 */
2286static int __init percpu_enable_async(void)
2287{
2288 pcpu_async_enabled = true;
2289 return 0;
2290}
2291subsys_initcall(percpu_enable_async);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * mm/percpu.c - percpu memory allocator
4 *
5 * Copyright (C) 2009 SUSE Linux Products GmbH
6 * Copyright (C) 2009 Tejun Heo <tj@kernel.org>
7 *
8 * Copyright (C) 2017 Facebook Inc.
9 * Copyright (C) 2017 Dennis Zhou <dennis@kernel.org>
10 *
11 * The percpu allocator handles both static and dynamic areas. Percpu
12 * areas are allocated in chunks which are divided into units. There is
13 * a 1-to-1 mapping for units to possible cpus. These units are grouped
14 * based on NUMA properties of the machine.
15 *
16 * c0 c1 c2
17 * ------------------- ------------------- ------------
18 * | u0 | u1 | u2 | u3 | | u0 | u1 | u2 | u3 | | u0 | u1 | u
19 * ------------------- ...... ------------------- .... ------------
20 *
21 * Allocation is done by offsets into a unit's address space. Ie., an
22 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
23 * c1:u1, c1:u2, etc. On NUMA machines, the mapping may be non-linear
24 * and even sparse. Access is handled by configuring percpu base
25 * registers according to the cpu to unit mappings and offsetting the
26 * base address using pcpu_unit_size.
27 *
28 * There is special consideration for the first chunk which must handle
29 * the static percpu variables in the kernel image as allocation services
30 * are not online yet. In short, the first chunk is structured like so:
31 *
32 * <Static | [Reserved] | Dynamic>
33 *
34 * The static data is copied from the original section managed by the
35 * linker. The reserved section, if non-zero, primarily manages static
36 * percpu variables from kernel modules. Finally, the dynamic section
37 * takes care of normal allocations.
38 *
39 * The allocator organizes chunks into lists according to free size and
40 * memcg-awareness. To make a percpu allocation memcg-aware, the __GFP_ACCOUNT
41 * flag should be passed. All memcg-aware allocations share one set of
42 * chunks, while unaccounted allocations and allocations performed by
43 * processes belonging to the root memory cgroup use the second set.
44 *
45 * The allocator tries to allocate from the fullest chunk first. Each chunk
46 * is managed by a bitmap with metadata blocks. The allocation map is updated
47 * on every allocation and free to reflect the current state while the boundary
48 * map is only updated on allocation. Each metadata block contains
49 * information to help mitigate the need to iterate over large portions
50 * of the bitmap. The reverse mapping from page to chunk is stored in
51 * the page's index. Lastly, units are lazily backed and grow in unison.
52 *
53 * There is a unique conversion that goes on here between bytes and bits.
54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE. The chunk
55 * tracks the number of pages it is responsible for in nr_pages. Helper
56 * functions are used to convert between bytes, bits, and blocks.
57 * All hints are managed in bits unless explicitly stated.
58 *
59 * To use this allocator, arch code should do the following:
60 *
61 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
62 * regular address to percpu pointer and back if they need to be
63 * different from the default
64 *
65 * - use pcpu_setup_first_chunk() during percpu area initialization to
66 * setup the first chunk containing the kernel static percpu area
67 */
68
69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70
71#include <linux/bitmap.h>
72#include <linux/cpumask.h>
73#include <linux/memblock.h>
74#include <linux/err.h>
75#include <linux/lcm.h>
76#include <linux/list.h>
77#include <linux/log2.h>
78#include <linux/mm.h>
79#include <linux/module.h>
80#include <linux/mutex.h>
81#include <linux/percpu.h>
82#include <linux/pfn.h>
83#include <linux/slab.h>
84#include <linux/spinlock.h>
85#include <linux/vmalloc.h>
86#include <linux/workqueue.h>
87#include <linux/kmemleak.h>
88#include <linux/sched.h>
89#include <linux/sched/mm.h>
90#include <linux/memcontrol.h>
91
92#include <asm/cacheflush.h>
93#include <asm/sections.h>
94#include <asm/tlbflush.h>
95#include <asm/io.h>
96
97#define CREATE_TRACE_POINTS
98#include <trace/events/percpu.h>
99
100#include "percpu-internal.h"
101
102/*
103 * The slots are sorted by the size of the biggest contiguous free area.
104 * 1-31 bytes share the same slot.
105 */
106#define PCPU_SLOT_BASE_SHIFT 5
107/* chunks in slots below this are subject to being sidelined on failed alloc */
108#define PCPU_SLOT_FAIL_THRESHOLD 3
109
110#define PCPU_EMPTY_POP_PAGES_LOW 2
111#define PCPU_EMPTY_POP_PAGES_HIGH 4
112
113#ifdef CONFIG_SMP
114/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
115#ifndef __addr_to_pcpu_ptr
116#define __addr_to_pcpu_ptr(addr) \
117 (void __percpu *)((unsigned long)(addr) - \
118 (unsigned long)pcpu_base_addr + \
119 (unsigned long)__per_cpu_start)
120#endif
121#ifndef __pcpu_ptr_to_addr
122#define __pcpu_ptr_to_addr(ptr) \
123 (void __force *)((unsigned long)(ptr) + \
124 (unsigned long)pcpu_base_addr - \
125 (unsigned long)__per_cpu_start)
126#endif
127#else /* CONFIG_SMP */
128/* on UP, it's always identity mapped */
129#define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr)
130#define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr)
131#endif /* CONFIG_SMP */
132
133static int pcpu_unit_pages __ro_after_init;
134static int pcpu_unit_size __ro_after_init;
135static int pcpu_nr_units __ro_after_init;
136static int pcpu_atom_size __ro_after_init;
137int pcpu_nr_slots __ro_after_init;
138static int pcpu_free_slot __ro_after_init;
139int pcpu_sidelined_slot __ro_after_init;
140int pcpu_to_depopulate_slot __ro_after_init;
141static size_t pcpu_chunk_struct_size __ro_after_init;
142
143/* cpus with the lowest and highest unit addresses */
144static unsigned int pcpu_low_unit_cpu __ro_after_init;
145static unsigned int pcpu_high_unit_cpu __ro_after_init;
146
147/* the address of the first chunk which starts with the kernel static area */
148void *pcpu_base_addr __ro_after_init;
149EXPORT_SYMBOL_GPL(pcpu_base_addr);
150
151static const int *pcpu_unit_map __ro_after_init; /* cpu -> unit */
152const unsigned long *pcpu_unit_offsets __ro_after_init; /* cpu -> unit offset */
153
154/* group information, used for vm allocation */
155static int pcpu_nr_groups __ro_after_init;
156static const unsigned long *pcpu_group_offsets __ro_after_init;
157static const size_t *pcpu_group_sizes __ro_after_init;
158
159/*
160 * The first chunk which always exists. Note that unlike other
161 * chunks, this one can be allocated and mapped in several different
162 * ways and thus often doesn't live in the vmalloc area.
163 */
164struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
165
166/*
167 * Optional reserved chunk. This chunk reserves part of the first
168 * chunk and serves it for reserved allocations. When the reserved
169 * region doesn't exist, the following variable is NULL.
170 */
171struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
172
173DEFINE_SPINLOCK(pcpu_lock); /* all internal data structures */
174static DEFINE_MUTEX(pcpu_alloc_mutex); /* chunk create/destroy, [de]pop, map ext */
175
176struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
177
178/* chunks which need their map areas extended, protected by pcpu_lock */
179static LIST_HEAD(pcpu_map_extend_chunks);
180
181/*
182 * The number of empty populated pages, protected by pcpu_lock.
183 * The reserved chunk doesn't contribute to the count.
184 */
185int pcpu_nr_empty_pop_pages;
186
187/*
188 * The number of populated pages in use by the allocator, protected by
189 * pcpu_lock. This number is kept per unit per chunk (i.e. when a page gets
190 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
191 * and increments/decrements this count by 1).
192 */
193static unsigned long pcpu_nr_populated;
194
195/*
196 * Balance work is used to populate or destroy chunks asynchronously. We
197 * try to keep the number of populated free pages between
198 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
199 * empty chunk.
200 */
201static void pcpu_balance_workfn(struct work_struct *work);
202static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
203static bool pcpu_async_enabled __read_mostly;
204static bool pcpu_atomic_alloc_failed;
205
206static void pcpu_schedule_balance_work(void)
207{
208 if (pcpu_async_enabled)
209 schedule_work(&pcpu_balance_work);
210}
211
212/**
213 * pcpu_addr_in_chunk - check if the address is served from this chunk
214 * @chunk: chunk of interest
215 * @addr: percpu address
216 *
217 * RETURNS:
218 * True if the address is served from this chunk.
219 */
220static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
221{
222 void *start_addr, *end_addr;
223
224 if (!chunk)
225 return false;
226
227 start_addr = chunk->base_addr + chunk->start_offset;
228 end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
229 chunk->end_offset;
230
231 return addr >= start_addr && addr < end_addr;
232}
233
234static int __pcpu_size_to_slot(int size)
235{
236 int highbit = fls(size); /* size is in bytes */
237 return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
238}
239
240static int pcpu_size_to_slot(int size)
241{
242 if (size == pcpu_unit_size)
243 return pcpu_free_slot;
244 return __pcpu_size_to_slot(size);
245}
246
247static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
248{
249 const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
250
251 if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
252 chunk_md->contig_hint == 0)
253 return 0;
254
255 return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
256}
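
/*
 * Worked example of the size -> slot mapping above (PCPU_SLOT_BASE_SHIFT
 * is 5): a chunk whose largest contiguous free area is 1k maps to slot
 * fls(1024) - 5 + 2 = 8, a 64 byte area to slot 4, and anything under
 * 16 bytes bottoms out at slot 1.  A chunk that cannot even serve
 * PCPU_MIN_ALLOC_SIZE is parked in slot 0 by pcpu_chunk_slot().
 */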
257
258/* set the pointer to a chunk in a page struct */
259static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
260{
261 page->index = (unsigned long)pcpu;
262}
263
264/* obtain pointer to a chunk from a page struct */
265static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
266{
267 return (struct pcpu_chunk *)page->index;
268}
269
270static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
271{
272 return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
273}
274
275static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
276{
277 return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
278}
279
280static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
281 unsigned int cpu, int page_idx)
282{
283 return (unsigned long)chunk->base_addr +
284 pcpu_unit_page_offset(cpu, page_idx);
285}
286
287/*
288 * The following are helper functions to help access bitmaps and convert
289 * between bitmap offsets to address offsets.
290 */
291static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
292{
293 return chunk->alloc_map +
294 (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
295}
296
297static unsigned long pcpu_off_to_block_index(int off)
298{
299 return off / PCPU_BITMAP_BLOCK_BITS;
300}
301
302static unsigned long pcpu_off_to_block_off(int off)
303{
304 return off & (PCPU_BITMAP_BLOCK_BITS - 1);
305}
306
307static unsigned long pcpu_block_off_to_off(int index, int off)
308{
309 return index * PCPU_BITMAP_BLOCK_BITS + off;
310}
311
312/**
313 * pcpu_check_block_hint - check against the contig hint
314 * @block: block of interest
315 * @bits: size of allocation
316 * @align: alignment of area (max PAGE_SIZE)
317 *
318 * Check to see if the allocation can fit in the block's contig hint.
319 * Note, a chunk uses the same hints as a block so this can also check against
320 * the chunk's contig hint.
321 */
322static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
323 size_t align)
324{
325 int bit_off = ALIGN(block->contig_hint_start, align) -
326 block->contig_hint_start;
327
328 return bit_off + bits <= block->contig_hint;
329}
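
/*
 * Small worked example: with contig_hint_start = 6, contig_hint = 10 and
 * a request for 8 bits aligned to 4, the aligned start costs
 * ALIGN(6, 4) - 6 = 2 bits and 2 + 8 <= 10, so the hint may fit it;
 * a 9 bit request would be turned away without scanning.
 */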
330
331/*
332 * pcpu_next_hint - determine which hint to use
333 * @block: block of interest
334 * @alloc_bits: size of allocation
335 *
336 * This determines if we should scan based on the scan_hint or first_free.
337 * In general, we want to scan from first_free to fulfill allocations by
338 * first fit. However, if we know a scan_hint at position scan_hint_start
339 * cannot fulfill an allocation, we can begin scanning from there knowing
340 * the contig_hint will be our fallback.
341 */
342static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
343{
344 /*
345 * The three conditions below determine if we can skip past the
346 * scan_hint. First, does the scan hint exist. Second, is the
347 * contig_hint after the scan_hint (possibly not true iff
348 * contig_hint == scan_hint). Third, is the allocation request
349 * larger than the scan_hint.
350 */
351 if (block->scan_hint &&
352 block->contig_hint_start > block->scan_hint_start &&
353 alloc_bits > block->scan_hint)
354 return block->scan_hint_start + block->scan_hint;
355
356 return block->first_free;
357}
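
/*
 * Example: with scan_hint = 2 at scan_hint_start = 10 and the contig hint
 * starting at 20, a request for 3 bits cannot fit in the scan hint region,
 * so scanning starts at 10 + 2 = 12 with the contig hint as the fallback;
 * a 2 bit request would start from first_free instead.
 */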
358
359/**
360 * pcpu_next_md_free_region - finds the next hint free area
361 * @chunk: chunk of interest
362 * @bit_off: chunk offset
363 * @bits: size of free area
364 *
365 * Helper function for pcpu_for_each_md_free_region. It checks
366 * block->contig_hint and performs aggregation across blocks to find the
367 * next hint. It modifies bit_off and bits in-place to be consumed in the
368 * loop.
369 */
370static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
371 int *bits)
372{
373 int i = pcpu_off_to_block_index(*bit_off);
374 int block_off = pcpu_off_to_block_off(*bit_off);
375 struct pcpu_block_md *block;
376
377 *bits = 0;
378 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
379 block++, i++) {
380 /* handles contig area across blocks */
381 if (*bits) {
382 *bits += block->left_free;
383 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
384 continue;
385 return;
386 }
387
388 /*
389 * This checks three things. First is there a contig_hint to
390 * check. Second, have we checked this hint before by
391 * comparing the block_off. Third, is this the same as the
392 * right contig hint. In the last case, it spills over into
393 * the next block and should be handled by the contig area
394 * across blocks code.
395 */
396 *bits = block->contig_hint;
397 if (*bits && block->contig_hint_start >= block_off &&
398 *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
399 *bit_off = pcpu_block_off_to_off(i,
400 block->contig_hint_start);
401 return;
402 }
403 /* reset to satisfy the second predicate above */
404 block_off = 0;
405
406 *bits = block->right_free;
407 *bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
408 }
409}
410
411/**
412 * pcpu_next_fit_region - finds fit areas for a given allocation request
413 * @chunk: chunk of interest
414 * @alloc_bits: size of allocation
415 * @align: alignment of area (max PAGE_SIZE)
416 * @bit_off: chunk offset
417 * @bits: size of free area
418 *
419 * Finds the next free region that is viable for use with a given size and
420 * alignment. This only returns if there is a valid area to be used for this
421 * allocation. block->first_free is returned if the allocation request fits
422 * within the block to see if the request can be fulfilled prior to the contig
423 * hint.
424 */
425static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
426 int align, int *bit_off, int *bits)
427{
428 int i = pcpu_off_to_block_index(*bit_off);
429 int block_off = pcpu_off_to_block_off(*bit_off);
430 struct pcpu_block_md *block;
431
432 *bits = 0;
433 for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
434 block++, i++) {
435 /* handles contig area across blocks */
436 if (*bits) {
437 *bits += block->left_free;
438 if (*bits >= alloc_bits)
439 return;
440 if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
441 continue;
442 }
443
444 /* check block->contig_hint */
445 *bits = ALIGN(block->contig_hint_start, align) -
446 block->contig_hint_start;
447 /*
448 * This uses the block offset to determine if this has been
449 * checked in the prior iteration.
450 */
451 if (block->contig_hint &&
452 block->contig_hint_start >= block_off &&
453 block->contig_hint >= *bits + alloc_bits) {
454 int start = pcpu_next_hint(block, alloc_bits);
455
456 *bits += alloc_bits + block->contig_hint_start -
457 start;
458 *bit_off = pcpu_block_off_to_off(i, start);
459 return;
460 }
461 /* reset to satisfy the second predicate above */
462 block_off = 0;
463
464 *bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
465 align);
466 *bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
467 *bit_off = pcpu_block_off_to_off(i, *bit_off);
468 if (*bits >= alloc_bits)
469 return;
470 }
471
472 /* no valid offsets were found - fail condition */
473 *bit_off = pcpu_chunk_map_bits(chunk);
474}
475
476/*
477 * Metadata free area iterators. These perform aggregation of free areas
478 * based on the metadata blocks and return the offset @bit_off and size in
479 * bits of the free area @bits. pcpu_for_each_fit_region only returns when
480 * a fit is found for the allocation request.
481 */
482#define pcpu_for_each_md_free_region(chunk, bit_off, bits) \
483 for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits)); \
484 (bit_off) < pcpu_chunk_map_bits((chunk)); \
485 (bit_off) += (bits) + 1, \
486 pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
487
488#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) \
489 for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
490 &(bits)); \
491 (bit_off) < pcpu_chunk_map_bits((chunk)); \
492 (bit_off) += (bits), \
493 pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
494 &(bits)))
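
/*
 * A minimal usage sketch for the free region iterator (illustrative only;
 * real callers such as pcpu_chunk_refresh_hint() run under pcpu_lock):
 *
 *	int bit_off, bits, free_bytes = 0;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		free_bytes += bits * PCPU_MIN_ALLOC_SIZE;
 */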
495
496/**
497 * pcpu_mem_zalloc - allocate memory
498 * @size: bytes to allocate
499 * @gfp: allocation flags
500 *
501 * Allocate @size bytes. If @size is no larger than PAGE_SIZE,
502 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
503 * This is to facilitate passing through whitelisted flags. The
504 * returned memory is always zeroed.
505 *
506 * RETURNS:
507 * Pointer to the allocated area on success, NULL on failure.
508 */
509static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
510{
511 if (WARN_ON_ONCE(!slab_is_available()))
512 return NULL;
513
514 if (size <= PAGE_SIZE)
515 return kzalloc(size, gfp);
516 else
517 return __vmalloc(size, gfp | __GFP_ZERO);
518}
519
520/**
521 * pcpu_mem_free - free memory
522 * @ptr: memory to free
523 *
524 * Free @ptr. @ptr should have been allocated using pcpu_mem_zalloc().
525 */
526static void pcpu_mem_free(void *ptr)
527{
528 kvfree(ptr);
529}
530
531static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
532 bool move_front)
533{
534 if (chunk != pcpu_reserved_chunk) {
535 if (move_front)
536 list_move(&chunk->list, &pcpu_chunk_lists[slot]);
537 else
538 list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
539 }
540}
541
542static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
543{
544 __pcpu_chunk_move(chunk, slot, true);
545}
546
547/**
548 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
549 * @chunk: chunk of interest
550 * @oslot: the previous slot it was on
551 *
552 * This function is called after an allocation or free changed @chunk.
553 * New slot according to the changed state is determined and @chunk is
554 * moved to the slot. Note that the reserved chunk is never put on
555 * chunk slots.
556 *
557 * CONTEXT:
558 * pcpu_lock.
559 */
560static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
561{
562 int nslot = pcpu_chunk_slot(chunk);
563
564 /* leave isolated chunks in-place */
565 if (chunk->isolated)
566 return;
567
568 if (oslot != nslot)
569 __pcpu_chunk_move(chunk, nslot, oslot < nslot);
570}
571
572static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
573{
574 lockdep_assert_held(&pcpu_lock);
575
576 if (!chunk->isolated) {
577 chunk->isolated = true;
578 pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
579 }
580 list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
581}
582
583static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
584{
585 lockdep_assert_held(&pcpu_lock);
586
587 if (chunk->isolated) {
588 chunk->isolated = false;
589 pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
590 pcpu_chunk_relocate(chunk, -1);
591 }
592}
593
594/*
595 * pcpu_update_empty_pages - update empty page counters
596 * @chunk: chunk of interest
597 * @nr: nr of empty pages
598 *
599 * This is used to keep track of the empty pages based on the premise that
600 * an md_block covers a page. The hint update functions recognize if a block
601 * is made full or broken to calculate deltas for keeping track of free pages.
602 */
603static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
604{
605 chunk->nr_empty_pop_pages += nr;
606 if (chunk != pcpu_reserved_chunk && !chunk->isolated)
607 pcpu_nr_empty_pop_pages += nr;
608}
609
610/*
611 * pcpu_region_overlap - determines if two regions overlap
612 * @a: start of first region, inclusive
613 * @b: end of first region, exclusive
614 * @x: start of second region, inclusive
615 * @y: end of second region, exclusive
616 *
617 * This is used to determine if the hint region [a, b) overlaps with the
618 * allocated region [x, y).
619 */
620static inline bool pcpu_region_overlap(int a, int b, int x, int y)
621{
622 return (a < y) && (x < b);
623}
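
/*
 * E.g. hint [4, 10) and allocation [8, 12) overlap (4 < 12 && 8 < 10),
 * while [4, 8) and [8, 12) merely touch at the boundary and do not.
 */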
624
625/**
626 * pcpu_block_update - updates a block given a free area
627 * @block: block of interest
628 * @start: start offset in block
629 * @end: end offset in block
630 *
631 * Updates a block given a known free area. The region [start, end) is
632 * expected to be the entirety of the free area within a block. Chooses
633 * the best starting offset if the contig hints are equal.
634 */
635static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
636{
637 int contig = end - start;
638
639 block->first_free = min(block->first_free, start);
640 if (start == 0)
641 block->left_free = contig;
642
643 if (end == block->nr_bits)
644 block->right_free = contig;
645
646 if (contig > block->contig_hint) {
647 /* promote the old contig_hint to be the new scan_hint */
648 if (start > block->contig_hint_start) {
649 if (block->contig_hint > block->scan_hint) {
650 block->scan_hint_start =
651 block->contig_hint_start;
652 block->scan_hint = block->contig_hint;
653 } else if (start < block->scan_hint_start) {
654 /*
655 * The old contig_hint == scan_hint. But, the
656 * new contig is larger so hold the invariant
657 * scan_hint_start < contig_hint_start.
658 */
659 block->scan_hint = 0;
660 }
661 } else {
662 block->scan_hint = 0;
663 }
664 block->contig_hint_start = start;
665 block->contig_hint = contig;
666 } else if (contig == block->contig_hint) {
667 if (block->contig_hint_start &&
668 (!start ||
669 __ffs(start) > __ffs(block->contig_hint_start))) {
670 /* start has a better alignment so use it */
671 block->contig_hint_start = start;
672 if (start < block->scan_hint_start &&
673 block->contig_hint > block->scan_hint)
674 block->scan_hint = 0;
675 } else if (start > block->scan_hint_start ||
676 block->contig_hint > block->scan_hint) {
677 /*
678 * Knowing contig == contig_hint, update the scan_hint
679 * if it is farther than or larger than the current
680 * scan_hint.
681 */
682 block->scan_hint_start = start;
683 block->scan_hint = contig;
684 }
685 } else {
686 /*
687 * The region is smaller than the contig_hint. So only update
688 * the scan_hint if it is larger than or equal and farther than
689 * the current scan_hint.
690 */
691 if ((start < block->contig_hint_start &&
692 (contig > block->scan_hint ||
693 (contig == block->scan_hint &&
694 start > block->scan_hint_start)))) {
695 block->scan_hint_start = start;
696 block->scan_hint = contig;
697 }
698 }
699}
700
701/*
702 * pcpu_block_update_scan - update a block given a free area from a scan
703 * @chunk: chunk of interest
704 * @bit_off: chunk offset
705 * @bits: size of free area
706 *
707 * Finding the final allocation spot first goes through pcpu_find_block_fit()
708 * to find a block that can hold the allocation and then pcpu_alloc_area()
709 * where a scan is used. When allocations require specific alignments,
710 * we can inadvertently create holes which will not be seen in the alloc
711 * or free paths.
712 *
713 * This takes a given free area hole and updates a block as it may change the
714 * scan_hint. We need to scan backwards to ensure we don't miss free bits
715 * from alignment.
716 */
717static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
718 int bits)
719{
720 int s_off = pcpu_off_to_block_off(bit_off);
721 int e_off = s_off + bits;
722 int s_index, l_bit;
723 struct pcpu_block_md *block;
724
725 if (e_off > PCPU_BITMAP_BLOCK_BITS)
726 return;
727
728 s_index = pcpu_off_to_block_index(bit_off);
729 block = chunk->md_blocks + s_index;
730
731 /* scan backwards in case of alignment skipping free bits */
732 l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
733 s_off = (s_off == l_bit) ? 0 : l_bit + 1;
734
735 pcpu_block_update(block, s_off, e_off);
736}
737
738/**
739 * pcpu_chunk_refresh_hint - updates metadata about a chunk
740 * @chunk: chunk of interest
741 * @full_scan: if we should scan from the beginning
742 *
743 * Iterates over the metadata blocks to find the largest contig area.
744 * A full scan can be avoided on the allocation path as this is triggered
745 * if we broke the contig_hint. In doing so, the scan_hint will be before
746 * the contig_hint or after if the scan_hint == contig_hint. This cannot
747 * be prevented on freeing as we want to find the largest area possibly
748 * spanning blocks.
749 */
750static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
751{
752 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
753 int bit_off, bits;
754
755 /* promote scan_hint to contig_hint */
756 if (!full_scan && chunk_md->scan_hint) {
757 bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
758 chunk_md->contig_hint_start = chunk_md->scan_hint_start;
759 chunk_md->contig_hint = chunk_md->scan_hint;
760 chunk_md->scan_hint = 0;
761 } else {
762 bit_off = chunk_md->first_free;
763 chunk_md->contig_hint = 0;
764 }
765
766 bits = 0;
767 pcpu_for_each_md_free_region(chunk, bit_off, bits)
768 pcpu_block_update(chunk_md, bit_off, bit_off + bits);
769}
770
771/**
772 * pcpu_block_refresh_hint
773 * @chunk: chunk of interest
774 * @index: index of the metadata block
775 *
776 * Scans over the block beginning at first_free and updates the block
777 * metadata accordingly.
778 */
779static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
780{
781 struct pcpu_block_md *block = chunk->md_blocks + index;
782 unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
783 unsigned int rs, re, start; /* region start, region end */
784
785 /* promote scan_hint to contig_hint */
786 if (block->scan_hint) {
787 start = block->scan_hint_start + block->scan_hint;
788 block->contig_hint_start = block->scan_hint_start;
789 block->contig_hint = block->scan_hint;
790 block->scan_hint = 0;
791 } else {
792 start = block->first_free;
793 block->contig_hint = 0;
794 }
795
796 block->right_free = 0;
797
798 /* iterate over free areas and update the contig hints */
799 bitmap_for_each_clear_region(alloc_map, rs, re, start,
800 PCPU_BITMAP_BLOCK_BITS)
801 pcpu_block_update(block, rs, re);
802}
803
804/**
805 * pcpu_block_update_hint_alloc - update hint on allocation path
806 * @chunk: chunk of interest
807 * @bit_off: chunk offset
808 * @bits: size of request
809 *
810 * Updates metadata for the allocation path. The metadata only has to be
811 * refreshed by a full scan iff the chunk's contig hint is broken. Block level
812 * scans are required if the block's contig hint is broken.
813 */
814static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
815 int bits)
816{
817 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
818 int nr_empty_pages = 0;
819 struct pcpu_block_md *s_block, *e_block, *block;
820 int s_index, e_index; /* block indexes of the freed allocation */
821 int s_off, e_off; /* block offsets of the freed allocation */
822
823 /*
824 * Calculate per block offsets.
825 * The calculation uses an inclusive range, but the resulting offsets
826 * are [start, end). e_index always points to the last block in the
827 * range.
828 */
829 s_index = pcpu_off_to_block_index(bit_off);
830 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
831 s_off = pcpu_off_to_block_off(bit_off);
832 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
833
834 s_block = chunk->md_blocks + s_index;
835 e_block = chunk->md_blocks + e_index;
836
837 /*
838 * Update s_block.
839 * block->first_free must be updated if the allocation takes its place.
840 * If the allocation breaks the contig_hint, a scan is required to
841 * restore this hint.
842 */
843 if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
844 nr_empty_pages++;
845
846 if (s_off == s_block->first_free)
847 s_block->first_free = find_next_zero_bit(
848 pcpu_index_alloc_map(chunk, s_index),
849 PCPU_BITMAP_BLOCK_BITS,
850 s_off + bits);
851
852 if (pcpu_region_overlap(s_block->scan_hint_start,
853 s_block->scan_hint_start + s_block->scan_hint,
854 s_off,
855 s_off + bits))
856 s_block->scan_hint = 0;
857
858 if (pcpu_region_overlap(s_block->contig_hint_start,
859 s_block->contig_hint_start +
860 s_block->contig_hint,
861 s_off,
862 s_off + bits)) {
863 /* block contig hint is broken - scan to fix it */
864 if (!s_off)
865 s_block->left_free = 0;
866 pcpu_block_refresh_hint(chunk, s_index);
867 } else {
868 /* update left and right contig manually */
869 s_block->left_free = min(s_block->left_free, s_off);
870 if (s_index == e_index)
871 s_block->right_free = min_t(int, s_block->right_free,
872 PCPU_BITMAP_BLOCK_BITS - e_off);
873 else
874 s_block->right_free = 0;
875 }
876
877 /*
878 * Update e_block.
879 */
880 if (s_index != e_index) {
881 if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
882 nr_empty_pages++;
883
884 /*
885 * When the allocation is across blocks, the end is along
886 * the left part of the e_block.
887 */
888 e_block->first_free = find_next_zero_bit(
889 pcpu_index_alloc_map(chunk, e_index),
890 PCPU_BITMAP_BLOCK_BITS, e_off);
891
892 if (e_off == PCPU_BITMAP_BLOCK_BITS) {
893 /* reset the block */
894 e_block++;
895 } else {
896 if (e_off > e_block->scan_hint_start)
897 e_block->scan_hint = 0;
898
899 e_block->left_free = 0;
900 if (e_off > e_block->contig_hint_start) {
901 /* contig hint is broken - scan to fix it */
902 pcpu_block_refresh_hint(chunk, e_index);
903 } else {
904 e_block->right_free =
905 min_t(int, e_block->right_free,
906 PCPU_BITMAP_BLOCK_BITS - e_off);
907 }
908 }
909
910 /* update in-between md_blocks */
911 nr_empty_pages += (e_index - s_index - 1);
912 for (block = s_block + 1; block < e_block; block++) {
913 block->scan_hint = 0;
914 block->contig_hint = 0;
915 block->left_free = 0;
916 block->right_free = 0;
917 }
918 }
919
920 if (nr_empty_pages)
921 pcpu_update_empty_pages(chunk, -nr_empty_pages);
922
923 if (pcpu_region_overlap(chunk_md->scan_hint_start,
924 chunk_md->scan_hint_start +
925 chunk_md->scan_hint,
926 bit_off,
927 bit_off + bits))
928 chunk_md->scan_hint = 0;
929
930 /*
931 * The only time a full chunk scan is required is if the chunk
932 * contig hint is broken. Otherwise, it means a smaller space
933 * was used and therefore the chunk contig hint is still correct.
934 */
935 if (pcpu_region_overlap(chunk_md->contig_hint_start,
936 chunk_md->contig_hint_start +
937 chunk_md->contig_hint,
938 bit_off,
939 bit_off + bits))
940 pcpu_chunk_refresh_hint(chunk, false);
941}
942
943/**
944 * pcpu_block_update_hint_free - updates the block hints on the free path
945 * @chunk: chunk of interest
946 * @bit_off: chunk offset
947 * @bits: size of request
948 *
949 * Updates metadata for the free path. This avoids a blind block
950 * refresh by making use of the block contig hints. If this fails, it scans
951 * forward and backward to determine the extent of the free area. This is
952 * capped at the boundary of blocks.
953 *
954 * A chunk update is triggered if a page becomes free, a block becomes free,
955 * or the free spans across blocks. This tradeoff is to minimize iterating
956 * over the block metadata to update chunk_md->contig_hint.
957 * chunk_md->contig_hint may be off by up to a page, but it will never be more
958 * than the available space. If the contig hint is contained in one block, it
959 * will be accurate.
960 */
961static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
962 int bits)
963{
964 int nr_empty_pages = 0;
965 struct pcpu_block_md *s_block, *e_block, *block;
966 int s_index, e_index; /* block indexes of the freed allocation */
967 int s_off, e_off; /* block offsets of the freed allocation */
968 int start, end; /* start and end of the whole free area */
969
970 /*
971 * Calculate per block offsets.
972 * The calculation uses an inclusive range, but the resulting offsets
973 * are [start, end). e_index always points to the last block in the
974 * range.
975 */
976 s_index = pcpu_off_to_block_index(bit_off);
977 e_index = pcpu_off_to_block_index(bit_off + bits - 1);
978 s_off = pcpu_off_to_block_off(bit_off);
979 e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
980
981 s_block = chunk->md_blocks + s_index;
982 e_block = chunk->md_blocks + e_index;
983
984 /*
985 * Check if the freed area aligns with the block->contig_hint.
986 * If it does, then the scan to find the beginning/end of the
987 * larger free area can be avoided.
988 *
989 * start and end refer to beginning and end of the free area
990 * within their respective blocks. This is not necessarily
991 * the entire free area as it may span blocks past the beginning
992 * or end of the block.
993 */
994 start = s_off;
995 if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
996 start = s_block->contig_hint_start;
997 } else {
998 /*
999 * Scan backwards to find the extent of the free area.
1000 * find_last_bit returns the starting bit, so if the start bit
1001 * is returned, that means there was no last bit and the
1002 * remainder of the chunk is free.
1003 */
1004 int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1005 start);
1006 start = (start == l_bit) ? 0 : l_bit + 1;
1007 }
1008
1009 end = e_off;
1010 if (e_off == e_block->contig_hint_start)
1011 end = e_block->contig_hint_start + e_block->contig_hint;
1012 else
1013 end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1014 PCPU_BITMAP_BLOCK_BITS, end);
1015
1016 /* update s_block */
1017 e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
1018 if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1019 nr_empty_pages++;
1020 pcpu_block_update(s_block, start, e_off);
1021
1022	/* the free spans across blocks */
1023 if (s_index != e_index) {
1024 /* update e_block */
1025 if (end == PCPU_BITMAP_BLOCK_BITS)
1026 nr_empty_pages++;
1027 pcpu_block_update(e_block, 0, end);
1028
1029 /* reset md_blocks in the middle */
1030 nr_empty_pages += (e_index - s_index - 1);
1031 for (block = s_block + 1; block < e_block; block++) {
1032 block->first_free = 0;
1033 block->scan_hint = 0;
1034 block->contig_hint_start = 0;
1035 block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1036 block->left_free = PCPU_BITMAP_BLOCK_BITS;
1037 block->right_free = PCPU_BITMAP_BLOCK_BITS;
1038 }
1039 }
1040
1041 if (nr_empty_pages)
1042 pcpu_update_empty_pages(chunk, nr_empty_pages);
1043
1044 /*
1045 * Refresh chunk metadata when the free makes a block free or spans
1046 * across blocks. The contig_hint may be off by up to a page, but if
1047 * the contig_hint is contained in a block, it will be accurate with
1048 * the else condition below.
1049 */
1050 if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1051 pcpu_chunk_refresh_hint(chunk, true);
1052 else
1053 pcpu_block_update(&chunk->chunk_md,
1054 pcpu_block_off_to_off(s_index, start),
1055 end);
1056}
1057
1058/**
1059 * pcpu_is_populated - determines if the region is populated
1060 * @chunk: chunk of interest
1061 * @bit_off: chunk offset
1062 * @bits: size of area
1063 * @next_off: return value for the next offset to start searching
1064 *
1065 * For atomic allocations, check if the backing pages are populated.
1066 *
1067 * RETURNS:
1068 * true if the backing pages are populated.
1069 * @next_off is set to skip over unpopulated blocks in pcpu_find_block_fit().
1070 */
1071static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1072 int *next_off)
1073{
1074 unsigned int page_start, page_end, rs, re;
1075
1076 page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1077 page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
1078
1079 rs = page_start;
1080 bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
1081 if (rs >= page_end)
1082 return true;
1083
1084 *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
1085 return false;
1086}
1087
1088/**
1089 * pcpu_find_block_fit - finds the block index to start searching
1090 * @chunk: chunk of interest
1091 * @alloc_bits: size of request in allocation units
1092 * @align: alignment of area (max PAGE_SIZE bytes)
1093 * @pop_only: use populated regions only
1094 *
1095 * Given a chunk and an allocation spec, find the offset to begin searching
1096 * for a free region. This iterates over the bitmap metadata blocks to
1097 * find an offset that will be guaranteed to fit the requirements. It is
1098 * not quite first fit as if the allocation does not fit in the contig hint
1099 * of a block or chunk, it is skipped. This errs on the side of caution
1100 * to prevent excess iteration. Poor alignment can cause the allocator to
1101 * skip over blocks and chunks that have valid free areas.
1102 *
1103 * RETURNS:
1104 * The offset in the bitmap to begin searching.
1105 * -1 if no offset is found.
1106 */
1107static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1108 size_t align, bool pop_only)
1109{
1110 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1111 int bit_off, bits, next_off;
1112
1113 /*
1114 * This is an optimization to prevent scanning by assuming if the
1115 * allocation cannot fit in the global hint, there is memory pressure
1116 * and creating a new chunk would happen soon.
1117 */
1118 if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
1119 return -1;
1120
1121 bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1122 bits = 0;
1123 pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1124 if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1125 &next_off))
1126 break;
1127
1128 bit_off = next_off;
1129 bits = 0;
1130 }
1131
1132 if (bit_off == pcpu_chunk_map_bits(chunk))
1133 return -1;
1134
1135 return bit_off;
1136}
1137
1138/*
1139 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1140 * @map: the address to base the search on
1141 * @size: the bitmap size in bits
1142 * @start: the bitnumber to start searching at
1143 * @nr: the number of zeroed bits we're looking for
1144 * @align_mask: alignment mask for zero area
1145 * @largest_off: offset of the largest area skipped
1146 * @largest_bits: size of the largest area skipped
1147 *
1148 * The @align_mask should be one less than a power of 2.
1149 *
1150 * This is a modified version of bitmap_find_next_zero_area_off() to remember
1151 * the largest area that was skipped. This is imperfect, but in general is
1152 * good enough. The largest remembered region is the largest failed region
1153 * seen. This does not include anything we possibly skipped due to alignment.
1154 * pcpu_block_update_scan() does scan backwards to try and recover what was
1155 * lost to alignment. While this can cause scanning to miss earlier possible
1156 * free areas, smaller allocations will eventually fill those holes.
1157 */
1158static unsigned long pcpu_find_zero_area(unsigned long *map,
1159 unsigned long size,
1160 unsigned long start,
1161 unsigned long nr,
1162 unsigned long align_mask,
1163 unsigned long *largest_off,
1164 unsigned long *largest_bits)
1165{
1166 unsigned long index, end, i, area_off, area_bits;
1167again:
1168 index = find_next_zero_bit(map, size, start);
1169
1170 /* Align allocation */
1171 index = __ALIGN_MASK(index, align_mask);
1172 area_off = index;
1173
1174 end = index + nr;
1175 if (end > size)
1176 return end;
1177 i = find_next_bit(map, end, index);
1178 if (i < end) {
1179 area_bits = i - area_off;
1180 /* remember largest unused area with best alignment */
1181 if (area_bits > *largest_bits ||
1182 (area_bits == *largest_bits && *largest_off &&
1183 (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1184 *largest_off = area_off;
1185 *largest_bits = area_bits;
1186 }
1187
1188 start = i + 1;
1189 goto again;
1190 }
1191 return index;
1192}
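/*
 * Example of the alignment handling above (illustrative): with
 * align_mask = 3 (an alignment of 4 allocation units), a zero run found
 * at bit 6 is bumped by __ALIGN_MASK() to bit 8 before its length is
 * checked.  If the run starting at bit 8 is too short, [8, next set bit)
 * is remembered in *largest_off / *largest_bits when it beats the
 * previous best, and the scan restarts just past the conflicting set bit.
 */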
1193
1194/**
1195 * pcpu_alloc_area - allocates an area from a pcpu_chunk
1196 * @chunk: chunk of interest
1197 * @alloc_bits: size of request in allocation units
1198 * @align: alignment of area (max PAGE_SIZE)
1199 * @start: bit_off to start searching
1200 *
1201 * This function takes in a @start offset to begin searching to fit an
1202 * allocation of @alloc_bits with alignment @align. It needs to scan
1203 * the allocation map because if it fits within the block's contig hint,
1204 * @start will be block->first_free. This is an attempt to fill the
1205 * allocation prior to breaking the contig hint. The allocation and
1206 * boundary maps are updated accordingly if it confirms a valid
1207 * free area.
1208 *
1209 * RETURNS:
1210 * Allocated addr offset in @chunk on success.
1211 * -1 if no matching area is found.
1212 */
1213static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1214 size_t align, int start)
1215{
1216 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1217 size_t align_mask = (align) ? (align - 1) : 0;
1218 unsigned long area_off = 0, area_bits = 0;
1219 int bit_off, end, oslot;
1220
1221 lockdep_assert_held(&pcpu_lock);
1222
1223 oslot = pcpu_chunk_slot(chunk);
1224
1225 /*
1226 * Search to find a fit.
1227 */
1228 end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1229 pcpu_chunk_map_bits(chunk));
1230 bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1231 align_mask, &area_off, &area_bits);
1232 if (bit_off >= end)
1233 return -1;
1234
1235 if (area_bits)
1236 pcpu_block_update_scan(chunk, area_off, area_bits);
1237
1238 /* update alloc map */
1239 bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1240
1241 /* update boundary map */
1242 set_bit(bit_off, chunk->bound_map);
1243 bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1244 set_bit(bit_off + alloc_bits, chunk->bound_map);
1245
1246 chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1247
1248 /* update first free bit */
1249 if (bit_off == chunk_md->first_free)
1250 chunk_md->first_free = find_next_zero_bit(
1251 chunk->alloc_map,
1252 pcpu_chunk_map_bits(chunk),
1253 bit_off + alloc_bits);
1254
1255 pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1256
1257 pcpu_chunk_relocate(chunk, oslot);
1258
1259 return bit_off * PCPU_MIN_ALLOC_SIZE;
1260}
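/*
 * Bookkeeping example (illustrative): allocating alloc_bits = 3 at
 * bit_off = 5 sets alloc_map bits 5-7 and bound_map bits 5 and 8.  With
 * PCPU_MIN_ALLOC_SIZE == 4 this is a 12-byte area at offset 20.
 * pcpu_free_area() later recovers the size by looking for the next
 * bound_map bit after 5, i.e. bit 8, giving back the same 3 bits.
 */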
1261
1262/**
1263 * pcpu_free_area - frees the corresponding offset
1264 * @chunk: chunk of interest
1265 * @off: addr offset into chunk
1266 *
1267 * This function determines the size of an allocation to free using
1268 * the boundary bitmap and clears the allocation map.
1269 *
1270 * RETURNS:
1271 * Number of freed bytes.
1272 */
1273static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1274{
1275 struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1276 int bit_off, bits, end, oslot, freed;
1277
1278 lockdep_assert_held(&pcpu_lock);
1279 pcpu_stats_area_dealloc(chunk);
1280
1281 oslot = pcpu_chunk_slot(chunk);
1282
1283 bit_off = off / PCPU_MIN_ALLOC_SIZE;
1284
1285 /* find end index */
1286 end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1287 bit_off + 1);
1288 bits = end - bit_off;
1289 bitmap_clear(chunk->alloc_map, bit_off, bits);
1290
1291 freed = bits * PCPU_MIN_ALLOC_SIZE;
1292
1293 /* update metadata */
1294 chunk->free_bytes += freed;
1295
1296 /* update first free bit */
1297 chunk_md->first_free = min(chunk_md->first_free, bit_off);
1298
1299 pcpu_block_update_hint_free(chunk, bit_off, bits);
1300
1301 pcpu_chunk_relocate(chunk, oslot);
1302
1303 return freed;
1304}
1305
1306static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1307{
1308 block->scan_hint = 0;
1309 block->contig_hint = nr_bits;
1310 block->left_free = nr_bits;
1311 block->right_free = nr_bits;
1312 block->first_free = 0;
1313 block->nr_bits = nr_bits;
1314}
1315
1316static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1317{
1318 struct pcpu_block_md *md_block;
1319
1320 /* init the chunk's block */
1321 pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1322
1323 for (md_block = chunk->md_blocks;
1324 md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1325 md_block++)
1326 pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1327}
1328
1329/**
1330 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1331 * @tmp_addr: the start of the region served
1332 * @map_size: size of the region served
1333 *
1334 * This is responsible for creating the chunks that serve the first chunk. The
1335 * base_addr is @tmp_addr rounded down to a page boundary while the region end
1336 * is rounded up to the LCM of PAGE_SIZE and PCPU_BITMAP_BLOCK_SIZE. Start and
1337 * end offsets are tracked so partial blocks never reach the bitmap allocator.
1338 *
1339 * RETURNS:
1340 * Chunk serving the region at @tmp_addr of @map_size.
1341 */
1342static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1343 int map_size)
1344{
1345 struct pcpu_chunk *chunk;
1346 unsigned long aligned_addr, lcm_align;
1347 int start_offset, offset_bits, region_size, region_bits;
1348 size_t alloc_size;
1349
1350 /* region calculations */
1351 aligned_addr = tmp_addr & PAGE_MASK;
1352
1353 start_offset = tmp_addr - aligned_addr;
1354
1355 /*
1356 * Align the end of the region with the LCM of PAGE_SIZE and
1357 * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
1358 * the other.
1359 */
1360 lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1361 region_size = ALIGN(start_offset + map_size, lcm_align);
1362
1363 /* allocate chunk */
1364 alloc_size = struct_size(chunk, populated,
1365 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1366 chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1367 if (!chunk)
1368 panic("%s: Failed to allocate %zu bytes\n", __func__,
1369 alloc_size);
1370
1371 INIT_LIST_HEAD(&chunk->list);
1372
1373 chunk->base_addr = (void *)aligned_addr;
1374 chunk->start_offset = start_offset;
1375 chunk->end_offset = region_size - chunk->start_offset - map_size;
1376
1377 chunk->nr_pages = region_size >> PAGE_SHIFT;
1378 region_bits = pcpu_chunk_map_bits(chunk);
1379
1380 alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1381 chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1382 if (!chunk->alloc_map)
1383 panic("%s: Failed to allocate %zu bytes\n", __func__,
1384 alloc_size);
1385
1386 alloc_size =
1387 BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1388 chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1389 if (!chunk->bound_map)
1390 panic("%s: Failed to allocate %zu bytes\n", __func__,
1391 alloc_size);
1392
1393 alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1394 chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1395 if (!chunk->md_blocks)
1396 panic("%s: Failed to allocate %zu bytes\n", __func__,
1397 alloc_size);
1398
1399#ifdef CONFIG_MEMCG_KMEM
1400 /* first chunk is free to use */
1401 chunk->obj_cgroups = NULL;
1402#endif
1403 pcpu_init_md_blocks(chunk);
1404
1405 /* manage populated page bitmap */
1406 chunk->immutable = true;
1407 bitmap_fill(chunk->populated, chunk->nr_pages);
1408 chunk->nr_populated = chunk->nr_pages;
1409 chunk->nr_empty_pop_pages = chunk->nr_pages;
1410
1411 chunk->free_bytes = map_size;
1412
1413 if (chunk->start_offset) {
1414 /* hide the beginning of the bitmap */
1415 offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1416 bitmap_set(chunk->alloc_map, 0, offset_bits);
1417 set_bit(0, chunk->bound_map);
1418 set_bit(offset_bits, chunk->bound_map);
1419
1420 chunk->chunk_md.first_free = offset_bits;
1421
1422 pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1423 }
1424
1425 if (chunk->end_offset) {
1426 /* hide the end of the bitmap */
1427 offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1428 bitmap_set(chunk->alloc_map,
1429 pcpu_chunk_map_bits(chunk) - offset_bits,
1430 offset_bits);
1431 set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1432 chunk->bound_map);
1433 set_bit(region_bits, chunk->bound_map);
1434
1435 pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1436 - offset_bits, offset_bits);
1437 }
1438
1439 return chunk;
1440}
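/*
 * Region handled by the function above, roughly (not to scale):
 *
 *	aligned_addr     tmp_addr                             region end
 *	|<-start_offset->|<---------- map_size --------->|<-end_offset->|
 *	    (hidden)           served by this chunk           (hidden)
 *
 * The hidden head and tail are pre-marked as allocated in alloc_map and
 * bound_map so the bitmap allocator only ever hands out areas inside the
 * served region.
 */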
1441
1442static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1443{
1444 struct pcpu_chunk *chunk;
1445 int region_bits;
1446
1447 chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1448 if (!chunk)
1449 return NULL;
1450
1451 INIT_LIST_HEAD(&chunk->list);
1452 chunk->nr_pages = pcpu_unit_pages;
1453 region_bits = pcpu_chunk_map_bits(chunk);
1454
1455 chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1456 sizeof(chunk->alloc_map[0]), gfp);
1457 if (!chunk->alloc_map)
1458 goto alloc_map_fail;
1459
1460 chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1461 sizeof(chunk->bound_map[0]), gfp);
1462 if (!chunk->bound_map)
1463 goto bound_map_fail;
1464
1465 chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1466 sizeof(chunk->md_blocks[0]), gfp);
1467 if (!chunk->md_blocks)
1468 goto md_blocks_fail;
1469
1470#ifdef CONFIG_MEMCG_KMEM
1471 if (!mem_cgroup_kmem_disabled()) {
1472 chunk->obj_cgroups =
1473 pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1474 sizeof(struct obj_cgroup *), gfp);
1475 if (!chunk->obj_cgroups)
1476 goto objcg_fail;
1477 }
1478#endif
1479
1480 pcpu_init_md_blocks(chunk);
1481
1482 /* init metadata */
1483 chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1484
1485 return chunk;
1486
1487#ifdef CONFIG_MEMCG_KMEM
1488objcg_fail:
1489 pcpu_mem_free(chunk->md_blocks);
1490#endif
1491md_blocks_fail:
1492 pcpu_mem_free(chunk->bound_map);
1493bound_map_fail:
1494 pcpu_mem_free(chunk->alloc_map);
1495alloc_map_fail:
1496 pcpu_mem_free(chunk);
1497
1498 return NULL;
1499}
1500
1501static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1502{
1503 if (!chunk)
1504 return;
1505#ifdef CONFIG_MEMCG_KMEM
1506 pcpu_mem_free(chunk->obj_cgroups);
1507#endif
1508 pcpu_mem_free(chunk->md_blocks);
1509 pcpu_mem_free(chunk->bound_map);
1510 pcpu_mem_free(chunk->alloc_map);
1511 pcpu_mem_free(chunk);
1512}
1513
1514/**
1515 * pcpu_chunk_populated - post-population bookkeeping
1516 * @chunk: pcpu_chunk which got populated
1517 * @page_start: the start page
1518 * @page_end: the end page
1519 *
1520 * Pages in [@page_start,@page_end) have been populated to @chunk. Update
1521 * the bookkeeping information accordingly. Must be called after each
1522 * successful population.
1526 */
1527static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1528 int page_end)
1529{
1530 int nr = page_end - page_start;
1531
1532 lockdep_assert_held(&pcpu_lock);
1533
1534 bitmap_set(chunk->populated, page_start, nr);
1535 chunk->nr_populated += nr;
1536 pcpu_nr_populated += nr;
1537
1538 pcpu_update_empty_pages(chunk, nr);
1539}
1540
1541/**
1542 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1543 * @chunk: pcpu_chunk which got depopulated
1544 * @page_start: the start page
1545 * @page_end: the end page
1546 *
1547 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1548 * Update the bookkeeping information accordingly. Must be called after
1549 * each successful depopulation.
1550 */
1551static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1552 int page_start, int page_end)
1553{
1554 int nr = page_end - page_start;
1555
1556 lockdep_assert_held(&pcpu_lock);
1557
1558 bitmap_clear(chunk->populated, page_start, nr);
1559 chunk->nr_populated -= nr;
1560 pcpu_nr_populated -= nr;
1561
1562 pcpu_update_empty_pages(chunk, -nr);
1563}
1564
1565/*
1566 * Chunk management implementation.
1567 *
1568 * To allow different implementations, chunk alloc/free and
1569 * [de]population are implemented in a separate file which is pulled
1570 * into this file and compiled together. The following functions
1571 * should be implemented.
1572 *
1573 * pcpu_populate_chunk - populate the specified range of a chunk
1574 * pcpu_depopulate_chunk - depopulate the specified range of a chunk
1575 * pcpu_post_unmap_tlb_flush - flush tlb for the specified range of a chunk
1576 * pcpu_create_chunk - create a new chunk
1577 * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
1578 * pcpu_addr_to_page - translate address to the backing struct page
1579 * pcpu_verify_alloc_info - check alloc_info is acceptable during init
1580 */
1581static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1582 int page_start, int page_end, gfp_t gfp);
1583static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1584 int page_start, int page_end);
1585static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1586 int page_start, int page_end);
1587static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1588static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1589static struct page *pcpu_addr_to_page(void *addr);
1590static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1591
1592#ifdef CONFIG_NEED_PER_CPU_KM
1593#include "percpu-km.c"
1594#else
1595#include "percpu-vm.c"
1596#endif
1597
1598/**
1599 * pcpu_chunk_addr_search - determine chunk containing specified address
1600 * @addr: address for which the chunk needs to be determined.
1601 *
1602 * This is an internal function that handles all but static allocations.
1603 * Static percpu address values should never be passed into the allocator.
1604 *
1605 * RETURNS:
1606 * The address of the found chunk.
1607 */
1608static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1609{
1610 /* is it in the dynamic region (first chunk)? */
1611 if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1612 return pcpu_first_chunk;
1613
1614 /* is it in the reserved region? */
1615 if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1616 return pcpu_reserved_chunk;
1617
1618 /*
1619 * The address is relative to unit0 which might be unused and
1620 * thus unmapped. Offset the address to the unit space of the
1621 * current processor before looking it up in the vmalloc
1622 * space. Note that any possible cpu id can be used here, so
1623 * there's no need to worry about preemption or cpu hotplug.
1624 */
1625 addr += pcpu_unit_offsets[raw_smp_processor_id()];
1626 return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1627}
1628
1629#ifdef CONFIG_MEMCG_KMEM
1630static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1631 struct obj_cgroup **objcgp)
1632{
1633 struct obj_cgroup *objcg;
1634
1635 if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
1636 return true;
1637
1638 objcg = get_obj_cgroup_from_current();
1639 if (!objcg)
1640 return true;
1641
1642 if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
1643 obj_cgroup_put(objcg);
1644 return false;
1645 }
1646
1647 *objcgp = objcg;
1648 return true;
1649}
1650
1651static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1652 struct pcpu_chunk *chunk, int off,
1653 size_t size)
1654{
1655 if (!objcg)
1656 return;
1657
1658 if (likely(chunk && chunk->obj_cgroups)) {
1659 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1660
1661 rcu_read_lock();
1662 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1663 size * num_possible_cpus());
1664 rcu_read_unlock();
1665 } else {
1666 obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1667 obj_cgroup_put(objcg);
1668 }
1669}
1670
1671static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1672{
1673 struct obj_cgroup *objcg;
1674
1675 if (unlikely(!chunk->obj_cgroups))
1676 return;
1677
1678 objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1679 if (!objcg)
1680 return;
1681 chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1682
1683 obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1684
1685 rcu_read_lock();
1686 mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1687 -(size * num_possible_cpus()));
1688 rcu_read_unlock();
1689
1690 obj_cgroup_put(objcg);
1691}
1692
1693#else /* CONFIG_MEMCG_KMEM */
1694static bool
1695pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1696{
1697 return true;
1698}
1699
1700static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1701 struct pcpu_chunk *chunk, int off,
1702 size_t size)
1703{
1704}
1705
1706static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1707{
1708}
1709#endif /* CONFIG_MEMCG_KMEM */
1710
1711/**
1712 * pcpu_alloc - the percpu allocator
1713 * @size: size of area to allocate in bytes
1714 * @align: alignment of area (max PAGE_SIZE)
1715 * @reserved: allocate from the reserved chunk if available
1716 * @gfp: allocation flags
1717 *
1718 * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
1719 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1720 * then no warning will be triggered on invalid or failed allocation
1721 * requests.
1722 *
1723 * RETURNS:
1724 * Percpu pointer to the allocated area on success, NULL on failure.
1725 */
1726static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1727 gfp_t gfp)
1728{
1729 gfp_t pcpu_gfp;
1730 bool is_atomic;
1731 bool do_warn;
1732 struct obj_cgroup *objcg = NULL;
1733 static int warn_limit = 10;
1734 struct pcpu_chunk *chunk, *next;
1735 const char *err;
1736 int slot, off, cpu, ret;
1737 unsigned long flags;
1738 void __percpu *ptr;
1739 size_t bits, bit_align;
1740
1741 gfp = current_gfp_context(gfp);
1742 /* whitelisted flags that can be passed to the backing allocators */
1743 pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1744 is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1745 do_warn = !(gfp & __GFP_NOWARN);
1746
1747 /*
1748 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1749 * therefore alignment must be a minimum of that many bytes.
1750 * An allocation may have up to PCPU_MIN_ALLOC_SIZE - 1 bytes of internal
1751 * fragmentation from rounding its size up.
1752 */
1753 if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1754 align = PCPU_MIN_ALLOC_SIZE;
1755
1756 size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1757 bits = size >> PCPU_MIN_ALLOC_SHIFT;
1758 bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
1759
1760 if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1761 !is_power_of_2(align))) {
1762 WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1763 size, align);
1764 return NULL;
1765 }
1766
1767 if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
1768 return NULL;
1769
1770 if (!is_atomic) {
1771 /*
1772 * pcpu_balance_workfn() allocates memory under this mutex,
1773 * and it may wait for memory reclaim. Allow current task
1774 * to become OOM victim, in case of memory pressure.
1775 */
1776 if (gfp & __GFP_NOFAIL) {
1777 mutex_lock(&pcpu_alloc_mutex);
1778 } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1779 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1780 return NULL;
1781 }
1782 }
1783
1784 spin_lock_irqsave(&pcpu_lock, flags);
1785
1786 /* serve reserved allocations from the reserved chunk if available */
1787 if (reserved && pcpu_reserved_chunk) {
1788 chunk = pcpu_reserved_chunk;
1789
1790 off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1791 if (off < 0) {
1792 err = "alloc from reserved chunk failed";
1793 goto fail_unlock;
1794 }
1795
1796 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1797 if (off >= 0)
1798 goto area_found;
1799
1800 err = "alloc from reserved chunk failed";
1801 goto fail_unlock;
1802 }
1803
1804restart:
1805 /* search through normal chunks */
1806 for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
1807 list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1808 list) {
1809 off = pcpu_find_block_fit(chunk, bits, bit_align,
1810 is_atomic);
1811 if (off < 0) {
1812 if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1813 pcpu_chunk_move(chunk, 0);
1814 continue;
1815 }
1816
1817 off = pcpu_alloc_area(chunk, bits, bit_align, off);
1818 if (off >= 0) {
1819 pcpu_reintegrate_chunk(chunk);
1820 goto area_found;
1821 }
1822 }
1823 }
1824
1825 spin_unlock_irqrestore(&pcpu_lock, flags);
1826
1827 /*
1828 * No space left. Create a new chunk. We don't want multiple
1829 * tasks to create chunks simultaneously. Serialize and create iff
1830 * there's still no empty chunk after grabbing the mutex.
1831 */
1832 if (is_atomic) {
1833 err = "atomic alloc failed, no space left";
1834 goto fail;
1835 }
1836
1837 if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1838 chunk = pcpu_create_chunk(pcpu_gfp);
1839 if (!chunk) {
1840 err = "failed to allocate new chunk";
1841 goto fail;
1842 }
1843
1844 spin_lock_irqsave(&pcpu_lock, flags);
1845 pcpu_chunk_relocate(chunk, -1);
1846 } else {
1847 spin_lock_irqsave(&pcpu_lock, flags);
1848 }
1849
1850 goto restart;
1851
1852area_found:
1853 pcpu_stats_area_alloc(chunk, size);
1854 spin_unlock_irqrestore(&pcpu_lock, flags);
1855
1856 /* populate if not all pages are already there */
1857 if (!is_atomic) {
1858 unsigned int page_start, page_end, rs, re;
1859
1860 page_start = PFN_DOWN(off);
1861 page_end = PFN_UP(off + size);
1862
1863 bitmap_for_each_clear_region(chunk->populated, rs, re,
1864 page_start, page_end) {
1865 WARN_ON(chunk->immutable);
1866
1867 ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1868
1869 spin_lock_irqsave(&pcpu_lock, flags);
1870 if (ret) {
1871 pcpu_free_area(chunk, off);
1872 err = "failed to populate";
1873 goto fail_unlock;
1874 }
1875 pcpu_chunk_populated(chunk, rs, re);
1876 spin_unlock_irqrestore(&pcpu_lock, flags);
1877 }
1878
1879 mutex_unlock(&pcpu_alloc_mutex);
1880 }
1881
1882 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1883 pcpu_schedule_balance_work();
1884
1885 /* clear the areas and return address relative to base address */
1886 for_each_possible_cpu(cpu)
1887 memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1888
1889 ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1890 kmemleak_alloc_percpu(ptr, size, gfp);
1891
1892 trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1893 chunk->base_addr, off, ptr);
1894
1895 pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1896
1897 return ptr;
1898
1899fail_unlock:
1900 spin_unlock_irqrestore(&pcpu_lock, flags);
1901fail:
1902 trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1903
1904 if (!is_atomic && do_warn && warn_limit) {
1905 pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1906 size, align, is_atomic, err);
1907 dump_stack();
1908 if (!--warn_limit)
1909 pr_info("limit reached, disable warning\n");
1910 }
1911 if (is_atomic) {
1912 /* see the flag handling in pcpu_balance_workfn() */
1913 pcpu_atomic_alloc_failed = true;
1914 pcpu_schedule_balance_work();
1915 } else {
1916 mutex_unlock(&pcpu_alloc_mutex);
1917 }
1918
1919 pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1920
1921 return NULL;
1922}
1923
1924/**
1925 * __alloc_percpu_gfp - allocate dynamic percpu area
1926 * @size: size of area to allocate in bytes
1927 * @align: alignment of area (max PAGE_SIZE)
1928 * @gfp: allocation flags
1929 *
1930 * Allocate zero-filled percpu area of @size bytes aligned at @align. If
1931 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1932 * be called from any context but is a lot more likely to fail. If @gfp
1933 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1934 * allocation requests.
1935 *
1936 * RETURNS:
1937 * Percpu pointer to the allocated area on success, NULL on failure.
1938 */
1939void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1940{
1941 return pcpu_alloc(size, align, false, gfp);
1942}
1943EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
1944
1945/**
1946 * __alloc_percpu - allocate dynamic percpu area
1947 * @size: size of area to allocate in bytes
1948 * @align: alignment of area (max PAGE_SIZE)
1949 *
1950 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1951 */
1952void __percpu *__alloc_percpu(size_t size, size_t align)
1953{
1954 return pcpu_alloc(size, align, false, GFP_KERNEL);
1955}
1956EXPORT_SYMBOL_GPL(__alloc_percpu);
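/*
 * Typical usage of the entry points above (illustrative sketch, not part
 * of the allocator):
 *
 *	struct foo_stats { u64 packets; u64 bytes; };
 *	struct foo_stats __percpu *stats;
 *
 *	stats = alloc_percpu(struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	this_cpu_inc(stats->packets);
 *	...
 *	free_percpu(stats);
 *
 * alloc_percpu() is the convenience wrapper in include/linux/percpu.h
 * that passes the type's size and alignment to __alloc_percpu(); callers
 * in atomic context use __alloc_percpu_gfp() with a non-blocking gfp mask
 * (e.g. GFP_NOWAIT).
 */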
1957
1958/**
1959 * __alloc_reserved_percpu - allocate reserved percpu area
1960 * @size: size of area to allocate in bytes
1961 * @align: alignment of area (max PAGE_SIZE)
1962 *
1963 * Allocate zero-filled percpu area of @size bytes aligned at @align
1964 * from reserved percpu area if arch has set it up; otherwise,
1965 * allocation is served from the same dynamic area. Might sleep.
1966 * Might trigger writeouts.
1967 *
1968 * CONTEXT:
1969 * Does GFP_KERNEL allocation.
1970 *
1971 * RETURNS:
1972 * Percpu pointer to the allocated area on success, NULL on failure.
1973 */
1974void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1975{
1976 return pcpu_alloc(size, align, true, GFP_KERNEL);
1977}
1978
1979/**
1980 * pcpu_balance_free - manage the amount of free chunks
1981 * @empty_only: free chunks only if there are no populated pages
1982 *
1983 * If empty_only is %false, reclaim all fully free chunks regardless of the
1984 * number of populated pages. Otherwise, only reclaim chunks that have no
1985 * populated pages.
1986 *
1987 * CONTEXT:
1988 * pcpu_lock (can be dropped temporarily)
1989 */
1990static void pcpu_balance_free(bool empty_only)
1991{
1992 LIST_HEAD(to_free);
1993 struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
1994 struct pcpu_chunk *chunk, *next;
1995
1996 lockdep_assert_held(&pcpu_lock);
1997
1998 /*
1999 * There's no reason to keep around multiple unused chunks and VM
2000 * areas can be scarce. Destroy all free chunks except for one.
2001 */
2002 list_for_each_entry_safe(chunk, next, free_head, list) {
2003 WARN_ON(chunk->immutable);
2004
2005 /* spare the first one */
2006 if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
2007 continue;
2008
2009 if (!empty_only || chunk->nr_empty_pop_pages == 0)
2010 list_move(&chunk->list, &to_free);
2011 }
2012
2013 if (list_empty(&to_free))
2014 return;
2015
2016 spin_unlock_irq(&pcpu_lock);
2017 list_for_each_entry_safe(chunk, next, &to_free, list) {
2018 unsigned int rs, re;
2019
2020 bitmap_for_each_set_region(chunk->populated, rs, re, 0,
2021 chunk->nr_pages) {
2022 pcpu_depopulate_chunk(chunk, rs, re);
2023 spin_lock_irq(&pcpu_lock);
2024 pcpu_chunk_depopulated(chunk, rs, re);
2025 spin_unlock_irq(&pcpu_lock);
2026 }
2027 pcpu_destroy_chunk(chunk);
2028 cond_resched();
2029 }
2030 spin_lock_irq(&pcpu_lock);
2031}
2032
2033/**
2034 * pcpu_balance_populated - manage the amount of populated pages
2035 *
2036 * Maintain a certain amount of populated pages to satisfy atomic allocations.
2037 * It is possible that this is called when physical memory is scarce, causing
2038 * the OOM killer to be triggered. We should avoid doing so until an actual
2039 * allocation failure occurs, as it is possible that requests can be
2040 * serviced from already backed regions.
2041 *
2042 * CONTEXT:
2043 * pcpu_lock (can be dropped temporarily)
2044 */
2045static void pcpu_balance_populated(void)
2046{
2047 /* gfp flags passed to underlying allocators */
2048 const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
2049 struct pcpu_chunk *chunk;
2050 int slot, nr_to_pop, ret;
2051
2052 lockdep_assert_held(&pcpu_lock);
2053
2054 /*
2055 * Ensure there are certain number of free populated pages for
2056 * atomic allocs. Fill up from the most packed so that atomic
2057 * allocs don't increase fragmentation. If atomic allocation
2058 * failed previously, always populate the maximum amount. This
2059 * should prevent atomic allocs larger than PAGE_SIZE from keeping
2060 * failing indefinitely; however, large atomic allocs are not
2061 * something we support properly and can be highly unreliable and
2062 * inefficient.
2063 */
2064retry_pop:
2065 if (pcpu_atomic_alloc_failed) {
2066 nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
2067 /* best effort anyway, don't worry about synchronization */
2068 pcpu_atomic_alloc_failed = false;
2069 } else {
2070 nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2071 pcpu_nr_empty_pop_pages,
2072 0, PCPU_EMPTY_POP_PAGES_HIGH);
2073 }
2074
2075 for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
2076 unsigned int nr_unpop = 0, rs, re;
2077
2078 if (!nr_to_pop)
2079 break;
2080
2081 list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
2082 nr_unpop = chunk->nr_pages - chunk->nr_populated;
2083 if (nr_unpop)
2084 break;
2085 }
2086
2087 if (!nr_unpop)
2088 continue;
2089
2090 /* @chunk can't go away while pcpu_alloc_mutex is held */
2091 bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
2092 chunk->nr_pages) {
2093 int nr = min_t(int, re - rs, nr_to_pop);
2094
2095 spin_unlock_irq(&pcpu_lock);
2096 ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2097 cond_resched();
2098 spin_lock_irq(&pcpu_lock);
2099 if (!ret) {
2100 nr_to_pop -= nr;
2101 pcpu_chunk_populated(chunk, rs, rs + nr);
2102 } else {
2103 nr_to_pop = 0;
2104 }
2105
2106 if (!nr_to_pop)
2107 break;
2108 }
2109 }
2110
2111 if (nr_to_pop) {
2112 /* ran out of chunks to populate, create a new one and retry */
2113 spin_unlock_irq(&pcpu_lock);
2114 chunk = pcpu_create_chunk(gfp);
2115 cond_resched();
2116 spin_lock_irq(&pcpu_lock);
2117 if (chunk) {
2118 pcpu_chunk_relocate(chunk, -1);
2119 goto retry_pop;
2120 }
2121 }
2122}
2123
2124/**
2125 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
2126 *
2127 * Scan over chunks in the depopulate list and try to release unused populated
2128 * pages back to the system. Depopulated chunks are sidelined to prevent
2129 * repopulating these pages unless required. Fully free chunks are reintegrated
2130 * and freed accordingly (1 is kept around). If we drop below the empty
2131 * populated pages threshold, reintegrate the chunk if it has empty populated pages.
2132 * Each chunk is scanned in the reverse order to keep populated pages close to
2133 * the beginning of the chunk.
2134 *
2135 * CONTEXT:
2136 * pcpu_lock (can be dropped temporarily)
2137 *
2138 */
2139static void pcpu_reclaim_populated(void)
2140{
2141 struct pcpu_chunk *chunk;
2142 struct pcpu_block_md *block;
2143 int freed_page_start, freed_page_end;
2144 int i, end;
2145 bool reintegrate;
2146
2147 lockdep_assert_held(&pcpu_lock);
2148
2149 /*
2150 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2151 * longer discoverable to allocations which may populate pages. The only
2152 * other accessor is the free path, which only returns the area back to the
2153 * allocator without touching the populated bitmap.
2154 */
2155 while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) {
2156 chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot],
2157 struct pcpu_chunk, list);
2158 WARN_ON(chunk->immutable);
2159
2160 /*
2161 * Scan chunk's pages in the reverse order to keep populated
2162 * pages close to the beginning of the chunk.
2163 */
2164 freed_page_start = chunk->nr_pages;
2165 freed_page_end = 0;
2166 reintegrate = false;
2167 for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2168 /* no more work to do */
2169 if (chunk->nr_empty_pop_pages == 0)
2170 break;
2171
2172 /* reintegrate chunk to prevent atomic alloc failures */
2173 if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
2174 reintegrate = true;
2175 goto end_chunk;
2176 }
2177
2178 /*
2179 * If the page is empty and populated, start or
2180 * extend the (i, end) range. If i == 0, decrease
2181 * i and perform the depopulation to cover the last
2182 * (first) page in the chunk.
2183 */
2184 block = chunk->md_blocks + i;
2185 if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2186 test_bit(i, chunk->populated)) {
2187 if (end == -1)
2188 end = i;
2189 if (i > 0)
2190 continue;
2191 i--;
2192 }
2193
2194 /* depopulate if there is an active range */
2195 if (end == -1)
2196 continue;
2197
2198 spin_unlock_irq(&pcpu_lock);
2199 pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2200 cond_resched();
2201 spin_lock_irq(&pcpu_lock);
2202
2203 pcpu_chunk_depopulated(chunk, i + 1, end + 1);
2204 freed_page_start = min(freed_page_start, i + 1);
2205 freed_page_end = max(freed_page_end, end + 1);
2206
2207 /* reset the range and continue */
2208 end = -1;
2209 }
2210
2211end_chunk:
2212 /* batch tlb flush per chunk to amortize cost */
2213 if (freed_page_start < freed_page_end) {
2214 spin_unlock_irq(&pcpu_lock);
2215 pcpu_post_unmap_tlb_flush(chunk,
2216 freed_page_start,
2217 freed_page_end);
2218 cond_resched();
2219 spin_lock_irq(&pcpu_lock);
2220 }
2221
2222 if (reintegrate || chunk->free_bytes == pcpu_unit_size)
2223 pcpu_reintegrate_chunk(chunk);
2224 else
2225 list_move_tail(&chunk->list,
2226 &pcpu_chunk_lists[pcpu_sidelined_slot]);
2227 }
2228}
2229
2230/**
2231 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
2232 * @work: unused
2233 *
2234 * Manage the number of fully free chunks and the number of populated pages.
2235 * An important thing to consider is when pages are freed and how they
2236 * contribute to the global counts.
2237 */
2238static void pcpu_balance_workfn(struct work_struct *work)
2239{
2240 /*
2241 * pcpu_balance_free() is called twice because the first time we may
2242 * trim pages in the active pcpu_nr_empty_pop_pages, which may leave us
2243 * without enough to reclaim; pcpu_balance_populated() is then needed to
2244 * grow other chunks. This gives pcpu_reclaim_populated() time to move
2245 * fully free chunks to the active list to be freed if appropriate.
2246 */
2247 mutex_lock(&pcpu_alloc_mutex);
2248 spin_lock_irq(&pcpu_lock);
2249
2250 pcpu_balance_free(false);
2251 pcpu_reclaim_populated();
2252 pcpu_balance_populated();
2253 pcpu_balance_free(true);
2254
2255 spin_unlock_irq(&pcpu_lock);
2256 mutex_unlock(&pcpu_alloc_mutex);
2257}
2258
2259/**
2260 * free_percpu - free percpu area
2261 * @ptr: pointer to area to free
2262 *
2263 * Free percpu area @ptr.
2264 *
2265 * CONTEXT:
2266 * Can be called from atomic context.
2267 */
2268void free_percpu(void __percpu *ptr)
2269{
2270 void *addr;
2271 struct pcpu_chunk *chunk;
2272 unsigned long flags;
2273 int size, off;
2274 bool need_balance = false;
2275
2276 if (!ptr)
2277 return;
2278
2279 kmemleak_free_percpu(ptr);
2280
2281 addr = __pcpu_ptr_to_addr(ptr);
2282
2283 spin_lock_irqsave(&pcpu_lock, flags);
2284
2285 chunk = pcpu_chunk_addr_search(addr);
2286 off = addr - chunk->base_addr;
2287
2288 size = pcpu_free_area(chunk, off);
2289
2290 pcpu_memcg_free_hook(chunk, off, size);
2291
2292 /*
2293 * If there is more than one fully free chunk, wake up the grim reaper.
2294 * If the chunk is isolated, it may be in the process of being
2295 * reclaimed. Let reclaim manage cleaning up of that chunk.
2296 */
2297 if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
2298 struct pcpu_chunk *pos;
2299
2300 list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
2301 if (pos != chunk) {
2302 need_balance = true;
2303 break;
2304 }
2305 } else if (pcpu_should_reclaim_chunk(chunk)) {
2306 pcpu_isolate_chunk(chunk);
2307 need_balance = true;
2308 }
2309
2310 trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2311
2312 spin_unlock_irqrestore(&pcpu_lock, flags);
2313
2314 if (need_balance)
2315 pcpu_schedule_balance_work();
2316}
2317EXPORT_SYMBOL_GPL(free_percpu);
2318
2319bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2320{
2321#ifdef CONFIG_SMP
2322 const size_t static_size = __per_cpu_end - __per_cpu_start;
2323 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2324 unsigned int cpu;
2325
2326 for_each_possible_cpu(cpu) {
2327 void *start = per_cpu_ptr(base, cpu);
2328 void *va = (void *)addr;
2329
2330 if (va >= start && va < start + static_size) {
2331 if (can_addr) {
2332 *can_addr = (unsigned long) (va - start);
2333 *can_addr += (unsigned long)
2334 per_cpu_ptr(base, get_boot_cpu_id());
2335 }
2336 return true;
2337 }
2338 }
2339#endif
2340 /* on UP, can't distinguish from other static vars, always false */
2341 return false;
2342}
2343
2344/**
2345 * is_kernel_percpu_address - test whether address is from static percpu area
2346 * @addr: address to test
2347 *
2348 * Test whether @addr belongs to in-kernel static percpu area. Module
2349 * static percpu areas are not considered. For those, use
2350 * is_module_percpu_address().
2351 *
2352 * RETURNS:
2353 * %true if @addr is from in-kernel static percpu area, %false otherwise.
2354 */
2355bool is_kernel_percpu_address(unsigned long addr)
2356{
2357 return __is_kernel_percpu_address(addr, NULL);
2358}
2359
2360/**
2361 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2362 * @addr: the address to be converted to physical address
2363 *
2364 * Given @addr, which is a dereferenceable address obtained via one of the
2365 * percpu access macros, this function translates it into its physical
2366 * address. The caller is responsible for ensuring @addr stays valid
2367 * until this function finishes.
2368 *
2369 * The percpu allocator has special setup for the first chunk, which
2370 * currently supports either embedding in the linear address space or
2371 * vmalloc mapping; from the second chunk on, the backing allocator
2372 * (currently either vm or km) provides the translation.
2373 *
2374 * @addr could be translated without checking whether it falls into the
2375 * first chunk, but the current code better reflects how the percpu
2376 * allocator actually works, and the verification can catch bugs both in
2377 * the allocator itself and in per_cpu_ptr_to_phys() callers. So the
2378 * current code is kept.
2379 *
2380 * RETURNS:
2381 * The physical address for @addr.
2382 */
2383phys_addr_t per_cpu_ptr_to_phys(void *addr)
2384{
2385 void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2386 bool in_first_chunk = false;
2387 unsigned long first_low, first_high;
2388 unsigned int cpu;
2389
2390 /*
2391 * The following test on unit_low/high isn't strictly
2392 * necessary but will speed up lookups of addresses which
2393 * aren't in the first chunk.
2394 *
2395 * The address check is against full chunk sizes. pcpu_base_addr
2396 * points to the beginning of the first chunk including the
2397 * static region. Assumes good intent as the first chunk may
2398 * not be full (ie. < pcpu_unit_pages in size).
2399 */
2400 first_low = (unsigned long)pcpu_base_addr +
2401 pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2402 first_high = (unsigned long)pcpu_base_addr +
2403 pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2404 if ((unsigned long)addr >= first_low &&
2405 (unsigned long)addr < first_high) {
2406 for_each_possible_cpu(cpu) {
2407 void *start = per_cpu_ptr(base, cpu);
2408
2409 if (addr >= start && addr < start + pcpu_unit_size) {
2410 in_first_chunk = true;
2411 break;
2412 }
2413 }
2414 }
2415
2416 if (in_first_chunk) {
2417 if (!is_vmalloc_addr(addr))
2418 return __pa(addr);
2419 else
2420 return page_to_phys(vmalloc_to_page(addr)) +
2421 offset_in_page(addr);
2422 } else
2423 return page_to_phys(pcpu_addr_to_page(addr)) +
2424 offset_in_page(addr);
2425}
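/*
 * Example (illustrative): a caller that must hand the physical address of
 * a per-cpu buffer to hardware or firmware could do
 *
 *	void *va = per_cpu_ptr(buf, cpu);
 *	phys_addr_t pa = per_cpu_ptr_to_phys(va);
 *
 * keeping the percpu allocation alive for as long as pa is in use.
 */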
2426
2427/**
2428 * pcpu_alloc_alloc_info - allocate percpu allocation info
2429 * @nr_groups: the number of groups
2430 * @nr_units: the number of units
2431 *
2432 * Allocate ai which is large enough for @nr_groups groups containing
2433 * @nr_units units. The returned ai's groups[0].cpu_map points to the
2434 * cpu_map array which is long enough for @nr_units and filled with
2435 * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
2436 * pointers of the other groups.
2437 *
2438 * RETURNS:
2439 * Pointer to the allocated pcpu_alloc_info on success, NULL on
2440 * failure.
2441 */
2442struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2443 int nr_units)
2444{
2445 struct pcpu_alloc_info *ai;
2446 size_t base_size, ai_size;
2447 void *ptr;
2448 int unit;
2449
2450 base_size = ALIGN(struct_size(ai, groups, nr_groups),
2451 __alignof__(ai->groups[0].cpu_map[0]));
2452 ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2453
2454 ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2455 if (!ptr)
2456 return NULL;
2457 ai = ptr;
2458 ptr += base_size;
2459
2460 ai->groups[0].cpu_map = ptr;
2461
2462 for (unit = 0; unit < nr_units; unit++)
2463 ai->groups[0].cpu_map[unit] = NR_CPUS;
2464
2465 ai->nr_groups = nr_groups;
2466 ai->__ai_size = PFN_ALIGN(ai_size);
2467
2468 return ai;
2469}
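/*
 * Layout of the block returned above, roughly:
 *
 *	| struct pcpu_alloc_info + groups[nr_groups] | cpu_map[nr_units] |
 *	  ^ ai                                         ^ groups[0].cpu_map
 *
 * groups[0].cpu_map points at the trailing array, every entry of which is
 * initialized to NR_CPUS; the cpu_map pointers of the other groups are
 * left for the caller to set up.
 */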
2470
2471/**
2472 * pcpu_free_alloc_info - free percpu allocation info
2473 * @ai: pcpu_alloc_info to free
2474 *
2475 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2476 */
2477void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2478{
2479 memblock_free_early(__pa(ai), ai->__ai_size);
2480}
2481
2482/**
2483 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2484 * @lvl: loglevel
2485 * @ai: allocation info to dump
2486 *
2487 * Print out information about @ai using loglevel @lvl.
2488 */
2489static void pcpu_dump_alloc_info(const char *lvl,
2490 const struct pcpu_alloc_info *ai)
2491{
2492 int group_width = 1, cpu_width = 1, width;
2493 char empty_str[] = "--------";
2494 int alloc = 0, alloc_end = 0;
2495 int group, v;
2496 int upa, apl; /* units per alloc, allocs per line */
2497
2498 v = ai->nr_groups;
2499 while (v /= 10)
2500 group_width++;
2501
2502 v = num_possible_cpus();
2503 while (v /= 10)
2504 cpu_width++;
2505 empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2506
2507 upa = ai->alloc_size / ai->unit_size;
2508 width = upa * (cpu_width + 1) + group_width + 3;
2509 apl = rounddown_pow_of_two(max(60 / width, 1));
2510
2511 printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2512 lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2513 ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2514
2515 for (group = 0; group < ai->nr_groups; group++) {
2516 const struct pcpu_group_info *gi = &ai->groups[group];
2517 int unit = 0, unit_end = 0;
2518
2519 BUG_ON(gi->nr_units % upa);
2520 for (alloc_end += gi->nr_units / upa;
2521 alloc < alloc_end; alloc++) {
2522 if (!(alloc % apl)) {
2523 pr_cont("\n");
2524 printk("%spcpu-alloc: ", lvl);
2525 }
2526 pr_cont("[%0*d] ", group_width, group);
2527
2528 for (unit_end += upa; unit < unit_end; unit++)
2529 if (gi->cpu_map[unit] != NR_CPUS)
2530 pr_cont("%0*d ",
2531 cpu_width, gi->cpu_map[unit]);
2532 else
2533 pr_cont("%s ", empty_str);
2534 }
2535 }
2536 pr_cont("\n");
2537}
2538
2539/**
2540 * pcpu_setup_first_chunk - initialize the first percpu chunk
2541 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2542 * @base_addr: mapped address
2543 *
2544 * Initialize the first percpu chunk which contains the kernel static
2545 * percpu area. This function is to be called from arch percpu area
2546 * setup path.
2547 *
2548 * @ai contains all information necessary to initialize the first
2549 * chunk and prime the dynamic percpu allocator.
2550 *
2551 * @ai->static_size is the size of static percpu area.
2552 *
2553 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2554 * reserve after the static area in the first chunk. This reserves
2555 * the first chunk such that it's available only through reserved
2556 * percpu allocation. This is primarily used to serve module percpu
2557 * static areas on architectures where the addressing model has
2558 * limited offset range for symbol relocations to guarantee module
2559 * percpu symbols fall inside the relocatable range.
2560 *
2561 * @ai->dyn_size determines the number of bytes available for dynamic
2562 * allocation in the first chunk. The area between @ai->static_size +
2563 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2564 *
2565 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2566 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2567 * @ai->dyn_size.
2568 *
2569 * @ai->atom_size is the allocation atom size and used as alignment
2570 * for vm areas.
2571 *
2572 * @ai->alloc_size is the allocation size and always multiple of
2573 * @ai->atom_size. This is larger than @ai->atom_size if
2574 * @ai->unit_size is larger than @ai->atom_size.
2575 *
2576 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2577 * percpu areas. Units which should be colocated are put into the
2578 * same group. Dynamic VM areas will be allocated according to these
2579 * groupings. If @ai->nr_groups is zero, a single group containing
2580 * all units is assumed.
2581 *
2582 * The caller should have mapped the first chunk at @base_addr and
2583 * copied static data to each unit.
2584 *
2585 * The first chunk will always contain a static and a dynamic region.
2586 * However, the static region is not managed by any chunk. If the first
2587 * chunk also contains a reserved region, it is served by two chunks -
2588 * one for the reserved region and one for the dynamic region. They
2589 * share the same vm, but use offset regions in the area allocation map.
2590 * The chunk serving the dynamic region is circulated in the chunk slots
2591 * and available for dynamic allocation like any other chunk.
2592 */
2593void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2594 void *base_addr)
2595{
2596 size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2597 size_t static_size, dyn_size;
2598 struct pcpu_chunk *chunk;
2599 unsigned long *group_offsets;
2600 size_t *group_sizes;
2601 unsigned long *unit_off;
2602 unsigned int cpu;
2603 int *unit_map;
2604 int group, unit, i;
2605 int map_size;
2606 unsigned long tmp_addr;
2607 size_t alloc_size;
2608
2609#define PCPU_SETUP_BUG_ON(cond) do { \
2610 if (unlikely(cond)) { \
2611 pr_emerg("failed to initialize, %s\n", #cond); \
2612 pr_emerg("cpu_possible_mask=%*pb\n", \
2613 cpumask_pr_args(cpu_possible_mask)); \
2614 pcpu_dump_alloc_info(KERN_EMERG, ai); \
2615 BUG(); \
2616 } \
2617} while (0)
2618
2619 /* sanity checks */
2620 PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2621#ifdef CONFIG_SMP
2622 PCPU_SETUP_BUG_ON(!ai->static_size);
2623 PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2624#endif
2625 PCPU_SETUP_BUG_ON(!base_addr);
2626 PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2627 PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2628 PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2629 PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2630 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2631 PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2632 PCPU_SETUP_BUG_ON(!ai->dyn_size);
2633 PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2634 PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2635 IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2636 PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2637
2638 /* process group information and build config tables accordingly */
2639 alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2640 group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2641 if (!group_offsets)
2642 panic("%s: Failed to allocate %zu bytes\n", __func__,
2643 alloc_size);
2644
2645 alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2646 group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2647 if (!group_sizes)
2648 panic("%s: Failed to allocate %zu bytes\n", __func__,
2649 alloc_size);
2650
2651 alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2652 unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2653 if (!unit_map)
2654 panic("%s: Failed to allocate %zu bytes\n", __func__,
2655 alloc_size);
2656
2657 alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2658 unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2659 if (!unit_off)
2660 panic("%s: Failed to allocate %zu bytes\n", __func__,
2661 alloc_size);
2662
2663 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2664 unit_map[cpu] = UINT_MAX;
2665
2666 pcpu_low_unit_cpu = NR_CPUS;
2667 pcpu_high_unit_cpu = NR_CPUS;
2668
2669 for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2670 const struct pcpu_group_info *gi = &ai->groups[group];
2671
2672 group_offsets[group] = gi->base_offset;
2673 group_sizes[group] = gi->nr_units * ai->unit_size;
2674
2675 for (i = 0; i < gi->nr_units; i++) {
2676 cpu = gi->cpu_map[i];
2677 if (cpu == NR_CPUS)
2678 continue;
2679
2680 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2681 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2682 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2683
2684 unit_map[cpu] = unit + i;
2685 unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2686
2687 /* determine low/high unit_cpu */
2688 if (pcpu_low_unit_cpu == NR_CPUS ||
2689 unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2690 pcpu_low_unit_cpu = cpu;
2691 if (pcpu_high_unit_cpu == NR_CPUS ||
2692 unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2693 pcpu_high_unit_cpu = cpu;
2694 }
2695 }
2696 pcpu_nr_units = unit;
2697
2698 for_each_possible_cpu(cpu)
2699 PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2700
2701 /* we're done parsing the input, undefine BUG macro and dump config */
2702#undef PCPU_SETUP_BUG_ON
2703 pcpu_dump_alloc_info(KERN_DEBUG, ai);
2704
2705 pcpu_nr_groups = ai->nr_groups;
2706 pcpu_group_offsets = group_offsets;
2707 pcpu_group_sizes = group_sizes;
2708 pcpu_unit_map = unit_map;
2709 pcpu_unit_offsets = unit_off;
2710
2711 /* determine basic parameters */
2712 pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2713 pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2714 pcpu_atom_size = ai->atom_size;
2715 pcpu_chunk_struct_size = struct_size(chunk, populated,
2716 BITS_TO_LONGS(pcpu_unit_pages));
2717
2718 pcpu_stats_save_ai(ai);
2719
2720 /*
2721 * Allocate chunk slots. The slots after the active slots are:
2722 * sidelined_slot - isolated, depopulated chunks
2723 * free_slot - fully free chunks
2724 * to_depopulate_slot - isolated, chunks to depopulate
2725 */
2726 pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2727 pcpu_free_slot = pcpu_sidelined_slot + 1;
2728 pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2729 pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
2730 pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2731 sizeof(pcpu_chunk_lists[0]),
2732 SMP_CACHE_BYTES);
2733 if (!pcpu_chunk_lists)
2734 panic("%s: Failed to allocate %zu bytes\n", __func__,
2735 pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
2736
2737 for (i = 0; i < pcpu_nr_slots; i++)
2738 INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
2739
2740 /*
2741 * The end of the static region needs to be aligned with the
2742 * minimum allocation size as this offsets the reserved and
2743 * dynamic region. The first chunk ends page aligned by
2744 * expanding the dynamic region, therefore the dynamic region
2745 * can be shrunk to compensate while still staying above the
2746 * configured sizes.
2747 */
2748 static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2749 dyn_size = ai->dyn_size - (static_size - ai->static_size);
2750
2751 /*
2752 * Initialize first chunk.
2753 * If the reserved_size is non-zero, this initializes the reserved
2754 * chunk. If the reserved_size is zero, the reserved chunk is NULL
2755 * and the dynamic region is initialized here. The first chunk,
2756 * pcpu_first_chunk, will always point to the chunk that serves
2757 * the dynamic region.
2758 */
2759 tmp_addr = (unsigned long)base_addr + static_size;
2760 map_size = ai->reserved_size ?: dyn_size;
2761 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2762
2763 /* init dynamic chunk if necessary */
2764 if (ai->reserved_size) {
2765 pcpu_reserved_chunk = chunk;
2766
2767 tmp_addr = (unsigned long)base_addr + static_size +
2768 ai->reserved_size;
2769 map_size = dyn_size;
2770 chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2771 }
2772
2773 /* link the first chunk in */
2774 pcpu_first_chunk = chunk;
2775 pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2776 pcpu_chunk_relocate(pcpu_first_chunk, -1);
2777
2778 /* include all regions of the first chunk */
2779 pcpu_nr_populated += PFN_DOWN(size_sum);
2780
2781 pcpu_stats_chunk_alloc();
2782 trace_percpu_create_chunk(base_addr);
2783
2784 /* we're done */
2785 pcpu_base_addr = base_addr;
2786}
2787
2788#ifdef CONFIG_SMP
2789
2790const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2791 [PCPU_FC_AUTO] = "auto",
2792 [PCPU_FC_EMBED] = "embed",
2793 [PCPU_FC_PAGE] = "page",
2794};
2795
2796enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2797
2798static int __init percpu_alloc_setup(char *str)
2799{
2800 if (!str)
2801 return -EINVAL;
2802
2803 if (0)
2804 /* nada */;
2805#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2806 else if (!strcmp(str, "embed"))
2807 pcpu_chosen_fc = PCPU_FC_EMBED;
2808#endif
2809#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2810 else if (!strcmp(str, "page"))
2811 pcpu_chosen_fc = PCPU_FC_PAGE;
2812#endif
2813 else
2814 pr_warn("unknown allocator %s specified\n", str);
2815
2816 return 0;
2817}
2818early_param("percpu_alloc", percpu_alloc_setup);
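/*
 * Example: booting with "percpu_alloc=page" selects the page-granular
 * first chunk allocator on architectures that support it, while
 * "percpu_alloc=embed" selects the embedding allocator; unrecognized
 * values keep the default and print a warning (see percpu_alloc_setup()
 * above).
 */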
2819
2820/*
2821 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2822 * Build it if it is needed by the arch config or if the generic setup is
2823 * going to be used.
2824 */
2825#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2826 !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2827#define BUILD_EMBED_FIRST_CHUNK
2828#endif
2829
2830/* build pcpu_page_first_chunk() iff needed by the arch config */
2831#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2832#define BUILD_PAGE_FIRST_CHUNK
2833#endif
2834
2835/* pcpu_build_alloc_info() is used by both embed and page first chunk */
2836#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2837/**
2838 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2839 * @reserved_size: the size of reserved percpu area in bytes
2840 * @dyn_size: minimum free size for dynamic allocation in bytes
2841 * @atom_size: allocation atom size
2842 * @cpu_distance_fn: callback to determine distance between cpus, optional
2843 *
2844 * This function determines grouping of units, their mappings to cpus
2845 * and other parameters considering needed percpu size, allocation
2846 * atom size and distances between CPUs.
2847 *
2848 * Groups are always multiples of atom size and CPUs which are of
2849 * LOCAL_DISTANCE both ways are grouped together and share space for
2850 * units in the same group. The returned configuration is guaranteed
2851 * to have CPUs on different nodes in different groups and >=75% usage
2852 * of allocated virtual address space.
2853 *
2854 * RETURNS:
2855 * On success, pointer to the new allocation_info is returned. On
2856 * failure, ERR_PTR value is returned.
2857 */
2858static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
2859 size_t reserved_size, size_t dyn_size,
2860 size_t atom_size,
2861 pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2862{
2863 static int group_map[NR_CPUS] __initdata;
2864 static int group_cnt[NR_CPUS] __initdata;
2865 static struct cpumask mask __initdata;
2866 const size_t static_size = __per_cpu_end - __per_cpu_start;
2867 int nr_groups = 1, nr_units = 0;
2868 size_t size_sum, min_unit_size, alloc_size;
2869 int upa, max_upa, best_upa; /* units_per_alloc */
2870 int last_allocs, group, unit;
2871 unsigned int cpu, tcpu;
2872 struct pcpu_alloc_info *ai;
2873 unsigned int *cpu_map;
2874
2875 /* this function may be called multiple times */
2876 memset(group_map, 0, sizeof(group_map));
2877 memset(group_cnt, 0, sizeof(group_cnt));
2878 cpumask_clear(&mask);
2879
2880 /* calculate size_sum and ensure dyn_size is enough for early alloc */
2881 size_sum = PFN_ALIGN(static_size + reserved_size +
2882 max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2883 dyn_size = size_sum - static_size - reserved_size;
2884
2885 /*
2886 * Determine min_unit_size, alloc_size and max_upa such that
2887 * alloc_size is multiple of atom_size and is the smallest
2888 * which can accommodate 4k aligned segments which are equal to
2889 * or larger than min_unit_size.
2890 */
2891 min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2892
2893 /* determine the maximum # of units that can fit in an allocation */
2894 alloc_size = roundup(min_unit_size, atom_size);
2895 upa = alloc_size / min_unit_size;
2896 while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2897 upa--;
2898 max_upa = upa;
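
	/*
	 * Worked numbers (hypothetical, assuming 4k pages): with
	 * size_sum = 44k, min_unit_size is 44k, and atom_size = 2M gives
	 * alloc_size = 2M.  upa starts at 2M/44k = 46 and the loop above
	 * lowers it to 32, the largest value that both divides 2M evenly
	 * and leaves a page-aligned unit size (2M/32 = 64k), so
	 * max_upa = 32.
	 */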

	cpumask_copy(&mask, cpu_possible_mask);

	/* group cpus according to their proximity */
	for (group = 0; !cpumask_empty(&mask); group++) {
		/* pop the group's first cpu */
		cpu = cpumask_first(&mask);
		group_map[cpu] = group;
		group_cnt[group]++;
		cpumask_clear_cpu(cpu, &mask);

		for_each_cpu(tcpu, &mask) {
			if (!cpu_distance_fn ||
			    (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
			     cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
				group_map[tcpu] = group;
				group_cnt[group]++;
				cpumask_clear_cpu(tcpu, &mask);
			}
		}
	}
	nr_groups = group;

	/*
	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
	 * Expand the unit_size until we use >= 75% of the units allocated.
	 * This is related to atom_size, which could be much larger than
	 * the unit_size.
	 */
	last_allocs = INT_MAX;
	best_upa = 0;
	for (upa = max_upa; upa; upa--) {
		int allocs = 0, wasted = 0;

		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
			continue;

		for (group = 0; group < nr_groups; group++) {
			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
			allocs += this_allocs;
			wasted += this_allocs * upa - group_cnt[group];
		}

		/*
		 * Don't accept if wastage is over 1/3.  The
		 * greater-than comparison ensures upa==1 always
		 * passes the following check.
		 */
		if (wasted > num_possible_cpus() / 3)
			continue;

		/* and then don't consume more memory */
		if (allocs > last_allocs)
			break;
		last_allocs = allocs;
		best_upa = upa;
	}
	BUG_ON(!best_upa);
	upa = best_upa;

	/* allocate and fill alloc_info */
	for (group = 0; group < nr_groups; group++)
		nr_units += roundup(group_cnt[group], upa);

	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
	if (!ai)
		return ERR_PTR(-ENOMEM);
	cpu_map = ai->groups[0].cpu_map;

	for (group = 0; group < nr_groups; group++) {
		ai->groups[group].cpu_map = cpu_map;
		cpu_map += roundup(group_cnt[group], upa);
	}

	ai->static_size = static_size;
	ai->reserved_size = reserved_size;
	ai->dyn_size = dyn_size;
	ai->unit_size = alloc_size / upa;
	ai->atom_size = atom_size;
	ai->alloc_size = alloc_size;

	for (group = 0, unit = 0; group < nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];

		/*
		 * Initialize base_offset as if all groups are located
		 * back-to-back.  The caller should update this to
		 * reflect actual allocation.
		 */
		gi->base_offset = unit * ai->unit_size;

		for_each_possible_cpu(cpu)
			if (group_map[cpu] == group)
				gi->cpu_map[gi->nr_units++] = cpu;
		gi->nr_units = roundup(gi->nr_units, upa);
		unit += gi->nr_units;
	}
	BUG_ON(unit != nr_units);

	return ai;
}
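
/*
 * Worked example for the function above (hypothetical configuration, not
 * taken from a real machine): 4 possible CPUs on 2 NUMA nodes, 4k pages,
 * atom_size = 2M, size_sum = 44k.  Grouping yields group_cnt = {2, 2} and
 * max_upa = 32.  The upa loop skips values that don't divide 2M into
 * page-aligned units, rejects upa = 32, 16, 8 and 4 because packing only
 * two CPUs per group into that many units wastes more than a third of
 * them, accepts upa = 2 with zero waste (one 2M allocation per group,
 * each holding two 1M units), and stops at upa = 1 since it would need
 * more allocations than upa = 2.  Result: nr_groups = 2, nr_units = 4,
 * unit_size = 1M.
 */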
#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */

#if defined(BUILD_EMBED_FIRST_CHUNK)
/**
 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
 * @reserved_size: the size of reserved percpu area in bytes
 * @dyn_size: minimum free size for dynamic allocation in bytes
 * @atom_size: allocation atom size
 * @cpu_distance_fn: callback to determine distance between cpus, optional
 * @alloc_fn: function to allocate percpu page
 * @free_fn: function to free percpu page
 *
 * This is a helper to ease setting up an embedded first percpu chunk
 * and can be called where pcpu_setup_first_chunk() is expected.
 *
 * If this function is used to setup the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into the
 * vmalloc area.  Allocations are always whole multiples of @atom_size
 * aligned to @atom_size.
 *
 * This enables the first chunk to piggyback on the linear physical
 * mapping which often uses larger page sizes.  Please note that this
 * can result in very sparse cpu->unit mapping on NUMA machines thus
 * requiring large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than the distances
 * between node memory addresses (i.e. 32-bit NUMA machines).
 *
 * @dyn_size specifies the minimum dynamic area size.
 *
 * If the needed size is smaller than the minimum or specified unit
 * size, the leftover is returned using @free_fn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
				  size_t atom_size,
				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
				  pcpu_fc_alloc_fn_t alloc_fn,
				  pcpu_fc_free_fn_t free_fn)
{
	void *base = (void *)ULONG_MAX;
	void **areas = NULL;
	struct pcpu_alloc_info *ai;
	size_t size_sum, areas_size;
	unsigned long max_distance;
	int group, i, highest_group, rc = 0;

	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
				   cpu_distance_fn);
	if (IS_ERR(ai))
		return PTR_ERR(ai);

	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));

	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
	if (!areas) {
		rc = -ENOMEM;
		goto out_free;
	}

	/* allocate, copy and determine base address & max_distance */
	highest_group = 0;
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		unsigned int cpu = NR_CPUS;
		void *ptr;

		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
			cpu = gi->cpu_map[i];
		BUG_ON(cpu == NR_CPUS);

		/* allocate space for the whole group */
		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
		if (!ptr) {
			rc = -ENOMEM;
			goto out_free_areas;
		}
		/* kmemleak tracks the percpu allocations separately */
		kmemleak_free(ptr);
		areas[group] = ptr;

		base = min(ptr, base);
		if (ptr > areas[highest_group])
			highest_group = group;
	}
	max_distance = areas[highest_group] - base;
	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;

	/* warn if maximum distance is further than 75% of vmalloc space */
	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
			max_distance, VMALLOC_TOTAL);
#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
		/* and fail if we have fallback */
		rc = -EINVAL;
		goto out_free_areas;
#endif
	}

	/*
	 * Copy data and free unused parts.  This should happen after all
	 * allocations are complete; otherwise, we may end up with
	 * overlapping groups.
	 */
	for (group = 0; group < ai->nr_groups; group++) {
		struct pcpu_group_info *gi = &ai->groups[group];
		void *ptr = areas[group];

		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
			if (gi->cpu_map[i] == NR_CPUS) {
				/* unused unit, free whole */
				free_fn(ptr, ai->unit_size);
				continue;
			}
			/* copy and return the unused part */
			memcpy(ptr, __per_cpu_load, ai->static_size);
			free_fn(ptr + size_sum, ai->unit_size - size_sum);
		}
	}

	/* base address is now known, determine group base offsets */
	for (group = 0; group < ai->nr_groups; group++) {
		ai->groups[group].base_offset = areas[group] - base;
	}

	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
		ai->dyn_size, ai->unit_size);

	pcpu_setup_first_chunk(ai, base);
	goto out_free;

out_free_areas:
	for (group = 0; group < ai->nr_groups; group++)
		if (areas[group])
			free_fn(areas[group],
				ai->groups[group].nr_units * ai->unit_size);
out_free:
	pcpu_free_alloc_info(ai);
	if (areas)
		memblock_free_early(__pa(areas), areas_size);
	return rc;
}
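
/*
 * Rough usage sketch (hypothetical arch code; my_cpu_distance,
 * my_alloc_bootmem and my_free_bootmem are placeholders an architecture
 * would provide, not functions defined in this file):
 *
 *	static int __init my_cpu_distance(unsigned int from, unsigned int to)
 *	{
 *		return cpu_to_node(from) == cpu_to_node(to)
 *			? LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    my_cpu_distance, my_alloc_bootmem,
 *				    my_free_bootmem);
 *
 * A NUMA-aware arch would typically pass a distance callback and a large
 * atom size such as PMD_SIZE so each node's units can ride on node-local
 * large-page mappings; the generic setup_per_cpu_areas() below instead
 * passes PAGE_SIZE and a NULL distance callback.
 */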
#endif /* BUILD_EMBED_FIRST_CHUNK */

#ifdef BUILD_PAGE_FIRST_CHUNK
/**
 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
 * @reserved_size: the size of reserved percpu area in bytes
 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
 * @free_fn: function to free percpu page, always called with PAGE_SIZE
 * @populate_pte_fn: function to populate pte
 *
 * This is a helper to ease setting up a page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
 *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page into the vmalloc area.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init pcpu_page_first_chunk(size_t reserved_size,
				 pcpu_fc_alloc_fn_t alloc_fn,
				 pcpu_fc_free_fn_t free_fn,
				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
{
	static struct vm_struct vm;
	struct pcpu_alloc_info *ai;
	char psize_str[16];
	int unit_pages;
	size_t pages_size;
	struct page **pages;
	int unit, i, j, rc = 0;
	int upa;
	int nr_g0_units;

	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);

	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
	if (IS_ERR(ai))
		return PTR_ERR(ai);
	BUG_ON(ai->nr_groups != 1);
	upa = ai->alloc_size / ai->unit_size;
	nr_g0_units = roundup(num_possible_cpus(), upa);
	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
		pcpu_free_alloc_info(ai);
		return -EINVAL;
	}

	unit_pages = ai->unit_size >> PAGE_SHIFT;

	/* unaligned allocations can't be freed, round up to page size */
	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
			       sizeof(pages[0]));
	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
	if (!pages)
		panic("%s: Failed to allocate %zu bytes\n", __func__,
		      pages_size);

	/* allocate pages */
	j = 0;
	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned int cpu = ai->groups[0].cpu_map[unit];
		for (i = 0; i < unit_pages; i++) {
			void *ptr;

			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
			if (!ptr) {
				pr_warn("failed to allocate %s page for cpu%u\n",
					psize_str, cpu);
				goto enomem;
			}
			/* kmemleak tracks the percpu allocations separately */
			kmemleak_free(ptr);
			pages[j++] = virt_to_page(ptr);
		}
	}

	/* allocate vm area, map the pages and copy static data */
	vm.flags = VM_ALLOC;
	vm.size = num_possible_cpus() * ai->unit_size;
	vm_area_register_early(&vm, PAGE_SIZE);

	for (unit = 0; unit < num_possible_cpus(); unit++) {
		unsigned long unit_addr =
			(unsigned long)vm.addr + unit * ai->unit_size;

		for (i = 0; i < unit_pages; i++)
			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));

		/* pte already populated, the following shouldn't fail */
		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
				      unit_pages);
		if (rc < 0)
			panic("failed to map percpu area, err=%d\n", rc);

		/*
		 * FIXME: Archs with virtual cache should flush local
		 * cache for the linear mapping here - something
		 * equivalent to flush_cache_vmap() on the local cpu.
		 * flush_cache_vmap() can't be used as most supporting
		 * data structures are not set up yet.
		 */

		/* copy static data */
		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
	}

	/* we're ready, commit */
	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
		unit_pages, psize_str, ai->static_size,
		ai->reserved_size, ai->dyn_size);

	pcpu_setup_first_chunk(ai, vm.addr);
	goto out_free_ar;

enomem:
	while (--j >= 0)
		free_fn(page_address(pages[j]), PAGE_SIZE);
	rc = -ENOMEM;
out_free_ar:
	memblock_free_early(__pa(pages), pages_size);
	pcpu_free_alloc_info(ai);
	return rc;
}
#endif /* BUILD_PAGE_FIRST_CHUNK */

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Generic SMP percpu area setup.
 *
 * The embedding helper is used because its behavior closely resembles
 * the original non-dynamic generic percpu area setup.  This is
 * important because many archs have addressing restrictions and might
 * fail if the percpu area is located far away from the previous
 * location.  As an added bonus, in non-NUMA cases, embedding is
 * generally a good idea TLB-wise because the percpu area can piggyback
 * on the physical linear memory mapping which uses large page
 * mappings on applicable archs.
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
				       size_t align)
{
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
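
/*
 * Illustration of how the offsets computed above are consumed (this lives
 * in the generic percpu accessors, not in this file): for a hypothetical
 * static percpu variable some_var,
 *
 *	per_cpu_ptr(&some_var, cpu)
 *		== (void *)&some_var + __per_cpu_offset[cpu]
 *		== pcpu_base_addr + pcpu_unit_offsets[cpu]
 *		   + ((void *)&some_var - (void *)__per_cpu_start)
 *
 * i.e. the address lands inside the unit that pcpu_embed_first_chunk()
 * populated with a copy of the static percpu section for that cpu.
 */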
#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */

#else	/* CONFIG_SMP */

/*
 * UP percpu area setup.
 *
 * UP always uses the km-based percpu allocator with identity mapping.
 * Static percpu variables are indistinguishable from the usual static
 * variables and don't require any special preparation.
 */
void __init setup_per_cpu_areas(void)
{
	const size_t unit_size =
		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
					 PERCPU_DYNAMIC_RESERVE));
	struct pcpu_alloc_info *ai;
	void *fc;

	ai = pcpu_alloc_alloc_info(1, 1);
	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (!ai || !fc)
		panic("Failed to allocate memory for percpu areas.");
	/* kmemleak tracks the percpu allocations separately */
	kmemleak_free(fc);

	ai->dyn_size = unit_size;
	ai->unit_size = unit_size;
	ai->atom_size = unit_size;
	ai->alloc_size = unit_size;
	ai->groups[0].nr_units = 1;
	ai->groups[0].cpu_map[0] = 0;

	pcpu_setup_first_chunk(ai, fc);
	pcpu_free_alloc_info(ai);
}

#endif	/* CONFIG_SMP */

/*
 * pcpu_nr_pages - calculate total number of populated backing pages
 *
 * This reflects the number of pages populated to back chunks.  Metadata is
 * excluded from the number exposed in meminfo because the number of backing
 * pages scales with the number of cpus and can quickly outweigh the memory
 * used for metadata.  It also keeps this calculation nice and simple.
 *
 * RETURNS:
 * Total number of populated backing pages in use by the allocator.
 */
unsigned long pcpu_nr_pages(void)
{
	return pcpu_nr_populated * pcpu_nr_units;
}
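
/*
 * For reference (the consumer lives in fs/proc/meminfo.c, not here): the
 * "Percpu:" line in /proc/meminfo is derived from this value, roughly
 * pcpu_nr_pages() << (PAGE_SHIFT - 10) kB.
 */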

/*
 * The percpu allocator is initialized early during boot when neither slab
 * nor workqueue is available.  Plug async management until everything is
 * up and running.
 */
static int __init percpu_enable_async(void)
{
	pcpu_async_enabled = true;
	return 0;
}
subsys_initcall(percpu_enable_async);