   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/percpu.c - percpu memory allocator
   4 *
   5 * Copyright (C) 2009		SUSE Linux Products GmbH
   6 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
   7 *
   8 * Copyright (C) 2017		Facebook Inc.
   9 * Copyright (C) 2017		Dennis Zhou <dennisszhou@gmail.com>
  10 *
  11 * The percpu allocator handles both static and dynamic areas.  Percpu
  12 * areas are allocated in chunks which are divided into units.  There is
  13 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
  14 * based on NUMA properties of the machine.
  15 *
  16 *  c0                           c1                         c2
  17 *  -------------------          -------------------        ------------
  18 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  19 *  -------------------  ......  -------------------  ....  ------------
  20 *
   21 * Allocation is done by offsets into a unit's address space.  I.e., an
  22 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
  23 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
  24 * and even sparse.  Access is handled by configuring percpu base
  25 * registers according to the cpu to unit mappings and offsetting the
  26 * base address using pcpu_unit_size.
  27 *
  28 * There is special consideration for the first chunk which must handle
  29 * the static percpu variables in the kernel image as allocation services
  30 * are not online yet.  In short, the first chunk is structured like so:
  31 *
  32 *                  <Static | [Reserved] | Dynamic>
  33 *
  34 * The static data is copied from the original section managed by the
  35 * linker.  The reserved section, if non-zero, primarily manages static
  36 * percpu variables from kernel modules.  Finally, the dynamic section
  37 * takes care of normal allocations.
  38 *
  39 * The allocator organizes chunks into lists according to free size and
  40 * tries to allocate from the fullest chunk first.  Each chunk is managed
  41 * by a bitmap with metadata blocks.  The allocation map is updated on
  42 * every allocation and free to reflect the current state while the boundary
  43 * map is only updated on allocation.  Each metadata block contains
  44 * information to help mitigate the need to iterate over large portions
  45 * of the bitmap.  The reverse mapping from page to chunk is stored in
  46 * the page's index.  Lastly, units are lazily backed and grow in unison.
  47 *
  48 * There is a unique conversion that goes on here between bytes and bits.
  49 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
  50 * tracks the number of pages it is responsible for in nr_pages.  Helper
   51 * functions are used to convert between bytes, bits, and blocks.
  52 * All hints are managed in bits unless explicitly stated.
  53 *
  54 * To use this allocator, arch code should do the following:
  55 *
  56 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  57 *   regular address to percpu pointer and back if they need to be
  58 *   different from the default
  59 *
  60 * - use pcpu_setup_first_chunk() during percpu area initialization to
  61 *   setup the first chunk containing the kernel static percpu area
  62 */
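/*
 * A worked example of the byte/bit/block conversions above (a sketch
 * assuming PCPU_MIN_ALLOC_SHIFT == 2 and 4K pages, so each bit covers
 * 4 bytes and each metadata block covers one page):
 *
 *	bits  = size >> PCPU_MIN_ALLOC_SHIFT;            512B     -> 128 bits
 *	index = bit_off / PCPU_BITMAP_BLOCK_BITS;        bit 3000 -> block 2
 *	off   = bit_off & (PCPU_BITMAP_BLOCK_BITS - 1);  bit 3000 -> off 952
 */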
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65
  66#include <linux/bitmap.h>
  67#include <linux/memblock.h>
  68#include <linux/err.h>
  69#include <linux/lcm.h>
  70#include <linux/list.h>
  71#include <linux/log2.h>
  72#include <linux/mm.h>
  73#include <linux/module.h>
  74#include <linux/mutex.h>
  75#include <linux/percpu.h>
  76#include <linux/pfn.h>
  77#include <linux/slab.h>
  78#include <linux/spinlock.h>
  79#include <linux/vmalloc.h>
  80#include <linux/workqueue.h>
  81#include <linux/kmemleak.h>
  82#include <linux/sched.h>
  83
  84#include <asm/cacheflush.h>
  85#include <asm/sections.h>
  86#include <asm/tlbflush.h>
  87#include <asm/io.h>
  88
  89#define CREATE_TRACE_POINTS
  90#include <trace/events/percpu.h>
  91
  92#include "percpu-internal.h"
  93
  94/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
  95#define PCPU_SLOT_BASE_SHIFT		5
  96/* chunks in slots below this are subject to being sidelined on failed alloc */
  97#define PCPU_SLOT_FAIL_THRESHOLD	3
  98
  99#define PCPU_EMPTY_POP_PAGES_LOW	2
 100#define PCPU_EMPTY_POP_PAGES_HIGH	4
 101
 102#ifdef CONFIG_SMP
 103/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 104#ifndef __addr_to_pcpu_ptr
 105#define __addr_to_pcpu_ptr(addr)					\
 106	(void __percpu *)((unsigned long)(addr) -			\
 107			  (unsigned long)pcpu_base_addr	+		\
 108			  (unsigned long)__per_cpu_start)
 109#endif
 110#ifndef __pcpu_ptr_to_addr
 111#define __pcpu_ptr_to_addr(ptr)						\
 112	(void __force *)((unsigned long)(ptr) +				\
 113			 (unsigned long)pcpu_base_addr -		\
 114			 (unsigned long)__per_cpu_start)
 115#endif
 116#else	/* CONFIG_SMP */
 117/* on UP, it's always identity mapped */
 118#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
 119#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
 120#endif	/* CONFIG_SMP */
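/*
 * Illustrative round trip (a sketch, not used by this file): the two
 * mappings above are exact inverses, so for any percpu address
 *
 *	void __percpu *p = __addr_to_pcpu_ptr(addr);
 *	void *back = __pcpu_ptr_to_addr(p);
 *
 * back == addr holds on both SMP and UP.
 */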
 121
 122static int pcpu_unit_pages __ro_after_init;
 123static int pcpu_unit_size __ro_after_init;
 124static int pcpu_nr_units __ro_after_init;
 125static int pcpu_atom_size __ro_after_init;
 126int pcpu_nr_slots __ro_after_init;
 127static size_t pcpu_chunk_struct_size __ro_after_init;
 128
 129/* cpus with the lowest and highest unit addresses */
 130static unsigned int pcpu_low_unit_cpu __ro_after_init;
 131static unsigned int pcpu_high_unit_cpu __ro_after_init;
 132
 133/* the address of the first chunk which starts with the kernel static area */
 134void *pcpu_base_addr __ro_after_init;
 135EXPORT_SYMBOL_GPL(pcpu_base_addr);
 136
 137static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
 138const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */
 139
 140/* group information, used for vm allocation */
 141static int pcpu_nr_groups __ro_after_init;
 142static const unsigned long *pcpu_group_offsets __ro_after_init;
 143static const size_t *pcpu_group_sizes __ro_after_init;
 144
 145/*
 146 * The first chunk which always exists.  Note that unlike other
 147 * chunks, this one can be allocated and mapped in several different
 148 * ways and thus often doesn't live in the vmalloc area.
 149 */
 150struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
 151
 152/*
 153 * Optional reserved chunk.  This chunk reserves part of the first
 154 * chunk and serves it for reserved allocations.  When the reserved
 155 * region doesn't exist, the following variable is NULL.
 156 */
 157struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
 158
 159DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
 160static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 161
 162struct list_head *pcpu_slot __ro_after_init; /* chunk list slots */
 163
 164/* chunks which need their map areas extended, protected by pcpu_lock */
 165static LIST_HEAD(pcpu_map_extend_chunks);
 166
 167/*
 168 * The number of empty populated pages, protected by pcpu_lock.  The
 169 * reserved chunk doesn't contribute to the count.
 170 */
 171int pcpu_nr_empty_pop_pages;
 172
 173/*
 174 * The number of populated pages in use by the allocator, protected by
 175 * pcpu_lock.  This number is kept per a unit per chunk (i.e. when a page gets
 176 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 177 * and increments/decrements this count by 1).
 178 */
 179static unsigned long pcpu_nr_populated;
 180
 181/*
 182 * Balance work is used to populate or destroy chunks asynchronously.  We
 183 * try to keep the number of populated free pages between
 184 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 185 * empty chunk.
 186 */
 187static void pcpu_balance_workfn(struct work_struct *work);
 188static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 189static bool pcpu_async_enabled __read_mostly;
 190static bool pcpu_atomic_alloc_failed;
 191
 192static void pcpu_schedule_balance_work(void)
 193{
 194	if (pcpu_async_enabled)
 195		schedule_work(&pcpu_balance_work);
 196}
 197
 198/**
 199 * pcpu_addr_in_chunk - check if the address is served from this chunk
 200 * @chunk: chunk of interest
 201 * @addr: percpu address
 202 *
 203 * RETURNS:
 204 * True if the address is served from this chunk.
 205 */
 206static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
 207{
 208	void *start_addr, *end_addr;
 209
 210	if (!chunk)
 211		return false;
 212
 213	start_addr = chunk->base_addr + chunk->start_offset;
 214	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
 215		   chunk->end_offset;
 216
 217	return addr >= start_addr && addr < end_addr;
 218}
 219
 220static int __pcpu_size_to_slot(int size)
 221{
 222	int highbit = fls(size);	/* size is in bytes */
 223	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 224}
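/*
 * Worked example (assuming PCPU_SLOT_BASE_SHIFT == 5 as defined above):
 * a free size of 1024 bytes has fls(1024) == 11 and so maps to slot
 * 11 - 5 + 2 == 8, while a tiny size such as 4 bytes is clamped by
 * max() to slot 1.
 */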
 225
 226static int pcpu_size_to_slot(int size)
 227{
 228	if (size == pcpu_unit_size)
 229		return pcpu_nr_slots - 1;
 230	return __pcpu_size_to_slot(size);
 231}
 232
 233static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 234{
 235	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 236
 237	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
 238	    chunk_md->contig_hint == 0)
 239		return 0;
 240
 241	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
 242}
 243
 244/* set the pointer to a chunk in a page struct */
 245static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 246{
 247	page->index = (unsigned long)pcpu;
 248}
 249
 250/* obtain pointer to a chunk from a page struct */
 251static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 252{
 253	return (struct pcpu_chunk *)page->index;
 254}
 255
 256static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 257{
 258	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 259}
 260
 261static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
 262{
 263	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
 264}
 265
 266static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 267				     unsigned int cpu, int page_idx)
 268{
 269	return (unsigned long)chunk->base_addr +
 270	       pcpu_unit_page_offset(cpu, page_idx);
 271}
 272
 273static void pcpu_next_unpop(unsigned long *bitmap, int *rs, int *re, int end)
 274{
 275	*rs = find_next_zero_bit(bitmap, end, *rs);
 276	*re = find_next_bit(bitmap, end, *rs + 1);
 277}
 278
 279static void pcpu_next_pop(unsigned long *bitmap, int *rs, int *re, int end)
 280{
 281	*rs = find_next_bit(bitmap, end, *rs);
 282	*re = find_next_zero_bit(bitmap, end, *rs + 1);
 283}
 284
 285/*
 286 * Bitmap region iterators.  Iterates over the bitmap between
 287 * [@start, @end) in @chunk.  @rs and @re should be integer variables
 288 * and will be set to start and end index of the current free region.
 289 */
 290#define pcpu_for_each_unpop_region(bitmap, rs, re, start, end)		     \
 291	for ((rs) = (start), pcpu_next_unpop((bitmap), &(rs), &(re), (end)); \
 292	     (rs) < (re);						     \
 293	     (rs) = (re) + 1, pcpu_next_unpop((bitmap), &(rs), &(re), (end)))
 294
 295#define pcpu_for_each_pop_region(bitmap, rs, re, start, end)		     \
 296	for ((rs) = (start), pcpu_next_pop((bitmap), &(rs), &(re), (end));   \
 297	     (rs) < (re);						     \
 298	     (rs) = (re) + 1, pcpu_next_pop((bitmap), &(rs), &(re), (end)))
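/*
 * Usage sketch (hedged; mirrors the real callers later in this file):
 * walk every unpopulated page run of a chunk:
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
 *				   chunk->nr_pages)
 *		pr_debug("unpopulated pages [%d, %d)\n", rs, re);
 */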
 299
 300/*
 301 * The following are helper functions to help access bitmaps and convert
 302 * between bitmap offsets to address offsets.
 303 */
 304static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
 305{
 306	return chunk->alloc_map +
 307	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
 308}
 309
 310static unsigned long pcpu_off_to_block_index(int off)
 311{
 312	return off / PCPU_BITMAP_BLOCK_BITS;
 313}
 314
 315static unsigned long pcpu_off_to_block_off(int off)
 316{
 317	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
 318}
 319
 320static unsigned long pcpu_block_off_to_off(int index, int off)
 321{
 322	return index * PCPU_BITMAP_BLOCK_BITS + off;
 323}
 324
 325/*
 326 * pcpu_next_hint - determine which hint to use
 327 * @block: block of interest
 328 * @alloc_bits: size of allocation
 329 *
 330 * This determines if we should scan based on the scan_hint or first_free.
 331 * In general, we want to scan from first_free to fulfill allocations by
 332 * first fit.  However, if we know a scan_hint at position scan_hint_start
 333 * cannot fulfill an allocation, we can begin scanning from there knowing
 334 * the contig_hint will be our fallback.
 335 */
 336static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
 337{
 338	/*
 339	 * The three conditions below determine if we can skip past the
 340	 * scan_hint.  First, does the scan hint exist.  Second, is the
 341	 * contig_hint after the scan_hint (possibly not true iff
 342	 * contig_hint == scan_hint).  Third, is the allocation request
 343	 * larger than the scan_hint.
 344	 */
 345	if (block->scan_hint &&
 346	    block->contig_hint_start > block->scan_hint_start &&
 347	    alloc_bits > block->scan_hint)
 348		return block->scan_hint_start + block->scan_hint;
 349
 350	return block->first_free;
 351}
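/*
 * Worked example (illustrative numbers): with scan_hint == 4 at
 * scan_hint_start == 100 and contig_hint_start == 200, an alloc_bits
 * of 8 cannot fit in the scan hint, so scanning begins at
 * 100 + 4 == 104 with the contig hint as fallback; an alloc_bits of 4
 * starts from first_free instead.
 */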
 352
 353/**
 354 * pcpu_next_md_free_region - finds the next hint free area
 355 * @chunk: chunk of interest
 356 * @bit_off: chunk offset
 357 * @bits: size of free area
 358 *
 359 * Helper function for pcpu_for_each_md_free_region.  It checks
 360 * block->contig_hint and performs aggregation across blocks to find the
 361 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 362 * loop.
 363 */
 364static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
 365				     int *bits)
 366{
 367	int i = pcpu_off_to_block_index(*bit_off);
 368	int block_off = pcpu_off_to_block_off(*bit_off);
 369	struct pcpu_block_md *block;
 370
 371	*bits = 0;
 372	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
 373	     block++, i++) {
 374		/* handles contig area across blocks */
 375		if (*bits) {
 376			*bits += block->left_free;
 377			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
 378				continue;
 379			return;
 380		}
 381
 382		/*
 383		 * This checks three things.  First is there a contig_hint to
 384		 * check.  Second, have we checked this hint before by
 385		 * comparing the block_off.  Third, is this the same as the
 386		 * right contig hint.  In the last case, it spills over into
 387		 * the next block and should be handled by the contig area
 388		 * across blocks code.
 389		 */
 390		*bits = block->contig_hint;
 391		if (*bits && block->contig_hint_start >= block_off &&
 392		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
 393			*bit_off = pcpu_block_off_to_off(i,
 394					block->contig_hint_start);
 395			return;
 396		}
 397		/* reset to satisfy the second predicate above */
 398		block_off = 0;
 399
 400		*bits = block->right_free;
 401		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
 402	}
 403}
 404
 405/**
 406 * pcpu_next_fit_region - finds fit areas for a given allocation request
 407 * @chunk: chunk of interest
 408 * @alloc_bits: size of allocation
 409 * @align: alignment of area (max PAGE_SIZE)
 410 * @bit_off: chunk offset
 411 * @bits: size of free area
 412 *
 413 * Finds the next free region that is viable for use with a given size and
 414 * alignment.  This only returns if there is a valid area to be used for this
  415 * allocation.  block->first_free is returned if the allocation request fits
  416 * within the block, so that the request can be fulfilled prior to breaking
  417 * the contig hint.
 418 */
 419static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
 420				 int align, int *bit_off, int *bits)
 421{
 422	int i = pcpu_off_to_block_index(*bit_off);
 423	int block_off = pcpu_off_to_block_off(*bit_off);
 424	struct pcpu_block_md *block;
 425
 426	*bits = 0;
 427	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
 428	     block++, i++) {
 429		/* handles contig area across blocks */
 430		if (*bits) {
 431			*bits += block->left_free;
 432			if (*bits >= alloc_bits)
 433				return;
 434			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
 435				continue;
 436		}
 437
 438		/* check block->contig_hint */
 439		*bits = ALIGN(block->contig_hint_start, align) -
 440			block->contig_hint_start;
 441		/*
 442		 * This uses the block offset to determine if this has been
 443		 * checked in the prior iteration.
 444		 */
 445		if (block->contig_hint &&
 446		    block->contig_hint_start >= block_off &&
 447		    block->contig_hint >= *bits + alloc_bits) {
 448			int start = pcpu_next_hint(block, alloc_bits);
 449
 450			*bits += alloc_bits + block->contig_hint_start -
 451				 start;
 452			*bit_off = pcpu_block_off_to_off(i, start);
 453			return;
 454		}
 455		/* reset to satisfy the second predicate above */
 456		block_off = 0;
 457
 458		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
 459				 align);
 460		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
 461		*bit_off = pcpu_block_off_to_off(i, *bit_off);
 462		if (*bits >= alloc_bits)
 463			return;
 464	}
 465
 466	/* no valid offsets were found - fail condition */
 467	*bit_off = pcpu_chunk_map_bits(chunk);
 468}
 469
 470/*
 471 * Metadata free area iterators.  These perform aggregation of free areas
 472 * based on the metadata blocks and return the offset @bit_off and size in
 473 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 474 * a fit is found for the allocation request.
 475 */
 476#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
 477	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
 478	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
 479	     (bit_off) += (bits) + 1,					\
 480	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
 481
 482#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
 483	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
 484				  &(bits));				      \
 485	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
 486	     (bit_off) += (bits),					      \
 487	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
 488				  &(bits)))
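/*
 * Usage sketch (as in pcpu_find_block_fit() below): iterate candidate
 * areas until one with populated backing pages is found:
 *
 *	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
 *		if (pcpu_is_populated(chunk, bit_off, bits, &next_off))
 *			break;
 *		bit_off = next_off;
 *		bits = 0;
 *	}
 */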
 489
 490/**
 491 * pcpu_mem_zalloc - allocate memory
 492 * @size: bytes to allocate
 493 * @gfp: allocation flags
 494 *
 495 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 496 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 497 * This is to facilitate passing through whitelisted flags.  The
 498 * returned memory is always zeroed.
 499 *
 500 * RETURNS:
 501 * Pointer to the allocated area on success, NULL on failure.
 502 */
 503static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 504{
 505	if (WARN_ON_ONCE(!slab_is_available()))
 506		return NULL;
 507
 508	if (size <= PAGE_SIZE)
 509		return kzalloc(size, gfp);
 510	else
 511		return __vmalloc(size, gfp | __GFP_ZERO, PAGE_KERNEL);
 512}
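/*
 * E.g. (illustrative): pcpu_mem_zalloc(64, GFP_KERNEL) is served by
 * kzalloc() while pcpu_mem_zalloc(2 * PAGE_SIZE, GFP_KERNEL) goes
 * through __vmalloc(); either way the memory is zeroed and later
 * released with pcpu_mem_free().
 */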
 513
 514/**
 515 * pcpu_mem_free - free memory
 516 * @ptr: memory to free
 517 *
 518 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 519 */
 520static void pcpu_mem_free(void *ptr)
 521{
 522	kvfree(ptr);
 523}
 524
 525static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
 526			      bool move_front)
 527{
 528	if (chunk != pcpu_reserved_chunk) {
 529		if (move_front)
 530			list_move(&chunk->list, &pcpu_slot[slot]);
 531		else
 532			list_move_tail(&chunk->list, &pcpu_slot[slot]);
 533	}
 534}
 535
 536static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
 537{
 538	__pcpu_chunk_move(chunk, slot, true);
 539}
 540
 541/**
 542 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 543 * @chunk: chunk of interest
 544 * @oslot: the previous slot it was on
 545 *
 546 * This function is called after an allocation or free changed @chunk.
 547 * New slot according to the changed state is determined and @chunk is
 548 * moved to the slot.  Note that the reserved chunk is never put on
 549 * chunk slots.
 550 *
 551 * CONTEXT:
 552 * pcpu_lock.
 553 */
 554static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 555{
 556	int nslot = pcpu_chunk_slot(chunk);
 557
 558	if (oslot != nslot)
 559		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
 560}
 561
 562/*
 563 * pcpu_update_empty_pages - update empty page counters
 564 * @chunk: chunk of interest
 565 * @nr: nr of empty pages
 566 *
 567 * This is used to keep track of the empty pages now based on the premise
 568 * a md_block covers a page.  The hint update functions recognize if a block
 569 * is made full or broken to calculate deltas for keeping track of free pages.
 570 */
 571static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
 572{
 573	chunk->nr_empty_pop_pages += nr;
 574	if (chunk != pcpu_reserved_chunk)
 575		pcpu_nr_empty_pop_pages += nr;
 576}
 577
 578/*
 579 * pcpu_region_overlap - determines if two regions overlap
 580 * @a: start of first region, inclusive
 581 * @b: end of first region, exclusive
 582 * @x: start of second region, inclusive
 583 * @y: end of second region, exclusive
 584 *
 585 * This is used to determine if the hint region [a, b) overlaps with the
 586 * allocated region [x, y).
 587 */
 588static inline bool pcpu_region_overlap(int a, int b, int x, int y)
 589{
 590	return (a < y) && (x < b);
 591}
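/*
 * E.g. pcpu_region_overlap(0, 4, 2, 6) is true as the regions share
 * [2, 4), while pcpu_region_overlap(0, 4, 4, 8) is false because both
 * ends are exclusive.
 */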
 592
 593/**
 594 * pcpu_block_update - updates a block given a free area
 595 * @block: block of interest
 596 * @start: start offset in block
 597 * @end: end offset in block
 598 *
 599 * Updates a block given a known free area.  The region [start, end) is
 600 * expected to be the entirety of the free area within a block.  Chooses
 601 * the best starting offset if the contig hints are equal.
 602 */
 603static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
 604{
 605	int contig = end - start;
 606
 607	block->first_free = min(block->first_free, start);
 608	if (start == 0)
 609		block->left_free = contig;
 610
 611	if (end == block->nr_bits)
 612		block->right_free = contig;
 613
 614	if (contig > block->contig_hint) {
 615		/* promote the old contig_hint to be the new scan_hint */
 616		if (start > block->contig_hint_start) {
 617			if (block->contig_hint > block->scan_hint) {
 618				block->scan_hint_start =
 619					block->contig_hint_start;
 620				block->scan_hint = block->contig_hint;
 621			} else if (start < block->scan_hint_start) {
 622				/*
 623				 * The old contig_hint == scan_hint.  But, the
 624				 * new contig is larger so hold the invariant
 625				 * scan_hint_start < contig_hint_start.
 626				 */
 627				block->scan_hint = 0;
 628			}
 629		} else {
 630			block->scan_hint = 0;
 631		}
 632		block->contig_hint_start = start;
 633		block->contig_hint = contig;
 634	} else if (contig == block->contig_hint) {
 635		if (block->contig_hint_start &&
 636		    (!start ||
 637		     __ffs(start) > __ffs(block->contig_hint_start))) {
 638			/* start has a better alignment so use it */
 639			block->contig_hint_start = start;
 640			if (start < block->scan_hint_start &&
 641			    block->contig_hint > block->scan_hint)
 642				block->scan_hint = 0;
 643		} else if (start > block->scan_hint_start ||
 644			   block->contig_hint > block->scan_hint) {
 645			/*
 646			 * Knowing contig == contig_hint, update the scan_hint
 647			 * if it is farther than or larger than the current
 648			 * scan_hint.
 649			 */
 650			block->scan_hint_start = start;
 651			block->scan_hint = contig;
 652		}
 653	} else {
 654		/*
 655		 * The region is smaller than the contig_hint.  So only update
 656		 * the scan_hint if it is larger than or equal and farther than
 657		 * the current scan_hint.
 658		 */
 659		if ((start < block->contig_hint_start &&
 660		     (contig > block->scan_hint ||
 661		      (contig == block->scan_hint &&
 662		       start > block->scan_hint_start)))) {
 663			block->scan_hint_start = start;
 664			block->scan_hint = contig;
 665		}
 666	}
 667}
 668
 669/*
 670 * pcpu_block_update_scan - update a block given a free area from a scan
 671 * @chunk: chunk of interest
 672 * @bit_off: chunk offset
 673 * @bits: size of free area
 674 *
 675 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 676 * to find a block that can hold the allocation and then pcpu_alloc_area()
 677 * where a scan is used.  When allocations require specific alignments,
 678 * we can inadvertently create holes which will not be seen in the alloc
 679 * or free paths.
 680 *
 681 * This takes a given free area hole and updates a block as it may change the
 682 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 683 * from alignment.
 684 */
 685static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
 686				   int bits)
 687{
 688	int s_off = pcpu_off_to_block_off(bit_off);
 689	int e_off = s_off + bits;
 690	int s_index, l_bit;
 691	struct pcpu_block_md *block;
 692
 693	if (e_off > PCPU_BITMAP_BLOCK_BITS)
 694		return;
 695
 696	s_index = pcpu_off_to_block_index(bit_off);
 697	block = chunk->md_blocks + s_index;
 698
 699	/* scan backwards in case of alignment skipping free bits */
 700	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
 701	s_off = (s_off == l_bit) ? 0 : l_bit + 1;
 702
 703	pcpu_block_update(block, s_off, e_off);
 704}
 705
 706/**
 707 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 708 * @chunk: chunk of interest
 709 * @full_scan: if we should scan from the beginning
 710 *
 711 * Iterates over the metadata blocks to find the largest contig area.
 712 * A full scan can be avoided on the allocation path as this is triggered
 713 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 714 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 715 * be prevented on freeing as we want to find the largest area possibly
 716 * spanning blocks.
 717 */
 718static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
 719{
 720	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 721	int bit_off, bits;
 722
 723	/* promote scan_hint to contig_hint */
 724	if (!full_scan && chunk_md->scan_hint) {
 725		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
 726		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
 727		chunk_md->contig_hint = chunk_md->scan_hint;
 728		chunk_md->scan_hint = 0;
 729	} else {
 730		bit_off = chunk_md->first_free;
 731		chunk_md->contig_hint = 0;
 732	}
 733
 734	bits = 0;
 735	pcpu_for_each_md_free_region(chunk, bit_off, bits) {
 736		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
 737	}
 738}
 739
 740/**
  741 * pcpu_block_refresh_hint - refreshes a block's metadata from its alloc map
 742 * @chunk: chunk of interest
 743 * @index: index of the metadata block
 744 *
 745 * Scans over the block beginning at first_free and updates the block
 746 * metadata accordingly.
 747 */
 748static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
 749{
 750	struct pcpu_block_md *block = chunk->md_blocks + index;
 751	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
 752	int rs, re, start;	/* region start, region end */
 753
 754	/* promote scan_hint to contig_hint */
 755	if (block->scan_hint) {
 756		start = block->scan_hint_start + block->scan_hint;
 757		block->contig_hint_start = block->scan_hint_start;
 758		block->contig_hint = block->scan_hint;
 759		block->scan_hint = 0;
 760	} else {
 761		start = block->first_free;
 762		block->contig_hint = 0;
 763	}
 764
 765	block->right_free = 0;
 766
 767	/* iterate over free areas and update the contig hints */
 768	pcpu_for_each_unpop_region(alloc_map, rs, re, start,
 769				   PCPU_BITMAP_BLOCK_BITS) {
 770		pcpu_block_update(block, rs, re);
 771	}
 772}
 773
 774/**
 775 * pcpu_block_update_hint_alloc - update hint on allocation path
 776 * @chunk: chunk of interest
 777 * @bit_off: chunk offset
 778 * @bits: size of request
 779 *
 780 * Updates metadata for the allocation path.  The metadata only has to be
 781 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 782 * scans are required if the block's contig hint is broken.
 783 */
 784static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
 785					 int bits)
 786{
 787	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 788	int nr_empty_pages = 0;
 789	struct pcpu_block_md *s_block, *e_block, *block;
 790	int s_index, e_index;	/* block indexes of the freed allocation */
 791	int s_off, e_off;	/* block offsets of the freed allocation */
 792
 793	/*
 794	 * Calculate per block offsets.
 795	 * The calculation uses an inclusive range, but the resulting offsets
 796	 * are [start, end).  e_index always points to the last block in the
 797	 * range.
 798	 */
 799	s_index = pcpu_off_to_block_index(bit_off);
 800	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
 801	s_off = pcpu_off_to_block_off(bit_off);
 802	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
 803
 804	s_block = chunk->md_blocks + s_index;
 805	e_block = chunk->md_blocks + e_index;
 806
 807	/*
 808	 * Update s_block.
 809	 * block->first_free must be updated if the allocation takes its place.
 810	 * If the allocation breaks the contig_hint, a scan is required to
 811	 * restore this hint.
 812	 */
 813	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
 814		nr_empty_pages++;
 815
 816	if (s_off == s_block->first_free)
 817		s_block->first_free = find_next_zero_bit(
 818					pcpu_index_alloc_map(chunk, s_index),
 819					PCPU_BITMAP_BLOCK_BITS,
 820					s_off + bits);
 821
 822	if (pcpu_region_overlap(s_block->scan_hint_start,
 823				s_block->scan_hint_start + s_block->scan_hint,
 824				s_off,
 825				s_off + bits))
 826		s_block->scan_hint = 0;
 827
 828	if (pcpu_region_overlap(s_block->contig_hint_start,
 829				s_block->contig_hint_start +
 830				s_block->contig_hint,
 831				s_off,
 832				s_off + bits)) {
 833		/* block contig hint is broken - scan to fix it */
 834		if (!s_off)
 835			s_block->left_free = 0;
 836		pcpu_block_refresh_hint(chunk, s_index);
 837	} else {
 838		/* update left and right contig manually */
 839		s_block->left_free = min(s_block->left_free, s_off);
 840		if (s_index == e_index)
 841			s_block->right_free = min_t(int, s_block->right_free,
 842					PCPU_BITMAP_BLOCK_BITS - e_off);
 843		else
 844			s_block->right_free = 0;
 845	}
 846
 847	/*
 848	 * Update e_block.
 849	 */
 850	if (s_index != e_index) {
 851		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
 852			nr_empty_pages++;
 853
 854		/*
 855		 * When the allocation is across blocks, the end is along
 856		 * the left part of the e_block.
 857		 */
 858		e_block->first_free = find_next_zero_bit(
 859				pcpu_index_alloc_map(chunk, e_index),
 860				PCPU_BITMAP_BLOCK_BITS, e_off);
 861
 862		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
 863			/* reset the block */
 864			e_block++;
 865		} else {
 866			if (e_off > e_block->scan_hint_start)
 867				e_block->scan_hint = 0;
 868
 869			e_block->left_free = 0;
 870			if (e_off > e_block->contig_hint_start) {
 871				/* contig hint is broken - scan to fix it */
 872				pcpu_block_refresh_hint(chunk, e_index);
 873			} else {
 874				e_block->right_free =
 875					min_t(int, e_block->right_free,
 876					      PCPU_BITMAP_BLOCK_BITS - e_off);
 877			}
 878		}
 879
 880		/* update in-between md_blocks */
 881		nr_empty_pages += (e_index - s_index - 1);
 882		for (block = s_block + 1; block < e_block; block++) {
 883			block->scan_hint = 0;
 884			block->contig_hint = 0;
 885			block->left_free = 0;
 886			block->right_free = 0;
 887		}
 888	}
 889
 890	if (nr_empty_pages)
 891		pcpu_update_empty_pages(chunk, -nr_empty_pages);
 892
 893	if (pcpu_region_overlap(chunk_md->scan_hint_start,
 894				chunk_md->scan_hint_start +
 895				chunk_md->scan_hint,
 896				bit_off,
 897				bit_off + bits))
 898		chunk_md->scan_hint = 0;
 899
 900	/*
 901	 * The only time a full chunk scan is required is if the chunk
 902	 * contig hint is broken.  Otherwise, it means a smaller space
 903	 * was used and therefore the chunk contig hint is still correct.
 904	 */
 905	if (pcpu_region_overlap(chunk_md->contig_hint_start,
 906				chunk_md->contig_hint_start +
 907				chunk_md->contig_hint,
 908				bit_off,
 909				bit_off + bits))
 910		pcpu_chunk_refresh_hint(chunk, false);
 911}
 912
 913/**
 914 * pcpu_block_update_hint_free - updates the block hints on the free path
 915 * @chunk: chunk of interest
 916 * @bit_off: chunk offset
 917 * @bits: size of request
 918 *
  919 * Updates metadata for the free path.  This avoids a blind block
 920 * refresh by making use of the block contig hints.  If this fails, it scans
 921 * forward and backward to determine the extent of the free area.  This is
 922 * capped at the boundary of blocks.
 923 *
 924 * A chunk update is triggered if a page becomes free, a block becomes free,
 925 * or the free spans across blocks.  This tradeoff is to minimize iterating
 926 * over the block metadata to update chunk_md->contig_hint.
 927 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 928 * than the available space.  If the contig hint is contained in one block, it
 929 * will be accurate.
 930 */
 931static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
 932					int bits)
 933{
 934	int nr_empty_pages = 0;
 935	struct pcpu_block_md *s_block, *e_block, *block;
 936	int s_index, e_index;	/* block indexes of the freed allocation */
 937	int s_off, e_off;	/* block offsets of the freed allocation */
 938	int start, end;		/* start and end of the whole free area */
 939
 940	/*
 941	 * Calculate per block offsets.
 942	 * The calculation uses an inclusive range, but the resulting offsets
 943	 * are [start, end).  e_index always points to the last block in the
 944	 * range.
 945	 */
 946	s_index = pcpu_off_to_block_index(bit_off);
 947	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
 948	s_off = pcpu_off_to_block_off(bit_off);
 949	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
 950
 951	s_block = chunk->md_blocks + s_index;
 952	e_block = chunk->md_blocks + e_index;
 953
 954	/*
 955	 * Check if the freed area aligns with the block->contig_hint.
 956	 * If it does, then the scan to find the beginning/end of the
 957	 * larger free area can be avoided.
 958	 *
 959	 * start and end refer to beginning and end of the free area
 960	 * within each their respective blocks.  This is not necessarily
 961	 * the entire free area as it may span blocks past the beginning
 962	 * or end of the block.
 963	 */
 964	start = s_off;
 965	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
 966		start = s_block->contig_hint_start;
 967	} else {
 968		/*
 969		 * Scan backwards to find the extent of the free area.
 970		 * find_last_bit returns the starting bit, so if the start bit
 971		 * is returned, that means there was no last bit and the
 972		 * remainder of the chunk is free.
 973		 */
 974		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
 975					  start);
 976		start = (start == l_bit) ? 0 : l_bit + 1;
 977	}
 978
 979	end = e_off;
 980	if (e_off == e_block->contig_hint_start)
 981		end = e_block->contig_hint_start + e_block->contig_hint;
 982	else
 983		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
 984				    PCPU_BITMAP_BLOCK_BITS, end);
 985
 986	/* update s_block */
 987	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
 988	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
 989		nr_empty_pages++;
 990	pcpu_block_update(s_block, start, e_off);
 991
  992	/* freeing across blocks */
 993	if (s_index != e_index) {
 994		/* update e_block */
 995		if (end == PCPU_BITMAP_BLOCK_BITS)
 996			nr_empty_pages++;
 997		pcpu_block_update(e_block, 0, end);
 998
 999		/* reset md_blocks in the middle */
1000		nr_empty_pages += (e_index - s_index - 1);
1001		for (block = s_block + 1; block < e_block; block++) {
1002			block->first_free = 0;
1003			block->scan_hint = 0;
1004			block->contig_hint_start = 0;
1005			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1006			block->left_free = PCPU_BITMAP_BLOCK_BITS;
1007			block->right_free = PCPU_BITMAP_BLOCK_BITS;
1008		}
1009	}
1010
1011	if (nr_empty_pages)
1012		pcpu_update_empty_pages(chunk, nr_empty_pages);
1013
1014	/*
1015	 * Refresh chunk metadata when the free makes a block free or spans
1016	 * across blocks.  The contig_hint may be off by up to a page, but if
1017	 * the contig_hint is contained in a block, it will be accurate with
1018	 * the else condition below.
1019	 */
1020	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1021		pcpu_chunk_refresh_hint(chunk, true);
1022	else
1023		pcpu_block_update(&chunk->chunk_md,
1024				  pcpu_block_off_to_off(s_index, start),
1025				  end);
1026}
1027
1028/**
1029 * pcpu_is_populated - determines if the region is populated
1030 * @chunk: chunk of interest
1031 * @bit_off: chunk offset
1032 * @bits: size of area
1033 * @next_off: return value for the next offset to start searching
1034 *
1035 * For atomic allocations, check if the backing pages are populated.
1036 *
1037 * RETURNS:
 1038 * true if the backing pages are populated, false otherwise.
 1039 * @next_off is set to skip over unpopulated regions in pcpu_find_block_fit().
1040 */
1041static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1042			      int *next_off)
1043{
1044	int page_start, page_end, rs, re;
1045
1046	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1047	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
1048
1049	rs = page_start;
1050	pcpu_next_unpop(chunk->populated, &rs, &re, page_end);
1051	if (rs >= page_end)
1052		return true;
1053
1054	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
1055	return false;
1056}
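/*
 * Worked example (assuming 4-byte allocation units and 4K pages): an
 * area at bit_off == 3000 with bits == 100 covers bytes
 * [12000, 12400), i.e. pages [2, 4), so pages 2 and 3 must both be
 * populated for an atomic allocation to use it.
 */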
1057
1058/**
1059 * pcpu_find_block_fit - finds the block index to start searching
1060 * @chunk: chunk of interest
1061 * @alloc_bits: size of request in allocation units
1062 * @align: alignment of area (max PAGE_SIZE bytes)
1063 * @pop_only: use populated regions only
1064 *
1065 * Given a chunk and an allocation spec, find the offset to begin searching
1066 * for a free region.  This iterates over the bitmap metadata blocks to
1067 * find an offset that will be guaranteed to fit the requirements.  It is
1068 * not quite first fit as if the allocation does not fit in the contig hint
1069 * of a block or chunk, it is skipped.  This errs on the side of caution
1070 * to prevent excess iteration.  Poor alignment can cause the allocator to
1071 * skip over blocks and chunks that have valid free areas.
1072 *
1073 * RETURNS:
1074 * The offset in the bitmap to begin searching.
1075 * -1 if no offset is found.
1076 */
1077static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1078			       size_t align, bool pop_only)
1079{
1080	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1081	int bit_off, bits, next_off;
1082
1083	/*
1084	 * Check to see if the allocation can fit in the chunk's contig hint.
1085	 * This is an optimization to prevent scanning by assuming if it
1086	 * cannot fit in the global hint, there is memory pressure and creating
1087	 * a new chunk would happen soon.
1088	 */
1089	bit_off = ALIGN(chunk_md->contig_hint_start, align) -
1090		  chunk_md->contig_hint_start;
1091	if (bit_off + alloc_bits > chunk_md->contig_hint)
1092		return -1;
1093
1094	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1095	bits = 0;
1096	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1097		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1098						   &next_off))
1099			break;
1100
1101		bit_off = next_off;
1102		bits = 0;
1103	}
1104
1105	if (bit_off == pcpu_chunk_map_bits(chunk))
1106		return -1;
1107
1108	return bit_off;
1109}
1110
1111/*
1112 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1113 * @map: the address to base the search on
1114 * @size: the bitmap size in bits
1115 * @start: the bitnumber to start searching at
1116 * @nr: the number of zeroed bits we're looking for
1117 * @align_mask: alignment mask for zero area
1118 * @largest_off: offset of the largest area skipped
1119 * @largest_bits: size of the largest area skipped
1120 *
1121 * The @align_mask should be one less than a power of 2.
1122 *
1123 * This is a modified version of bitmap_find_next_zero_area_off() to remember
1124 * the largest area that was skipped.  This is imperfect, but in general is
1125 * good enough.  The largest remembered region is the largest failed region
1126 * seen.  This does not include anything we possibly skipped due to alignment.
1127 * pcpu_block_update_scan() does scan backwards to try and recover what was
1128 * lost to alignment.  While this can cause scanning to miss earlier possible
1129 * free areas, smaller allocations will eventually fill those holes.
1130 */
1131static unsigned long pcpu_find_zero_area(unsigned long *map,
1132					 unsigned long size,
1133					 unsigned long start,
1134					 unsigned long nr,
1135					 unsigned long align_mask,
1136					 unsigned long *largest_off,
1137					 unsigned long *largest_bits)
1138{
1139	unsigned long index, end, i, area_off, area_bits;
1140again:
1141	index = find_next_zero_bit(map, size, start);
1142
1143	/* Align allocation */
1144	index = __ALIGN_MASK(index, align_mask);
1145	area_off = index;
1146
1147	end = index + nr;
1148	if (end > size)
1149		return end;
1150	i = find_next_bit(map, end, index);
1151	if (i < end) {
1152		area_bits = i - area_off;
1153		/* remember largest unused area with best alignment */
1154		if (area_bits > *largest_bits ||
1155		    (area_bits == *largest_bits && *largest_off &&
1156		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1157			*largest_off = area_off;
1158			*largest_bits = area_bits;
1159		}
1160
1161		start = i + 1;
1162		goto again;
1163	}
1164	return index;
1165}
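/*
 * Worked example (illustrative, with no alignment constraint): on a map
 * with only bits 1 and 2 set, a search for nr == 2 zero bits from
 * start == 0 skips the lone free bit 0, remembers it as the largest
 * skipped area (off 0, 1 bit), and returns index 3.
 */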
1166
1167/**
1168 * pcpu_alloc_area - allocates an area from a pcpu_chunk
1169 * @chunk: chunk of interest
1170 * @alloc_bits: size of request in allocation units
1171 * @align: alignment of area (max PAGE_SIZE)
1172 * @start: bit_off to start searching
1173 *
1174 * This function takes in a @start offset to begin searching to fit an
1175 * allocation of @alloc_bits with alignment @align.  It needs to scan
1176 * the allocation map because if it fits within the block's contig hint,
1177 * @start will be block->first_free. This is an attempt to fill the
1178 * allocation prior to breaking the contig hint.  The allocation and
1179 * boundary maps are updated accordingly if it confirms a valid
1180 * free area.
1181 *
1182 * RETURNS:
1183 * Allocated addr offset in @chunk on success.
1184 * -1 if no matching area is found.
1185 */
1186static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1187			   size_t align, int start)
1188{
1189	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1190	size_t align_mask = (align) ? (align - 1) : 0;
1191	unsigned long area_off = 0, area_bits = 0;
1192	int bit_off, end, oslot;
1193
1194	lockdep_assert_held(&pcpu_lock);
1195
1196	oslot = pcpu_chunk_slot(chunk);
1197
1198	/*
1199	 * Search to find a fit.
1200	 */
1201	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1202		    pcpu_chunk_map_bits(chunk));
1203	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1204				      align_mask, &area_off, &area_bits);
1205	if (bit_off >= end)
1206		return -1;
1207
1208	if (area_bits)
1209		pcpu_block_update_scan(chunk, area_off, area_bits);
1210
1211	/* update alloc map */
1212	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1213
1214	/* update boundary map */
1215	set_bit(bit_off, chunk->bound_map);
1216	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1217	set_bit(bit_off + alloc_bits, chunk->bound_map);
1218
1219	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1220
1221	/* update first free bit */
1222	if (bit_off == chunk_md->first_free)
1223		chunk_md->first_free = find_next_zero_bit(
1224					chunk->alloc_map,
1225					pcpu_chunk_map_bits(chunk),
1226					bit_off + alloc_bits);
1227
1228	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1229
1230	pcpu_chunk_relocate(chunk, oslot);
1231
1232	return bit_off * PCPU_MIN_ALLOC_SIZE;
1233}
1234
1235/**
1236 * pcpu_free_area - frees the corresponding offset
1237 * @chunk: chunk of interest
1238 * @off: addr offset into chunk
1239 *
1240 * This function determines the size of an allocation to free using
1241 * the boundary bitmap and clears the allocation map.
1242 */
1243static void pcpu_free_area(struct pcpu_chunk *chunk, int off)
1244{
1245	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1246	int bit_off, bits, end, oslot;
1247
1248	lockdep_assert_held(&pcpu_lock);
1249	pcpu_stats_area_dealloc(chunk);
1250
1251	oslot = pcpu_chunk_slot(chunk);
1252
1253	bit_off = off / PCPU_MIN_ALLOC_SIZE;
1254
1255	/* find end index */
1256	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1257			    bit_off + 1);
1258	bits = end - bit_off;
1259	bitmap_clear(chunk->alloc_map, bit_off, bits);
1260
1261	/* update metadata */
1262	chunk->free_bytes += bits * PCPU_MIN_ALLOC_SIZE;
1263
1264	/* update first free bit */
1265	chunk_md->first_free = min(chunk_md->first_free, bit_off);
1266
1267	pcpu_block_update_hint_free(chunk, bit_off, bits);
1268
1269	pcpu_chunk_relocate(chunk, oslot);
1270}
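/*
 * Worked example of the boundary map (illustrative): allocating 3 bits
 * at bit 8 sets bound_map bits 8 and 11 and clears bits 9-10.  Freeing
 * offset 8 * PCPU_MIN_ALLOC_SIZE then finds the next bound bit at 11,
 * so bits == 11 - 8 == 3 bits are cleared from the alloc map.
 */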
1271
1272static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1273{
1274	block->scan_hint = 0;
1275	block->contig_hint = nr_bits;
1276	block->left_free = nr_bits;
1277	block->right_free = nr_bits;
1278	block->first_free = 0;
1279	block->nr_bits = nr_bits;
1280}
1281
1282static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1283{
1284	struct pcpu_block_md *md_block;
1285
1286	/* init the chunk's block */
1287	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1288
1289	for (md_block = chunk->md_blocks;
1290	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1291	     md_block++)
1292		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1293}
1294
1295/**
1296 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1297 * @tmp_addr: the start of the region served
1298 * @map_size: size of the region served
1299 *
1300 * This is responsible for creating the chunks that serve the first chunk.  The
 1301 * base_addr is @tmp_addr rounded down to a page boundary, while the region
 1302 * end is rounded up.  Offsets are tracked to determine the region served.
 1303 * All this is done to appease the bitmap allocator by avoiding partial blocks.
1304 *
1305 * RETURNS:
1306 * Chunk serving the region at @tmp_addr of @map_size.
1307 */
1308static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1309							 int map_size)
1310{
1311	struct pcpu_chunk *chunk;
1312	unsigned long aligned_addr, lcm_align;
1313	int start_offset, offset_bits, region_size, region_bits;
1314	size_t alloc_size;
1315
1316	/* region calculations */
1317	aligned_addr = tmp_addr & PAGE_MASK;
1318
1319	start_offset = tmp_addr - aligned_addr;
1320
1321	/*
1322	 * Align the end of the region with the LCM of PAGE_SIZE and
1323	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
1324	 * the other.
1325	 */
1326	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1327	region_size = ALIGN(start_offset + map_size, lcm_align);
1328
1329	/* allocate chunk */
1330	alloc_size = sizeof(struct pcpu_chunk) +
 1331		BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long);
1332	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1333	if (!chunk)
1334		panic("%s: Failed to allocate %zu bytes\n", __func__,
1335		      alloc_size);
1336
1337	INIT_LIST_HEAD(&chunk->list);
1338
1339	chunk->base_addr = (void *)aligned_addr;
1340	chunk->start_offset = start_offset;
1341	chunk->end_offset = region_size - chunk->start_offset - map_size;
1342
1343	chunk->nr_pages = region_size >> PAGE_SHIFT;
1344	region_bits = pcpu_chunk_map_bits(chunk);
1345
1346	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1347	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1348	if (!chunk->alloc_map)
1349		panic("%s: Failed to allocate %zu bytes\n", __func__,
1350		      alloc_size);
1351
1352	alloc_size =
1353		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1354	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1355	if (!chunk->bound_map)
1356		panic("%s: Failed to allocate %zu bytes\n", __func__,
1357		      alloc_size);
1358
1359	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1360	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1361	if (!chunk->md_blocks)
1362		panic("%s: Failed to allocate %zu bytes\n", __func__,
1363		      alloc_size);
1364
1365	pcpu_init_md_blocks(chunk);
1366
1367	/* manage populated page bitmap */
1368	chunk->immutable = true;
1369	bitmap_fill(chunk->populated, chunk->nr_pages);
1370	chunk->nr_populated = chunk->nr_pages;
1371	chunk->nr_empty_pop_pages = chunk->nr_pages;
1372
1373	chunk->free_bytes = map_size;
1374
1375	if (chunk->start_offset) {
1376		/* hide the beginning of the bitmap */
1377		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1378		bitmap_set(chunk->alloc_map, 0, offset_bits);
1379		set_bit(0, chunk->bound_map);
1380		set_bit(offset_bits, chunk->bound_map);
1381
1382		chunk->chunk_md.first_free = offset_bits;
1383
1384		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1385	}
1386
1387	if (chunk->end_offset) {
1388		/* hide the end of the bitmap */
1389		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1390		bitmap_set(chunk->alloc_map,
1391			   pcpu_chunk_map_bits(chunk) - offset_bits,
1392			   offset_bits);
1393		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1394			chunk->bound_map);
1395		set_bit(region_bits, chunk->bound_map);
1396
1397		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1398					     - offset_bits, offset_bits);
1399	}
1400
1401	return chunk;
1402}
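/*
 * Worked example (assuming 4K pages and PCPU_BITMAP_BLOCK_SIZE ==
 * PAGE_SIZE): tmp_addr == base + 0x1100 with map_size == 0x2000 yields
 * aligned_addr == base + 0x1000, start_offset == 0x100, region_size ==
 * ALIGN(0x2100, 0x1000) == 0x3000 and end_offset == 0xf00; the chunk
 * manages three pages with the first 0x100 and last 0xf00 bytes hidden
 * from the allocator.
 */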
1403
1404static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1405{
1406	struct pcpu_chunk *chunk;
1407	int region_bits;
1408
1409	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1410	if (!chunk)
1411		return NULL;
1412
1413	INIT_LIST_HEAD(&chunk->list);
1414	chunk->nr_pages = pcpu_unit_pages;
1415	region_bits = pcpu_chunk_map_bits(chunk);
1416
1417	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1418					   sizeof(chunk->alloc_map[0]), gfp);
1419	if (!chunk->alloc_map)
1420		goto alloc_map_fail;
1421
1422	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1423					   sizeof(chunk->bound_map[0]), gfp);
1424	if (!chunk->bound_map)
1425		goto bound_map_fail;
1426
1427	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1428					   sizeof(chunk->md_blocks[0]), gfp);
1429	if (!chunk->md_blocks)
1430		goto md_blocks_fail;
1431
1432	pcpu_init_md_blocks(chunk);
1433
1434	/* init metadata */
1435	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1436
1437	return chunk;
1438
1439md_blocks_fail:
1440	pcpu_mem_free(chunk->bound_map);
1441bound_map_fail:
1442	pcpu_mem_free(chunk->alloc_map);
1443alloc_map_fail:
1444	pcpu_mem_free(chunk);
1445
1446	return NULL;
1447}
1448
1449static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1450{
1451	if (!chunk)
1452		return;
1453	pcpu_mem_free(chunk->md_blocks);
1454	pcpu_mem_free(chunk->bound_map);
1455	pcpu_mem_free(chunk->alloc_map);
1456	pcpu_mem_free(chunk);
1457}
1458
1459/**
1460 * pcpu_chunk_populated - post-population bookkeeping
1461 * @chunk: pcpu_chunk which got populated
1462 * @page_start: the start page
1463 * @page_end: the end page
1464 *
1465 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1466 * the bookkeeping information accordingly.  Must be called after each
1467 * successful population.
1468 *
 1469 * The chunk's and the global empty populated page counts are updated via
 1470 * pcpu_update_empty_pages().
1471 */
1472static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1473				 int page_end)
1474{
1475	int nr = page_end - page_start;
1476
1477	lockdep_assert_held(&pcpu_lock);
1478
1479	bitmap_set(chunk->populated, page_start, nr);
1480	chunk->nr_populated += nr;
1481	pcpu_nr_populated += nr;
1482
1483	pcpu_update_empty_pages(chunk, nr);
1484}
1485
1486/**
1487 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1488 * @chunk: pcpu_chunk which got depopulated
1489 * @page_start: the start page
1490 * @page_end: the end page
1491 *
1492 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1493 * Update the bookkeeping information accordingly.  Must be called after
1494 * each successful depopulation.
1495 */
1496static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1497				   int page_start, int page_end)
1498{
1499	int nr = page_end - page_start;
1500
1501	lockdep_assert_held(&pcpu_lock);
1502
1503	bitmap_clear(chunk->populated, page_start, nr);
1504	chunk->nr_populated -= nr;
1505	pcpu_nr_populated -= nr;
1506
1507	pcpu_update_empty_pages(chunk, -nr);
1508}
1509
1510/*
1511 * Chunk management implementation.
1512 *
1513 * To allow different implementations, chunk alloc/free and
1514 * [de]population are implemented in a separate file which is pulled
1515 * into this file and compiled together.  The following functions
1516 * should be implemented.
1517 *
1518 * pcpu_populate_chunk		- populate the specified range of a chunk
1519 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
1520 * pcpu_create_chunk		- create a new chunk
1521 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 1522 * pcpu_addr_to_page		- translate address to the corresponding struct page
1523 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
1524 */
1525static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1526			       int page_start, int page_end, gfp_t gfp);
1527static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1528				  int page_start, int page_end);
1529static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1530static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1531static struct page *pcpu_addr_to_page(void *addr);
1532static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1533
1534#ifdef CONFIG_NEED_PER_CPU_KM
1535#include "percpu-km.c"
1536#else
1537#include "percpu-vm.c"
1538#endif
1539
1540/**
1541 * pcpu_chunk_addr_search - determine chunk containing specified address
1542 * @addr: address for which the chunk needs to be determined.
1543 *
1544 * This is an internal function that handles all but static allocations.
1545 * Static percpu address values should never be passed into the allocator.
1546 *
1547 * RETURNS:
1548 * The address of the found chunk.
1549 */
1550static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1551{
1552	/* is it in the dynamic region (first chunk)? */
1553	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1554		return pcpu_first_chunk;
1555
1556	/* is it in the reserved region? */
1557	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1558		return pcpu_reserved_chunk;
1559
1560	/*
1561	 * The address is relative to unit0 which might be unused and
1562	 * thus unmapped.  Offset the address to the unit space of the
1563	 * current processor before looking it up in the vmalloc
1564	 * space.  Note that any possible cpu id can be used here, so
1565	 * there's no need to worry about preemption or cpu hotplug.
1566	 */
1567	addr += pcpu_unit_offsets[raw_smp_processor_id()];
1568	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1569}
1570
1571/**
1572 * pcpu_alloc - the percpu allocator
1573 * @size: size of area to allocate in bytes
1574 * @align: alignment of area (max PAGE_SIZE)
1575 * @reserved: allocate from the reserved chunk if available
1576 * @gfp: allocation flags
1577 *
1578 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
1579 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1580 * then no warning will be triggered on invalid or failed allocation
1581 * requests.
1582 *
1583 * RETURNS:
1584 * Percpu pointer to the allocated area on success, NULL on failure.
1585 */
1586static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1587				 gfp_t gfp)
1588{
1589	/* whitelisted flags that can be passed to the backing allocators */
1590	gfp_t pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1591	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1592	bool do_warn = !(gfp & __GFP_NOWARN);
1593	static int warn_limit = 10;
1594	struct pcpu_chunk *chunk, *next;
1595	const char *err;
1596	int slot, off, cpu, ret;
1597	unsigned long flags;
1598	void __percpu *ptr;
1599	size_t bits, bit_align;
1600
1601	/*
1602	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1603	 * therefore alignment must be a minimum of that many bytes.
1604	 * An allocation may have internal fragmentation of up to
1605	 * PCPU_MIN_ALLOC_SIZE - 1 bytes from rounding up.
1606	 */
1607	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1608		align = PCPU_MIN_ALLOC_SIZE;
1609
1610	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1611	bits = size >> PCPU_MIN_ALLOC_SHIFT;
1612	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
1613
1614	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1615		     !is_power_of_2(align))) {
1616		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1617		     size, align);
1618		return NULL;
1619	}
1620
1621	if (!is_atomic) {
1622		/*
1623		 * pcpu_balance_workfn() allocates memory under this mutex,
1624		 * and it may wait for memory reclaim. Allow current task
1625		 * to become OOM victim, in case of memory pressure.
1626		 */
1627		if (gfp & __GFP_NOFAIL)
1628			mutex_lock(&pcpu_alloc_mutex);
1629		else if (mutex_lock_killable(&pcpu_alloc_mutex))
1630			return NULL;
1631	}
1632
1633	spin_lock_irqsave(&pcpu_lock, flags);
1634
1635	/* serve reserved allocations from the reserved chunk if available */
1636	if (reserved && pcpu_reserved_chunk) {
1637		chunk = pcpu_reserved_chunk;
1638
1639		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1640		if (off < 0) {
1641			err = "alloc from reserved chunk failed";
1642			goto fail_unlock;
1643		}
1644
1645		off = pcpu_alloc_area(chunk, bits, bit_align, off);
1646		if (off >= 0)
1647			goto area_found;
1648
1649		err = "alloc from reserved chunk failed";
1650		goto fail_unlock;
1651	}
1652
1653restart:
1654	/* search through normal chunks */
1655	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1656		list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
1657			off = pcpu_find_block_fit(chunk, bits, bit_align,
1658						  is_atomic);
1659			if (off < 0) {
1660				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1661					pcpu_chunk_move(chunk, 0);
1662				continue;
1663			}
1664
1665			off = pcpu_alloc_area(chunk, bits, bit_align, off);
1666			if (off >= 0)
1667				goto area_found;
1668
1669		}
1670	}
1671
1672	spin_unlock_irqrestore(&pcpu_lock, flags);
1673
1674	/*
1675	 * No space left.  Create a new chunk.  We don't want multiple
1676	 * tasks to create chunks simultaneously.  Serialize and create iff
1677	 * there's still no empty chunk after grabbing the mutex.
1678	 */
1679	if (is_atomic) {
1680		err = "atomic alloc failed, no space left";
1681		goto fail;
1682	}
1683
1684	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
1685		chunk = pcpu_create_chunk(pcpu_gfp);
1686		if (!chunk) {
1687			err = "failed to allocate new chunk";
1688			goto fail;
1689		}
1690
1691		spin_lock_irqsave(&pcpu_lock, flags);
1692		pcpu_chunk_relocate(chunk, -1);
1693	} else {
1694		spin_lock_irqsave(&pcpu_lock, flags);
1695	}
1696
1697	goto restart;
1698
1699area_found:
1700	pcpu_stats_area_alloc(chunk, size);
1701	spin_unlock_irqrestore(&pcpu_lock, flags);
1702
1703	/* populate if not all pages are already there */
1704	if (!is_atomic) {
1705		int page_start, page_end, rs, re;
1706
1707		page_start = PFN_DOWN(off);
1708		page_end = PFN_UP(off + size);
1709
1710		pcpu_for_each_unpop_region(chunk->populated, rs, re,
1711					   page_start, page_end) {
1712			WARN_ON(chunk->immutable);
1713
1714			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1715
1716			spin_lock_irqsave(&pcpu_lock, flags);
1717			if (ret) {
1718				pcpu_free_area(chunk, off);
1719				err = "failed to populate";
1720				goto fail_unlock;
1721			}
1722			pcpu_chunk_populated(chunk, rs, re);
1723			spin_unlock_irqrestore(&pcpu_lock, flags);
1724		}
1725
1726		mutex_unlock(&pcpu_alloc_mutex);
1727	}
1728
1729	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1730		pcpu_schedule_balance_work();
1731
1732	/* clear the areas and return address relative to base address */
1733	for_each_possible_cpu(cpu)
1734		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1735
1736	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1737	kmemleak_alloc_percpu(ptr, size, gfp);
1738
1739	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1740			chunk->base_addr, off, ptr);
1741
1742	return ptr;
1743
1744fail_unlock:
1745	spin_unlock_irqrestore(&pcpu_lock, flags);
1746fail:
1747	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1748
1749	if (!is_atomic && do_warn && warn_limit) {
1750		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1751			size, align, is_atomic, err);
1752		dump_stack();
1753		if (!--warn_limit)
1754			pr_info("limit reached, disabling warnings\n");
1755	}
1756	if (is_atomic) {
1757		/* see the flag handling in pcpu_balance_workfn() */
1758		pcpu_atomic_alloc_failed = true;
1759		pcpu_schedule_balance_work();
1760	} else {
1761		mutex_unlock(&pcpu_alloc_mutex);
1762	}
1763	return NULL;
1764}
1765
1766/**
1767 * __alloc_percpu_gfp - allocate dynamic percpu area
1768 * @size: size of area to allocate in bytes
1769 * @align: alignment of area (max PAGE_SIZE)
1770 * @gfp: allocation flags
1771 *
1772 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
1773 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1774 * be called from any context but is a lot more likely to fail. If @gfp
1775 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1776 * allocation requests.
1777 *
1778 * RETURNS:
1779 * Percpu pointer to the allocated area on success, NULL on failure.
1780 */
1781void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1782{
1783	return pcpu_alloc(size, align, false, gfp);
1784}
1785EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
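/*
 * [Editor's sketch, not in the original source] A caller that cannot sleep
 * drops GFP_KERNEL from @gfp; pcpu_alloc() above then takes the atomic path
 * and only succeeds if the request fits in already-populated pages.  The
 * function name is hypothetical; the percpu API used is real.
 */
static u64 __percpu *example_alloc_atomic(void)
{
	/* never sleeps; served from pre-populated pages or fails */
	return alloc_percpu_gfp(u64, GFP_NOWAIT | __GFP_NOWARN);
}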
1786
1787/**
1788 * __alloc_percpu - allocate dynamic percpu area
1789 * @size: size of area to allocate in bytes
1790 * @align: alignment of area (max PAGE_SIZE)
1791 *
1792 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1793 */
1794void __percpu *__alloc_percpu(size_t size, size_t align)
1795{
1796	return pcpu_alloc(size, align, false, GFP_KERNEL);
1797}
1798EXPORT_SYMBOL_GPL(__alloc_percpu);
1799
1800/**
1801 * __alloc_reserved_percpu - allocate reserved percpu area
1802 * @size: size of area to allocate in bytes
1803 * @align: alignment of area (max PAGE_SIZE)
1804 *
1805 * Allocate zero-filled percpu area of @size bytes aligned at @align
1806 * from reserved percpu area if arch has set it up; otherwise,
1807 * allocation is served from the same dynamic area.  Might sleep.
1808 * Might trigger writeouts.
1809 *
1810 * CONTEXT:
1811 * Does GFP_KERNEL allocation.
1812 *
1813 * RETURNS:
1814 * Percpu pointer to the allocated area on success, NULL on failure.
1815 */
1816void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1817{
1818	return pcpu_alloc(size, align, true, GFP_KERNEL);
1819}
1820
1821/**
1822 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1823 * @work: unused
1824 *
1825 * Reclaim all fully free chunks except for the first one.  This is also
1826 * responsible for maintaining the pool of empty populated pages.  However,
1827 * it is possible that this is called when physical memory is scarce,
1828 * causing the OOM killer to be triggered.  We should avoid doing so until
1829 * an actual allocation causes a failure, as requests may still be
1830 * serviced from already backed regions.
1831 */
1832static void pcpu_balance_workfn(struct work_struct *work)
1833{
1834	/* gfp flags passed to underlying allocators */
1835	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
1836	LIST_HEAD(to_free);
1837	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1838	struct pcpu_chunk *chunk, *next;
1839	int slot, nr_to_pop, ret;
1840
1841	/*
1842	 * There's no reason to keep around multiple unused chunks and VM
1843	 * areas can be scarce.  Destroy all free chunks except for one.
1844	 */
1845	mutex_lock(&pcpu_alloc_mutex);
1846	spin_lock_irq(&pcpu_lock);
1847
1848	list_for_each_entry_safe(chunk, next, free_head, list) {
1849		WARN_ON(chunk->immutable);
1850
1851		/* spare the first one */
1852		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1853			continue;
1854
1855		list_move(&chunk->list, &to_free);
1856	}
1857
1858	spin_unlock_irq(&pcpu_lock);
1859
1860	list_for_each_entry_safe(chunk, next, &to_free, list) {
1861		int rs, re;
1862
1863		pcpu_for_each_pop_region(chunk->populated, rs, re, 0,
1864					 chunk->nr_pages) {
1865			pcpu_depopulate_chunk(chunk, rs, re);
1866			spin_lock_irq(&pcpu_lock);
1867			pcpu_chunk_depopulated(chunk, rs, re);
1868			spin_unlock_irq(&pcpu_lock);
1869		}
1870		pcpu_destroy_chunk(chunk);
1871		cond_resched();
1872	}
1873
1874	/*
1875	 * Ensure there is a certain number of free populated pages for
1876	 * atomic allocs.  Fill up from the most packed so that atomic
1877	 * allocs don't increase fragmentation.  If atomic allocation
1878	 * failed previously, always populate the maximum amount.  This
1879	 * should prevent atomic allocs larger than PAGE_SIZE from failing
1880	 * indefinitely; however, large atomic allocs are not
1881	 * something we support properly and can be highly unreliable and
1882	 * inefficient.
1883	 */
1884retry_pop:
1885	if (pcpu_atomic_alloc_failed) {
1886		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1887		/* best effort anyway, don't worry about synchronization */
1888		pcpu_atomic_alloc_failed = false;
1889	} else {
1890		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1891				  pcpu_nr_empty_pop_pages,
1892				  0, PCPU_EMPTY_POP_PAGES_HIGH);
1893	}
1894
1895	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1896		int nr_unpop = 0, rs, re;
1897
1898		if (!nr_to_pop)
1899			break;
1900
1901		spin_lock_irq(&pcpu_lock);
1902		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1903			nr_unpop = chunk->nr_pages - chunk->nr_populated;
1904			if (nr_unpop)
1905				break;
1906		}
1907		spin_unlock_irq(&pcpu_lock);
1908
1909		if (!nr_unpop)
1910			continue;
1911
1912		/* @chunk can't go away while pcpu_alloc_mutex is held */
1913		pcpu_for_each_unpop_region(chunk->populated, rs, re, 0,
1914					   chunk->nr_pages) {
1915			int nr = min(re - rs, nr_to_pop);
1916
1917			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
1918			if (!ret) {
1919				nr_to_pop -= nr;
1920				spin_lock_irq(&pcpu_lock);
1921				pcpu_chunk_populated(chunk, rs, rs + nr);
1922				spin_unlock_irq(&pcpu_lock);
1923			} else {
1924				nr_to_pop = 0;
1925			}
1926
1927			if (!nr_to_pop)
1928				break;
1929		}
1930	}
1931
1932	if (nr_to_pop) {
1933		/* ran out of chunks to populate, create a new one and retry */
1934		chunk = pcpu_create_chunk(gfp);
1935		if (chunk) {
1936			spin_lock_irq(&pcpu_lock);
1937			pcpu_chunk_relocate(chunk, -1);
1938			spin_unlock_irq(&pcpu_lock);
1939			goto retry_pop;
1940		}
1941	}
1942
1943	mutex_unlock(&pcpu_alloc_mutex);
1944}
1945
1946/**
1947 * free_percpu - free percpu area
1948 * @ptr: pointer to area to free
1949 *
1950 * Free percpu area @ptr.
1951 *
1952 * CONTEXT:
1953 * Can be called from atomic context.
1954 */
1955void free_percpu(void __percpu *ptr)
1956{
1957	void *addr;
1958	struct pcpu_chunk *chunk;
1959	unsigned long flags;
1960	int off;
1961	bool need_balance = false;
1962
1963	if (!ptr)
1964		return;
1965
1966	kmemleak_free_percpu(ptr);
1967
1968	addr = __pcpu_ptr_to_addr(ptr);
1969
1970	spin_lock_irqsave(&pcpu_lock, flags);
1971
1972	chunk = pcpu_chunk_addr_search(addr);
1973	off = addr - chunk->base_addr;
1974
1975	pcpu_free_area(chunk, off);
1976
1977	/* if there is more than one fully free chunk, wake up the grim reaper */
1978	if (chunk->free_bytes == pcpu_unit_size) {
1979		struct pcpu_chunk *pos;
1980
1981		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1982			if (pos != chunk) {
1983				need_balance = true;
1984				break;
1985			}
1986	}
1987
1988	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
1989
1990	spin_unlock_irqrestore(&pcpu_lock, flags);
1991
1992	if (need_balance)
1993		pcpu_schedule_balance_work();
1994}
1995EXPORT_SYMBOL_GPL(free_percpu);
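/*
 * [Editor's sketch, not in the original source] Typical lifecycle of a
 * dynamic percpu object using the API above.  struct example_counter and
 * example_counter_demo() are hypothetical; the percpu helpers are real.
 */
struct example_counter {
	u64 events;
};

static int example_counter_demo(void)
{
	struct example_counter __percpu *ctr;
	u64 total = 0;
	int cpu;

	ctr = alloc_percpu(struct example_counter);	/* zero-filled */
	if (!ctr)
		return -ENOMEM;

	/* fast path: update this CPU's copy, preemption-safe */
	this_cpu_inc(ctr->events);

	/* slow path: fold every CPU's copy into one total */
	for_each_possible_cpu(cpu)
		total += per_cpu_ptr(ctr, cpu)->events;

	pr_info("example: %llu events\n", (unsigned long long)total);
	free_percpu(ctr);
	return 0;
}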
1996
1997bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
1998{
1999#ifdef CONFIG_SMP
2000	const size_t static_size = __per_cpu_end - __per_cpu_start;
2001	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2002	unsigned int cpu;
2003
2004	for_each_possible_cpu(cpu) {
2005		void *start = per_cpu_ptr(base, cpu);
2006		void *va = (void *)addr;
2007
2008		if (va >= start && va < start + static_size) {
2009			if (can_addr) {
2010				*can_addr = (unsigned long) (va - start);
2011				*can_addr += (unsigned long)
2012					per_cpu_ptr(base, get_boot_cpu_id());
2013			}
2014			return true;
2015		}
2016	}
2017#endif
2018	/* on UP, can't distinguish from other static vars, always false */
2019	return false;
2020}
2021
2022/**
2023 * is_kernel_percpu_address - test whether address is from static percpu area
2024 * @addr: address to test
2025 *
2026 * Test whether @addr belongs to the in-kernel static percpu area.  Module
2027 * static percpu areas are not considered.  For those, use
2028 * is_module_percpu_address().
2029 *
2030 * RETURNS:
2031 * %true if @addr is from in-kernel static percpu area, %false otherwise.
2032 */
2033bool is_kernel_percpu_address(unsigned long addr)
2034{
2035	return __is_kernel_percpu_address(addr, NULL);
2036}
2037
2038/**
2039 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2040 * @addr: the address to be converted to physical address
2041 *
2042 * Given @addr, which is a dereferenceable address obtained via one of
2043 * the percpu access macros, this function translates it into its physical
2044 * address.  The caller is responsible for ensuring @addr stays valid
2045 * until this function finishes.
2046 *
2047 * The percpu allocator has a special setup for the first chunk, which
2048 * currently supports either embedding in the linear address space or a
2049 * vmalloc mapping; from the second chunk onwards, the backing allocator
2050 * (currently either vm or km) provides the translation.
2051 *
2052 * The address could be translated simply without checking whether it falls
2053 * into the first chunk.  But the current code better reflects how the
2054 * percpu allocator actually works, and the verification can discover bugs
2055 * both in the percpu allocator itself and in per_cpu_ptr_to_phys()
2056 * callers.  So we keep the current code.
2057 *
2058 * RETURNS:
2059 * The physical address for @addr.
2060 */
2061phys_addr_t per_cpu_ptr_to_phys(void *addr)
2062{
2063	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2064	bool in_first_chunk = false;
2065	unsigned long first_low, first_high;
2066	unsigned int cpu;
2067
2068	/*
2069	 * The following test on unit_low/high isn't strictly
2070	 * necessary but will speed up lookups of addresses which
2071	 * aren't in the first chunk.
2072	 *
2073	 * The address check is against full chunk sizes.  pcpu_base_addr
2074	 * points to the beginning of the first chunk including the
2075	 * static region.  Assumes good intent as the first chunk may
2076	 * not be full (ie. < pcpu_unit_pages in size).
2077	 */
2078	first_low = (unsigned long)pcpu_base_addr +
2079		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2080	first_high = (unsigned long)pcpu_base_addr +
2081		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2082	if ((unsigned long)addr >= first_low &&
2083	    (unsigned long)addr < first_high) {
2084		for_each_possible_cpu(cpu) {
2085			void *start = per_cpu_ptr(base, cpu);
2086
2087			if (addr >= start && addr < start + pcpu_unit_size) {
2088				in_first_chunk = true;
2089				break;
2090			}
2091		}
2092	}
2093
2094	if (in_first_chunk) {
2095		if (!is_vmalloc_addr(addr))
2096			return __pa(addr);
2097		else
2098			return page_to_phys(vmalloc_to_page(addr)) +
2099			       offset_in_page(addr);
2100	} else
2101		return page_to_phys(pcpu_addr_to_page(addr)) +
2102		       offset_in_page(addr);
2103}
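/*
 * [Editor's sketch, not in the original source] Walking the physical
 * address of every CPU's copy of a dynamically allocated percpu object;
 * example_phys_walk() is hypothetical.
 */
static void example_phys_walk(u32 __percpu *p)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));

		pr_info("cpu%u: copy at %pa\n", cpu, &pa);
	}
}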
2104
2105/**
2106 * pcpu_alloc_alloc_info - allocate percpu allocation info
2107 * @nr_groups: the number of groups
2108 * @nr_units: the number of units
2109 *
2110 * Allocate ai which is large enough for @nr_groups groups containing
2111 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
2112 * cpu_map array which is long enough for @nr_units and filled with
2113 * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
2114 * pointers of the other groups.
2115 *
2116 * RETURNS:
2117 * Pointer to the allocated pcpu_alloc_info on success, NULL on
2118 * failure.
2119 */
2120struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2121						      int nr_units)
2122{
2123	struct pcpu_alloc_info *ai;
2124	size_t base_size, ai_size;
2125	void *ptr;
2126	int unit;
2127
2128	base_size = ALIGN(struct_size(ai, groups, nr_groups),
2129			  __alignof__(ai->groups[0].cpu_map[0]));
2130	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2131
2132	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2133	if (!ptr)
2134		return NULL;
2135	ai = ptr;
2136	ptr += base_size;
2137
2138	ai->groups[0].cpu_map = ptr;
2139
2140	for (unit = 0; unit < nr_units; unit++)
2141		ai->groups[0].cpu_map[unit] = NR_CPUS;
2142
2143	ai->nr_groups = nr_groups;
2144	ai->__ai_size = PFN_ALIGN(ai_size);
2145
2146	return ai;
2147}
2148
2149/**
2150 * pcpu_free_alloc_info - free percpu allocation info
2151 * @ai: pcpu_alloc_info to free
2152 *
2153 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2154 */
2155void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2156{
2157	memblock_free_early(__pa(ai), ai->__ai_size);
2158}
2159
2160/**
2161 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2162 * @lvl: loglevel
2163 * @ai: allocation info to dump
2164 *
2165 * Print out information about @ai using loglevel @lvl.
2166 */
2167static void pcpu_dump_alloc_info(const char *lvl,
2168				 const struct pcpu_alloc_info *ai)
2169{
2170	int group_width = 1, cpu_width = 1, width;
2171	char empty_str[] = "--------";
2172	int alloc = 0, alloc_end = 0;
2173	int group, v;
2174	int upa, apl;	/* units per alloc, allocs per line */
2175
2176	v = ai->nr_groups;
2177	while (v /= 10)
2178		group_width++;
2179
2180	v = num_possible_cpus();
2181	while (v /= 10)
2182		cpu_width++;
2183	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2184
2185	upa = ai->alloc_size / ai->unit_size;
2186	width = upa * (cpu_width + 1) + group_width + 3;
2187	apl = rounddown_pow_of_two(max(60 / width, 1));
2188
2189	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2190	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2191	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2192
2193	for (group = 0; group < ai->nr_groups; group++) {
2194		const struct pcpu_group_info *gi = &ai->groups[group];
2195		int unit = 0, unit_end = 0;
2196
2197		BUG_ON(gi->nr_units % upa);
2198		for (alloc_end += gi->nr_units / upa;
2199		     alloc < alloc_end; alloc++) {
2200			if (!(alloc % apl)) {
2201				pr_cont("\n");
2202				printk("%spcpu-alloc: ", lvl);
2203			}
2204			pr_cont("[%0*d] ", group_width, group);
2205
2206			for (unit_end += upa; unit < unit_end; unit++)
2207				if (gi->cpu_map[unit] != NR_CPUS)
2208					pr_cont("%0*d ",
2209						cpu_width, gi->cpu_map[unit]);
2210				else
2211					pr_cont("%s ", empty_str);
2212		}
2213	}
2214	pr_cont("\n");
2215}
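/*
 * [Editor's note, not in the original source] Representative boot-log
 * output of the function above (numbers are machine-dependent): a 4-CPU
 * box with 512KB units packed four to a 2MB allocation might print
 *
 *	pcpu-alloc: s192512 r8192 d28672 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3
 */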
2216
2217/**
2218 * pcpu_setup_first_chunk - initialize the first percpu chunk
2219 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2220 * @base_addr: mapped address
2221 *
2222 * Initialize the first percpu chunk which contains the kernel static
2223 * percpu area.  This function is to be called from arch percpu area
2224 * setup path.
2225 *
2226 * @ai contains all information necessary to initialize the first
2227 * chunk and prime the dynamic percpu allocator.
2228 *
2229 * @ai->static_size is the size of static percpu area.
2230 *
2231 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2232 * reserve after the static area in the first chunk.  This reserves
2233 * the first chunk such that it's available only through reserved
2234 * percpu allocation.  This is primarily used to serve module percpu
2235 * static areas on architectures where the addressing model has
2236 * limited offset range for symbol relocations to guarantee module
2237 * percpu symbols fall inside the relocatable range.
2238 *
2239 * @ai->dyn_size determines the number of bytes available for dynamic
2240 * allocation in the first chunk.  The area between @ai->static_size +
2241 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2242 *
2243 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2244 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2245 * @ai->dyn_size.
2246 *
2247 * @ai->atom_size is the allocation atom size and used as alignment
2248 * for vm areas.
2249 *
2250 * @ai->alloc_size is the allocation size and always multiple of
2251 * @ai->atom_size.  This is larger than @ai->atom_size if
2252 * @ai->unit_size is larger than @ai->atom_size.
2253 *
2254 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2255 * percpu areas.  Units which should be colocated are put into the
2256 * same group.  Dynamic VM areas will be allocated according to these
2257 * groupings.  If @ai->nr_groups is zero, a single group containing
2258 * all units is assumed.
2259 *
2260 * The caller should have mapped the first chunk at @base_addr and
2261 * copied static data to each unit.
2262 *
2263 * The first chunk will always contain a static and a dynamic region.
2264 * However, the static region is not managed by any chunk.  If the first
2265 * chunk also contains a reserved region, it is served by two chunks -
2266 * one for the reserved region and one for the dynamic region.  They
2267 * share the same vm, but use offset regions in the area allocation map.
2268 * The chunk serving the dynamic region is circulated in the chunk slots
2269 * and available for dynamic allocation like any other chunk.
2270 */
2271void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2272				   void *base_addr)
2273{
2274	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2275	size_t static_size, dyn_size;
2276	struct pcpu_chunk *chunk;
2277	unsigned long *group_offsets;
2278	size_t *group_sizes;
2279	unsigned long *unit_off;
2280	unsigned int cpu;
2281	int *unit_map;
2282	int group, unit, i;
2283	int map_size;
2284	unsigned long tmp_addr;
2285	size_t alloc_size;
2286
2287#define PCPU_SETUP_BUG_ON(cond)	do {					\
2288	if (unlikely(cond)) {						\
2289		pr_emerg("failed to initialize, %s\n", #cond);		\
2290		pr_emerg("cpu_possible_mask=%*pb\n",			\
2291			 cpumask_pr_args(cpu_possible_mask));		\
2292		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
2293		BUG();							\
2294	}								\
2295} while (0)
2296
2297	/* sanity checks */
2298	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2299#ifdef CONFIG_SMP
2300	PCPU_SETUP_BUG_ON(!ai->static_size);
2301	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2302#endif
2303	PCPU_SETUP_BUG_ON(!base_addr);
2304	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2305	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2306	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2307	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2308	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2309	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2310	PCPU_SETUP_BUG_ON(!ai->dyn_size);
2311	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2312	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2313			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2314	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2315
2316	/* process group information and build config tables accordingly */
2317	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2318	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2319	if (!group_offsets)
2320		panic("%s: Failed to allocate %zu bytes\n", __func__,
2321		      alloc_size);
2322
2323	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2324	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2325	if (!group_sizes)
2326		panic("%s: Failed to allocate %zu bytes\n", __func__,
2327		      alloc_size);
2328
2329	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2330	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2331	if (!unit_map)
2332		panic("%s: Failed to allocate %zu bytes\n", __func__,
2333		      alloc_size);
2334
2335	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2336	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2337	if (!unit_off)
2338		panic("%s: Failed to allocate %zu bytes\n", __func__,
2339		      alloc_size);
2340
2341	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2342		unit_map[cpu] = UINT_MAX;
2343
2344	pcpu_low_unit_cpu = NR_CPUS;
2345	pcpu_high_unit_cpu = NR_CPUS;
2346
2347	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2348		const struct pcpu_group_info *gi = &ai->groups[group];
2349
2350		group_offsets[group] = gi->base_offset;
2351		group_sizes[group] = gi->nr_units * ai->unit_size;
2352
2353		for (i = 0; i < gi->nr_units; i++) {
2354			cpu = gi->cpu_map[i];
2355			if (cpu == NR_CPUS)
2356				continue;
2357
2358			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2359			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2360			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2361
2362			unit_map[cpu] = unit + i;
2363			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2364
2365			/* determine low/high unit_cpu */
2366			if (pcpu_low_unit_cpu == NR_CPUS ||
2367			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2368				pcpu_low_unit_cpu = cpu;
2369			if (pcpu_high_unit_cpu == NR_CPUS ||
2370			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2371				pcpu_high_unit_cpu = cpu;
2372		}
2373	}
2374	pcpu_nr_units = unit;
2375
2376	for_each_possible_cpu(cpu)
2377		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2378
2379	/* we're done parsing the input, undefine BUG macro and dump config */
2380#undef PCPU_SETUP_BUG_ON
2381	pcpu_dump_alloc_info(KERN_DEBUG, ai);
2382
2383	pcpu_nr_groups = ai->nr_groups;
2384	pcpu_group_offsets = group_offsets;
2385	pcpu_group_sizes = group_sizes;
2386	pcpu_unit_map = unit_map;
2387	pcpu_unit_offsets = unit_off;
2388
2389	/* determine basic parameters */
2390	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2391	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2392	pcpu_atom_size = ai->atom_size;
2393	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
2394		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
2395
2396	pcpu_stats_save_ai(ai);
2397
2398	/*
2399	 * Allocate chunk slots.  The additional last slot is for
2400	 * empty chunks.
2401	 */
2402	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
2403	pcpu_slot = memblock_alloc(pcpu_nr_slots * sizeof(pcpu_slot[0]),
2404				   SMP_CACHE_BYTES);
2405	if (!pcpu_slot)
2406		panic("%s: Failed to allocate %zu bytes\n", __func__,
2407		      pcpu_nr_slots * sizeof(pcpu_slot[0]));
2408	for (i = 0; i < pcpu_nr_slots; i++)
2409		INIT_LIST_HEAD(&pcpu_slot[i]);
2410
2411	/*
2412	 * The end of the static region needs to be aligned with the
2413	 * minimum allocation size as this offsets the reserved and
2414	 * dynamic region.  The first chunk ends page aligned by
2415	 * expanding the dynamic region, therefore the dynamic region
2416	 * can be shrunk to compensate while still staying above the
2417	 * configured sizes.
2418	 */
2419	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2420	dyn_size = ai->dyn_size - (static_size - ai->static_size);
2421
2422	/*
2423	 * Initialize first chunk.
2424	 * If the reserved_size is non-zero, this initializes the reserved
2425	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
2426	 * and the dynamic region is initialized here.  The first chunk,
2427	 * pcpu_first_chunk, will always point to the chunk that serves
2428	 * the dynamic region.
2429	 */
2430	tmp_addr = (unsigned long)base_addr + static_size;
2431	map_size = ai->reserved_size ?: dyn_size;
2432	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2433
2434	/* init dynamic chunk if necessary */
2435	if (ai->reserved_size) {
2436		pcpu_reserved_chunk = chunk;
2437
2438		tmp_addr = (unsigned long)base_addr + static_size +
2439			   ai->reserved_size;
2440		map_size = dyn_size;
2441		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2442	}
2443
2444	/* link the first chunk in */
2445	pcpu_first_chunk = chunk;
2446	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2447	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2448
2449	/* include all regions of the first chunk */
2450	pcpu_nr_populated += PFN_DOWN(size_sum);
2451
2452	pcpu_stats_chunk_alloc();
2453	trace_percpu_create_chunk(base_addr);
2454
2455	/* we're done */
2456	pcpu_base_addr = base_addr;
2457}
2458
2459#ifdef CONFIG_SMP
2460
2461const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2462	[PCPU_FC_AUTO]	= "auto",
2463	[PCPU_FC_EMBED]	= "embed",
2464	[PCPU_FC_PAGE]	= "page",
2465};
2466
2467enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2468
2469static int __init percpu_alloc_setup(char *str)
2470{
2471	if (!str)
2472		return -EINVAL;
2473
2474	if (0)
2475		/* nada */;
2476#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2477	else if (!strcmp(str, "embed"))
2478		pcpu_chosen_fc = PCPU_FC_EMBED;
2479#endif
2480#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2481	else if (!strcmp(str, "page"))
2482		pcpu_chosen_fc = PCPU_FC_PAGE;
2483#endif
2484	else
2485		pr_warn("unknown allocator %s specified\n", str);
2486
2487	return 0;
2488}
2489early_param("percpu_alloc", percpu_alloc_setup);
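/*
 * [Editor's note, not in the original source] The early_param above lets
 * the first-chunk allocator be chosen on the kernel command line, e.g.
 * booting with
 *
 *	percpu_alloc=page
 *
 * forces pcpu_page_first_chunk() on configurations that support it.
 */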
2490
2491/*
2492 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2493 * Build it if the arch config needs it or if the generic setup is
2494 * going to be used.
2495 */
2496#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2497	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2498#define BUILD_EMBED_FIRST_CHUNK
2499#endif
2500
2501/* build pcpu_page_first_chunk() iff needed by the arch config */
2502#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2503#define BUILD_PAGE_FIRST_CHUNK
2504#endif
2505
2506/* pcpu_build_alloc_info() is used by both embed and page first chunk */
2507#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2508/**
2509 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2510 * @reserved_size: the size of reserved percpu area in bytes
2511 * @dyn_size: minimum free size for dynamic allocation in bytes
2512 * @atom_size: allocation atom size
2513 * @cpu_distance_fn: callback to determine distance between cpus, optional
2514 *
2515 * This function determines grouping of units, their mappings to cpus
2516 * and other parameters considering needed percpu size, allocation
2517 * atom size and distances between CPUs.
2518 *
2519 * Groups are always multiples of atom size and CPUs which are of
2520 * LOCAL_DISTANCE both ways are grouped together and share space for
2521 * units in the same group.  The returned configuration is guaranteed
2522 * to have CPUs on different nodes in different groups and >=75% usage
2523 * of allocated virtual address space.
2524 *
2525 * RETURNS:
2526 * On success, pointer to the new allocation_info is returned.  On
2527 * failure, ERR_PTR value is returned.
2528 */
2529static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
2530				size_t reserved_size, size_t dyn_size,
2531				size_t atom_size,
2532				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2533{
2534	static int group_map[NR_CPUS] __initdata;
2535	static int group_cnt[NR_CPUS] __initdata;
2536	const size_t static_size = __per_cpu_end - __per_cpu_start;
2537	int nr_groups = 1, nr_units = 0;
2538	size_t size_sum, min_unit_size, alloc_size;
2539	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
2540	int last_allocs, group, unit;
2541	unsigned int cpu, tcpu;
2542	struct pcpu_alloc_info *ai;
2543	unsigned int *cpu_map;
2544
2545	/* this function may be called multiple times */
2546	memset(group_map, 0, sizeof(group_map));
2547	memset(group_cnt, 0, sizeof(group_cnt));
2548
2549	/* calculate size_sum and ensure dyn_size is enough for early alloc */
2550	size_sum = PFN_ALIGN(static_size + reserved_size +
2551			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2552	dyn_size = size_sum - static_size - reserved_size;
2553
2554	/*
2555	 * Determine min_unit_size, alloc_size and max_upa such that
2556	 * alloc_size is multiple of atom_size and is the smallest
2557	 * which can accommodate 4k aligned segments which are equal to
2558	 * or larger than min_unit_size.
2559	 */
2560	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2561
2562	/* determine the maximum # of units that can fit in an allocation */
2563	alloc_size = roundup(min_unit_size, atom_size);
2564	upa = alloc_size / min_unit_size;
2565	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2566		upa--;
2567	max_upa = upa;
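	/*
	 * [Editor's worked example, not in the original source] With
	 * atom_size = 2MB and min_unit_size = 192KB, alloc_size is
	 * rounded up to 2MB and upa starts at 2MB / 192KB = 10.  Neither
	 * 10 nor 9 divides 2MB evenly, so the loop settles on upa = 8,
	 * i.e. max_upa = 8 units of 256KB per 2MB allocation.
	 */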
2568
2569	/* group cpus according to their proximity */
2570	for_each_possible_cpu(cpu) {
2571		group = 0;
2572	next_group:
2573		for_each_possible_cpu(tcpu) {
2574			if (cpu == tcpu)
2575				break;
2576			if (group_map[tcpu] == group && cpu_distance_fn &&
2577			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
2578			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
2579				group++;
2580				nr_groups = max(nr_groups, group + 1);
2581				goto next_group;
2582			}
2583		}
2584		group_map[cpu] = group;
2585		group_cnt[group]++;
2586	}
2587
2588	/*
2589	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2590	 * Expand the unit_size until we use >= 75% of the units allocated.
2591	 * Related to atom_size, which could be much larger than the unit_size.
2592	 */
2593	last_allocs = INT_MAX;
2594	for (upa = max_upa; upa; upa--) {
2595		int allocs = 0, wasted = 0;
2596
2597		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2598			continue;
2599
2600		for (group = 0; group < nr_groups; group++) {
2601			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2602			allocs += this_allocs;
2603			wasted += this_allocs * upa - group_cnt[group];
2604		}
2605
2606		/*
2607		 * Don't accept if wastage is over 1/3.  The
2608		 * greater-than comparison ensures upa==1 always
2609		 * passes the following check.
2610		 */
2611		if (wasted > num_possible_cpus() / 3)
2612			continue;
2613
2614		/* and then don't consume more memory */
2615		if (allocs > last_allocs)
2616			break;
2617		last_allocs = allocs;
2618		best_upa = upa;
2619	}
2620	upa = best_upa;
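	/*
	 * [Editor's worked example, not in the original source] Continuing
	 * with alloc_size = 2MB and max_upa = 8: suppose 8 possible CPUs
	 * split into groups of 6 and 2.  upa = 8 wastes (8-6)+(8-2) = 8
	 * units and upa = 4 wastes 4, both over num_possible_cpus()/3 = 2;
	 * upa = 2 wastes none at ceil(6/2)+ceil(2/2) = 4 allocations, so
	 * best_upa = 2 and each unit ends up 1MB.
	 */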
2621
2622	/* allocate and fill alloc_info */
2623	for (group = 0; group < nr_groups; group++)
2624		nr_units += roundup(group_cnt[group], upa);
2625
2626	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2627	if (!ai)
2628		return ERR_PTR(-ENOMEM);
2629	cpu_map = ai->groups[0].cpu_map;
2630
2631	for (group = 0; group < nr_groups; group++) {
2632		ai->groups[group].cpu_map = cpu_map;
2633		cpu_map += roundup(group_cnt[group], upa);
2634	}
2635
2636	ai->static_size = static_size;
2637	ai->reserved_size = reserved_size;
2638	ai->dyn_size = dyn_size;
2639	ai->unit_size = alloc_size / upa;
2640	ai->atom_size = atom_size;
2641	ai->alloc_size = alloc_size;
2642
2643	for (group = 0, unit = 0; group < nr_groups; group++) {
2644		struct pcpu_group_info *gi = &ai->groups[group];
2645
2646		/*
2647		 * Initialize base_offset as if all groups are located
2648		 * back-to-back.  The caller should update this to
2649		 * reflect actual allocation.
2650		 */
2651		gi->base_offset = unit * ai->unit_size;
2652
2653		for_each_possible_cpu(cpu)
2654			if (group_map[cpu] == group)
2655				gi->cpu_map[gi->nr_units++] = cpu;
2656		gi->nr_units = roundup(gi->nr_units, upa);
2657		unit += gi->nr_units;
2658	}
2659	BUG_ON(unit != nr_units);
2660
2661	return ai;
2662}
2663#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2664
2665#if defined(BUILD_EMBED_FIRST_CHUNK)
2666/**
2667 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2668 * @reserved_size: the size of reserved percpu area in bytes
2669 * @dyn_size: minimum free size for dynamic allocation in bytes
2670 * @atom_size: allocation atom size
2671 * @cpu_distance_fn: callback to determine distance between cpus, optional
2672 * @alloc_fn: function to allocate percpu page
2673 * @free_fn: function to free percpu page
2674 *
2675 * This is a helper to ease setting up the embedded first percpu chunk
2676 * and can be called where pcpu_setup_first_chunk() is expected.
2677 *
2678 * If this function is used to set up the first chunk, it is allocated
2679 * by calling @alloc_fn and used as-is without being mapped into
2680 * vmalloc area.  Allocations are always whole multiples of @atom_size
2681 * aligned to @atom_size.
2682 *
2683 * This enables the first chunk to piggyback on the linear physical
2684 * mapping, which often uses a larger page size.  Please note that this
2685 * can result in very sparse cpu->unit mapping on NUMA machines thus
2686 * requiring large vmalloc address space.  Don't use this allocator if
2687 * vmalloc space is not orders of magnitude larger than distances
2688 * between node memory addresses (i.e. 32-bit NUMA machines).
2689 *
2690 * @dyn_size specifies the minimum dynamic area size.
2691 *
2692 * If the needed size is smaller than the minimum or specified unit
2693 * size, the leftover is returned using @free_fn.
2694 *
2695 * RETURNS:
2696 * 0 on success, -errno on failure.
2697 */
2698int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2699				  size_t atom_size,
2700				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2701				  pcpu_fc_alloc_fn_t alloc_fn,
2702				  pcpu_fc_free_fn_t free_fn)
2703{
2704	void *base = (void *)ULONG_MAX;
2705	void **areas = NULL;
2706	struct pcpu_alloc_info *ai;
2707	size_t size_sum, areas_size;
2708	unsigned long max_distance;
2709	int group, i, highest_group, rc = 0;
2710
2711	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2712				   cpu_distance_fn);
2713	if (IS_ERR(ai))
2714		return PTR_ERR(ai);
2715
2716	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2717	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
2718
2719	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
2720	if (!areas) {
2721		rc = -ENOMEM;
2722		goto out_free;
2723	}
2724
2725	/* allocate, copy and determine base address & max_distance */
2726	highest_group = 0;
2727	for (group = 0; group < ai->nr_groups; group++) {
2728		struct pcpu_group_info *gi = &ai->groups[group];
2729		unsigned int cpu = NR_CPUS;
2730		void *ptr;
2731
2732		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2733			cpu = gi->cpu_map[i];
2734		BUG_ON(cpu == NR_CPUS);
2735
2736		/* allocate space for the whole group */
2737		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2738		if (!ptr) {
2739			rc = -ENOMEM;
2740			goto out_free_areas;
2741		}
2742		/* kmemleak tracks the percpu allocations separately */
2743		kmemleak_free(ptr);
2744		areas[group] = ptr;
2745
2746		base = min(ptr, base);
2747		if (ptr > areas[highest_group])
2748			highest_group = group;
2749	}
2750	max_distance = areas[highest_group] - base;
2751	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2752
2753	/* warn if maximum distance is further than 75% of vmalloc space */
2754	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2755		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2756				max_distance, VMALLOC_TOTAL);
2757#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2758		/* and fail if we have fallback */
2759		rc = -EINVAL;
2760		goto out_free_areas;
2761#endif
2762	}
2763
2764	/*
2765	 * Copy data and free unused parts.  This should happen after all
2766	 * allocations are complete; otherwise, we may end up with
2767	 * overlapping groups.
2768	 */
2769	for (group = 0; group < ai->nr_groups; group++) {
2770		struct pcpu_group_info *gi = &ai->groups[group];
2771		void *ptr = areas[group];
2772
2773		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2774			if (gi->cpu_map[i] == NR_CPUS) {
2775				/* unused unit, free whole */
2776				free_fn(ptr, ai->unit_size);
2777				continue;
2778			}
2779			/* copy and return the unused part */
2780			memcpy(ptr, __per_cpu_load, ai->static_size);
2781			free_fn(ptr + size_sum, ai->unit_size - size_sum);
2782		}
2783	}
2784
2785	/* base address is now known, determine group base offsets */
2786	for (group = 0; group < ai->nr_groups; group++) {
2787		ai->groups[group].base_offset = areas[group] - base;
2788	}
2789
2790	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
2791		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2792		ai->dyn_size, ai->unit_size);
2793
2794	pcpu_setup_first_chunk(ai, base);
2795	goto out_free;
2796
2797out_free_areas:
2798	for (group = 0; group < ai->nr_groups; group++)
2799		if (areas[group])
2800			free_fn(areas[group],
2801				ai->groups[group].nr_units * ai->unit_size);
2802out_free:
2803	pcpu_free_alloc_info(ai);
2804	if (areas)
2805		memblock_free_early(__pa(areas), areas_size);
2806	return rc;
2807}
2808#endif /* BUILD_EMBED_FIRST_CHUNK */
2809
2810#ifdef BUILD_PAGE_FIRST_CHUNK
2811/**
2812 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2813 * @reserved_size: the size of reserved percpu area in bytes
2814 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2815 * @free_fn: function to free percpu page, always called with PAGE_SIZE
2816 * @populate_pte_fn: function to populate pte
2817 *
2818 * This is a helper to ease setting up a page-remapped first percpu
2819 * chunk and can be called where pcpu_setup_first_chunk() is expected.
2820 *
2821 * This is the basic allocator.  Static percpu area is allocated
2822 * page-by-page into vmalloc area.
2823 *
2824 * RETURNS:
2825 * 0 on success, -errno on failure.
2826 */
2827int __init pcpu_page_first_chunk(size_t reserved_size,
2828				 pcpu_fc_alloc_fn_t alloc_fn,
2829				 pcpu_fc_free_fn_t free_fn,
2830				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2831{
2832	static struct vm_struct vm;
2833	struct pcpu_alloc_info *ai;
2834	char psize_str[16];
2835	int unit_pages;
2836	size_t pages_size;
2837	struct page **pages;
2838	int unit, i, j, rc = 0;
2839	int upa;
2840	int nr_g0_units;
2841
2842	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2843
2844	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2845	if (IS_ERR(ai))
2846		return PTR_ERR(ai);
2847	BUG_ON(ai->nr_groups != 1);
2848	upa = ai->alloc_size / ai->unit_size;
2849	nr_g0_units = roundup(num_possible_cpus(), upa);
2850	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
2851		pcpu_free_alloc_info(ai);
2852		return -EINVAL;
2853	}
2854
2855	unit_pages = ai->unit_size >> PAGE_SHIFT;
2856
2857	/* unaligned allocations can't be freed, round up to page size */
2858	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2859			       sizeof(pages[0]));
2860	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
2861	if (!pages)
2862		panic("%s: Failed to allocate %zu bytes\n", __func__,
2863		      pages_size);
2864
2865	/* allocate pages */
2866	j = 0;
2867	for (unit = 0; unit < num_possible_cpus(); unit++) {
2868		unsigned int cpu = ai->groups[0].cpu_map[unit];
2869		for (i = 0; i < unit_pages; i++) {
2870			void *ptr;
2871
2872			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2873			if (!ptr) {
2874				pr_warn("failed to allocate %s page for cpu%u\n",
2875						psize_str, cpu);
2876				goto enomem;
2877			}
2878			/* kmemleak tracks the percpu allocations separately */
2879			kmemleak_free(ptr);
2880			pages[j++] = virt_to_page(ptr);
2881		}
2882	}
2883
2884	/* allocate vm area, map the pages and copy static data */
2885	vm.flags = VM_ALLOC;
2886	vm.size = num_possible_cpus() * ai->unit_size;
2887	vm_area_register_early(&vm, PAGE_SIZE);
2888
2889	for (unit = 0; unit < num_possible_cpus(); unit++) {
2890		unsigned long unit_addr =
2891			(unsigned long)vm.addr + unit * ai->unit_size;
2892
2893		for (i = 0; i < unit_pages; i++)
2894			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2895
2896		/* pte already populated, the following shouldn't fail */
2897		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2898				      unit_pages);
2899		if (rc < 0)
2900			panic("failed to map percpu area, err=%d\n", rc);
2901
2902		/*
2903		 * FIXME: Archs with virtual cache should flush local
2904		 * cache for the linear mapping here - something
2905		 * equivalent to flush_cache_vmap() on the local cpu.
2906		 * flush_cache_vmap() can't be used as most supporting
2907		 * data structures are not set up yet.
2908		 */
2909
2910		/* copy static data */
2911		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2912	}
2913
2914	/* we're ready, commit */
2915	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
2916		unit_pages, psize_str, ai->static_size,
2917		ai->reserved_size, ai->dyn_size);
2918
2919	pcpu_setup_first_chunk(ai, vm.addr);
2920	goto out_free_ar;
2921
2922enomem:
2923	while (--j >= 0)
2924		free_fn(page_address(pages[j]), PAGE_SIZE);
2925	rc = -ENOMEM;
2926out_free_ar:
2927	memblock_free_early(__pa(pages), pages_size);
2928	pcpu_free_alloc_info(ai);
2929	return rc;
2930}
2931#endif /* BUILD_PAGE_FIRST_CHUNK */
2932
2933#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
2934/*
2935 * Generic SMP percpu area setup.
2936 *
2937 * The embedding helper is used because its behavior closely resembles
2938 * the original non-dynamic generic percpu area setup.  This is
2939 * important because many archs have addressing restrictions and might
2940 * fail if the percpu area is located far away from the previous
2941 * location.  As an added bonus, in non-NUMA cases, embedding is
2942 * generally a good idea TLB-wise because the percpu area can piggyback
2943 * on the physical linear memory mapping which uses large page
2944 * mappings on applicable archs.
2945 */
2946unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2947EXPORT_SYMBOL(__per_cpu_offset);
2948
2949static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2950				       size_t align)
2951{
2952	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
2953}
2954
2955static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2956{
2957	memblock_free_early(__pa(ptr), size);
2958}
2959
2960void __init setup_per_cpu_areas(void)
2961{
2962	unsigned long delta;
2963	unsigned int cpu;
2964	int rc;
2965
2966	/*
2967	 * Always reserve area for module percpu variables.  That's
2968	 * what the legacy allocator did.
2969	 */
2970	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2971				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2972				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2973	if (rc < 0)
2974		panic("Failed to initialize percpu areas.");
2975
2976	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2977	for_each_possible_cpu(cpu)
2978		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2979}
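/*
 * [Editor's sketch, not in the original source] The offsets stored in
 * __per_cpu_offset above are what the generic accessors add to a static
 * percpu variable's link address (simplified from
 * include/asm-generic/percpu.h and include/linux/percpu-defs.h):
 *
 *	#define per_cpu_offset(x)	(__per_cpu_offset[x])
 *	#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR(ptr, per_cpu_offset(cpu))
 *	#define per_cpu(var, cpu)	(*per_cpu_ptr(&(var), cpu))
 */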
2980#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2981
2982#else	/* CONFIG_SMP */
2983
2984/*
2985 * UP percpu area setup.
2986 *
2987 * UP always uses the km-based percpu allocator with identity mapping.
2988 * Static percpu variables are indistinguishable from the usual static
2989 * variables and don't require any special preparation.
2990 */
2991void __init setup_per_cpu_areas(void)
2992{
2993	const size_t unit_size =
2994		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2995					 PERCPU_DYNAMIC_RESERVE));
2996	struct pcpu_alloc_info *ai;
2997	void *fc;
2998
2999	ai = pcpu_alloc_alloc_info(1, 1);
3000	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3001	if (!ai || !fc)
3002		panic("Failed to allocate memory for percpu areas.");
3003	/* kmemleak tracks the percpu allocations separately */
3004	kmemleak_free(fc);
3005
3006	ai->dyn_size = unit_size;
3007	ai->unit_size = unit_size;
3008	ai->atom_size = unit_size;
3009	ai->alloc_size = unit_size;
3010	ai->groups[0].nr_units = 1;
3011	ai->groups[0].cpu_map[0] = 0;
3012
3013	pcpu_setup_first_chunk(ai, fc);
3014	pcpu_free_alloc_info(ai);
3015}
3016
3017#endif	/* CONFIG_SMP */
3018
3019/*
3020 * pcpu_nr_pages - calculate total number of populated backing pages
3021 *
3022 * This reflects the number of pages populated to back chunks.  Metadata is
3023 * excluded from the number exposed in meminfo as the number of backing pages
3024 * scales with the number of cpus and can quickly outweigh the memory used for
3025 * metadata.  It also keeps this calculation nice and simple.
3026 *
3027 * RETURNS:
3028 * Total number of populated backing pages in use by the allocator.
3029 */
3030unsigned long pcpu_nr_pages(void)
3031{
3032	return pcpu_nr_populated * pcpu_nr_units;
3033}
3034
3035/*
3036 * Percpu allocator is initialized early during boot when neither slab or
3037 * workqueue is available.  Plug async management until everything is up
3038 * and running.
3039 */
3040static int __init percpu_enable_async(void)
3041{
3042	pcpu_async_enabled = true;
3043	return 0;
3044}
3045subsys_initcall(percpu_enable_async);
v3.15
 
   1/*
   2 * mm/percpu.c - percpu memory allocator
   3 *
   4 * Copyright (C) 2009		SUSE Linux Products GmbH
   5 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
   6 *
   7 * This file is released under the GPLv2.
 
   8 *
   9 * This is percpu allocator which can handle both static and dynamic
  10 * areas.  Percpu areas are allocated in chunks.  Each chunk is
  11 * consisted of boot-time determined number of units and the first
  12 * chunk is used for static percpu variables in the kernel image
  13 * (special boot time alloc/init handling necessary as these areas
  14 * need to be brought up before allocation services are running).
  15 * Unit grows as necessary and all units grow or shrink in unison.
  16 * When a chunk is filled up, another chunk is allocated.
  17 *
  18 *  c0                           c1                         c2
  19 *  -------------------          -------------------        ------------
  20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  21 *  -------------------  ......  -------------------  ....  ------------
  22 *
  23 * Allocation is done in offset-size areas of single unit space.  Ie,
  24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  25 * c1:u1, c1:u2 and c1:u3.  On UMA, units corresponds directly to
  26 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
  27 * Percpu access can be done by configuring percpu base registers
  28 * according to cpu to unit mapping and pcpu_unit_size.
  29 *
  30 * There are usually many small percpu allocations many of them being
  31 * as small as 4 bytes.  The allocator organizes chunks into lists
  32 * according to free size and tries to allocate from the fullest one.
  33 * Each chunk keeps the maximum contiguous area size hint which is
  34 * guaranteed to be equal to or larger than the maximum contiguous
  35 * area in the chunk.  This helps the allocator not to iterate the
  36 * chunk maps unnecessarily.
  37 *
  38 * Allocation state in each chunk is kept using an array of integers
  39 * on chunk->map.  A positive value in the map represents a free
  40 * region and negative allocated.  Allocation inside a chunk is done
  41 * by scanning this map sequentially and serving the first matching
  42 * entry.  This is mostly copied from the percpu_modalloc() allocator.
  43 * Chunks can be determined from the address using the index field
  44 * in the page struct. The index field contains a pointer to the chunk.
 
 
 
 
 
 
 
 
 
 
  45 *
  46 * To use this allocator, arch code should do the followings.
  47 *
  48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  49 *   regular address to percpu pointer and back if they need to be
  50 *   different from the default
  51 *
  52 * - use pcpu_setup_first_chunk() during percpu area initialization to
  53 *   setup the first chunk containing the kernel static percpu area
  54 */
  55
 
 
  56#include <linux/bitmap.h>
  57#include <linux/bootmem.h>
  58#include <linux/err.h>
 
  59#include <linux/list.h>
  60#include <linux/log2.h>
  61#include <linux/mm.h>
  62#include <linux/module.h>
  63#include <linux/mutex.h>
  64#include <linux/percpu.h>
  65#include <linux/pfn.h>
  66#include <linux/slab.h>
  67#include <linux/spinlock.h>
  68#include <linux/vmalloc.h>
  69#include <linux/workqueue.h>
  70#include <linux/kmemleak.h>
 
  71
  72#include <asm/cacheflush.h>
  73#include <asm/sections.h>
  74#include <asm/tlbflush.h>
  75#include <asm/io.h>
  76
  77#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
  78#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
 
 
 
 
 
 
 
 
 
 
  79
  80#ifdef CONFIG_SMP
  81/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
  82#ifndef __addr_to_pcpu_ptr
  83#define __addr_to_pcpu_ptr(addr)					\
  84	(void __percpu *)((unsigned long)(addr) -			\
  85			  (unsigned long)pcpu_base_addr	+		\
  86			  (unsigned long)__per_cpu_start)
  87#endif
  88#ifndef __pcpu_ptr_to_addr
  89#define __pcpu_ptr_to_addr(ptr)						\
  90	(void __force *)((unsigned long)(ptr) +				\
  91			 (unsigned long)pcpu_base_addr -		\
  92			 (unsigned long)__per_cpu_start)
  93#endif
  94#else	/* CONFIG_SMP */
  95/* on UP, it's always identity mapped */
  96#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
  97#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
  98#endif	/* CONFIG_SMP */
  99
 100struct pcpu_chunk {
 101	struct list_head	list;		/* linked to pcpu_slot lists */
 102	int			free_size;	/* free bytes in the chunk */
 103	int			contig_hint;	/* max contiguous size hint */
 104	void			*base_addr;	/* base address of this chunk */
 105	int			map_used;	/* # of map entries used before the sentry */
 106	int			map_alloc;	/* # of map entries allocated */
 107	int			*map;		/* allocation map */
 108	void			*data;		/* chunk data */
 109	int			first_free;	/* no free below this */
 110	bool			immutable;	/* no [de]population allowed */
 111	unsigned long		populated[];	/* populated bitmap */
 112};
 113
 114static int pcpu_unit_pages __read_mostly;
 115static int pcpu_unit_size __read_mostly;
 116static int pcpu_nr_units __read_mostly;
 117static int pcpu_atom_size __read_mostly;
 118static int pcpu_nr_slots __read_mostly;
 119static size_t pcpu_chunk_struct_size __read_mostly;
 120
 121/* cpus with the lowest and highest unit addresses */
 122static unsigned int pcpu_low_unit_cpu __read_mostly;
 123static unsigned int pcpu_high_unit_cpu __read_mostly;
 124
 125/* the address of the first chunk which starts with the kernel static area */
 126void *pcpu_base_addr __read_mostly;
 127EXPORT_SYMBOL_GPL(pcpu_base_addr);
 128
 129static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
 130const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
 131
 132/* group information, used for vm allocation */
 133static int pcpu_nr_groups __read_mostly;
 134static const unsigned long *pcpu_group_offsets __read_mostly;
 135static const size_t *pcpu_group_sizes __read_mostly;
 136
 137/*
 138 * The first chunk which always exists.  Note that unlike other
 139 * chunks, this one can be allocated and mapped in several different
 140 * ways and thus often doesn't live in the vmalloc area.
 141 */
 142static struct pcpu_chunk *pcpu_first_chunk;
 143
 144/*
 145 * Optional reserved chunk.  This chunk reserves part of the first
 146 * chunk and serves it for reserved allocations.  The offset at which
 147 * the reserved area ends is in pcpu_reserved_chunk_limit.  When the
 148 * reserved area doesn't exist, the following variables contain NULL
 149 * and 0 respectively.
 150 */
 151static struct pcpu_chunk *pcpu_reserved_chunk;
 152static int pcpu_reserved_chunk_limit;
 153
 154/*
 155 * Synchronization rules.
 156 *
 157 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 158 * protects allocation/reclaim paths, chunks, populated bitmap and
 159 * vmalloc mapping.  The latter is a spinlock and protects the index
 160 * data structures - chunk slots, chunks and area maps in chunks.
 161 *
 162 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 163 * pcpu_lock is grabbed and released as necessary.  All actual memory
 164 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 165 * general, percpu memory can't be allocated with irq off but
 166 * irqsave/restore are still used in alloc path so that it can be used
 167 * from early init path - sched_init() specifically.
 168 *
 169 * Free path accesses and alters only the index data structures, so it
 170 * can be safely called from atomic context.  When memory needs to be
 171 * returned to the system, free path schedules reclaim_work which
 172 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 173 * reclaimed, release both locks and frees the chunks.  Note that it's
 174 * necessary to grab both locks to remove a chunk from circulation as
 175 * allocation path might be referencing the chunk with only
 176 * pcpu_alloc_mutex locked.
 177 */
 178static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
 179static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
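/*
 * Illustrative sketch of the nesting described above (hypothetical
 * caller, not part of this file): pcpu_alloc_mutex is taken first and
 * held across the whole operation while pcpu_lock only wraps index
 * updates, keeping GFP_KERNEL allocations outside the spinlock.
 *
 *	mutex_lock(&pcpu_alloc_mutex);
 *	spin_lock_irqsave(&pcpu_lock, flags);
 *	// ... update chunk slots / area maps ...
 *	spin_unlock_irqrestore(&pcpu_lock, flags);
 *	// ... GFP_KERNEL allocations may happen here ...
 *	mutex_unlock(&pcpu_alloc_mutex);
 */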
 180
 181static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 182
 183/* reclaim work to release fully free chunks, scheduled from free path */
 184static void pcpu_reclaim(struct work_struct *work);
 185static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
 186
 187static bool pcpu_addr_in_first_chunk(void *addr)
 188{
 189	void *first_start = pcpu_first_chunk->base_addr;
 190
 191	return addr >= first_start && addr < first_start + pcpu_unit_size;
 192}
 193
 194static bool pcpu_addr_in_reserved_chunk(void *addr)
 195{
 196	void *first_start = pcpu_first_chunk->base_addr;
 197
 198	return addr >= first_start &&
 199		addr < first_start + pcpu_reserved_chunk_limit;
 200}
 201
 202static int __pcpu_size_to_slot(int size)
 203{
 204	int highbit = fls(size);	/* size is in bytes */
 205	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 206}
 207
 208static int pcpu_size_to_slot(int size)
 209{
 210	if (size == pcpu_unit_size)
 211		return pcpu_nr_slots - 1;
 212	return __pcpu_size_to_slot(size);
 213}
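/*
 * Worked example (illustrative): for a free size of 1024 bytes,
 * fls(1024) == 11, so __pcpu_size_to_slot() returns
 * max(11 - PCPU_SLOT_BASE_SHIFT + 2, 1) == max(11 - 5 + 2, 1) == 8.
 * A chunk whose free size equals pcpu_unit_size is always placed in
 * the last slot, which is what the reclaim path scans for fully free
 * chunks.
 */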
 214
 215static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 216{
 217	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
 218		return 0;
 219
 220	return pcpu_size_to_slot(chunk->free_size);
 221}
 222
 223/* set the pointer to a chunk in a page struct */
 224static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 225{
 226	page->index = (unsigned long)pcpu;
 227}
 228
 229/* obtain pointer to a chunk from a page struct */
 230static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 231{
 232	return (struct pcpu_chunk *)page->index;
 233}
 234
 235static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 236{
 237	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 238}
 239
 240static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 241				     unsigned int cpu, int page_idx)
 242{
 243	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 244		(page_idx << PAGE_SHIFT);
 245}
 246
 247static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
 248					   int *rs, int *re, int end)
 249{
 250	*rs = find_next_zero_bit(chunk->populated, end, *rs);
 251	*re = find_next_bit(chunk->populated, end, *rs + 1);
 252}
 253
 254static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
 255					 int *rs, int *re, int end)
 256{
 257	*rs = find_next_bit(chunk->populated, end, *rs);
 258	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
 259}
 260
 261/*
 262 * (Un)populated page region iterators.  Iterate over (un)populated
 263 * page regions between @start and @end in @chunk.  @rs and @re should
 264 * be integer variables and will be set to start and end page index of
 265 * the current region.
 266 */
 267#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
 268	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
 269	     (rs) < (re);						    \
 270	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
 271
 272#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
 273	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
 274	     (rs) < (re);						    \
 275	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
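/*
 * Illustrative use of the iterators above (a sketch): walk every
 * unpopulated page region of a chunk, e.g. to back it with pages.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
 *		// pages [rs, re) of each unit are currently unpopulated
 *	}
 */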
 276
 277/**
 278 * pcpu_mem_zalloc - allocate memory
 279 * @size: bytes to allocate
 280 *
 281 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 282 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 283 * memory is always zeroed.
 284 *
 285 * CONTEXT:
 286 * Does GFP_KERNEL allocation.
 287 *
 288 * RETURNS:
 289 * Pointer to the allocated area on success, NULL on failure.
 290 */
 291static void *pcpu_mem_zalloc(size_t size)
 292{
 293	if (WARN_ON_ONCE(!slab_is_available()))
 294		return NULL;
 295
 296	if (size <= PAGE_SIZE)
 297		return kzalloc(size, GFP_KERNEL);
 298	else
 299		return vzalloc(size);
 300}
 301
 302/**
 303 * pcpu_mem_free - free memory
 304 * @ptr: memory to free
 305 * @size: size of the area
 306 *
 307 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 308 */
 309static void pcpu_mem_free(void *ptr, size_t size)
 310{
 311	if (size <= PAGE_SIZE)
 312		kfree(ptr);
 313	else
 314		vfree(ptr);
 315}
 316
 317/**
 318 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 319 * @chunk: chunk of interest
 320 * @oslot: the previous slot it was on
 321 *
 322 * This function is called after an allocation or free changed @chunk.
 323 * New slot according to the changed state is determined and @chunk is
 324 * moved to the slot.  Note that the reserved chunk is never put on
 325 * chunk slots.
 326 *
 327 * CONTEXT:
 328 * pcpu_lock.
 329 */
 330static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 331{
 332	int nslot = pcpu_chunk_slot(chunk);
 333
 334	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
 335		if (oslot < nslot)
 336			list_move(&chunk->list, &pcpu_slot[nslot]);
 337		else
 338			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
 339	}
 340}
 341
 342/**
 343 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 344 * @chunk: chunk of interest
 345 *
 346 * Determine whether area map of @chunk needs to be extended to
 347 * accommodate a new allocation.
 348 *
 349 * CONTEXT:
 350 * pcpu_lock.
 351 *
 352 * RETURNS:
 353 * New target map allocation length if extension is necessary, 0
 354 * otherwise.
 355 */
 356static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 357{
 358	int new_alloc;
 359
 360	if (chunk->map_alloc >= chunk->map_used + 3)
 361		return 0;
 362
 363	new_alloc = PCPU_DFL_MAP_ALLOC;
 364	while (new_alloc < chunk->map_used + 3)
 365		new_alloc *= 2;
 366
 367	return new_alloc;
 368}
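/*
 * Worked example (illustrative): with map_used == 30 and
 * map_alloc == 32, 32 < 30 + 3 so an extension is needed; new_alloc
 * starts at PCPU_DFL_MAP_ALLOC (16) and doubles to 64, the first
 * doubled size that can hold the required 33 entries.
 */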
 369
 370/**
 371 * pcpu_extend_area_map - extend area map of a chunk
 372 * @chunk: chunk of interest
 373 * @new_alloc: new target allocation length of the area map
 374 *
 375 * Extend area map of @chunk to have @new_alloc entries.
 376 *
 377 * CONTEXT:
 378 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 379 *
 380 * RETURNS:
 381 * 0 on success, -errno on failure.
 382 */
 383static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 384{
 385	int *old = NULL, *new = NULL;
 386	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
 387	unsigned long flags;
 388
 389	new = pcpu_mem_zalloc(new_size);
 390	if (!new)
 391		return -ENOMEM;
 392
 393	/* acquire pcpu_lock and switch to new area map */
 394	spin_lock_irqsave(&pcpu_lock, flags);
 395
 396	if (new_alloc <= chunk->map_alloc)
 397		goto out_unlock;
 398
 399	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 400	old = chunk->map;
 401
 402	memcpy(new, old, old_size);
 403
 404	chunk->map_alloc = new_alloc;
 405	chunk->map = new;
 406	new = NULL;
 407
 408out_unlock:
 409	spin_unlock_irqrestore(&pcpu_lock, flags);
 410
 411	/*
 412	 * pcpu_mem_free() might end up calling vfree() which uses
 413	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
 414	 */
 415	pcpu_mem_free(old, old_size);
 416	pcpu_mem_free(new, new_size);
 417
 418	return 0;
 419}
 420
 421/**
 422 * pcpu_alloc_area - allocate area from a pcpu_chunk
 423 * @chunk: chunk of interest
 424 * @size: wanted size in bytes
 425 * @align: wanted align
 426 *
 427 * Try to allocate @size bytes area aligned at @align from @chunk.
 428 * Note that this function only allocates the offset.  It doesn't
 429 * populate or map the area.
 430 *
 431 * @chunk->map must have at least two free slots.
 432 *
 433 * CONTEXT:
 434 * pcpu_lock.
 435 *
 436 * RETURNS:
 437 * Allocated offset in @chunk on success, -1 if no matching area is
 438 * found.
 439 */
 440static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 441{
 442	int oslot = pcpu_chunk_slot(chunk);
 443	int max_contig = 0;
 444	int i, off;
 445	bool seen_free = false;
 446	int *p;
 447
 448	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
 449		int head, tail;
 450		int this_size;
 451
 452		off = *p;
 453		if (off & 1)
 454			continue;
 455
 456		/* extra for alignment requirement */
 457		head = ALIGN(off, align) - off;
 458
 459		this_size = (p[1] & ~1) - off;
 460		if (this_size < head + size) {
 461			if (!seen_free) {
 462				chunk->first_free = i;
 463				seen_free = true;
 464			}
 465			max_contig = max(this_size, max_contig);
 466			continue;
 467		}
 468
 469		/*
 470		 * If head is small or the previous block is free,
 471		 * merge'em.  Note that 'small' is defined as smaller
 472		 * than sizeof(int), which is very small but isn't too
 473		 * uncommon for percpu allocations.
 474		 */
 475		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
 476			*p = off += head;
 477			if (p[-1] & 1)
 478				chunk->free_size -= head;
 479			else
 480				max_contig = max(*p - p[-1], max_contig);
 481			this_size -= head;
 482			head = 0;
 483		}
 484
 485		/* if tail is small, just keep it around */
 486		tail = this_size - head - size;
 487		if (tail < sizeof(int)) {
 488			tail = 0;
 489			size = this_size - head;
 490		}
 491
 492		/* split if warranted */
 493		if (head || tail) {
 494			int nr_extra = !!head + !!tail;
 495
 496			/* insert new subblocks */
 497			memmove(p + nr_extra + 1, p + 1,
 498				sizeof(chunk->map[0]) * (chunk->map_used - i));
 499			chunk->map_used += nr_extra;
 500
 501			if (head) {
 502				if (!seen_free) {
 503					chunk->first_free = i;
 504					seen_free = true;
 505				}
 506				*++p = off += head;
 507				++i;
 508				max_contig = max(head, max_contig);
 509			}
 510			if (tail) {
 511				p[1] = off + size;
 512				max_contig = max(tail, max_contig);
 513			}
 514		}
 515
 516		if (!seen_free)
 517			chunk->first_free = i + 1;
 518
 519		/* update hint and mark allocated */
 520		if (i + 1 == chunk->map_used)
 521			chunk->contig_hint = max_contig; /* fully scanned */
 522		else
 523			chunk->contig_hint = max(chunk->contig_hint,
 524						 max_contig);
 525
 526		chunk->free_size -= size;
 527		*p |= 1;
 528
 529		pcpu_chunk_relocate(chunk, oslot);
 530		return off;
 531	}
 532
 533	chunk->contig_hint = max_contig;	/* fully scanned */
 534	pcpu_chunk_relocate(chunk, oslot);
 535
 536	/* tell the upper layer that this chunk has no matching area */
 537	return -1;
 538}
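/*
 * Illustrative map state (a sketch): each entry is a start offset with
 * bit 0 doubling as the in-use flag.  A 1024-byte unit with a single
 * 256-byte allocation at offset 256 would look like:
 *
 *	map[] = { 0, 256 | 1, 512, 1024 | 1 };	// map_used == 3
 *
 * i.e. [0, 256) free, [256, 512) allocated, [512, 1024) free, with the
 * final entry serving as the end sentry.
 */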
 539
 540/**
 541 * pcpu_free_area - free area to a pcpu_chunk
 542 * @chunk: chunk of interest
 543 * @freeme: offset of area to free
 544 *
 545 * Free area starting from @freeme to @chunk.  Note that this function
 546 * only modifies the allocation map.  It doesn't depopulate or unmap
 547 * the area.
 548 *
 549 * CONTEXT:
 550 * pcpu_lock.
 551 */
 552static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 553{
 554	int oslot = pcpu_chunk_slot(chunk);
 555	int off = 0;
 556	unsigned i, j;
 557	int to_free = 0;
 558	int *p;
 559
 560	freeme |= 1;	/* we are searching for <given offset, in use> pair */
 561
 562	i = 0;
 563	j = chunk->map_used;
 564	while (i != j) {
 565		unsigned k = (i + j) / 2;
 566		off = chunk->map[k];
 567		if (off < freeme)
 568			i = k + 1;
 569		else if (off > freeme)
 570			j = k;
 571		else
 572			i = j = k;
 573	}
 574	BUG_ON(off != freeme);
 575
 576	if (i < chunk->first_free)
 577		chunk->first_free = i;
 578
 579	p = chunk->map + i;
 580	*p = off &= ~1;
 581	chunk->free_size += (p[1] & ~1) - off;
 582
 583	/* merge with next? */
 584	if (!(p[1] & 1))
 585		to_free++;
 586	/* merge with previous? */
 587	if (i > 0 && !(p[-1] & 1)) {
 588		to_free++;
 589		i--;
 590		p--;
 591	}
 592	if (to_free) {
 593		chunk->map_used -= to_free;
 594		memmove(p + 1, p + 1 + to_free,
 595			(chunk->map_used - i) * sizeof(chunk->map[0]));
 596	}
 597
 598	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
 599	pcpu_chunk_relocate(chunk, oslot);
 600}
 601
 602static struct pcpu_chunk *pcpu_alloc_chunk(void)
 603{
 604	struct pcpu_chunk *chunk;
 605
 606	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
 607	if (!chunk)
 608		return NULL;
 609
 610	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
 611						sizeof(chunk->map[0]));
 612	if (!chunk->map) {
 613		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
 614		return NULL;
 615	}
 616
 617	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 618	chunk->map[0] = 0;
 619	chunk->map[1] = pcpu_unit_size | 1;
 620	chunk->map_used = 1;
 621
 622	INIT_LIST_HEAD(&chunk->list);
 623	chunk->free_size = pcpu_unit_size;
 624	chunk->contig_hint = pcpu_unit_size;
 625
 626	return chunk;
 627}
 628
 629static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 630{
 631	if (!chunk)
 632		return;
 633	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
 634	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
 635}
 636
 637/*
 638 * Chunk management implementation.
 639 *
 640 * To allow different implementations, chunk alloc/free and
 641 * [de]population are implemented in a separate file which is pulled
 642 * into this file and compiled together.  The following functions
 643 * should be implemented.
 644 *
 645 * pcpu_populate_chunk		- populate the specified range of a chunk
 646 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 647 * pcpu_create_chunk		- create a new chunk
 648 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 649 * pcpu_addr_to_page		- translate address to page struct
 650 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 651 */
 652static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
 653static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
 654static struct pcpu_chunk *pcpu_create_chunk(void);
 655static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 656static struct page *pcpu_addr_to_page(void *addr);
 657static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
 658
 659#ifdef CONFIG_NEED_PER_CPU_KM
 660#include "percpu-km.c"
 661#else
 662#include "percpu-vm.c"
 663#endif
 664
 665/**
 666 * pcpu_chunk_addr_search - determine chunk containing specified address
 667 * @addr: address for which the chunk needs to be determined.
 668 *
 669 * RETURNS:
 670 * The address of the found chunk.
 671 */
 672static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 673{
 674	/* is it in the first chunk? */
 675	if (pcpu_addr_in_first_chunk(addr)) {
 676		/* is it in the reserved area? */
 677		if (pcpu_addr_in_reserved_chunk(addr))
 678			return pcpu_reserved_chunk;
 679		return pcpu_first_chunk;
 680	}
 681
 682	/*
 683	 * The address is relative to unit0 which might be unused and
 684	 * thus unmapped.  Offset the address to the unit space of the
 685	 * current processor before looking it up in the vmalloc
 686	 * space.  Note that any possible cpu id can be used here, so
 687	 * there's no need to worry about preemption or cpu hotplug.
 688	 */
 689	addr += pcpu_unit_offsets[raw_smp_processor_id()];
 690	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
 691}
 692
 693/**
 694 * pcpu_alloc - the percpu allocator
 695 * @size: size of area to allocate in bytes
 696 * @align: alignment of area (max PAGE_SIZE)
 697 * @reserved: allocate from the reserved chunk if available
 698 *
 699 * Allocate percpu area of @size bytes aligned at @align.
 700 *
 701 * CONTEXT:
 702 * Does GFP_KERNEL allocation.
 703 *
 704 * RETURNS:
 705 * Percpu pointer to the allocated area on success, NULL on failure.
 706 */
 707static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 708{
 709	static int warn_limit = 10;
 710	struct pcpu_chunk *chunk;
 711	const char *err;
 712	int slot, off, new_alloc;
 713	unsigned long flags;
 714	void __percpu *ptr;
 715
 716	/*
 717	 * We want the lowest bit of offset available for in-use/free
 718	 * indicator, so force at least 2 byte alignment and make size even.
 719	 */
 720	if (unlikely(align < 2))
 721		align = 2;
 722
 723	if (unlikely(size & 1))
 724		size++;
 725
 726	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 727		WARN(true, "illegal size (%zu) or align (%zu) for "
 728		     "percpu allocation\n", size, align);
 729		return NULL;
 730	}
 731
 732	mutex_lock(&pcpu_alloc_mutex);
 733	spin_lock_irqsave(&pcpu_lock, flags);
 734
 735	/* serve reserved allocations from the reserved chunk if available */
 736	if (reserved && pcpu_reserved_chunk) {
 737		chunk = pcpu_reserved_chunk;
 738
 739		if (size > chunk->contig_hint) {
 740			err = "alloc from reserved chunk failed";
 741			goto fail_unlock;
 742		}
 743
 744		while ((new_alloc = pcpu_need_to_extend(chunk))) {
 745			spin_unlock_irqrestore(&pcpu_lock, flags);
 746			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
 747				err = "failed to extend area map of reserved chunk";
 748				goto fail_unlock_mutex;
 749			}
 750			spin_lock_irqsave(&pcpu_lock, flags);
 751		}
 752
 753		off = pcpu_alloc_area(chunk, size, align);
 754		if (off >= 0)
 755			goto area_found;
 756
 757		err = "alloc from reserved chunk failed";
 758		goto fail_unlock;
 759	}
 760
 761restart:
 762	/* search through normal chunks */
 763	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
 764		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
 765			if (size > chunk->contig_hint)
 766				continue;
 767
 768			new_alloc = pcpu_need_to_extend(chunk);
 769			if (new_alloc) {
 770				spin_unlock_irqrestore(&pcpu_lock, flags);
 771				if (pcpu_extend_area_map(chunk,
 772							 new_alloc) < 0) {
 773					err = "failed to extend area map";
 774					goto fail_unlock_mutex;
 775				}
 776				spin_lock_irqsave(&pcpu_lock, flags);
 777				/*
 778				 * pcpu_lock has been dropped, need to
 779				 * restart cpu_slot list walking.
 780				 */
 781				goto restart;
 782			}
 783
 784			off = pcpu_alloc_area(chunk, size, align);
 785			if (off >= 0)
 786				goto area_found;
 787		}
 788	}
 789
 790	/* hmmm... no space left, create a new chunk */
 791	spin_unlock_irqrestore(&pcpu_lock, flags);
 792
 793	chunk = pcpu_create_chunk();
 794	if (!chunk) {
 795		err = "failed to allocate new chunk";
 796		goto fail_unlock_mutex;
 797	}
 798
 799	spin_lock_irqsave(&pcpu_lock, flags);
 800	pcpu_chunk_relocate(chunk, -1);
 801	goto restart;
 802
 803area_found:
 804	spin_unlock_irqrestore(&pcpu_lock, flags);
 805
 806	/* populate, map and clear the area */
 807	if (pcpu_populate_chunk(chunk, off, size)) {
 808		spin_lock_irqsave(&pcpu_lock, flags);
 809		pcpu_free_area(chunk, off);
 810		err = "failed to populate";
 811		goto fail_unlock;
 812	}
 813
 814	mutex_unlock(&pcpu_alloc_mutex);
 815
 816	/* return address relative to base address */
 817	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
 818	kmemleak_alloc_percpu(ptr, size);
 819	return ptr;
 820
 821fail_unlock:
 822	spin_unlock_irqrestore(&pcpu_lock, flags);
 823fail_unlock_mutex:
 824	mutex_unlock(&pcpu_alloc_mutex);
 825	if (warn_limit) {
 826		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
 827			   "%s\n", size, align, err);
 828		dump_stack();
 829		if (!--warn_limit)
 830			pr_info("PERCPU: limit reached, disable warning\n");
 831	}
 832	return NULL;
 833}
 834
 835/**
 836 * __alloc_percpu - allocate dynamic percpu area
 837 * @size: size of area to allocate in bytes
 838 * @align: alignment of area (max PAGE_SIZE)
 839 *
 840 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 841 * Might sleep.  Might trigger writeouts.
 842 *
 843 * CONTEXT:
 844 * Does GFP_KERNEL allocation.
 845 *
 846 * RETURNS:
 847 * Percpu pointer to the allocated area on success, NULL on failure.
 848 */
 849void __percpu *__alloc_percpu(size_t size, size_t align)
 850{
 851	return pcpu_alloc(size, align, false);
 852}
 853EXPORT_SYMBOL_GPL(__alloc_percpu);
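/*
 * Typical usage (illustrative sketch; "pkt_count" is just an example
 * name).  Most callers use the alloc_percpu(type) wrapper, which
 * supplies sizeof(type) and __alignof__(type) automatically.
 *
 *	unsigned long __percpu *pkt_count;
 *
 *	pkt_count = __alloc_percpu(sizeof(unsigned long),
 *				   __alignof__(unsigned long));
 *	if (!pkt_count)
 *		return -ENOMEM;
 *	this_cpu_inc(*pkt_count);	// cheap cpu-local update
 *	free_percpu(pkt_count);
 */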
 854
 855/**
 856 * __alloc_reserved_percpu - allocate reserved percpu area
 857 * @size: size of area to allocate in bytes
 858 * @align: alignment of area (max PAGE_SIZE)
 859 *
 860 * Allocate zero-filled percpu area of @size bytes aligned at @align
 861 * from reserved percpu area if arch has set it up; otherwise,
 862 * allocation is served from the same dynamic area.  Might sleep.
 863 * Might trigger writeouts.
 864 *
 865 * CONTEXT:
 866 * Does GFP_KERNEL allocation.
 867 *
 868 * RETURNS:
 869 * Percpu pointer to the allocated area on success, NULL on failure.
 870 */
 871void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 872{
 873	return pcpu_alloc(size, align, true);
 874}
 875
 876/**
 877 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 878 * @work: unused
 879 *
 880 * Reclaim all fully free chunks except for the first one.
 881 *
 882 * CONTEXT:
 883 * workqueue context.
 884 */
 885static void pcpu_reclaim(struct work_struct *work)
 886{
 887	LIST_HEAD(todo);
 888	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
 889	struct pcpu_chunk *chunk, *next;
 890
 891	mutex_lock(&pcpu_alloc_mutex);
 892	spin_lock_irq(&pcpu_lock);
 893
 894	list_for_each_entry_safe(chunk, next, head, list) {
 895		WARN_ON(chunk->immutable);
 896
 897		/* spare the first one */
 898		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
 899			continue;
 900
 901		list_move(&chunk->list, &todo);
 902	}
 903
 904	spin_unlock_irq(&pcpu_lock);
 905
 906	list_for_each_entry_safe(chunk, next, &todo, list) {
 907		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
 908		pcpu_destroy_chunk(chunk);
 909	}
 910
 911	mutex_unlock(&pcpu_alloc_mutex);
 912}
 913
 914/**
 915 * free_percpu - free percpu area
 916 * @ptr: pointer to area to free
 917 *
 918 * Free percpu area @ptr.
 919 *
 920 * CONTEXT:
 921 * Can be called from atomic context.
 922 */
 923void free_percpu(void __percpu *ptr)
 924{
 925	void *addr;
 926	struct pcpu_chunk *chunk;
 927	unsigned long flags;
 928	int off;
 929
 930	if (!ptr)
 931		return;
 932
 933	kmemleak_free_percpu(ptr);
 934
 935	addr = __pcpu_ptr_to_addr(ptr);
 936
 937	spin_lock_irqsave(&pcpu_lock, flags);
 938
 939	chunk = pcpu_chunk_addr_search(addr);
 940	off = addr - chunk->base_addr;
 941
 942	pcpu_free_area(chunk, off);
 943
 944	/* if there is more than one fully free chunk, wake up grim reaper */
 945	if (chunk->free_size == pcpu_unit_size) {
 946		struct pcpu_chunk *pos;
 947
 948		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 949			if (pos != chunk) {
 950				schedule_work(&pcpu_reclaim_work);
 951				break;
 952			}
 953	}
 954
 955	spin_unlock_irqrestore(&pcpu_lock, flags);
 956}
 957EXPORT_SYMBOL_GPL(free_percpu);
 958
 959/**
 960 * is_kernel_percpu_address - test whether address is from static percpu area
 961 * @addr: address to test
 962 *
 963 * Test whether @addr belongs to in-kernel static percpu area.  Module
 964 * static percpu areas are not considered.  For those, use
 965 * is_module_percpu_address().
 966 *
 967 * RETURNS:
 968 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 969 */
 970bool is_kernel_percpu_address(unsigned long addr)
 971{
 972#ifdef CONFIG_SMP
 973	const size_t static_size = __per_cpu_end - __per_cpu_start;
 974	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 975	unsigned int cpu;
 976
 977	for_each_possible_cpu(cpu) {
 978		void *start = per_cpu_ptr(base, cpu);
 979
 980		if ((void *)addr >= start && (void *)addr < start + static_size)
 981			return true;
 982	}
 983#endif
 984	/* on UP, can't distinguish from other static vars, always false */
 985	return false;
 986}
 987
 988/**
 989 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 990 * @addr: the address to be converted to physical address
 991 *
 992 * Given @addr which is dereferenceable address obtained via one of
 993 * percpu access macros, this function translates it into its physical
 994 * address.  The caller is responsible for ensuring @addr stays valid
 995 * until this function finishes.
 996 *
 997 * The percpu allocator has special setup for the first chunk, which
 998 * currently supports either embedding in the linear address space or
 999 * vmalloc mapping; from the second chunk on, the backing allocator
1000 * (currently either vm or km) provides the translation.
1001 *
1002 * The addr can be translated simply without checking if it falls into the
1003 * first chunk.  But the current code better reflects how the percpu
1004 * allocator actually works, and the verification can discover bugs both
1005 * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
1006 * So we keep the current code.
1007 *
1008 * RETURNS:
1009 * The physical address for @addr.
1010 */
1011phys_addr_t per_cpu_ptr_to_phys(void *addr)
1012{
1013	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1014	bool in_first_chunk = false;
1015	unsigned long first_low, first_high;
1016	unsigned int cpu;
1017
1018	/*
1019	 * The following test on unit_low/high isn't strictly
1020	 * necessary but will speed up lookups of addresses which
1021	 * aren't in the first chunk.
1022	 */
1023	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1024	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1025				     pcpu_unit_pages);
1026	if ((unsigned long)addr >= first_low &&
1027	    (unsigned long)addr < first_high) {
1028		for_each_possible_cpu(cpu) {
1029			void *start = per_cpu_ptr(base, cpu);
1030
1031			if (addr >= start && addr < start + pcpu_unit_size) {
1032				in_first_chunk = true;
1033				break;
1034			}
1035		}
1036	}
1037
1038	if (in_first_chunk) {
1039		if (!is_vmalloc_addr(addr))
1040			return __pa(addr);
1041		else
1042			return page_to_phys(vmalloc_to_page(addr)) +
1043			       offset_in_page(addr);
1044	} else
1045		return page_to_phys(pcpu_addr_to_page(addr)) +
1046		       offset_in_page(addr);
1047}
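/*
 * Illustrative use (a sketch; "pcpu_buf" is a hypothetical percpu
 * pointer): a driver that must hand a cpu's copy to a device first
 * resolves the dereferenceable address, then translates it:
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(pcpu_buf, cpu));
 */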
1048
1049/**
1050 * pcpu_alloc_alloc_info - allocate percpu allocation info
1051 * @nr_groups: the number of groups
1052 * @nr_units: the number of units
1053 *
1054 * Allocate ai which is large enough for @nr_groups groups containing
1055 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1056 * cpu_map array which is long enough for @nr_units and filled with
1057 * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
1058 * pointers of the other groups.
1059 *
1060 * RETURNS:
1061 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1062 * failure.
1063 */
1064struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1065						      int nr_units)
1066{
1067	struct pcpu_alloc_info *ai;
1068	size_t base_size, ai_size;
1069	void *ptr;
1070	int unit;
1071
1072	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1073			  __alignof__(ai->groups[0].cpu_map[0]));
1074	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1075
1076	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1077	if (!ptr)
1078		return NULL;
1079	ai = ptr;
1080	ptr += base_size;
1081
1082	ai->groups[0].cpu_map = ptr;
1083
1084	for (unit = 0; unit < nr_units; unit++)
1085		ai->groups[0].cpu_map[unit] = NR_CPUS;
1086
1087	ai->nr_groups = nr_groups;
1088	ai->__ai_size = PFN_ALIGN(ai_size);
1089
1090	return ai;
1091}
1092
1093/**
1094 * pcpu_free_alloc_info - free percpu allocation info
1095 * @ai: pcpu_alloc_info to free
1096 *
1097 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1098 */
1099void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1100{
1101	memblock_free_early(__pa(ai), ai->__ai_size);
1102}
1103
1104/**
1105 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1106 * @lvl: loglevel
1107 * @ai: allocation info to dump
1108 *
1109 * Print out information about @ai using loglevel @lvl.
1110 */
1111static void pcpu_dump_alloc_info(const char *lvl,
1112				 const struct pcpu_alloc_info *ai)
1113{
1114	int group_width = 1, cpu_width = 1, width;
1115	char empty_str[] = "--------";
1116	int alloc = 0, alloc_end = 0;
1117	int group, v;
1118	int upa, apl;	/* units per alloc, allocs per line */
1119
1120	v = ai->nr_groups;
1121	while (v /= 10)
1122		group_width++;
1123
1124	v = num_possible_cpus();
1125	while (v /= 10)
1126		cpu_width++;
1127	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1128
1129	upa = ai->alloc_size / ai->unit_size;
1130	width = upa * (cpu_width + 1) + group_width + 3;
1131	apl = rounddown_pow_of_two(max(60 / width, 1));
1132
1133	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1134	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1135	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1136
1137	for (group = 0; group < ai->nr_groups; group++) {
1138		const struct pcpu_group_info *gi = &ai->groups[group];
1139		int unit = 0, unit_end = 0;
1140
1141		BUG_ON(gi->nr_units % upa);
1142		for (alloc_end += gi->nr_units / upa;
1143		     alloc < alloc_end; alloc++) {
1144			if (!(alloc % apl)) {
1145				printk(KERN_CONT "\n");
1146				printk("%spcpu-alloc: ", lvl);
1147			}
1148			printk(KERN_CONT "[%0*d] ", group_width, group);
1149
1150			for (unit_end += upa; unit < unit_end; unit++)
1151				if (gi->cpu_map[unit] != NR_CPUS)
1152					printk(KERN_CONT "%0*d ", cpu_width,
1153					       gi->cpu_map[unit]);
1154				else
1155					printk(KERN_CONT "%s ", empty_str);
1156		}
1157	}
1158	printk(KERN_CONT "\n");
1159}
1160
1161/**
1162 * pcpu_setup_first_chunk - initialize the first percpu chunk
1163 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1164 * @base_addr: mapped address
1165 *
1166 * Initialize the first percpu chunk which contains the kernel static
1167 * percpu area.  This function is to be called from the arch percpu area
1168 * setup path.
1169 *
1170 * @ai contains all information necessary to initialize the first
1171 * chunk and prime the dynamic percpu allocator.
1172 *
1173 * @ai->static_size is the size of static percpu area.
1174 *
1175 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1176 * reserve after the static area in the first chunk.  This reserves
1177 * the first chunk such that it's available only through reserved
1178 * percpu allocation.  This is primarily used to serve module percpu
1179 * static areas on architectures where the addressing model has
1180 * limited offset range for symbol relocations to guarantee module
1181 * percpu symbols fall inside the relocatable range.
1182 *
1183 * @ai->dyn_size determines the number of bytes available for dynamic
1184 * allocation in the first chunk.  The area between @ai->static_size +
1185 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1186 *
1187 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1188 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1189 * @ai->dyn_size.
1190 *
1191 * @ai->atom_size is the allocation atom size and used as alignment
1192 * for vm areas.
1193 *
1194 * @ai->alloc_size is the allocation size and always multiple of
1195 * @ai->atom_size.  This is larger than @ai->atom_size if
1196 * @ai->unit_size is larger than @ai->atom_size.
1197 *
1198 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1199 * percpu areas.  Units which should be colocated are put into the
1200 * same group.  Dynamic VM areas will be allocated according to these
1201 * groupings.  If @ai->nr_groups is zero, a single group containing
1202 * all units is assumed.
1203 *
1204 * The caller should have mapped the first chunk at @base_addr and
1205 * copied static data to each unit.
1206 *
1207 * If the first chunk ends up with both reserved and dynamic areas, it
1208 * is served by two chunks - one to serve the core static and reserved
1209 * areas and the other for the dynamic area.  They share the same vm
1210 * and page map but uses different area allocation map to stay away
1211 * from each other.  The latter chunk is circulated in the chunk slots
1212 * and available for dynamic allocation like any other chunks.
1213 *
1214 * RETURNS:
1215 * 0 on success, -errno on failure.
1216 */
1217int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1218				  void *base_addr)
1219{
1220	static char cpus_buf[4096] __initdata;
1221	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1222	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1223	size_t dyn_size = ai->dyn_size;
1224	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1225	struct pcpu_chunk *schunk, *dchunk = NULL;
1226	unsigned long *group_offsets;
1227	size_t *group_sizes;
1228	unsigned long *unit_off;
1229	unsigned int cpu;
1230	int *unit_map;
1231	int group, unit, i;
1232
1233	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1234
1235#define PCPU_SETUP_BUG_ON(cond)	do {					\
1236	if (unlikely(cond)) {						\
1237		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
1238		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
1239		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1240		BUG();							\
1241	}								\
1242} while (0)
1243
1244	/* sanity checks */
1245	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1246#ifdef CONFIG_SMP
1247	PCPU_SETUP_BUG_ON(!ai->static_size);
1248	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
1249#endif
1250	PCPU_SETUP_BUG_ON(!base_addr);
1251	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
1252	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1253	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1254	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1255	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1256	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1257
1258	/* process group information and build config tables accordingly */
1259	group_offsets = memblock_virt_alloc(ai->nr_groups *
1260					     sizeof(group_offsets[0]), 0);
1261	group_sizes = memblock_virt_alloc(ai->nr_groups *
1262					   sizeof(group_sizes[0]), 0);
1263	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1264	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
1265
1266	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1267		unit_map[cpu] = UINT_MAX;
1268
1269	pcpu_low_unit_cpu = NR_CPUS;
1270	pcpu_high_unit_cpu = NR_CPUS;
1271
1272	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1273		const struct pcpu_group_info *gi = &ai->groups[group];
1274
1275		group_offsets[group] = gi->base_offset;
1276		group_sizes[group] = gi->nr_units * ai->unit_size;
1277
1278		for (i = 0; i < gi->nr_units; i++) {
1279			cpu = gi->cpu_map[i];
1280			if (cpu == NR_CPUS)
1281				continue;
1282
1283			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1284			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1285			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1286
1287			unit_map[cpu] = unit + i;
1288			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1289
1290			/* determine low/high unit_cpu */
1291			if (pcpu_low_unit_cpu == NR_CPUS ||
1292			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1293				pcpu_low_unit_cpu = cpu;
1294			if (pcpu_high_unit_cpu == NR_CPUS ||
1295			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1296				pcpu_high_unit_cpu = cpu;
1297		}
1298	}
1299	pcpu_nr_units = unit;
1300
1301	for_each_possible_cpu(cpu)
1302		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1303
1304	/* we're done parsing the input, undefine BUG macro and dump config */
1305#undef PCPU_SETUP_BUG_ON
1306	pcpu_dump_alloc_info(KERN_DEBUG, ai);
1307
1308	pcpu_nr_groups = ai->nr_groups;
1309	pcpu_group_offsets = group_offsets;
1310	pcpu_group_sizes = group_sizes;
1311	pcpu_unit_map = unit_map;
1312	pcpu_unit_offsets = unit_off;
1313
1314	/* determine basic parameters */
1315	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1316	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1317	pcpu_atom_size = ai->atom_size;
1318	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1319		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1320
1321	/*
1322	 * Allocate chunk slots.  The additional last slot is for
1323	 * empty chunks.
1324	 */
1325	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1326	pcpu_slot = memblock_virt_alloc(
1327			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1328	for (i = 0; i < pcpu_nr_slots; i++)
1329		INIT_LIST_HEAD(&pcpu_slot[i]);
1330
1331	/*
1332	 * Initialize static chunk.  If reserved_size is zero, the
1333	 * static chunk covers static area + dynamic allocation area
1334	 * in the first chunk.  If reserved_size is not zero, it
1335	 * covers static area + reserved area (mostly used for module
1336	 * static percpu allocation).
1337	 */
1338	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1339	INIT_LIST_HEAD(&schunk->list);
1340	schunk->base_addr = base_addr;
1341	schunk->map = smap;
1342	schunk->map_alloc = ARRAY_SIZE(smap);
1343	schunk->immutable = true;
1344	bitmap_fill(schunk->populated, pcpu_unit_pages);
1345
1346	if (ai->reserved_size) {
1347		schunk->free_size = ai->reserved_size;
1348		pcpu_reserved_chunk = schunk;
1349		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1350	} else {
1351		schunk->free_size = dyn_size;
1352		dyn_size = 0;			/* dynamic area covered */
1353	}
1354	schunk->contig_hint = schunk->free_size;
1355
1356	schunk->map[0] = 1;
1357	schunk->map[1] = ai->static_size;
1358	schunk->map_used = 1;
1359	if (schunk->free_size)
1360		schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
1361	else
1362		schunk->map[1] |= 1;
1363
1364	/* init dynamic chunk if necessary */
1365	if (dyn_size) {
1366		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1367		INIT_LIST_HEAD(&dchunk->list);
1368		dchunk->base_addr = base_addr;
1369		dchunk->map = dmap;
1370		dchunk->map_alloc = ARRAY_SIZE(dmap);
1371		dchunk->immutable = true;
1372		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1373
1374		dchunk->contig_hint = dchunk->free_size = dyn_size;
1375		dchunk->map[0] = 1;
1376		dchunk->map[1] = pcpu_reserved_chunk_limit;
1377		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
1378		dchunk->map_used = 2;
1379	}
1380
1381	/* link the first chunk in */
1382	pcpu_first_chunk = dchunk ?: schunk;
1383	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1384
1385	/* we're done */
1386	pcpu_base_addr = base_addr;
1387	return 0;
1388}
1389
1390#ifdef CONFIG_SMP
1391
1392const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1393	[PCPU_FC_AUTO]	= "auto",
1394	[PCPU_FC_EMBED]	= "embed",
1395	[PCPU_FC_PAGE]	= "page",
1396};
1397
1398enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1399
1400static int __init percpu_alloc_setup(char *str)
1401{
1402	if (!str)
1403		return -EINVAL;
1404
1405	if (0)
1406		/* nada */;
1407#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1408	else if (!strcmp(str, "embed"))
1409		pcpu_chosen_fc = PCPU_FC_EMBED;
1410#endif
1411#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1412	else if (!strcmp(str, "page"))
1413		pcpu_chosen_fc = PCPU_FC_PAGE;
1414#endif
1415	else
1416		pr_warning("PERCPU: unknown allocator %s specified\n", str);
1417
1418	return 0;
1419}
1420early_param("percpu_alloc", percpu_alloc_setup);
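/*
 * Illustrative usage (boot command line): "percpu_alloc=page" forces
 * the page-based first chunk allocator on kernels built with
 * CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK; "percpu_alloc=embed" picks the
 * embedding allocator where CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK is
 * available.
 */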
1421
1422/*
1423 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1424 * Build it if needed by the arch config or the generic setup is going
1425 * to be used.
1426 */
1427#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1428	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1429#define BUILD_EMBED_FIRST_CHUNK
1430#endif
1431
1432/* build pcpu_page_first_chunk() iff needed by the arch config */
1433#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1434#define BUILD_PAGE_FIRST_CHUNK
1435#endif
1436
1437/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1438#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1439/**
1440 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1441 * @reserved_size: the size of reserved percpu area in bytes
1442 * @dyn_size: minimum free size for dynamic allocation in bytes
1443 * @atom_size: allocation atom size
1444 * @cpu_distance_fn: callback to determine distance between cpus, optional
1445 *
1446 * This function determines grouping of units, their mappings to cpus
1447 * and other parameters considering needed percpu size, allocation
1448 * atom size and distances between CPUs.
1449 *
1450 * Groups are always multiples of atom size and CPUs which are of
1451 * LOCAL_DISTANCE both ways are grouped together and share space for
1452 * units in the same group.  The returned configuration is guaranteed
1453 * to place CPUs on different nodes in different groups and to use at
1454 * least 75% of the allocated virtual address space.
1455 *
1456 * RETURNS:
1457 * On success, pointer to the new allocation_info is returned.  On
1458 * failure, ERR_PTR value is returned.
1459 */
1460static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1461				size_t reserved_size, size_t dyn_size,
1462				size_t atom_size,
1463				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1464{
1465	static int group_map[NR_CPUS] __initdata;
1466	static int group_cnt[NR_CPUS] __initdata;
1467	const size_t static_size = __per_cpu_end - __per_cpu_start;
1468	int nr_groups = 1, nr_units = 0;
1469	size_t size_sum, min_unit_size, alloc_size;
1470	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1471	int last_allocs, group, unit;
1472	unsigned int cpu, tcpu;
1473	struct pcpu_alloc_info *ai;
1474	unsigned int *cpu_map;
1475
1476	/* this function may be called multiple times */
1477	memset(group_map, 0, sizeof(group_map));
1478	memset(group_cnt, 0, sizeof(group_cnt));
1479
1480	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1481	size_sum = PFN_ALIGN(static_size + reserved_size +
1482			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1483	dyn_size = size_sum - static_size - reserved_size;
1484
1485	/*
1486	 * Determine min_unit_size, alloc_size and max_upa such that
1487	 * alloc_size is multiple of atom_size and is the smallest
1488	 * which can accommodate 4k aligned segments which are equal to
1489	 * or larger than min_unit_size.
1490	 */
1491	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1492
1493	alloc_size = roundup(min_unit_size, atom_size);
1494	upa = alloc_size / min_unit_size;
1495	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1496		upa--;
1497	max_upa = upa;
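	/*
	 * Worked example (illustrative, assuming 4k pages): for
	 * size_sum == 44k and atom_size == 2M, min_unit_size == 44k,
	 * alloc_size is rounded up to 2M and the initial upa is
	 * 2M / 44k == 46.  Because 2M % 46 != 0, the loop above lowers
	 * upa until the quotient is page aligned; the first acceptable
	 * value is 32, giving a 64k unit_size.
	 */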
1498
1499	/* group cpus according to their proximity */
1500	for_each_possible_cpu(cpu) {
1501		group = 0;
1502	next_group:
1503		for_each_possible_cpu(tcpu) {
1504			if (cpu == tcpu)
1505				break;
1506			if (group_map[tcpu] == group && cpu_distance_fn &&
1507			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1508			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1509				group++;
1510				nr_groups = max(nr_groups, group + 1);
1511				goto next_group;
1512			}
1513		}
1514		group_map[cpu] = group;
1515		group_cnt[group]++;
1516	}
1517
1518	/*
1519	 * Expand unit size until address space usage goes over 75%
1520	 * and then as much as possible without using more address
1521	 * space.
1522	 */
1523	last_allocs = INT_MAX;
1524	for (upa = max_upa; upa; upa--) {
1525		int allocs = 0, wasted = 0;
1526
1527		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1528			continue;
1529
1530		for (group = 0; group < nr_groups; group++) {
1531			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1532			allocs += this_allocs;
1533			wasted += this_allocs * upa - group_cnt[group];
1534		}
1535
1536		/*
1537		 * Don't accept if wastage is over 1/3.  The
1538		 * greater-than comparison ensures upa==1 always
1539		 * passes the following check.
1540		 */
1541		if (wasted > num_possible_cpus() / 3)
1542			continue;
1543
1544		/* and then don't consume more memory */
1545		if (allocs > last_allocs)
1546			break;
1547		last_allocs = allocs;
1548		best_upa = upa;
1549	}
1550	upa = best_upa;
1551
1552	/* allocate and fill alloc_info */
1553	for (group = 0; group < nr_groups; group++)
1554		nr_units += roundup(group_cnt[group], upa);
1555
1556	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1557	if (!ai)
1558		return ERR_PTR(-ENOMEM);
1559	cpu_map = ai->groups[0].cpu_map;
1560
1561	for (group = 0; group < nr_groups; group++) {
1562		ai->groups[group].cpu_map = cpu_map;
1563		cpu_map += roundup(group_cnt[group], upa);
1564	}
1565
1566	ai->static_size = static_size;
1567	ai->reserved_size = reserved_size;
1568	ai->dyn_size = dyn_size;
1569	ai->unit_size = alloc_size / upa;
1570	ai->atom_size = atom_size;
1571	ai->alloc_size = alloc_size;
1572
1573	for (group = 0, unit = 0; group_cnt[group]; group++) {
1574		struct pcpu_group_info *gi = &ai->groups[group];
1575
1576		/*
1577		 * Initialize base_offset as if all groups are located
1578		 * back-to-back.  The caller should update this to
1579		 * reflect actual allocation.
1580		 */
1581		gi->base_offset = unit * ai->unit_size;
1582
1583		for_each_possible_cpu(cpu)
1584			if (group_map[cpu] == group)
1585				gi->cpu_map[gi->nr_units++] = cpu;
1586		gi->nr_units = roundup(gi->nr_units, upa);
1587		unit += gi->nr_units;
1588	}
1589	BUG_ON(unit != nr_units);
1590
1591	return ai;
1592}
1593#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1594
1595#if defined(BUILD_EMBED_FIRST_CHUNK)
1596/**
1597 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1598 * @reserved_size: the size of reserved percpu area in bytes
1599 * @dyn_size: minimum free size for dynamic allocation in bytes
1600 * @atom_size: allocation atom size
1601 * @cpu_distance_fn: callback to determine distance between cpus, optional
1602 * @alloc_fn: function to allocate percpu page
1603 * @free_fn: function to free percpu page
1604 *
1605 * This is a helper to ease setting up embedded first percpu chunk and
1606 * can be called where pcpu_setup_first_chunk() is expected.
1607 *
1608 * If this function is used to setup the first chunk, it is allocated
1609 * by calling @alloc_fn and used as-is without being mapped into
1610 * vmalloc area.  Allocations are always whole multiples of @atom_size
1611 * aligned to @atom_size.
1612 *
1613 * This enables the first chunk to piggy back on the linear physical
1614 * mapping which often uses larger page size.  Please note that this
1615 * can result in very sparse cpu->unit mapping on NUMA machines thus
1616 * requiring large vmalloc address space.  Don't use this allocator if
1617 * vmalloc space is not orders of magnitude larger than distances
1618 * between node memory addresses (ie. 32bit NUMA machines).
1619 *
1620 * @dyn_size specifies the minimum dynamic area size.
1621 *
1622 * If the needed size is smaller than the minimum or specified unit
1623 * size, the leftover is returned using @free_fn.
1624 *
1625 * RETURNS:
1626 * 0 on success, -errno on failure.
1627 */
1628int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1629				  size_t atom_size,
1630				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1631				  pcpu_fc_alloc_fn_t alloc_fn,
1632				  pcpu_fc_free_fn_t free_fn)
1633{
1634	void *base = (void *)ULONG_MAX;
1635	void **areas = NULL;
1636	struct pcpu_alloc_info *ai;
1637	size_t size_sum, areas_size, max_distance;
1638	int group, i, rc;
1639
1640	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1641				   cpu_distance_fn);
1642	if (IS_ERR(ai))
1643		return PTR_ERR(ai);
1644
1645	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1646	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1647
1648	areas = memblock_virt_alloc_nopanic(areas_size, 0);
1649	if (!areas) {
1650		rc = -ENOMEM;
1651		goto out_free;
1652	}
1653
1654	/* allocate, copy and determine base address */
1655	for (group = 0; group < ai->nr_groups; group++) {
1656		struct pcpu_group_info *gi = &ai->groups[group];
1657		unsigned int cpu = NR_CPUS;
1658		void *ptr;
1659
1660		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1661			cpu = gi->cpu_map[i];
1662		BUG_ON(cpu == NR_CPUS);
1663
1664		/* allocate space for the whole group */
1665		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1666		if (!ptr) {
1667			rc = -ENOMEM;
1668			goto out_free_areas;
1669		}
1670		/* kmemleak tracks the percpu allocations separately */
1671		kmemleak_free(ptr);
1672		areas[group] = ptr;
1673
1674		base = min(ptr, base);
1675	}
1676
1677	/*
1678	 * Copy data and free unused parts.  This should happen after all
1679	 * allocations are complete; otherwise, we may end up with
1680	 * overlapping groups.
1681	 */
1682	for (group = 0; group < ai->nr_groups; group++) {
1683		struct pcpu_group_info *gi = &ai->groups[group];
1684		void *ptr = areas[group];
1685
1686		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1687			if (gi->cpu_map[i] == NR_CPUS) {
1688				/* unused unit, free whole */
1689				free_fn(ptr, ai->unit_size);
1690				continue;
1691			}
1692			/* copy and return the unused part */
1693			memcpy(ptr, __per_cpu_load, ai->static_size);
1694			free_fn(ptr + size_sum, ai->unit_size - size_sum);
1695		}
1696	}
1697
1698	/* base address is now known, determine group base offsets */
1699	max_distance = 0;
1700	for (group = 0; group < ai->nr_groups; group++) {
1701		ai->groups[group].base_offset = areas[group] - base;
1702		max_distance = max_t(size_t, max_distance,
1703				     ai->groups[group].base_offset);
1704	}
1705	max_distance += ai->unit_size;
1706
1707	/* warn if maximum distance is further than 75% of vmalloc space */
1708	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
1709		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1710			   "space 0x%lx\n", max_distance,
1711			   VMALLOC_TOTAL);
1712#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1713		/* and fail if we have fallback */
1714		rc = -EINVAL;
1715		goto out_free;
1716#endif
1717	}
1718
1719	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1720		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1721		ai->dyn_size, ai->unit_size);
1722
1723	rc = pcpu_setup_first_chunk(ai, base);
1724	goto out_free;
1725
1726out_free_areas:
1727	for (group = 0; group < ai->nr_groups; group++)
1728		if (areas[group])
1729			free_fn(areas[group],
1730				ai->groups[group].nr_units * ai->unit_size);
1731out_free:
1732	pcpu_free_alloc_info(ai);
1733	if (areas)
1734		memblock_free_early(__pa(areas), areas_size);
1735	return rc;
1736}
1737#endif /* BUILD_EMBED_FIRST_CHUNK */
1738
1739#ifdef BUILD_PAGE_FIRST_CHUNK
1740/**
1741 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1742 * @reserved_size: the size of reserved percpu area in bytes
1743 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1744 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1745 * @populate_pte_fn: function to populate pte
1746 *
1747 * This is a helper to ease setting up page-remapped first percpu
1748 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1749 *
1750 * This is the basic allocator.  Static percpu area is allocated
1751 * page-by-page into vmalloc area.
1752 *
1753 * RETURNS:
1754 * 0 on success, -errno on failure.
1755 */
1756int __init pcpu_page_first_chunk(size_t reserved_size,
1757				 pcpu_fc_alloc_fn_t alloc_fn,
1758				 pcpu_fc_free_fn_t free_fn,
1759				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1760{
1761	static struct vm_struct vm;
1762	struct pcpu_alloc_info *ai;
1763	char psize_str[16];
1764	int unit_pages;
1765	size_t pages_size;
1766	struct page **pages;
1767	int unit, i, j, rc;
1768
1769	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1770
1771	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1772	if (IS_ERR(ai))
1773		return PTR_ERR(ai);
1774	BUG_ON(ai->nr_groups != 1);
1775	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1776
1777	unit_pages = ai->unit_size >> PAGE_SHIFT;
1778
1779	/* unaligned allocations can't be freed, round up to page size */
1780	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1781			       sizeof(pages[0]));
1782	pages = memblock_virt_alloc(pages_size, 0);
1783
1784	/* allocate pages */
1785	j = 0;
1786	for (unit = 0; unit < num_possible_cpus(); unit++)
1787		for (i = 0; i < unit_pages; i++) {
1788			unsigned int cpu = ai->groups[0].cpu_map[unit];
1789			void *ptr;
1790
1791			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1792			if (!ptr) {
1793				pr_warning("PERCPU: failed to allocate %s page "
1794					   "for cpu%u\n", psize_str, cpu);
1795				goto enomem;
1796			}
1797			/* kmemleak tracks the percpu allocations separately */
1798			kmemleak_free(ptr);
1799			pages[j++] = virt_to_page(ptr);
1800		}
1801
1802	/* allocate vm area, map the pages and copy static data */
1803	vm.flags = VM_ALLOC;
1804	vm.size = num_possible_cpus() * ai->unit_size;
1805	vm_area_register_early(&vm, PAGE_SIZE);
1806
1807	for (unit = 0; unit < num_possible_cpus(); unit++) {
1808		unsigned long unit_addr =
1809			(unsigned long)vm.addr + unit * ai->unit_size;
1810
1811		for (i = 0; i < unit_pages; i++)
1812			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1813
1814		/* pte already populated, the following shouldn't fail */
1815		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1816				      unit_pages);
1817		if (rc < 0)
1818			panic("failed to map percpu area, err=%d\n", rc);
1819
1820		/*
1821		 * FIXME: Archs with virtual cache should flush local
1822		 * cache for the linear mapping here - something
1823		 * equivalent to flush_cache_vmap() on the local cpu.
1824		 * flush_cache_vmap() can't be used as most supporting
1825		 * data structures are not set up yet.
1826		 */
1827
1828		/* copy static data */
1829		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1830	}
1831
1832	/* we're ready, commit */
1833	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
1834		unit_pages, psize_str, vm.addr, ai->static_size,
1835		ai->reserved_size, ai->dyn_size);
1836
1837	rc = pcpu_setup_first_chunk(ai, vm.addr);
1838	goto out_free_ar;
1839
1840enomem:
1841	while (--j >= 0)
1842		free_fn(page_address(pages[j]), PAGE_SIZE);
1843	rc = -ENOMEM;
1844out_free_ar:
1845	memblock_free_early(__pa(pages), pages_size);
1846	pcpu_free_alloc_info(ai);
1847	return rc;
1848}
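
/*
 * Usage sketch (hypothetical, not lifted from any arch): a port that
 * cannot use the embedding helper might build its setup_per_cpu_areas()
 * on top of pcpu_page_first_chunk() roughly as follows.  The my_*
 * names are illustrative; my_populate_pte() would have to hand-build
 * the page tables for the given address, as e.g. pcpu_populate_pte()
 * does on x86.
 *
 *	static void * __init my_pcpu_alloc(unsigned int cpu, size_t size,
 *					   size_t align)
 *	{
 *		return memblock_virt_alloc_from_nopanic(size, align,
 *							__pa(MAX_DMA_ADDRESS));
 *	}
 *
 *	static void __init my_pcpu_free(void *ptr, size_t size)
 *	{
 *		memblock_free_early(__pa(ptr), size);
 *	}
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, my_pcpu_alloc,
 *				   my_pcpu_free, my_populate_pte);
 *	if (rc < 0)
 *		panic("percpu: page first chunk init failed");
 */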
1849#endif /* BUILD_PAGE_FIRST_CHUNK */
1850
1851#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
1852/*
1853 * Generic SMP percpu area setup.
1854 *
1855 * The embedding helper is used because its behavior closely resembles
1856 * the original non-dynamic generic percpu area setup.  This is
1857 * important because many archs have addressing restrictions and might
1858 * fail if the percpu area is located far away from the previous
1859 * location.  As an added bonus, in non-NUMA cases, embedding is
1860 * generally a good idea TLB-wise because the percpu area can piggyback
1861 * on the physical linear memory mapping which uses large page
1862 * mappings on applicable archs.
1863 */
1864unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1865EXPORT_SYMBOL(__per_cpu_offset);
1866
1867static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1868				       size_t align)
1869{
1870	return memblock_virt_alloc_from_nopanic(size, align,
1871						 __pa(MAX_DMA_ADDRESS));
1872}
1873
1874static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1875{
1876	memblock_free_early(__pa(ptr), size);
1877}
1878
1879void __init setup_per_cpu_areas(void)
1880{
1881	unsigned long delta;
1882	unsigned int cpu;
1883	int rc;
1884
1885	/*
1886	 * Always reserve area for module percpu variables.  That's
1887	 * what the legacy allocator did.
1888	 */
1889	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1890				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1891				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1892	if (rc < 0)
1893		panic("Failed to initialize percpu areas.");
1894
1895	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1896	for_each_possible_cpu(cpu)
1897		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1898}
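
/*
 * What the offsets above buy us, conceptually (only a sketch; the real
 * access macros live in include/linux/percpu-defs.h): for a static
 * percpu variable "var" and a possible cpu,
 *
 *	&per_cpu(var, cpu) == &var + __per_cpu_offset[cpu]
 *
 * delta relocates the linker-assigned, __per_cpu_start-relative
 * address into the first chunk, and pcpu_unit_offsets[cpu] then
 * selects that cpu's unit within the chunk.
 */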
1899#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1900
1901#else	/* CONFIG_SMP */
1902
1903/*
1904 * UP percpu area setup.
1905 *
1906 * UP always uses the km-based percpu allocator with identity mapping.
1907 * Static percpu variables are indistinguishable from the usual static
1908 * variables and don't require any special preparation.
1909 */
1910void __init setup_per_cpu_areas(void)
1911{
1912	const size_t unit_size =
1913		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
1914					 PERCPU_DYNAMIC_RESERVE));
1915	struct pcpu_alloc_info *ai;
1916	void *fc;
1917
1918	ai = pcpu_alloc_alloc_info(1, 1);
1919	fc = memblock_virt_alloc_from_nopanic(unit_size,
1920					      PAGE_SIZE,
1921					      __pa(MAX_DMA_ADDRESS));
1922	if (!ai || !fc)
1923		panic("Failed to allocate memory for percpu areas.");
1924	/* kmemleak tracks the percpu allocations separately */
1925	kmemleak_free(fc);
1926
1927	ai->dyn_size = unit_size;
1928	ai->unit_size = unit_size;
1929	ai->atom_size = unit_size;
1930	ai->alloc_size = unit_size;
1931	ai->groups[0].nr_units = 1;
1932	ai->groups[0].cpu_map[0] = 0;
1933
1934	if (pcpu_setup_first_chunk(ai, fc) < 0)
1935		panic("Failed to initialize percpu areas.");
1936}
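
/*
 * Sizing example for the above (illustrative values): with 4 KiB pages
 * PCPU_MIN_UNIT_SIZE is 32 KiB; if PERCPU_DYNAMIC_RESERVE were, say,
 * 28 KiB, max_t() picks 32 KiB and roundup_pow_of_two() leaves it
 * unchanged, so the single unit is 32 KiB and, since ai->dyn_size ==
 * ai->unit_size, all of it serves dynamic allocations.
 */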
1937
1938#endif	/* CONFIG_SMP */
1939
1940/*
1941 * The first and reserved chunks are initialized with a temporary
1942 * allocation map in initdata so that they can be used before slab is
1943 * online.  This function is called after slab is brought up and
1944 * replaces those maps with properly allocated ones.
1945 */
1946void __init percpu_init_late(void)
1947{
1948	struct pcpu_chunk *target_chunks[] =
1949		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1950	struct pcpu_chunk *chunk;
1951	unsigned long flags;
1952	int i;
1953
1954	for (i = 0; (chunk = target_chunks[i]); i++) {
1955		int *map;
1956		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1957
1958		BUILD_BUG_ON(size > PAGE_SIZE);
1959
1960		map = pcpu_mem_zalloc(size);
1961		BUG_ON(!map);
1962
1963		spin_lock_irqsave(&pcpu_lock, flags);
1964		memcpy(map, chunk->map, size);
1965		chunk->map = map;
1966		spin_unlock_irqrestore(&pcpu_lock, flags);
1967	}
1968}
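
/*
 * Call-site note: percpu_init_late() is expected to run exactly once
 * during boot, from mm_init() in init/main.c, after kmem_cache_init()
 * has brought slab up; that ordering is what makes the
 * pcpu_mem_zalloc() calls above safe.
 */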