mm/percpu.c - Linux v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/percpu.c - percpu memory allocator
   4 *
   5 * Copyright (C) 2009		SUSE Linux Products GmbH
   6 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
   7 *
   8 * Copyright (C) 2017		Facebook Inc.
   9 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
  10 *
  11 * The percpu allocator handles both static and dynamic areas.  Percpu
  12 * areas are allocated in chunks which are divided into units.  There is
  13 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
  14 * based on NUMA properties of the machine.
  15 *
  16 *  c0                           c1                         c2
  17 *  -------------------          -------------------        ------------
  18 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  19 *  -------------------  ......  -------------------  ....  ------------
  20 *
   21 * Allocation is done by offsets into a unit's address space.  I.e., an
  22 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
  23 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
  24 * and even sparse.  Access is handled by configuring percpu base
  25 * registers according to the cpu to unit mappings and offsetting the
  26 * base address using pcpu_unit_size.
  27 *
  28 * There is special consideration for the first chunk which must handle
  29 * the static percpu variables in the kernel image as allocation services
  30 * are not online yet.  In short, the first chunk is structured like so:
  31 *
  32 *                  <Static | [Reserved] | Dynamic>
  33 *
  34 * The static data is copied from the original section managed by the
  35 * linker.  The reserved section, if non-zero, primarily manages static
  36 * percpu variables from kernel modules.  Finally, the dynamic section
  37 * takes care of normal allocations.
  38 *
  39 * The allocator organizes chunks into lists according to free size and
  40 * memcg-awareness.  To make a percpu allocation memcg-aware the __GFP_ACCOUNT
   41 * flag should be passed.  All memcg-aware allocations share one set of
   42 * chunks, while all unaccounted allocations and allocations performed by
   43 * processes belonging to the root memory cgroup use the second set.
  44 *
  45 * The allocator tries to allocate from the fullest chunk first. Each chunk
  46 * is managed by a bitmap with metadata blocks.  The allocation map is updated
  47 * on every allocation and free to reflect the current state while the boundary
  48 * map is only updated on allocation.  Each metadata block contains
  49 * information to help mitigate the need to iterate over large portions
  50 * of the bitmap.  The reverse mapping from page to chunk is stored in
  51 * the page's index.  Lastly, units are lazily backed and grow in unison.
  52 *
  53 * There is a unique conversion that goes on here between bytes and bits.
  54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
  55 * tracks the number of pages it is responsible for in nr_pages.  Helper
   56 * functions are used to convert between bytes, bits, and blocks.
  57 * All hints are managed in bits unless explicitly stated.
  58 *
  59 * To use this allocator, arch code should do the following:
  60 *
  61 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  62 *   regular address to percpu pointer and back if they need to be
  63 *   different from the default
  64 *
  65 * - use pcpu_setup_first_chunk() during percpu area initialization to
  66 *   setup the first chunk containing the kernel static percpu area
  67 */
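/*
 * Illustrative sketch (not part of the allocator): how kernel code typically
 * consumes percpu memory through the public wrappers built on top of the
 * allocator below.  The struct and field are hypothetical; the interfaces
 * used (alloc_percpu_gfp(), this_cpu_ptr(), per_cpu_ptr(),
 * for_each_possible_cpu(), free_percpu()) are the standard percpu API, and
 * passing __GFP_ACCOUNT makes the allocation memcg-aware as described above.
 *
 *	struct hypothetical_stats {
 *		u64 hits;
 *	};
 *
 *	struct hypothetical_stats __percpu *stats;
 *	unsigned int cpu;
 *	u64 total = 0;
 *
 *	stats = alloc_percpu_gfp(struct hypothetical_stats,
 *				 GFP_KERNEL | __GFP_ACCOUNT);
 *	if (!stats)
 *		return -ENOMEM;
 *
 *	get_cpu();
 *	this_cpu_ptr(stats)->hits++;
 *	put_cpu();
 *
 *	for_each_possible_cpu(cpu)
 *		total += per_cpu_ptr(stats, cpu)->hits;
 *
 *	free_percpu(stats);
 */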
  68
  69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  70
  71#include <linux/bitmap.h>
  72#include <linux/memblock.h>
  73#include <linux/err.h>
  74#include <linux/lcm.h>
  75#include <linux/list.h>
  76#include <linux/log2.h>
  77#include <linux/mm.h>
  78#include <linux/module.h>
  79#include <linux/mutex.h>
  80#include <linux/percpu.h>
  81#include <linux/pfn.h>
  82#include <linux/slab.h>
  83#include <linux/spinlock.h>
  84#include <linux/vmalloc.h>
  85#include <linux/workqueue.h>
  86#include <linux/kmemleak.h>
  87#include <linux/sched.h>
  88#include <linux/sched/mm.h>
  89#include <linux/memcontrol.h>
  90
  91#include <asm/cacheflush.h>
  92#include <asm/sections.h>
  93#include <asm/tlbflush.h>
  94#include <asm/io.h>
  95
  96#define CREATE_TRACE_POINTS
  97#include <trace/events/percpu.h>
  98
  99#include "percpu-internal.h"
 100
 101/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
 102#define PCPU_SLOT_BASE_SHIFT		5
 103/* chunks in slots below this are subject to being sidelined on failed alloc */
 104#define PCPU_SLOT_FAIL_THRESHOLD	3
 105
 106#define PCPU_EMPTY_POP_PAGES_LOW	2
 107#define PCPU_EMPTY_POP_PAGES_HIGH	4
 108
 109#ifdef CONFIG_SMP
 110/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 111#ifndef __addr_to_pcpu_ptr
 112#define __addr_to_pcpu_ptr(addr)					\
 113	(void __percpu *)((unsigned long)(addr) -			\
 114			  (unsigned long)pcpu_base_addr	+		\
 115			  (unsigned long)__per_cpu_start)
 116#endif
 117#ifndef __pcpu_ptr_to_addr
 118#define __pcpu_ptr_to_addr(ptr)						\
 119	(void __force *)((unsigned long)(ptr) +				\
 120			 (unsigned long)pcpu_base_addr -		\
 121			 (unsigned long)__per_cpu_start)
 122#endif
 123#else	/* CONFIG_SMP */
 124/* on UP, it's always identity mapped */
 125#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
 126#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
 127#endif	/* CONFIG_SMP */
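/*
 * Worked example of the mapping above, with made-up addresses purely for
 * illustration: suppose pcpu_base_addr (the unit0 base of the first chunk)
 * is 0xffffe8ff00000000 and __per_cpu_start (the start of the static percpu
 * section in the kernel image) is 0xffffffff82000000.  A dynamic allocation
 * placed at unit0 address 0xffffe8ff00003000 is handed out as
 *
 *	__addr_to_pcpu_ptr(0xffffe8ff00003000)
 *		= 0xffffe8ff00003000 - 0xffffe8ff00000000 + 0xffffffff82000000
 *		= 0xffffffff82003000
 *
 * i.e. dynamic percpu pointers are rebased into the same address space as
 * the static percpu symbols so that adding a cpu's percpu offset works
 * identically for both.  __pcpu_ptr_to_addr() performs the inverse.
 */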
 128
 129static int pcpu_unit_pages __ro_after_init;
 130static int pcpu_unit_size __ro_after_init;
 131static int pcpu_nr_units __ro_after_init;
 132static int pcpu_atom_size __ro_after_init;
 133int pcpu_nr_slots __ro_after_init;
 134static size_t pcpu_chunk_struct_size __ro_after_init;
 135
 136/* cpus with the lowest and highest unit addresses */
 137static unsigned int pcpu_low_unit_cpu __ro_after_init;
 138static unsigned int pcpu_high_unit_cpu __ro_after_init;
 139
 140/* the address of the first chunk which starts with the kernel static area */
 141void *pcpu_base_addr __ro_after_init;
 142EXPORT_SYMBOL_GPL(pcpu_base_addr);
 143
 144static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
 145const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */
 146
 147/* group information, used for vm allocation */
 148static int pcpu_nr_groups __ro_after_init;
 149static const unsigned long *pcpu_group_offsets __ro_after_init;
 150static const size_t *pcpu_group_sizes __ro_after_init;
 151
 152/*
 153 * The first chunk which always exists.  Note that unlike other
 154 * chunks, this one can be allocated and mapped in several different
 155 * ways and thus often doesn't live in the vmalloc area.
 156 */
 157struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
 158
 159/*
 160 * Optional reserved chunk.  This chunk reserves part of the first
 161 * chunk and serves it for reserved allocations.  When the reserved
 162 * region doesn't exist, the following variable is NULL.
 163 */
 164struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
 165
 166DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
 167static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 168
 169struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
 170
 171/* chunks which need their map areas extended, protected by pcpu_lock */
 172static LIST_HEAD(pcpu_map_extend_chunks);
 173
 174/*
 175 * The number of empty populated pages, protected by pcpu_lock.  The
 176 * reserved chunk doesn't contribute to the count.
 177 */
 178int pcpu_nr_empty_pop_pages;
 179
 180/*
 181 * The number of populated pages in use by the allocator, protected by
  182 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 183 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 184 * and increments/decrements this count by 1).
 185 */
 186static unsigned long pcpu_nr_populated;
 187
 188/*
 189 * Balance work is used to populate or destroy chunks asynchronously.  We
 190 * try to keep the number of populated free pages between
 191 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 192 * empty chunk.
 193 */
 194static void pcpu_balance_workfn(struct work_struct *work);
 195static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 196static bool pcpu_async_enabled __read_mostly;
 197static bool pcpu_atomic_alloc_failed;
 198
 199static void pcpu_schedule_balance_work(void)
 200{
 201	if (pcpu_async_enabled)
 202		schedule_work(&pcpu_balance_work);
 203}
 204
 205/**
 206 * pcpu_addr_in_chunk - check if the address is served from this chunk
 207 * @chunk: chunk of interest
 208 * @addr: percpu address
 209 *
 210 * RETURNS:
 211 * True if the address is served from this chunk.
 212 */
 213static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
 214{
 215	void *start_addr, *end_addr;
 216
 217	if (!chunk)
 218		return false;
 219
 220	start_addr = chunk->base_addr + chunk->start_offset;
 221	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
 222		   chunk->end_offset;
 223
 224	return addr >= start_addr && addr < end_addr;
 225}
 226
 227static int __pcpu_size_to_slot(int size)
 228{
 229	int highbit = fls(size);	/* size is in bytes */
 230	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 231}
 232
 233static int pcpu_size_to_slot(int size)
 234{
 235	if (size == pcpu_unit_size)
 236		return pcpu_nr_slots - 1;
 237	return __pcpu_size_to_slot(size);
 238}
 239
 240static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 241{
 242	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 243
 244	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
 245	    chunk_md->contig_hint == 0)
 246		return 0;
 247
 248	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
 249}
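/*
 * Worked example of the slot math above (sizes are hypothetical): with
 * PCPU_SLOT_BASE_SHIFT == 5, a chunk whose largest contiguous free area is
 * 96 bytes maps to slot max(fls(96) - 5 + 2, 1) = max(7 - 5 + 2, 1) = 4,
 * while a chunk with only 12 contiguous free bytes maps to
 * max(4 - 5 + 2, 1) = 1.  A completely empty chunk
 * (size == pcpu_unit_size) is special-cased into the last slot,
 * pcpu_nr_slots - 1.
 */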
 250
 251/* set the pointer to a chunk in a page struct */
 252static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 253{
 254	page->index = (unsigned long)pcpu;
 255}
 256
 257/* obtain pointer to a chunk from a page struct */
 258static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 259{
 260	return (struct pcpu_chunk *)page->index;
 261}
 262
 263static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 264{
 265	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 266}
 267
 268static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
 269{
 270	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
 271}
 272
 273static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 274				     unsigned int cpu, int page_idx)
 275{
 276	return (unsigned long)chunk->base_addr +
 277	       pcpu_unit_page_offset(cpu, page_idx);
 278}
 279
 280/*
 281 * The following are helper functions to help access bitmaps and convert
  282 * between bitmap offsets and address offsets.
 283 */
 284static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
 285{
 286	return chunk->alloc_map +
 287	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
 288}
 289
 290static unsigned long pcpu_off_to_block_index(int off)
 291{
 292	return off / PCPU_BITMAP_BLOCK_BITS;
 293}
 294
 295static unsigned long pcpu_off_to_block_off(int off)
 296{
 297	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
 298}
 299
 300static unsigned long pcpu_block_off_to_off(int index, int off)
 301{
 302	return index * PCPU_BITMAP_BLOCK_BITS + off;
 303}
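/*
 * Worked example of the bit <-> block conversions above, assuming 4K pages,
 * PCPU_BITMAP_BLOCK_SIZE == PAGE_SIZE and PCPU_MIN_ALLOC_SIZE == 4 bytes,
 * which gives PCPU_BITMAP_BLOCK_BITS == 1024: chunk bit offset 2600
 * corresponds to byte offset 2600 * 4 = 10400 within the chunk and splits
 * into block index 2600 / 1024 = 2 and block offset 2600 & 1023 = 552.
 * pcpu_block_off_to_off(2, 552) recombines them into 2600.
 */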
 304
 305/*
 306 * pcpu_next_hint - determine which hint to use
 307 * @block: block of interest
 308 * @alloc_bits: size of allocation
 309 *
 310 * This determines if we should scan based on the scan_hint or first_free.
 311 * In general, we want to scan from first_free to fulfill allocations by
 312 * first fit.  However, if we know a scan_hint at position scan_hint_start
 313 * cannot fulfill an allocation, we can begin scanning from there knowing
 314 * the contig_hint will be our fallback.
 315 */
 316static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
 317{
 318	/*
 319	 * The three conditions below determine if we can skip past the
 320	 * scan_hint.  First, does the scan hint exist.  Second, is the
 321	 * contig_hint after the scan_hint (possibly not true iff
 322	 * contig_hint == scan_hint).  Third, is the allocation request
 323	 * larger than the scan_hint.
 324	 */
 325	if (block->scan_hint &&
 326	    block->contig_hint_start > block->scan_hint_start &&
 327	    alloc_bits > block->scan_hint)
 328		return block->scan_hint_start + block->scan_hint;
 329
 330	return block->first_free;
 331}
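/*
 * Worked example (hypothetical hint values): with first_free == 2, a
 * scan_hint of 3 bits starting at scan_hint_start == 10 and a contig_hint
 * starting at contig_hint_start == 40, a request for 5 bits cannot fit in
 * the scan hint region, so scanning starts just past it at 10 + 3 == 13.
 * A request for 2 bits might be satisfied at or before the scan hint, so
 * it starts at first_free == 2 instead.
 */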
 332
 333/**
 334 * pcpu_next_md_free_region - finds the next hint free area
 335 * @chunk: chunk of interest
 336 * @bit_off: chunk offset
 337 * @bits: size of free area
 338 *
 339 * Helper function for pcpu_for_each_md_free_region.  It checks
 340 * block->contig_hint and performs aggregation across blocks to find the
 341 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 342 * loop.
 343 */
 344static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
 345				     int *bits)
 346{
 347	int i = pcpu_off_to_block_index(*bit_off);
 348	int block_off = pcpu_off_to_block_off(*bit_off);
 349	struct pcpu_block_md *block;
 350
 351	*bits = 0;
 352	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
 353	     block++, i++) {
 354		/* handles contig area across blocks */
 355		if (*bits) {
 356			*bits += block->left_free;
 357			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
 358				continue;
 359			return;
 360		}
 361
 362		/*
 363		 * This checks three things.  First is there a contig_hint to
 364		 * check.  Second, have we checked this hint before by
 365		 * comparing the block_off.  Third, is this the same as the
 366		 * right contig hint.  In the last case, it spills over into
 367		 * the next block and should be handled by the contig area
 368		 * across blocks code.
 369		 */
 370		*bits = block->contig_hint;
 371		if (*bits && block->contig_hint_start >= block_off &&
 372		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
 373			*bit_off = pcpu_block_off_to_off(i,
 374					block->contig_hint_start);
 375			return;
 376		}
 377		/* reset to satisfy the second predicate above */
 378		block_off = 0;
 379
 380		*bits = block->right_free;
 381		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
 382	}
 383}
 384
 385/**
 386 * pcpu_next_fit_region - finds fit areas for a given allocation request
 387 * @chunk: chunk of interest
 388 * @alloc_bits: size of allocation
 389 * @align: alignment of area (max PAGE_SIZE)
 390 * @bit_off: chunk offset
 391 * @bits: size of free area
 392 *
 393 * Finds the next free region that is viable for use with a given size and
 394 * alignment.  This only returns if there is a valid area to be used for this
 395 * allocation.  block->first_free is returned if the allocation request fits
 396 * within the block to see if the request can be fulfilled prior to the contig
 397 * hint.
 398 */
 399static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
 400				 int align, int *bit_off, int *bits)
 401{
 402	int i = pcpu_off_to_block_index(*bit_off);
 403	int block_off = pcpu_off_to_block_off(*bit_off);
 404	struct pcpu_block_md *block;
 405
 406	*bits = 0;
 407	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
 408	     block++, i++) {
 409		/* handles contig area across blocks */
 410		if (*bits) {
 411			*bits += block->left_free;
 412			if (*bits >= alloc_bits)
 413				return;
 414			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
 415				continue;
 416		}
 417
 418		/* check block->contig_hint */
 419		*bits = ALIGN(block->contig_hint_start, align) -
 420			block->contig_hint_start;
 421		/*
 422		 * This uses the block offset to determine if this has been
 423		 * checked in the prior iteration.
 424		 */
 425		if (block->contig_hint &&
 426		    block->contig_hint_start >= block_off &&
 427		    block->contig_hint >= *bits + alloc_bits) {
 428			int start = pcpu_next_hint(block, alloc_bits);
 429
 430			*bits += alloc_bits + block->contig_hint_start -
 431				 start;
 432			*bit_off = pcpu_block_off_to_off(i, start);
 433			return;
 434		}
 435		/* reset to satisfy the second predicate above */
 436		block_off = 0;
 437
 438		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
 439				 align);
 440		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
 441		*bit_off = pcpu_block_off_to_off(i, *bit_off);
 442		if (*bits >= alloc_bits)
 443			return;
 444	}
 445
 446	/* no valid offsets were found - fail condition */
 447	*bit_off = pcpu_chunk_map_bits(chunk);
 448}
 449
 450/*
 451 * Metadata free area iterators.  These perform aggregation of free areas
 452 * based on the metadata blocks and return the offset @bit_off and size in
 453 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 454 * a fit is found for the allocation request.
 455 */
 456#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
 457	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
 458	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
 459	     (bit_off) += (bits) + 1,					\
 460	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
 461
 462#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
 463	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
 464				  &(bits));				      \
 465	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
 466	     (bit_off) += (bits),					      \
 467	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
 468				  &(bits)))
 469
 470/**
 471 * pcpu_mem_zalloc - allocate memory
 472 * @size: bytes to allocate
 473 * @gfp: allocation flags
 474 *
 475 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 476 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 477 * This is to facilitate passing through whitelisted flags.  The
 478 * returned memory is always zeroed.
 479 *
 480 * RETURNS:
 481 * Pointer to the allocated area on success, NULL on failure.
 482 */
 483static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 484{
 485	if (WARN_ON_ONCE(!slab_is_available()))
 486		return NULL;
 487
 488	if (size <= PAGE_SIZE)
 489		return kzalloc(size, gfp);
 490	else
 491		return __vmalloc(size, gfp | __GFP_ZERO);
 492}
 493
 494/**
 495 * pcpu_mem_free - free memory
 496 * @ptr: memory to free
 497 *
 498 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 499 */
 500static void pcpu_mem_free(void *ptr)
 501{
 502	kvfree(ptr);
 503}
 504
 505static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
 506			      bool move_front)
 507{
 508	if (chunk != pcpu_reserved_chunk) {
 509		struct list_head *pcpu_slot;
 510
 511		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
 512		if (move_front)
 513			list_move(&chunk->list, &pcpu_slot[slot]);
 514		else
 515			list_move_tail(&chunk->list, &pcpu_slot[slot]);
 516	}
 517}
 518
 519static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
 520{
 521	__pcpu_chunk_move(chunk, slot, true);
 522}
 523
 524/**
 525 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 526 * @chunk: chunk of interest
 527 * @oslot: the previous slot it was on
 528 *
 529 * This function is called after an allocation or free changed @chunk.
 530 * New slot according to the changed state is determined and @chunk is
 531 * moved to the slot.  Note that the reserved chunk is never put on
 532 * chunk slots.
 533 *
 534 * CONTEXT:
 535 * pcpu_lock.
 536 */
 537static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 538{
 539	int nslot = pcpu_chunk_slot(chunk);
 540
 541	if (oslot != nslot)
 542		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
 543}
 544
 545/*
 546 * pcpu_update_empty_pages - update empty page counters
 547 * @chunk: chunk of interest
 548 * @nr: nr of empty pages
 549 *
  550 * This is used to keep track of the empty pages based on the premise that
  551 * an md_block covers a page.  The hint update functions recognize if a block
 552 * is made full or broken to calculate deltas for keeping track of free pages.
 553 */
 554static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
 555{
 556	chunk->nr_empty_pop_pages += nr;
 557	if (chunk != pcpu_reserved_chunk)
 558		pcpu_nr_empty_pop_pages += nr;
 559}
 560
 561/*
 562 * pcpu_region_overlap - determines if two regions overlap
 563 * @a: start of first region, inclusive
 564 * @b: end of first region, exclusive
 565 * @x: start of second region, inclusive
 566 * @y: end of second region, exclusive
 567 *
 568 * This is used to determine if the hint region [a, b) overlaps with the
 569 * allocated region [x, y).
 570 */
 571static inline bool pcpu_region_overlap(int a, int b, int x, int y)
 572{
 573	return (a < y) && (x < b);
 574}
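/*
 * For example, the hint region [4, 10) overlaps the allocated region
 * [8, 12) because 4 < 12 and 8 < 10, while [4, 8) and [8, 12) are merely
 * adjacent and do not overlap (8 < 8 is false).
 */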
 575
 576/**
 577 * pcpu_block_update - updates a block given a free area
 578 * @block: block of interest
 579 * @start: start offset in block
 580 * @end: end offset in block
 581 *
 582 * Updates a block given a known free area.  The region [start, end) is
 583 * expected to be the entirety of the free area within a block.  Chooses
 584 * the best starting offset if the contig hints are equal.
 585 */
 586static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
 587{
 588	int contig = end - start;
 589
 590	block->first_free = min(block->first_free, start);
 591	if (start == 0)
 592		block->left_free = contig;
 593
 594	if (end == block->nr_bits)
 595		block->right_free = contig;
 596
 597	if (contig > block->contig_hint) {
 598		/* promote the old contig_hint to be the new scan_hint */
 599		if (start > block->contig_hint_start) {
 600			if (block->contig_hint > block->scan_hint) {
 601				block->scan_hint_start =
 602					block->contig_hint_start;
 603				block->scan_hint = block->contig_hint;
 604			} else if (start < block->scan_hint_start) {
 605				/*
 606				 * The old contig_hint == scan_hint.  But, the
 607				 * new contig is larger so hold the invariant
 608				 * scan_hint_start < contig_hint_start.
 609				 */
 610				block->scan_hint = 0;
 611			}
 612		} else {
 613			block->scan_hint = 0;
 614		}
 615		block->contig_hint_start = start;
 616		block->contig_hint = contig;
 617	} else if (contig == block->contig_hint) {
 618		if (block->contig_hint_start &&
 619		    (!start ||
 620		     __ffs(start) > __ffs(block->contig_hint_start))) {
 621			/* start has a better alignment so use it */
 622			block->contig_hint_start = start;
 623			if (start < block->scan_hint_start &&
 624			    block->contig_hint > block->scan_hint)
 625				block->scan_hint = 0;
 626		} else if (start > block->scan_hint_start ||
 627			   block->contig_hint > block->scan_hint) {
 628			/*
 629			 * Knowing contig == contig_hint, update the scan_hint
 630			 * if it is farther than or larger than the current
 631			 * scan_hint.
 632			 */
 633			block->scan_hint_start = start;
 634			block->scan_hint = contig;
 635		}
 636	} else {
 637		/*
 638		 * The region is smaller than the contig_hint.  So only update
 639		 * the scan_hint if it is larger than or equal and farther than
 640		 * the current scan_hint.
 641		 */
 642		if ((start < block->contig_hint_start &&
 643		     (contig > block->scan_hint ||
 644		      (contig == block->scan_hint &&
 645		       start > block->scan_hint_start)))) {
 646			block->scan_hint_start = start;
 647			block->scan_hint = contig;
 648		}
 649	}
 650}
 651
 652/*
 653 * pcpu_block_update_scan - update a block given a free area from a scan
 654 * @chunk: chunk of interest
 655 * @bit_off: chunk offset
 656 * @bits: size of free area
 657 *
 658 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 659 * to find a block that can hold the allocation and then pcpu_alloc_area()
 660 * where a scan is used.  When allocations require specific alignments,
 661 * we can inadvertently create holes which will not be seen in the alloc
 662 * or free paths.
 663 *
 664 * This takes a given free area hole and updates a block as it may change the
 665 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 666 * from alignment.
 667 */
 668static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
 669				   int bits)
 670{
 671	int s_off = pcpu_off_to_block_off(bit_off);
 672	int e_off = s_off + bits;
 673	int s_index, l_bit;
 674	struct pcpu_block_md *block;
 675
 676	if (e_off > PCPU_BITMAP_BLOCK_BITS)
 677		return;
 678
 679	s_index = pcpu_off_to_block_index(bit_off);
 680	block = chunk->md_blocks + s_index;
 681
 682	/* scan backwards in case of alignment skipping free bits */
 683	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
 684	s_off = (s_off == l_bit) ? 0 : l_bit + 1;
 685
 686	pcpu_block_update(block, s_off, e_off);
 687}
 688
 689/**
 690 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 691 * @chunk: chunk of interest
 692 * @full_scan: if we should scan from the beginning
 693 *
 694 * Iterates over the metadata blocks to find the largest contig area.
 695 * A full scan can be avoided on the allocation path as this is triggered
 696 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 697 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 698 * be prevented on freeing as we want to find the largest area possibly
 699 * spanning blocks.
 700 */
 701static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
 702{
 703	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 704	int bit_off, bits;
 705
 706	/* promote scan_hint to contig_hint */
 707	if (!full_scan && chunk_md->scan_hint) {
 708		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
 709		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
 710		chunk_md->contig_hint = chunk_md->scan_hint;
 711		chunk_md->scan_hint = 0;
 712	} else {
 713		bit_off = chunk_md->first_free;
 714		chunk_md->contig_hint = 0;
 715	}
 716
 717	bits = 0;
 718	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 719		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
 720}
 721
 722/**
 723 * pcpu_block_refresh_hint
 724 * @chunk: chunk of interest
 725 * @index: index of the metadata block
 726 *
 727 * Scans over the block beginning at first_free and updates the block
 728 * metadata accordingly.
 729 */
 730static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
 731{
 732	struct pcpu_block_md *block = chunk->md_blocks + index;
 733	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
 734	unsigned int rs, re, start;	/* region start, region end */
 735
 736	/* promote scan_hint to contig_hint */
 737	if (block->scan_hint) {
 738		start = block->scan_hint_start + block->scan_hint;
 739		block->contig_hint_start = block->scan_hint_start;
 740		block->contig_hint = block->scan_hint;
 741		block->scan_hint = 0;
 742	} else {
 743		start = block->first_free;
 744		block->contig_hint = 0;
 745	}
 746
 747	block->right_free = 0;
 748
 749	/* iterate over free areas and update the contig hints */
 750	bitmap_for_each_clear_region(alloc_map, rs, re, start,
 751				     PCPU_BITMAP_BLOCK_BITS)
 752		pcpu_block_update(block, rs, re);
 753}
 754
 755/**
 756 * pcpu_block_update_hint_alloc - update hint on allocation path
 757 * @chunk: chunk of interest
 758 * @bit_off: chunk offset
 759 * @bits: size of request
 760 *
 761 * Updates metadata for the allocation path.  The metadata only has to be
 762 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 763 * scans are required if the block's contig hint is broken.
 764 */
 765static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
 766					 int bits)
 767{
 768	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 769	int nr_empty_pages = 0;
 770	struct pcpu_block_md *s_block, *e_block, *block;
 771	int s_index, e_index;	/* block indexes of the freed allocation */
 772	int s_off, e_off;	/* block offsets of the freed allocation */
 773
 774	/*
 775	 * Calculate per block offsets.
 776	 * The calculation uses an inclusive range, but the resulting offsets
 777	 * are [start, end).  e_index always points to the last block in the
 778	 * range.
 779	 */
 780	s_index = pcpu_off_to_block_index(bit_off);
 781	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
 782	s_off = pcpu_off_to_block_off(bit_off);
 783	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
 784
 785	s_block = chunk->md_blocks + s_index;
 786	e_block = chunk->md_blocks + e_index;
 787
 788	/*
 789	 * Update s_block.
 790	 * block->first_free must be updated if the allocation takes its place.
 791	 * If the allocation breaks the contig_hint, a scan is required to
 792	 * restore this hint.
 793	 */
 794	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
 795		nr_empty_pages++;
 796
 797	if (s_off == s_block->first_free)
 798		s_block->first_free = find_next_zero_bit(
 799					pcpu_index_alloc_map(chunk, s_index),
 800					PCPU_BITMAP_BLOCK_BITS,
 801					s_off + bits);
 802
 803	if (pcpu_region_overlap(s_block->scan_hint_start,
 804				s_block->scan_hint_start + s_block->scan_hint,
 805				s_off,
 806				s_off + bits))
 807		s_block->scan_hint = 0;
 808
 809	if (pcpu_region_overlap(s_block->contig_hint_start,
 810				s_block->contig_hint_start +
 811				s_block->contig_hint,
 812				s_off,
 813				s_off + bits)) {
 814		/* block contig hint is broken - scan to fix it */
 815		if (!s_off)
 816			s_block->left_free = 0;
 817		pcpu_block_refresh_hint(chunk, s_index);
 818	} else {
 819		/* update left and right contig manually */
 820		s_block->left_free = min(s_block->left_free, s_off);
 821		if (s_index == e_index)
 822			s_block->right_free = min_t(int, s_block->right_free,
 823					PCPU_BITMAP_BLOCK_BITS - e_off);
 824		else
 825			s_block->right_free = 0;
 826	}
 827
 828	/*
 829	 * Update e_block.
 830	 */
 831	if (s_index != e_index) {
 832		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
 833			nr_empty_pages++;
 834
 835		/*
 836		 * When the allocation is across blocks, the end is along
 837		 * the left part of the e_block.
 838		 */
 839		e_block->first_free = find_next_zero_bit(
 840				pcpu_index_alloc_map(chunk, e_index),
 841				PCPU_BITMAP_BLOCK_BITS, e_off);
 842
 843		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
 844			/* reset the block */
 845			e_block++;
 846		} else {
 847			if (e_off > e_block->scan_hint_start)
 848				e_block->scan_hint = 0;
 849
 850			e_block->left_free = 0;
 851			if (e_off > e_block->contig_hint_start) {
 852				/* contig hint is broken - scan to fix it */
 853				pcpu_block_refresh_hint(chunk, e_index);
 854			} else {
 855				e_block->right_free =
 856					min_t(int, e_block->right_free,
 857					      PCPU_BITMAP_BLOCK_BITS - e_off);
 858			}
 859		}
 860
 861		/* update in-between md_blocks */
 862		nr_empty_pages += (e_index - s_index - 1);
 863		for (block = s_block + 1; block < e_block; block++) {
 864			block->scan_hint = 0;
 865			block->contig_hint = 0;
 866			block->left_free = 0;
 867			block->right_free = 0;
 868		}
 869	}
 870
 871	if (nr_empty_pages)
 872		pcpu_update_empty_pages(chunk, -nr_empty_pages);
 873
 874	if (pcpu_region_overlap(chunk_md->scan_hint_start,
 875				chunk_md->scan_hint_start +
 876				chunk_md->scan_hint,
 877				bit_off,
 878				bit_off + bits))
 879		chunk_md->scan_hint = 0;
 880
 881	/*
 882	 * The only time a full chunk scan is required is if the chunk
 883	 * contig hint is broken.  Otherwise, it means a smaller space
 884	 * was used and therefore the chunk contig hint is still correct.
 885	 */
 886	if (pcpu_region_overlap(chunk_md->contig_hint_start,
 887				chunk_md->contig_hint_start +
 888				chunk_md->contig_hint,
 889				bit_off,
 890				bit_off + bits))
 891		pcpu_chunk_refresh_hint(chunk, false);
 892}
 893
 894/**
 895 * pcpu_block_update_hint_free - updates the block hints on the free path
 896 * @chunk: chunk of interest
 897 * @bit_off: chunk offset
 898 * @bits: size of request
 899 *
  900 * Updates metadata for the free path.  This avoids a blind block
 901 * refresh by making use of the block contig hints.  If this fails, it scans
 902 * forward and backward to determine the extent of the free area.  This is
 903 * capped at the boundary of blocks.
 904 *
 905 * A chunk update is triggered if a page becomes free, a block becomes free,
 906 * or the free spans across blocks.  This tradeoff is to minimize iterating
 907 * over the block metadata to update chunk_md->contig_hint.
 908 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 909 * than the available space.  If the contig hint is contained in one block, it
 910 * will be accurate.
 911 */
 912static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
 913					int bits)
 914{
 915	int nr_empty_pages = 0;
 916	struct pcpu_block_md *s_block, *e_block, *block;
 917	int s_index, e_index;	/* block indexes of the freed allocation */
 918	int s_off, e_off;	/* block offsets of the freed allocation */
 919	int start, end;		/* start and end of the whole free area */
 920
 921	/*
 922	 * Calculate per block offsets.
 923	 * The calculation uses an inclusive range, but the resulting offsets
 924	 * are [start, end).  e_index always points to the last block in the
 925	 * range.
 926	 */
 927	s_index = pcpu_off_to_block_index(bit_off);
 928	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
 929	s_off = pcpu_off_to_block_off(bit_off);
 930	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
 931
 932	s_block = chunk->md_blocks + s_index;
 933	e_block = chunk->md_blocks + e_index;
 934
 935	/*
 936	 * Check if the freed area aligns with the block->contig_hint.
 937	 * If it does, then the scan to find the beginning/end of the
 938	 * larger free area can be avoided.
 939	 *
 940	 * start and end refer to beginning and end of the free area
 941	 * within each their respective blocks.  This is not necessarily
 942	 * the entire free area as it may span blocks past the beginning
 943	 * or end of the block.
 944	 */
 945	start = s_off;
 946	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
 947		start = s_block->contig_hint_start;
 948	} else {
 949		/*
 950		 * Scan backwards to find the extent of the free area.
 951		 * find_last_bit returns the starting bit, so if the start bit
 952		 * is returned, that means there was no last bit and the
 953		 * remainder of the chunk is free.
 954		 */
 955		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
 956					  start);
 957		start = (start == l_bit) ? 0 : l_bit + 1;
 958	}
 959
 960	end = e_off;
 961	if (e_off == e_block->contig_hint_start)
 962		end = e_block->contig_hint_start + e_block->contig_hint;
 963	else
 964		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
 965				    PCPU_BITMAP_BLOCK_BITS, end);
 966
 967	/* update s_block */
 968	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
 969	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
 970		nr_empty_pages++;
 971	pcpu_block_update(s_block, start, e_off);
 972
  973	/* freeing spans across blocks */
 974	if (s_index != e_index) {
 975		/* update e_block */
 976		if (end == PCPU_BITMAP_BLOCK_BITS)
 977			nr_empty_pages++;
 978		pcpu_block_update(e_block, 0, end);
 979
 980		/* reset md_blocks in the middle */
 981		nr_empty_pages += (e_index - s_index - 1);
 982		for (block = s_block + 1; block < e_block; block++) {
 983			block->first_free = 0;
 984			block->scan_hint = 0;
 985			block->contig_hint_start = 0;
 986			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
 987			block->left_free = PCPU_BITMAP_BLOCK_BITS;
 988			block->right_free = PCPU_BITMAP_BLOCK_BITS;
 989		}
 990	}
 991
 992	if (nr_empty_pages)
 993		pcpu_update_empty_pages(chunk, nr_empty_pages);
 994
 995	/*
 996	 * Refresh chunk metadata when the free makes a block free or spans
 997	 * across blocks.  The contig_hint may be off by up to a page, but if
 998	 * the contig_hint is contained in a block, it will be accurate with
 999	 * the else condition below.
1000	 */
1001	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1002		pcpu_chunk_refresh_hint(chunk, true);
1003	else
1004		pcpu_block_update(&chunk->chunk_md,
1005				  pcpu_block_off_to_off(s_index, start),
1006				  end);
1007}
1008
1009/**
1010 * pcpu_is_populated - determines if the region is populated
1011 * @chunk: chunk of interest
1012 * @bit_off: chunk offset
1013 * @bits: size of area
1014 * @next_off: return value for the next offset to start searching
1015 *
1016 * For atomic allocations, check if the backing pages are populated.
1017 *
1018 * RETURNS:
1019 * Bool if the backing pages are populated.
 1020 * @next_off is used to skip over unpopulated regions in pcpu_find_block_fit().
1021 */
1022static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1023			      int *next_off)
1024{
1025	unsigned int page_start, page_end, rs, re;
1026
1027	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1028	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
1029
1030	rs = page_start;
1031	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
1032	if (rs >= page_end)
1033		return true;
1034
1035	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
1036	return false;
1037}
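/*
 * Worked example of the page range check above, assuming 4K pages and
 * PCPU_MIN_ALLOC_SIZE == 4 bytes: a candidate area at bit_off 1000 spanning
 * 100 bits covers bytes [4000, 4400), so page_start = PFN_DOWN(4000) = 0
 * and page_end = PFN_UP(4400) = 2, i.e. pages 0 and 1 must both be
 * populated.  If an unpopulated page is found, next_off is set to the
 * first bit of the page at which the populated region resumes.
 */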
1038
1039/**
1040 * pcpu_find_block_fit - finds the block index to start searching
1041 * @chunk: chunk of interest
1042 * @alloc_bits: size of request in allocation units
1043 * @align: alignment of area (max PAGE_SIZE bytes)
1044 * @pop_only: use populated regions only
1045 *
1046 * Given a chunk and an allocation spec, find the offset to begin searching
1047 * for a free region.  This iterates over the bitmap metadata blocks to
1048 * find an offset that will be guaranteed to fit the requirements.  It is
 1049 * not quite first fit: if the allocation does not fit in the contig hint
1050 * of a block or chunk, it is skipped.  This errs on the side of caution
1051 * to prevent excess iteration.  Poor alignment can cause the allocator to
1052 * skip over blocks and chunks that have valid free areas.
1053 *
1054 * RETURNS:
1055 * The offset in the bitmap to begin searching.
1056 * -1 if no offset is found.
1057 */
1058static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1059			       size_t align, bool pop_only)
1060{
1061	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1062	int bit_off, bits, next_off;
1063
1064	/*
1065	 * Check to see if the allocation can fit in the chunk's contig hint.
1066	 * This is an optimization to prevent scanning by assuming if it
1067	 * cannot fit in the global hint, there is memory pressure and creating
1068	 * a new chunk would happen soon.
1069	 */
1070	bit_off = ALIGN(chunk_md->contig_hint_start, align) -
1071		  chunk_md->contig_hint_start;
1072	if (bit_off + alloc_bits > chunk_md->contig_hint)
1073		return -1;
1074
1075	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1076	bits = 0;
1077	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1078		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1079						   &next_off))
1080			break;
1081
1082		bit_off = next_off;
1083		bits = 0;
1084	}
1085
1086	if (bit_off == pcpu_chunk_map_bits(chunk))
1087		return -1;
1088
1089	return bit_off;
1090}
1091
1092/*
1093 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1094 * @map: the address to base the search on
1095 * @size: the bitmap size in bits
1096 * @start: the bitnumber to start searching at
1097 * @nr: the number of zeroed bits we're looking for
1098 * @align_mask: alignment mask for zero area
1099 * @largest_off: offset of the largest area skipped
1100 * @largest_bits: size of the largest area skipped
1101 *
1102 * The @align_mask should be one less than a power of 2.
1103 *
1104 * This is a modified version of bitmap_find_next_zero_area_off() to remember
1105 * the largest area that was skipped.  This is imperfect, but in general is
1106 * good enough.  The largest remembered region is the largest failed region
1107 * seen.  This does not include anything we possibly skipped due to alignment.
1108 * pcpu_block_update_scan() does scan backwards to try and recover what was
1109 * lost to alignment.  While this can cause scanning to miss earlier possible
1110 * free areas, smaller allocations will eventually fill those holes.
1111 */
1112static unsigned long pcpu_find_zero_area(unsigned long *map,
1113					 unsigned long size,
1114					 unsigned long start,
1115					 unsigned long nr,
1116					 unsigned long align_mask,
1117					 unsigned long *largest_off,
1118					 unsigned long *largest_bits)
1119{
1120	unsigned long index, end, i, area_off, area_bits;
1121again:
1122	index = find_next_zero_bit(map, size, start);
1123
1124	/* Align allocation */
1125	index = __ALIGN_MASK(index, align_mask);
1126	area_off = index;
1127
1128	end = index + nr;
1129	if (end > size)
1130		return end;
1131	i = find_next_bit(map, end, index);
1132	if (i < end) {
1133		area_bits = i - area_off;
1134		/* remember largest unused area with best alignment */
1135		if (area_bits > *largest_bits ||
1136		    (area_bits == *largest_bits && *largest_off &&
1137		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1138			*largest_off = area_off;
1139			*largest_bits = area_bits;
1140		}
1141
1142		start = i + 1;
1143		goto again;
1144	}
1145	return index;
1146}
1147
1148/**
1149 * pcpu_alloc_area - allocates an area from a pcpu_chunk
1150 * @chunk: chunk of interest
1151 * @alloc_bits: size of request in allocation units
1152 * @align: alignment of area (max PAGE_SIZE)
1153 * @start: bit_off to start searching
1154 *
1155 * This function takes in a @start offset to begin searching to fit an
1156 * allocation of @alloc_bits with alignment @align.  It needs to scan
1157 * the allocation map because if it fits within the block's contig hint,
1158 * @start will be block->first_free. This is an attempt to fill the
1159 * allocation prior to breaking the contig hint.  The allocation and
1160 * boundary maps are updated accordingly if it confirms a valid
1161 * free area.
1162 *
1163 * RETURNS:
1164 * Allocated addr offset in @chunk on success.
1165 * -1 if no matching area is found.
1166 */
1167static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1168			   size_t align, int start)
1169{
1170	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1171	size_t align_mask = (align) ? (align - 1) : 0;
1172	unsigned long area_off = 0, area_bits = 0;
1173	int bit_off, end, oslot;
1174
1175	lockdep_assert_held(&pcpu_lock);
1176
1177	oslot = pcpu_chunk_slot(chunk);
1178
1179	/*
1180	 * Search to find a fit.
1181	 */
1182	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1183		    pcpu_chunk_map_bits(chunk));
1184	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1185				      align_mask, &area_off, &area_bits);
1186	if (bit_off >= end)
1187		return -1;
1188
1189	if (area_bits)
1190		pcpu_block_update_scan(chunk, area_off, area_bits);
1191
1192	/* update alloc map */
1193	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1194
1195	/* update boundary map */
1196	set_bit(bit_off, chunk->bound_map);
1197	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1198	set_bit(bit_off + alloc_bits, chunk->bound_map);
1199
1200	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1201
1202	/* update first free bit */
1203	if (bit_off == chunk_md->first_free)
1204		chunk_md->first_free = find_next_zero_bit(
1205					chunk->alloc_map,
1206					pcpu_chunk_map_bits(chunk),
1207					bit_off + alloc_bits);
1208
1209	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1210
1211	pcpu_chunk_relocate(chunk, oslot);
1212
1213	return bit_off * PCPU_MIN_ALLOC_SIZE;
1214}
1215
1216/**
1217 * pcpu_free_area - frees the corresponding offset
1218 * @chunk: chunk of interest
1219 * @off: addr offset into chunk
1220 *
1221 * This function determines the size of an allocation to free using
1222 * the boundary bitmap and clears the allocation map.
1223 *
1224 * RETURNS:
1225 * Number of freed bytes.
1226 */
1227static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1228{
1229	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1230	int bit_off, bits, end, oslot, freed;
1231
1232	lockdep_assert_held(&pcpu_lock);
1233	pcpu_stats_area_dealloc(chunk);
1234
1235	oslot = pcpu_chunk_slot(chunk);
1236
1237	bit_off = off / PCPU_MIN_ALLOC_SIZE;
1238
1239	/* find end index */
1240	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1241			    bit_off + 1);
1242	bits = end - bit_off;
1243	bitmap_clear(chunk->alloc_map, bit_off, bits);
1244
1245	freed = bits * PCPU_MIN_ALLOC_SIZE;
1246
1247	/* update metadata */
1248	chunk->free_bytes += freed;
1249
1250	/* update first free bit */
1251	chunk_md->first_free = min(chunk_md->first_free, bit_off);
1252
1253	pcpu_block_update_hint_free(chunk, bit_off, bits);
1254
1255	pcpu_chunk_relocate(chunk, oslot);
1256
1257	return freed;
1258}
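/*
 * Worked example of the alloc_map/bound_map encoding used by
 * pcpu_alloc_area() and pcpu_free_area() (offsets are hypothetical):
 * allocating 3 bits at bit 0 and then 4 bits at bit 3 leaves alloc_map
 * bits 0-6 set and bound_map bits 0, 3 and 7 set - each allocation marks
 * the bound_map at its start and at the bit just past its end.  Freeing
 * the area at offset 3 * PCPU_MIN_ALLOC_SIZE then finds the next bound_map
 * bit after bit 3 at bit 7, so bits = 7 - 3 = 4 and 4 * PCPU_MIN_ALLOC_SIZE
 * bytes are returned to the chunk.
 */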
1259
1260static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1261{
1262	block->scan_hint = 0;
1263	block->contig_hint = nr_bits;
1264	block->left_free = nr_bits;
1265	block->right_free = nr_bits;
1266	block->first_free = 0;
1267	block->nr_bits = nr_bits;
1268}
1269
1270static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1271{
1272	struct pcpu_block_md *md_block;
1273
1274	/* init the chunk's block */
1275	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1276
1277	for (md_block = chunk->md_blocks;
1278	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1279	     md_block++)
1280		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1281}
1282
1283/**
1284 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1285 * @tmp_addr: the start of the region served
1286 * @map_size: size of the region served
1287 *
1288 * This is responsible for creating the chunks that serve the first chunk.  The
1289 * base_addr is page aligned down of @tmp_addr while the region end is page
1290 * aligned up.  Offsets are kept track of to determine the region served. All
1291 * this is done to appease the bitmap allocator in avoiding partial blocks.
1292 *
1293 * RETURNS:
1294 * Chunk serving the region at @tmp_addr of @map_size.
1295 */
1296static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1297							 int map_size)
1298{
1299	struct pcpu_chunk *chunk;
1300	unsigned long aligned_addr, lcm_align;
1301	int start_offset, offset_bits, region_size, region_bits;
1302	size_t alloc_size;
1303
1304	/* region calculations */
1305	aligned_addr = tmp_addr & PAGE_MASK;
1306
1307	start_offset = tmp_addr - aligned_addr;
1308
1309	/*
1310	 * Align the end of the region with the LCM of PAGE_SIZE and
1311	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
1312	 * the other.
1313	 */
1314	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1315	region_size = ALIGN(start_offset + map_size, lcm_align);
1316
1317	/* allocate chunk */
1318	alloc_size = sizeof(struct pcpu_chunk) +
1319		BITS_TO_LONGS(region_size >> PAGE_SHIFT) * sizeof(unsigned long);
1320	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1321	if (!chunk)
1322		panic("%s: Failed to allocate %zu bytes\n", __func__,
1323		      alloc_size);
1324
1325	INIT_LIST_HEAD(&chunk->list);
1326
1327	chunk->base_addr = (void *)aligned_addr;
1328	chunk->start_offset = start_offset;
1329	chunk->end_offset = region_size - chunk->start_offset - map_size;
1330
1331	chunk->nr_pages = region_size >> PAGE_SHIFT;
1332	region_bits = pcpu_chunk_map_bits(chunk);
1333
1334	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1335	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1336	if (!chunk->alloc_map)
1337		panic("%s: Failed to allocate %zu bytes\n", __func__,
1338		      alloc_size);
1339
1340	alloc_size =
1341		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1342	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1343	if (!chunk->bound_map)
1344		panic("%s: Failed to allocate %zu bytes\n", __func__,
1345		      alloc_size);
1346
1347	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1348	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1349	if (!chunk->md_blocks)
1350		panic("%s: Failed to allocate %zu bytes\n", __func__,
1351		      alloc_size);
1352
1353#ifdef CONFIG_MEMCG_KMEM
1354	/* first chunk isn't memcg-aware */
1355	chunk->obj_cgroups = NULL;
1356#endif
1357	pcpu_init_md_blocks(chunk);
1358
1359	/* manage populated page bitmap */
1360	chunk->immutable = true;
1361	bitmap_fill(chunk->populated, chunk->nr_pages);
1362	chunk->nr_populated = chunk->nr_pages;
1363	chunk->nr_empty_pop_pages = chunk->nr_pages;
1364
1365	chunk->free_bytes = map_size;
1366
1367	if (chunk->start_offset) {
1368		/* hide the beginning of the bitmap */
1369		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1370		bitmap_set(chunk->alloc_map, 0, offset_bits);
1371		set_bit(0, chunk->bound_map);
1372		set_bit(offset_bits, chunk->bound_map);
1373
1374		chunk->chunk_md.first_free = offset_bits;
1375
1376		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1377	}
1378
1379	if (chunk->end_offset) {
1380		/* hide the end of the bitmap */
1381		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1382		bitmap_set(chunk->alloc_map,
1383			   pcpu_chunk_map_bits(chunk) - offset_bits,
1384			   offset_bits);
1385		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1386			chunk->bound_map);
1387		set_bit(region_bits, chunk->bound_map);
1388
1389		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1390					     - offset_bits, offset_bits);
1391	}
1392
1393	return chunk;
1394}
1395
1396static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
1397{
1398	struct pcpu_chunk *chunk;
1399	int region_bits;
1400
1401	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1402	if (!chunk)
1403		return NULL;
1404
1405	INIT_LIST_HEAD(&chunk->list);
1406	chunk->nr_pages = pcpu_unit_pages;
1407	region_bits = pcpu_chunk_map_bits(chunk);
1408
1409	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1410					   sizeof(chunk->alloc_map[0]), gfp);
1411	if (!chunk->alloc_map)
1412		goto alloc_map_fail;
1413
1414	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1415					   sizeof(chunk->bound_map[0]), gfp);
1416	if (!chunk->bound_map)
1417		goto bound_map_fail;
1418
1419	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1420					   sizeof(chunk->md_blocks[0]), gfp);
1421	if (!chunk->md_blocks)
1422		goto md_blocks_fail;
1423
1424#ifdef CONFIG_MEMCG_KMEM
1425	if (pcpu_is_memcg_chunk(type)) {
1426		chunk->obj_cgroups =
1427			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1428					sizeof(struct obj_cgroup *), gfp);
1429		if (!chunk->obj_cgroups)
1430			goto objcg_fail;
1431	}
1432#endif
1433
1434	pcpu_init_md_blocks(chunk);
1435
1436	/* init metadata */
1437	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1438
1439	return chunk;
1440
1441#ifdef CONFIG_MEMCG_KMEM
1442objcg_fail:
1443	pcpu_mem_free(chunk->md_blocks);
1444#endif
1445md_blocks_fail:
1446	pcpu_mem_free(chunk->bound_map);
1447bound_map_fail:
1448	pcpu_mem_free(chunk->alloc_map);
1449alloc_map_fail:
1450	pcpu_mem_free(chunk);
1451
1452	return NULL;
1453}
1454
1455static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1456{
1457	if (!chunk)
1458		return;
1459#ifdef CONFIG_MEMCG_KMEM
1460	pcpu_mem_free(chunk->obj_cgroups);
1461#endif
1462	pcpu_mem_free(chunk->md_blocks);
1463	pcpu_mem_free(chunk->bound_map);
1464	pcpu_mem_free(chunk->alloc_map);
1465	pcpu_mem_free(chunk);
1466}
1467
1468/**
1469 * pcpu_chunk_populated - post-population bookkeeping
1470 * @chunk: pcpu_chunk which got populated
1471 * @page_start: the start page
1472 * @page_end: the end page
1473 *
1474 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1475 * the bookkeeping information accordingly.  Must be called after each
1476 * successful population.
1480 */
1481static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1482				 int page_end)
1483{
1484	int nr = page_end - page_start;
1485
1486	lockdep_assert_held(&pcpu_lock);
1487
1488	bitmap_set(chunk->populated, page_start, nr);
1489	chunk->nr_populated += nr;
1490	pcpu_nr_populated += nr;
1491
1492	pcpu_update_empty_pages(chunk, nr);
1493}
1494
1495/**
1496 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1497 * @chunk: pcpu_chunk which got depopulated
1498 * @page_start: the start page
1499 * @page_end: the end page
1500 *
1501 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1502 * Update the bookkeeping information accordingly.  Must be called after
1503 * each successful depopulation.
1504 */
1505static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1506				   int page_start, int page_end)
1507{
1508	int nr = page_end - page_start;
1509
1510	lockdep_assert_held(&pcpu_lock);
1511
1512	bitmap_clear(chunk->populated, page_start, nr);
1513	chunk->nr_populated -= nr;
1514	pcpu_nr_populated -= nr;
1515
1516	pcpu_update_empty_pages(chunk, -nr);
1517}
1518
1519/*
1520 * Chunk management implementation.
1521 *
1522 * To allow different implementations, chunk alloc/free and
1523 * [de]population are implemented in a separate file which is pulled
1524 * into this file and compiled together.  The following functions
1525 * should be implemented.
1526 *
1527 * pcpu_populate_chunk		- populate the specified range of a chunk
1528 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
1529 * pcpu_create_chunk		- create a new chunk
1530 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 1531 * pcpu_addr_to_page		- translate address to the corresponding struct page
1532 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
1533 */
1534static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1535			       int page_start, int page_end, gfp_t gfp);
1536static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1537				  int page_start, int page_end);
1538static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
1539					    gfp_t gfp);
1540static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1541static struct page *pcpu_addr_to_page(void *addr);
1542static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1543
1544#ifdef CONFIG_NEED_PER_CPU_KM
1545#include "percpu-km.c"
1546#else
1547#include "percpu-vm.c"
1548#endif
1549
1550/**
1551 * pcpu_chunk_addr_search - determine chunk containing specified address
1552 * @addr: address for which the chunk needs to be determined.
1553 *
1554 * This is an internal function that handles all but static allocations.
1555 * Static percpu address values should never be passed into the allocator.
1556 *
1557 * RETURNS:
1558 * The address of the found chunk.
1559 */
1560static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1561{
1562	/* is it in the dynamic region (first chunk)? */
1563	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1564		return pcpu_first_chunk;
1565
1566	/* is it in the reserved region? */
1567	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1568		return pcpu_reserved_chunk;
1569
1570	/*
1571	 * The address is relative to unit0 which might be unused and
1572	 * thus unmapped.  Offset the address to the unit space of the
1573	 * current processor before looking it up in the vmalloc
1574	 * space.  Note that any possible cpu id can be used here, so
1575	 * there's no need to worry about preemption or cpu hotplug.
1576	 */
1577	addr += pcpu_unit_offsets[raw_smp_processor_id()];
1578	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1579}
1580
1581#ifdef CONFIG_MEMCG_KMEM
1582static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1583						     struct obj_cgroup **objcgp)
1584{
1585	struct obj_cgroup *objcg;
1586
1587	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT) ||
1588	    memcg_kmem_bypass())
1589		return PCPU_CHUNK_ROOT;
1590
1591	objcg = get_obj_cgroup_from_current();
1592	if (!objcg)
1593		return PCPU_CHUNK_ROOT;
1594
1595	if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
1596		obj_cgroup_put(objcg);
1597		return PCPU_FAIL_ALLOC;
1598	}
1599
1600	*objcgp = objcg;
1601	return PCPU_CHUNK_MEMCG;
1602}
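/*
 * Illustrative only: because a percpu allocation reserves @size bytes in
 * every possible cpu's unit, the memcg charge above scales with
 * num_possible_cpus().  E.g. a 64 byte __GFP_ACCOUNT allocation on a
 * machine with 16 possible cpus charges 64 * 16 = 1024 bytes to the
 * current task's objcg.
 */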
1603
1604static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1605				       struct pcpu_chunk *chunk, int off,
1606				       size_t size)
1607{
1608	if (!objcg)
1609		return;
1610
1611	if (chunk) {
1612		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1613
1614		rcu_read_lock();
1615		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1616				size * num_possible_cpus());
1617		rcu_read_unlock();
1618	} else {
1619		obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1620		obj_cgroup_put(objcg);
1621	}
1622}
1623
1624static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1625{
1626	struct obj_cgroup *objcg;
1627
1628	if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)))
1629		return;
1630
1631	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1632	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1633
1634	obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1635
1636	rcu_read_lock();
1637	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1638			-(size * num_possible_cpus()));
1639	rcu_read_unlock();
1640
1641	obj_cgroup_put(objcg);
1642}
1643
1644#else /* CONFIG_MEMCG_KMEM */
1645static enum pcpu_chunk_type
1646pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1647{
1648	return PCPU_CHUNK_ROOT;
1649}
1650
1651static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1652				       struct pcpu_chunk *chunk, int off,
1653				       size_t size)
1654{
1655}
1656
1657static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1658{
1659}
1660#endif /* CONFIG_MEMCG_KMEM */
1661
1662/**
1663 * pcpu_alloc - the percpu allocator
1664 * @size: size of area to allocate in bytes
1665 * @align: alignment of area (max PAGE_SIZE)
1666 * @reserved: allocate from the reserved chunk if available
1667 * @gfp: allocation flags
1668 *
1669 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
1670 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1671 * then no warning will be triggered on invalid or failed allocation
1672 * requests.
1673 *
1674 * RETURNS:
1675 * Percpu pointer to the allocated area on success, NULL on failure.
1676 */
1677static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1678				 gfp_t gfp)
1679{
1680	gfp_t pcpu_gfp;
1681	bool is_atomic;
1682	bool do_warn;
1683	enum pcpu_chunk_type type;
1684	struct list_head *pcpu_slot;
1685	struct obj_cgroup *objcg = NULL;
1686	static int warn_limit = 10;
1687	struct pcpu_chunk *chunk, *next;
1688	const char *err;
1689	int slot, off, cpu, ret;
1690	unsigned long flags;
1691	void __percpu *ptr;
1692	size_t bits, bit_align;
1693
1694	gfp = current_gfp_context(gfp);
1695	/* whitelisted flags that can be passed to the backing allocators */
1696	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1697	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1698	do_warn = !(gfp & __GFP_NOWARN);
1699
1700	/*
1701	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1702	 * therefore alignment must be a minimum of that many bytes.
1703	 * An allocation may have up to PCPU_MIN_ALLOC_SIZE - 1 bytes of
1704	 * internal fragmentation from this rounding up.
1705	 */
1706	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1707		align = PCPU_MIN_ALLOC_SIZE;
1708
1709	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1710	bits = size >> PCPU_MIN_ALLOC_SHIFT;
1711	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
1712
1713	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1714		     !is_power_of_2(align))) {
1715		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1716		     size, align);
1717		return NULL;
1718	}
1719
1720	type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg);
1721	if (unlikely(type == PCPU_FAIL_ALLOC))
1722		return NULL;
1723	pcpu_slot = pcpu_chunk_list(type);
1724
1725	if (!is_atomic) {
1726		/*
1727		 * pcpu_balance_workfn() allocates memory under this mutex,
1728		 * and it may wait for memory reclaim. Allow current task
1729		 * to become OOM victim, in case of memory pressure.
1730		 */
1731		if (gfp & __GFP_NOFAIL) {
1732			mutex_lock(&pcpu_alloc_mutex);
1733		} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1734			pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1735			return NULL;
1736		}
1737	}
1738
1739	spin_lock_irqsave(&pcpu_lock, flags);
1740
1741	/* serve reserved allocations from the reserved chunk if available */
1742	if (reserved && pcpu_reserved_chunk) {
1743		chunk = pcpu_reserved_chunk;
1744
1745		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1746		if (off < 0) {
1747			err = "alloc from reserved chunk failed";
1748			goto fail_unlock;
1749		}
1750
1751		off = pcpu_alloc_area(chunk, bits, bit_align, off);
1752		if (off >= 0)
1753			goto area_found;
1754
1755		err = "alloc from reserved chunk failed";
1756		goto fail_unlock;
1757	}
1758
1759restart:
1760	/* search through normal chunks */
1761	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
1762		list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
1763			off = pcpu_find_block_fit(chunk, bits, bit_align,
1764						  is_atomic);
1765			if (off < 0) {
1766				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1767					pcpu_chunk_move(chunk, 0);
1768				continue;
1769			}
1770
1771			off = pcpu_alloc_area(chunk, bits, bit_align, off);
1772			if (off >= 0)
1773				goto area_found;
1774
1775		}
1776	}
1777
1778	spin_unlock_irqrestore(&pcpu_lock, flags);
1779
1780	/*
1781	 * No space left.  Create a new chunk.  We don't want multiple
1782	 * tasks to create chunks simultaneously.  Serialize and create iff
1783	 * there's still no empty chunk after grabbing the mutex.
1784	 */
1785	if (is_atomic) {
1786		err = "atomic alloc failed, no space left";
1787		goto fail;
1788	}
1789
1790	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
1791		chunk = pcpu_create_chunk(type, pcpu_gfp);
1792		if (!chunk) {
1793			err = "failed to allocate new chunk";
1794			goto fail;
1795		}
1796
1797		spin_lock_irqsave(&pcpu_lock, flags);
1798		pcpu_chunk_relocate(chunk, -1);
1799	} else {
1800		spin_lock_irqsave(&pcpu_lock, flags);
1801	}
1802
1803	goto restart;
1804
1805area_found:
1806	pcpu_stats_area_alloc(chunk, size);
1807	spin_unlock_irqrestore(&pcpu_lock, flags);
1808
1809	/* populate if not all pages are already there */
1810	if (!is_atomic) {
1811		unsigned int page_start, page_end, rs, re;
1812
1813		page_start = PFN_DOWN(off);
1814		page_end = PFN_UP(off + size);
1815
1816		bitmap_for_each_clear_region(chunk->populated, rs, re,
1817					     page_start, page_end) {
1818			WARN_ON(chunk->immutable);
1819
1820			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1821
1822			spin_lock_irqsave(&pcpu_lock, flags);
1823			if (ret) {
1824				pcpu_free_area(chunk, off);
1825				err = "failed to populate";
1826				goto fail_unlock;
1827			}
1828			pcpu_chunk_populated(chunk, rs, re);
1829			spin_unlock_irqrestore(&pcpu_lock, flags);
1830		}
1831
1832		mutex_unlock(&pcpu_alloc_mutex);
1833	}
1834
1835	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1836		pcpu_schedule_balance_work();
1837
1838	/* clear the areas and return address relative to base address */
1839	for_each_possible_cpu(cpu)
1840		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1841
1842	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1843	kmemleak_alloc_percpu(ptr, size, gfp);
1844
1845	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1846			chunk->base_addr, off, ptr);
1847
1848	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1849
1850	return ptr;
1851
1852fail_unlock:
1853	spin_unlock_irqrestore(&pcpu_lock, flags);
1854fail:
1855	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1856
1857	if (!is_atomic && do_warn && warn_limit) {
1858		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1859			size, align, is_atomic, err);
1860		dump_stack();
1861		if (!--warn_limit)
1862			pr_info("limit reached, disable warning\n");
1863	}
1864	if (is_atomic) {
1865		/* see the flag handling in pcpu_balance_workfn() */
1866		pcpu_atomic_alloc_failed = true;
1867		pcpu_schedule_balance_work();
1868	} else {
1869		mutex_unlock(&pcpu_alloc_mutex);
1870	}
1871
1872	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1873
1874	return NULL;
1875}
1876
1877/**
1878 * __alloc_percpu_gfp - allocate dynamic percpu area
1879 * @size: size of area to allocate in bytes
1880 * @align: alignment of area (max PAGE_SIZE)
1881 * @gfp: allocation flags
1882 *
1883 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
1884 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1885 * be called from any context but is a lot more likely to fail. If @gfp
1886 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1887 * allocation requests.
1888 *
1889 * RETURNS:
1890 * Percpu pointer to the allocated area on success, NULL on failure.
1891 */
1892void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1893{
1894	return pcpu_alloc(size, align, false, gfp);
1895}
1896EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
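/*
 * Usage sketch (illustrative, not part of the allocator): an atomic caller
 * passes a gfp mask without GFP_KERNEL, e.g. GFP_NOWAIT, and must be
 * prepared for NULL since no new chunks can be created for it.  "struct
 * foo_stat" is a hypothetical example type; alloc_percpu_gfp() is the
 * typed wrapper around this function.
 *
 *	struct foo_stat __percpu *stat;
 *
 *	stat = alloc_percpu_gfp(struct foo_stat, GFP_NOWAIT | __GFP_NOWARN);
 *	if (!stat)
 *		return -ENOMEM;
 */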
1897
1898/**
1899 * __alloc_percpu - allocate dynamic percpu area
1900 * @size: size of area to allocate in bytes
1901 * @align: alignment of area (max PAGE_SIZE)
1902 *
1903 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1904 */
1905void __percpu *__alloc_percpu(size_t size, size_t align)
1906{
1907	return pcpu_alloc(size, align, false, GFP_KERNEL);
1908}
1909EXPORT_SYMBOL_GPL(__alloc_percpu);
1910
1911/**
1912 * __alloc_reserved_percpu - allocate reserved percpu area
1913 * @size: size of area to allocate in bytes
1914 * @align: alignment of area (max PAGE_SIZE)
1915 *
1916 * Allocate zero-filled percpu area of @size bytes aligned at @align
1917 * from reserved percpu area if arch has set it up; otherwise,
1918 * allocation is served from the same dynamic area.  Might sleep.
1919 * Might trigger writeouts.
1920 *
1921 * CONTEXT:
1922 * Does GFP_KERNEL allocation.
1923 *
1924 * RETURNS:
1925 * Percpu pointer to the allocated area on success, NULL on failure.
1926 */
1927void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1928{
1929	return pcpu_alloc(size, align, true, GFP_KERNEL);
1930}
1931
1932/**
1933 * __pcpu_balance_workfn - manage the number of free chunks and populated pages
1934 * @type: chunk type
1935 *
1936 * Reclaim all fully free chunks except for the first one.  This is also
1937 * responsible for maintaining the pool of empty populated pages.  However,
1938 * it is possible that this is called when physical memory is scarce, causing
1939 * the OOM killer to be triggered.  We should avoid doing so until an actual
1940 * allocation causes the failure as it is possible that requests can be
1941 * serviced from already backed regions.
1942 */
1943static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
1944{
1945	/* gfp flags passed to underlying allocators */
1946	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
1947	LIST_HEAD(to_free);
1948	struct list_head *pcpu_slot = pcpu_chunk_list(type);
1949	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1950	struct pcpu_chunk *chunk, *next;
1951	int slot, nr_to_pop, ret;
1952
1953	/*
1954	 * There's no reason to keep around multiple unused chunks and VM
1955	 * areas can be scarce.  Destroy all free chunks except for one.
1956	 */
1957	mutex_lock(&pcpu_alloc_mutex);
1958	spin_lock_irq(&pcpu_lock);
1959
1960	list_for_each_entry_safe(chunk, next, free_head, list) {
1961		WARN_ON(chunk->immutable);
1962
1963		/* spare the first one */
1964		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1965			continue;
1966
1967		list_move(&chunk->list, &to_free);
1968	}
1969
1970	spin_unlock_irq(&pcpu_lock);
1971
1972	list_for_each_entry_safe(chunk, next, &to_free, list) {
1973		unsigned int rs, re;
1974
1975		bitmap_for_each_set_region(chunk->populated, rs, re, 0,
1976					   chunk->nr_pages) {
1977			pcpu_depopulate_chunk(chunk, rs, re);
1978			spin_lock_irq(&pcpu_lock);
1979			pcpu_chunk_depopulated(chunk, rs, re);
1980			spin_unlock_irq(&pcpu_lock);
1981		}
1982		pcpu_destroy_chunk(chunk);
1983		cond_resched();
1984	}
1985
1986	/*
1987	 * Ensure there is a certain number of free populated pages for
1988	 * atomic allocs.  Fill up from the most packed so that atomic
1989	 * allocs don't increase fragmentation.  If atomic allocation
1990	 * failed previously, always populate the maximum amount.  This
1991	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
1992	 * failing indefinitely; however, large atomic allocs are not
1993	 * something we support properly and can be highly unreliable and
1994	 * inefficient.
1995	 */
1996retry_pop:
1997	if (pcpu_atomic_alloc_failed) {
1998		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1999		/* best effort anyway, don't worry about synchronization */
2000		pcpu_atomic_alloc_failed = false;
2001	} else {
2002		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2003				  pcpu_nr_empty_pop_pages,
2004				  0, PCPU_EMPTY_POP_PAGES_HIGH);
2005	}
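	/*
	 * Illustrative: with PCPU_EMPTY_POP_PAGES_HIGH == 4 and three empty
	 * populated pages already available, the clamp above yields
	 * nr_to_pop == 1; after an atomic allocation failure the full
	 * amount is requested regardless.
	 */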
2006
2007	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
2008		unsigned int nr_unpop = 0, rs, re;
2009
2010		if (!nr_to_pop)
2011			break;
2012
2013		spin_lock_irq(&pcpu_lock);
2014		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
2015			nr_unpop = chunk->nr_pages - chunk->nr_populated;
2016			if (nr_unpop)
2017				break;
2018		}
2019		spin_unlock_irq(&pcpu_lock);
2020
2021		if (!nr_unpop)
2022			continue;
2023
2024		/* @chunk can't go away while pcpu_alloc_mutex is held */
2025		bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
2026					     chunk->nr_pages) {
2027			int nr = min_t(int, re - rs, nr_to_pop);
2028
2029			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2030			if (!ret) {
2031				nr_to_pop -= nr;
2032				spin_lock_irq(&pcpu_lock);
2033				pcpu_chunk_populated(chunk, rs, rs + nr);
2034				spin_unlock_irq(&pcpu_lock);
2035			} else {
2036				nr_to_pop = 0;
2037			}
2038
2039			if (!nr_to_pop)
2040				break;
2041		}
2042	}
2043
2044	if (nr_to_pop) {
2045		/* ran out of chunks to populate, create a new one and retry */
2046		chunk = pcpu_create_chunk(type, gfp);
2047		if (chunk) {
2048			spin_lock_irq(&pcpu_lock);
2049			pcpu_chunk_relocate(chunk, -1);
2050			spin_unlock_irq(&pcpu_lock);
2051			goto retry_pop;
2052		}
2053	}
2054
2055	mutex_unlock(&pcpu_alloc_mutex);
2056}
2057
2058/**
2059 * pcpu_balance_workfn - manage the number of free chunks and populated pages
2060 * @work: unused
2061 *
2062 * Call __pcpu_balance_workfn() for each chunk type.
2063 */
2064static void pcpu_balance_workfn(struct work_struct *work)
2065{
2066	enum pcpu_chunk_type type;
2067
2068	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
2069		__pcpu_balance_workfn(type);
2070}
2071
2072/**
2073 * free_percpu - free percpu area
2074 * @ptr: pointer to area to free
2075 *
2076 * Free percpu area @ptr.
2077 *
2078 * CONTEXT:
2079 * Can be called from atomic context.
2080 */
2081void free_percpu(void __percpu *ptr)
2082{
2083	void *addr;
2084	struct pcpu_chunk *chunk;
2085	unsigned long flags;
2086	int size, off;
2087	bool need_balance = false;
2088	struct list_head *pcpu_slot;
2089
2090	if (!ptr)
2091		return;
2092
2093	kmemleak_free_percpu(ptr);
2094
2095	addr = __pcpu_ptr_to_addr(ptr);
2096
2097	spin_lock_irqsave(&pcpu_lock, flags);
2098
2099	chunk = pcpu_chunk_addr_search(addr);
2100	off = addr - chunk->base_addr;
2101
2102	size = pcpu_free_area(chunk, off);
2103
2104	pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
2105
2106	pcpu_memcg_free_hook(chunk, off, size);
2107
2108	/* if there is more than one fully free chunk, wake up the grim reaper */
2109	if (chunk->free_bytes == pcpu_unit_size) {
2110		struct pcpu_chunk *pos;
2111
2112		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
2113			if (pos != chunk) {
2114				need_balance = true;
2115				break;
2116			}
2117	}
2118
2119	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2120
2121	spin_unlock_irqrestore(&pcpu_lock, flags);
2122
2123	if (need_balance)
2124		pcpu_schedule_balance_work();
2125}
2126EXPORT_SYMBOL_GPL(free_percpu);
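/*
 * Lifecycle sketch (illustrative): a typical caller pairs a dynamic percpu
 * allocation with free_percpu() and accesses the area through the regular
 * percpu accessors.  "counter" is a hypothetical example.
 *
 *	unsigned long __percpu *counter;
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	counter = alloc_percpu(unsigned long);
 *	if (!counter)
 *		return -ENOMEM;
 *
 *	this_cpu_inc(*counter);
 *	for_each_possible_cpu(cpu)
 *		sum += *per_cpu_ptr(counter, cpu);
 *
 *	free_percpu(counter);
 */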
2127
2128bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2129{
2130#ifdef CONFIG_SMP
2131	const size_t static_size = __per_cpu_end - __per_cpu_start;
2132	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2133	unsigned int cpu;
2134
2135	for_each_possible_cpu(cpu) {
2136		void *start = per_cpu_ptr(base, cpu);
2137		void *va = (void *)addr;
2138
2139		if (va >= start && va < start + static_size) {
2140			if (can_addr) {
2141				*can_addr = (unsigned long) (va - start);
2142				*can_addr += (unsigned long)
2143					per_cpu_ptr(base, get_boot_cpu_id());
2144			}
2145			return true;
2146		}
2147	}
2148#endif
2149	/* on UP, can't distinguish from other static vars, always false */
2150	return false;
2151}
2152
2153/**
2154 * is_kernel_percpu_address - test whether address is from static percpu area
2155 * @addr: address to test
2156 *
2157 * Test whether @addr belongs to the in-kernel static percpu area.  Module
2158 * static percpu areas are not considered.  For those, use
2159 * is_module_percpu_address().
2160 *
2161 * RETURNS:
2162 * %true if @addr is from in-kernel static percpu area, %false otherwise.
2163 */
2164bool is_kernel_percpu_address(unsigned long addr)
2165{
2166	return __is_kernel_percpu_address(addr, NULL);
2167}
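/*
 * Illustrative: object-validation code (for example lockdep's static
 * object check) can use this to decide whether an address outside the
 * kernel image or module ranges still refers to a static, always-valid
 * object:
 *
 *	if (is_kernel_percpu_address(addr))
 *		return true;
 */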
2168
2169/**
2170 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2171 * @addr: the address to be converted to physical address
2172 *
2173 * Given @addr, a dereferenceable address obtained via one of
2174 * percpu access macros, this function translates it into its physical
2175 * address.  The caller is responsible for ensuring @addr stays valid
2176 * until this function finishes.
2177 *
2178 * The percpu allocator has special setup for the first chunk, which currently
2179 * supports either embedding in the linear address space or vmalloc mapping;
2180 * from the second chunk on, the backing allocator (currently either vm or
2181 * km) provides the translation.
2182 *
2183 * The address could be translated without checking whether it falls into the
2184 * first chunk, but the current code better reflects how the percpu allocator
2185 * actually works, and the verification can discover bugs both in the percpu
2186 * allocator itself and in per_cpu_ptr_to_phys() callers.  So the current
2187 * code is kept.
2188 *
2189 * RETURNS:
2190 * The physical address for @addr.
2191 */
2192phys_addr_t per_cpu_ptr_to_phys(void *addr)
2193{
2194	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2195	bool in_first_chunk = false;
2196	unsigned long first_low, first_high;
2197	unsigned int cpu;
2198
2199	/*
2200	 * The following test on unit_low/high isn't strictly
2201	 * necessary but will speed up lookups of addresses which
2202	 * aren't in the first chunk.
2203	 *
2204	 * The address check is against full chunk sizes.  pcpu_base_addr
2205	 * points to the beginning of the first chunk including the
2206	 * static region.  Assumes good intent as the first chunk may
2207	 * not be full (ie. < pcpu_unit_pages in size).
2208	 */
2209	first_low = (unsigned long)pcpu_base_addr +
2210		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2211	first_high = (unsigned long)pcpu_base_addr +
2212		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2213	if ((unsigned long)addr >= first_low &&
2214	    (unsigned long)addr < first_high) {
2215		for_each_possible_cpu(cpu) {
2216			void *start = per_cpu_ptr(base, cpu);
2217
2218			if (addr >= start && addr < start + pcpu_unit_size) {
2219				in_first_chunk = true;
2220				break;
2221			}
2222		}
2223	}
2224
2225	if (in_first_chunk) {
2226		if (!is_vmalloc_addr(addr))
2227			return __pa(addr);
2228		else
2229			return page_to_phys(vmalloc_to_page(addr)) +
2230			       offset_in_page(addr);
2231	} else
2232		return page_to_phys(pcpu_addr_to_page(addr)) +
2233		       offset_in_page(addr);
2234}
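/*
 * Usage sketch (illustrative): translate one cpu's instance of a percpu
 * object to a physical address, e.g. to hand it to hardware or a firmware
 * interface.  "buf" is a hypothetical percpu buffer.
 *
 *	phys_addr_t pa;
 *
 *	pa = per_cpu_ptr_to_phys(per_cpu_ptr(buf, cpu));
 */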
2235
2236/**
2237 * pcpu_alloc_alloc_info - allocate percpu allocation info
2238 * @nr_groups: the number of groups
2239 * @nr_units: the number of units
2240 *
2241 * Allocate ai which is large enough for @nr_groups groups containing
2242 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
2243 * cpu_map array which is long enough for @nr_units and filled with
2244 * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
2245 * pointers of the other groups.
2246 *
2247 * RETURNS:
2248 * Pointer to the allocated pcpu_alloc_info on success, NULL on
2249 * failure.
2250 */
2251struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2252						      int nr_units)
2253{
2254	struct pcpu_alloc_info *ai;
2255	size_t base_size, ai_size;
2256	void *ptr;
2257	int unit;
2258
2259	base_size = ALIGN(struct_size(ai, groups, nr_groups),
2260			  __alignof__(ai->groups[0].cpu_map[0]));
2261	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2262
2263	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2264	if (!ptr)
2265		return NULL;
2266	ai = ptr;
2267	ptr += base_size;
2268
2269	ai->groups[0].cpu_map = ptr;
2270
2271	for (unit = 0; unit < nr_units; unit++)
2272		ai->groups[0].cpu_map[unit] = NR_CPUS;
2273
2274	ai->nr_groups = nr_groups;
2275	ai->__ai_size = PFN_ALIGN(ai_size);
2276
2277	return ai;
2278}
2279
2280/**
2281 * pcpu_free_alloc_info - free percpu allocation info
2282 * @ai: pcpu_alloc_info to free
2283 *
2284 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2285 */
2286void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2287{
2288	memblock_free_early(__pa(ai), ai->__ai_size);
2289}
2290
2291/**
2292 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2293 * @lvl: loglevel
2294 * @ai: allocation info to dump
2295 *
2296 * Print out information about @ai using loglevel @lvl.
2297 */
2298static void pcpu_dump_alloc_info(const char *lvl,
2299				 const struct pcpu_alloc_info *ai)
2300{
2301	int group_width = 1, cpu_width = 1, width;
2302	char empty_str[] = "--------";
2303	int alloc = 0, alloc_end = 0;
2304	int group, v;
2305	int upa, apl;	/* units per alloc, allocs per line */
2306
2307	v = ai->nr_groups;
2308	while (v /= 10)
2309		group_width++;
2310
2311	v = num_possible_cpus();
2312	while (v /= 10)
2313		cpu_width++;
2314	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2315
2316	upa = ai->alloc_size / ai->unit_size;
2317	width = upa * (cpu_width + 1) + group_width + 3;
2318	apl = rounddown_pow_of_two(max(60 / width, 1));
2319
2320	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2321	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2322	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2323
2324	for (group = 0; group < ai->nr_groups; group++) {
2325		const struct pcpu_group_info *gi = &ai->groups[group];
2326		int unit = 0, unit_end = 0;
2327
2328		BUG_ON(gi->nr_units % upa);
2329		for (alloc_end += gi->nr_units / upa;
2330		     alloc < alloc_end; alloc++) {
2331			if (!(alloc % apl)) {
2332				pr_cont("\n");
2333				printk("%spcpu-alloc: ", lvl);
2334			}
2335			pr_cont("[%0*d] ", group_width, group);
2336
2337			for (unit_end += upa; unit < unit_end; unit++)
2338				if (gi->cpu_map[unit] != NR_CPUS)
2339					pr_cont("%0*d ",
2340						cpu_width, gi->cpu_map[unit]);
2341				else
2342					pr_cont("%s ", empty_str);
2343		}
2344	}
2345	pr_cont("\n");
2346}
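/*
 * A representative (illustrative) dump produced by the function above for
 * an 8-cpu, single group configuration looks like:
 *
 *	pcpu-alloc: s151552 r8192 d28672 u262144 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3 4 5 6 7
 *
 * where s/r/d/u are the static, reserved, dynamic and unit sizes in bytes,
 * and each bracketed group index is followed by the cpus (dashes for
 * unused units) mapped into one allocation.
 */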
2347
2348/**
2349 * pcpu_setup_first_chunk - initialize the first percpu chunk
2350 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2351 * @base_addr: mapped address
2352 *
2353 * Initialize the first percpu chunk which contains the kernel static
2354 * percpu area.  This function is to be called from arch percpu area
2355 * setup path.
2356 *
2357 * @ai contains all information necessary to initialize the first
2358 * chunk and prime the dynamic percpu allocator.
2359 *
2360 * @ai->static_size is the size of static percpu area.
2361 *
2362 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2363 * reserve after the static area in the first chunk.  This reserves
2364 * the first chunk such that it's available only through reserved
2365 * percpu allocation.  This is primarily used to serve module percpu
2366 * static areas on architectures where the addressing model has
2367 * limited offset range for symbol relocations to guarantee module
2368 * percpu symbols fall inside the relocatable range.
2369 *
2370 * @ai->dyn_size determines the number of bytes available for dynamic
2371 * allocation in the first chunk.  The area between @ai->static_size +
2372 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2373 *
2374 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2375 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2376 * @ai->dyn_size.
2377 *
2378 * @ai->atom_size is the allocation atom size and used as alignment
2379 * for vm areas.
2380 *
2381 * @ai->alloc_size is the allocation size and always multiple of
2382 * @ai->atom_size.  This is larger than @ai->atom_size if
2383 * @ai->unit_size is larger than @ai->atom_size.
2384 *
2385 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2386 * percpu areas.  Units which should be colocated are put into the
2387 * same group.  Dynamic VM areas will be allocated according to these
2388 * groupings.  If @ai->nr_groups is zero, a single group containing
2389 * all units is assumed.
2390 *
2391 * The caller should have mapped the first chunk at @base_addr and
2392 * copied static data to each unit.
2393 *
2394 * The first chunk will always contain a static and a dynamic region.
2395 * However, the static region is not managed by any chunk.  If the first
2396 * chunk also contains a reserved region, it is served by two chunks -
2397 * one for the reserved region and one for the dynamic region.  They
2398 * share the same vm, but use offset regions in the area allocation map.
2399 * The chunk serving the dynamic region is circulated in the chunk slots
2400 * and available for dynamic allocation like any other chunk.
2401 */
2402void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2403				   void *base_addr)
2404{
2405	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2406	size_t static_size, dyn_size;
2407	struct pcpu_chunk *chunk;
2408	unsigned long *group_offsets;
2409	size_t *group_sizes;
2410	unsigned long *unit_off;
2411	unsigned int cpu;
2412	int *unit_map;
2413	int group, unit, i;
2414	int map_size;
2415	unsigned long tmp_addr;
2416	size_t alloc_size;
2417	enum pcpu_chunk_type type;
2418
2419#define PCPU_SETUP_BUG_ON(cond)	do {					\
2420	if (unlikely(cond)) {						\
2421		pr_emerg("failed to initialize, %s\n", #cond);		\
2422		pr_emerg("cpu_possible_mask=%*pb\n",			\
2423			 cpumask_pr_args(cpu_possible_mask));		\
2424		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
2425		BUG();							\
2426	}								\
2427} while (0)
2428
2429	/* sanity checks */
2430	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2431#ifdef CONFIG_SMP
2432	PCPU_SETUP_BUG_ON(!ai->static_size);
2433	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2434#endif
2435	PCPU_SETUP_BUG_ON(!base_addr);
2436	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2437	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2438	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2439	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2440	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2441	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2442	PCPU_SETUP_BUG_ON(!ai->dyn_size);
2443	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2444	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2445			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2446	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2447
2448	/* process group information and build config tables accordingly */
2449	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2450	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2451	if (!group_offsets)
2452		panic("%s: Failed to allocate %zu bytes\n", __func__,
2453		      alloc_size);
2454
2455	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2456	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2457	if (!group_sizes)
2458		panic("%s: Failed to allocate %zu bytes\n", __func__,
2459		      alloc_size);
2460
2461	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2462	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2463	if (!unit_map)
2464		panic("%s: Failed to allocate %zu bytes\n", __func__,
2465		      alloc_size);
2466
2467	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2468	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2469	if (!unit_off)
2470		panic("%s: Failed to allocate %zu bytes\n", __func__,
2471		      alloc_size);
2472
2473	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2474		unit_map[cpu] = UINT_MAX;
2475
2476	pcpu_low_unit_cpu = NR_CPUS;
2477	pcpu_high_unit_cpu = NR_CPUS;
2478
2479	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2480		const struct pcpu_group_info *gi = &ai->groups[group];
2481
2482		group_offsets[group] = gi->base_offset;
2483		group_sizes[group] = gi->nr_units * ai->unit_size;
2484
2485		for (i = 0; i < gi->nr_units; i++) {
2486			cpu = gi->cpu_map[i];
2487			if (cpu == NR_CPUS)
2488				continue;
2489
2490			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2491			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2492			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2493
2494			unit_map[cpu] = unit + i;
2495			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2496
2497			/* determine low/high unit_cpu */
2498			if (pcpu_low_unit_cpu == NR_CPUS ||
2499			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2500				pcpu_low_unit_cpu = cpu;
2501			if (pcpu_high_unit_cpu == NR_CPUS ||
2502			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2503				pcpu_high_unit_cpu = cpu;
2504		}
2505	}
2506	pcpu_nr_units = unit;
2507
2508	for_each_possible_cpu(cpu)
2509		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2510
2511	/* we're done parsing the input, undefine BUG macro and dump config */
2512#undef PCPU_SETUP_BUG_ON
2513	pcpu_dump_alloc_info(KERN_DEBUG, ai);
2514
2515	pcpu_nr_groups = ai->nr_groups;
2516	pcpu_group_offsets = group_offsets;
2517	pcpu_group_sizes = group_sizes;
2518	pcpu_unit_map = unit_map;
2519	pcpu_unit_offsets = unit_off;
2520
2521	/* determine basic parameters */
2522	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2523	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2524	pcpu_atom_size = ai->atom_size;
2525	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
2526		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
2527
2528	pcpu_stats_save_ai(ai);
2529
2530	/*
2531	 * Allocate chunk slots.  The additional last slot is for
2532	 * empty chunks.
2533	 */
2534	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
2535	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2536					  sizeof(pcpu_chunk_lists[0]) *
2537					  PCPU_NR_CHUNK_TYPES,
2538					  SMP_CACHE_BYTES);
2539	if (!pcpu_chunk_lists)
2540		panic("%s: Failed to allocate %zu bytes\n", __func__,
2541		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) *
2542		      PCPU_NR_CHUNK_TYPES);
2543
2544	for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
2545		for (i = 0; i < pcpu_nr_slots; i++)
2546			INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]);
2547
2548	/*
2549	 * The end of the static region needs to be aligned with the
2550	 * minimum allocation size as this offsets the reserved and
2551	 * dynamic region.  The first chunk ends page aligned by
2552	 * expanding the dynamic region, therefore the dynamic region
2553	 * can be shrunk to compensate while still staying above the
2554	 * configured sizes.
2555	 */
2556	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2557	dyn_size = ai->dyn_size - (static_size - ai->static_size);
2558
2559	/*
2560	 * Initialize first chunk.
2561	 * If the reserved_size is non-zero, this initializes the reserved
2562	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
2563	 * and the dynamic region is initialized here.  The first chunk,
2564	 * pcpu_first_chunk, will always point to the chunk that serves
2565	 * the dynamic region.
2566	 */
2567	tmp_addr = (unsigned long)base_addr + static_size;
2568	map_size = ai->reserved_size ?: dyn_size;
2569	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2570
2571	/* init dynamic chunk if necessary */
2572	if (ai->reserved_size) {
2573		pcpu_reserved_chunk = chunk;
2574
2575		tmp_addr = (unsigned long)base_addr + static_size +
2576			   ai->reserved_size;
2577		map_size = dyn_size;
2578		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2579	}
2580
2581	/* link the first chunk in */
2582	pcpu_first_chunk = chunk;
2583	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2584	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2585
2586	/* include all regions of the first chunk */
2587	pcpu_nr_populated += PFN_DOWN(size_sum);
2588
2589	pcpu_stats_chunk_alloc();
2590	trace_percpu_create_chunk(base_addr);
2591
2592	/* we're done */
2593	pcpu_base_addr = base_addr;
2594}
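/*
 * Call-sequence sketch (illustrative): an architecture's percpu setup
 * roughly follows the pattern below; pcpu_embed_first_chunk() further down
 * is a ready-made implementation of it.  Angle brackets mark steps the
 * caller provides.
 *
 *	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size, NULL);
 *	base = <allocate and map the units described by ai>;
 *	<copy __per_cpu_load into each used unit>;
 *	<fill in ai->groups[].base_offset relative to base>;
 *	pcpu_setup_first_chunk(ai, base);
 *	pcpu_free_alloc_info(ai);
 */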
2595
2596#ifdef CONFIG_SMP
2597
2598const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2599	[PCPU_FC_AUTO]	= "auto",
2600	[PCPU_FC_EMBED]	= "embed",
2601	[PCPU_FC_PAGE]	= "page",
2602};
2603
2604enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2605
2606static int __init percpu_alloc_setup(char *str)
2607{
2608	if (!str)
2609		return -EINVAL;
2610
2611	if (0)
2612		/* nada */;
2613#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2614	else if (!strcmp(str, "embed"))
2615		pcpu_chosen_fc = PCPU_FC_EMBED;
2616#endif
2617#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2618	else if (!strcmp(str, "page"))
2619		pcpu_chosen_fc = PCPU_FC_PAGE;
2620#endif
2621	else
2622		pr_warn("unknown allocator %s specified\n", str);
2623
2624	return 0;
2625}
2626early_param("percpu_alloc", percpu_alloc_setup);
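/*
 * Example (illustrative): the first chunk allocator can be overridden on
 * the kernel command line, e.g. "percpu_alloc=page" to force the
 * page-by-page first chunk or "percpu_alloc=embed" for the embedding
 * allocator, subject to the CONFIG_NEED_PER_CPU_*_FIRST_CHUNK options
 * handled above.
 */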
2627
2628/*
2629 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2630 * Build it if the arch config needs it or if the generic setup is going
2631 * to be used.
2632 */
2633#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2634	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2635#define BUILD_EMBED_FIRST_CHUNK
2636#endif
2637
2638/* build pcpu_page_first_chunk() iff needed by the arch config */
2639#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2640#define BUILD_PAGE_FIRST_CHUNK
2641#endif
2642
2643/* pcpu_build_alloc_info() is used by both embed and page first chunk */
2644#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2645/**
2646 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2647 * @reserved_size: the size of reserved percpu area in bytes
2648 * @dyn_size: minimum free size for dynamic allocation in bytes
2649 * @atom_size: allocation atom size
2650 * @cpu_distance_fn: callback to determine distance between cpus, optional
2651 *
2652 * This function determines grouping of units, their mappings to cpus
2653 * and other parameters considering needed percpu size, allocation
2654 * atom size and distances between CPUs.
2655 *
2656 * Groups are always multiples of atom size and CPUs which are of
2657 * LOCAL_DISTANCE both ways are grouped together and share space for
2658 * units in the same group.  The returned configuration is guaranteed
2659 * to have CPUs on different nodes on different groups and >=75% usage
2660 * of allocated virtual address space.
2661 *
2662 * RETURNS:
2663 * On success, pointer to the new allocation_info is returned.  On
2664 * failure, ERR_PTR value is returned.
2665 */
2666static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
2667				size_t reserved_size, size_t dyn_size,
2668				size_t atom_size,
2669				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2670{
2671	static int group_map[NR_CPUS] __initdata;
2672	static int group_cnt[NR_CPUS] __initdata;
2673	const size_t static_size = __per_cpu_end - __per_cpu_start;
2674	int nr_groups = 1, nr_units = 0;
2675	size_t size_sum, min_unit_size, alloc_size;
2676	int upa, max_upa, best_upa;	/* units_per_alloc */
2677	int last_allocs, group, unit;
2678	unsigned int cpu, tcpu;
2679	struct pcpu_alloc_info *ai;
2680	unsigned int *cpu_map;
2681
2682	/* this function may be called multiple times */
2683	memset(group_map, 0, sizeof(group_map));
2684	memset(group_cnt, 0, sizeof(group_cnt));
2685
2686	/* calculate size_sum and ensure dyn_size is enough for early alloc */
2687	size_sum = PFN_ALIGN(static_size + reserved_size +
2688			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2689	dyn_size = size_sum - static_size - reserved_size;
2690
2691	/*
2692	 * Determine min_unit_size, alloc_size and max_upa such that
2693	 * alloc_size is multiple of atom_size and is the smallest
2694	 * which can accommodate 4k aligned segments which are equal to
2695	 * or larger than min_unit_size.
2696	 */
2697	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2698
2699	/* determine the maximum # of units that can fit in an allocation */
2700	alloc_size = roundup(min_unit_size, atom_size);
2701	upa = alloc_size / min_unit_size;
2702	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2703		upa--;
2704	max_upa = upa;
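	/*
	 * Illustrative numbers: with static 151552 + reserved 8192 + dynamic
	 * 28672 bytes, size_sum is 188416 (46 pages).  For a 2MB atom_size,
	 * alloc_size becomes 2097152 and the loop above starts at upa = 11
	 * and settles on upa = 8 (262144 bytes per unit), the largest value
	 * that divides alloc_size evenly and keeps each unit page aligned.
	 */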
2705
2706	/* group cpus according to their proximity */
2707	for_each_possible_cpu(cpu) {
2708		group = 0;
2709	next_group:
2710		for_each_possible_cpu(tcpu) {
2711			if (cpu == tcpu)
2712				break;
2713			if (group_map[tcpu] == group && cpu_distance_fn &&
2714			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
2715			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
2716				group++;
2717				nr_groups = max(nr_groups, group + 1);
2718				goto next_group;
2719			}
2720		}
2721		group_map[cpu] = group;
2722		group_cnt[group]++;
2723	}
2724
2725	/*
2726	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2727	 * Expand the unit_size until we use >= 75% of the units allocated.
2728	 * Related to atom_size, which could be much larger than the unit_size.
2729	 */
2730	last_allocs = INT_MAX;
2731	for (upa = max_upa; upa; upa--) {
2732		int allocs = 0, wasted = 0;
2733
2734		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2735			continue;
2736
2737		for (group = 0; group < nr_groups; group++) {
2738			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2739			allocs += this_allocs;
2740			wasted += this_allocs * upa - group_cnt[group];
2741		}
2742
2743		/*
2744		 * Don't accept if wastage is over 1/3.  The
2745		 * greater-than comparison ensures upa==1 always
2746		 * passes the following check.
2747		 */
2748		if (wasted > num_possible_cpus() / 3)
2749			continue;
2750
2751		/* and then don't consume more memory */
2752		if (allocs > last_allocs)
2753			break;
2754		last_allocs = allocs;
2755		best_upa = upa;
2756	}
2757	upa = best_upa;
2758
2759	/* allocate and fill alloc_info */
2760	for (group = 0; group < nr_groups; group++)
2761		nr_units += roundup(group_cnt[group], upa);
2762
2763	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2764	if (!ai)
2765		return ERR_PTR(-ENOMEM);
2766	cpu_map = ai->groups[0].cpu_map;
2767
2768	for (group = 0; group < nr_groups; group++) {
2769		ai->groups[group].cpu_map = cpu_map;
2770		cpu_map += roundup(group_cnt[group], upa);
2771	}
2772
2773	ai->static_size = static_size;
2774	ai->reserved_size = reserved_size;
2775	ai->dyn_size = dyn_size;
2776	ai->unit_size = alloc_size / upa;
2777	ai->atom_size = atom_size;
2778	ai->alloc_size = alloc_size;
2779
2780	for (group = 0, unit = 0; group < nr_groups; group++) {
2781		struct pcpu_group_info *gi = &ai->groups[group];
2782
2783		/*
2784		 * Initialize base_offset as if all groups are located
2785		 * back-to-back.  The caller should update this to
2786		 * reflect actual allocation.
2787		 */
2788		gi->base_offset = unit * ai->unit_size;
2789
2790		for_each_possible_cpu(cpu)
2791			if (group_map[cpu] == group)
2792				gi->cpu_map[gi->nr_units++] = cpu;
2793		gi->nr_units = roundup(gi->nr_units, upa);
2794		unit += gi->nr_units;
2795	}
2796	BUG_ON(unit != nr_units);
2797
2798	return ai;
2799}
2800#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
2801
2802#if defined(BUILD_EMBED_FIRST_CHUNK)
2803/**
2804 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
2805 * @reserved_size: the size of reserved percpu area in bytes
2806 * @dyn_size: minimum free size for dynamic allocation in bytes
2807 * @atom_size: allocation atom size
2808 * @cpu_distance_fn: callback to determine distance between cpus, optional
2809 * @alloc_fn: function to allocate percpu page
2810 * @free_fn: function to free percpu page
2811 *
2812 * This is a helper to ease setting up embedded first percpu chunk and
2813 * can be called where pcpu_setup_first_chunk() is expected.
2814 *
2815 * If this function is used to setup the first chunk, it is allocated
2816 * by calling @alloc_fn and used as-is without being mapped into
2817 * vmalloc area.  Allocations are always whole multiples of @atom_size
2818 * aligned to @atom_size.
2819 *
2820 * This enables the first chunk to piggy back on the linear physical
2821 * mapping which often uses larger page size.  Please note that this
2822 * can result in very sparse cpu->unit mapping on NUMA machines thus
2823 * requiring large vmalloc address space.  Don't use this allocator if
2824 * vmalloc space is not orders of magnitude larger than distances
2825 * between node memory addresses (ie. 32bit NUMA machines).
2826 *
2827 * @dyn_size specifies the minimum dynamic area size.
2828 *
2829 * If the needed size is smaller than the minimum or specified unit
2830 * size, the leftover is returned using @free_fn.
2831 *
2832 * RETURNS:
2833 * 0 on success, -errno on failure.
2834 */
2835int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
2836				  size_t atom_size,
2837				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
2838				  pcpu_fc_alloc_fn_t alloc_fn,
2839				  pcpu_fc_free_fn_t free_fn)
2840{
2841	void *base = (void *)ULONG_MAX;
2842	void **areas = NULL;
2843	struct pcpu_alloc_info *ai;
2844	size_t size_sum, areas_size;
2845	unsigned long max_distance;
2846	int group, i, highest_group, rc = 0;
2847
2848	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
2849				   cpu_distance_fn);
2850	if (IS_ERR(ai))
2851		return PTR_ERR(ai);
2852
2853	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2854	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
2855
2856	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
2857	if (!areas) {
2858		rc = -ENOMEM;
2859		goto out_free;
2860	}
2861
2862	/* allocate, copy and determine base address & max_distance */
2863	highest_group = 0;
2864	for (group = 0; group < ai->nr_groups; group++) {
2865		struct pcpu_group_info *gi = &ai->groups[group];
2866		unsigned int cpu = NR_CPUS;
2867		void *ptr;
2868
2869		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
2870			cpu = gi->cpu_map[i];
2871		BUG_ON(cpu == NR_CPUS);
2872
2873		/* allocate space for the whole group */
2874		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
2875		if (!ptr) {
2876			rc = -ENOMEM;
2877			goto out_free_areas;
2878		}
2879		/* kmemleak tracks the percpu allocations separately */
2880		kmemleak_free(ptr);
2881		areas[group] = ptr;
2882
2883		base = min(ptr, base);
2884		if (ptr > areas[highest_group])
2885			highest_group = group;
2886	}
2887	max_distance = areas[highest_group] - base;
2888	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2889
2890	/* warn if maximum distance is further than 75% of vmalloc space */
2891	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2892		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2893				max_distance, VMALLOC_TOTAL);
2894#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2895		/* and fail if we have fallback */
2896		rc = -EINVAL;
2897		goto out_free_areas;
2898#endif
2899	}
2900
2901	/*
2902	 * Copy data and free unused parts.  This should happen after all
2903	 * allocations are complete; otherwise, we may end up with
2904	 * overlapping groups.
2905	 */
2906	for (group = 0; group < ai->nr_groups; group++) {
2907		struct pcpu_group_info *gi = &ai->groups[group];
2908		void *ptr = areas[group];
2909
2910		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2911			if (gi->cpu_map[i] == NR_CPUS) {
2912				/* unused unit, free whole */
2913				free_fn(ptr, ai->unit_size);
2914				continue;
2915			}
2916			/* copy and return the unused part */
2917			memcpy(ptr, __per_cpu_load, ai->static_size);
2918			free_fn(ptr + size_sum, ai->unit_size - size_sum);
2919		}
2920	}
2921
2922	/* base address is now known, determine group base offsets */
2923	for (group = 0; group < ai->nr_groups; group++) {
2924		ai->groups[group].base_offset = areas[group] - base;
2925	}
2926
2927	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
2928		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
2929		ai->dyn_size, ai->unit_size);
2930
2931	pcpu_setup_first_chunk(ai, base);
2932	goto out_free;
2933
2934out_free_areas:
2935	for (group = 0; group < ai->nr_groups; group++)
2936		if (areas[group])
2937			free_fn(areas[group],
2938				ai->groups[group].nr_units * ai->unit_size);
2939out_free:
2940	pcpu_free_alloc_info(ai);
2941	if (areas)
2942		memblock_free_early(__pa(areas), areas_size);
2943	return rc;
2944}
2945#endif /* BUILD_EMBED_FIRST_CHUNK */
2946
2947#ifdef BUILD_PAGE_FIRST_CHUNK
2948/**
2949 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2950 * @reserved_size: the size of reserved percpu area in bytes
2951 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2952 * @free_fn: function to free percpu page, always called with PAGE_SIZE
2953 * @populate_pte_fn: function to populate pte
2954 *
2955 * This is a helper to ease setting up page-remapped first percpu
2956 * chunk and can be called where pcpu_setup_first_chunk() is expected.
2957 *
2958 * This is the basic allocator.  Static percpu area is allocated
2959 * page-by-page into vmalloc area.
2960 *
2961 * RETURNS:
2962 * 0 on success, -errno on failure.
2963 */
2964int __init pcpu_page_first_chunk(size_t reserved_size,
2965				 pcpu_fc_alloc_fn_t alloc_fn,
2966				 pcpu_fc_free_fn_t free_fn,
2967				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2968{
2969	static struct vm_struct vm;
2970	struct pcpu_alloc_info *ai;
2971	char psize_str[16];
2972	int unit_pages;
2973	size_t pages_size;
2974	struct page **pages;
2975	int unit, i, j, rc = 0;
2976	int upa;
2977	int nr_g0_units;
2978
2979	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2980
2981	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2982	if (IS_ERR(ai))
2983		return PTR_ERR(ai);
2984	BUG_ON(ai->nr_groups != 1);
2985	upa = ai->alloc_size/ai->unit_size;
2986	nr_g0_units = roundup(num_possible_cpus(), upa);
2987	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
2988		pcpu_free_alloc_info(ai);
2989		return -EINVAL;
2990	}
2991
2992	unit_pages = ai->unit_size >> PAGE_SHIFT;
2993
2994	/* unaligned allocations can't be freed, round up to page size */
2995	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2996			       sizeof(pages[0]));
2997	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
2998	if (!pages)
2999		panic("%s: Failed to allocate %zu bytes\n", __func__,
3000		      pages_size);
3001
3002	/* allocate pages */
3003	j = 0;
3004	for (unit = 0; unit < num_possible_cpus(); unit++) {
3005		unsigned int cpu = ai->groups[0].cpu_map[unit];
3006		for (i = 0; i < unit_pages; i++) {
3007			void *ptr;
3008
3009			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
3010			if (!ptr) {
3011				pr_warn("failed to allocate %s page for cpu%u\n",
3012						psize_str, cpu);
3013				goto enomem;
3014			}
3015			/* kmemleak tracks the percpu allocations separately */
3016			kmemleak_free(ptr);
3017			pages[j++] = virt_to_page(ptr);
3018		}
3019	}
3020
3021	/* allocate vm area, map the pages and copy static data */
3022	vm.flags = VM_ALLOC;
3023	vm.size = num_possible_cpus() * ai->unit_size;
3024	vm_area_register_early(&vm, PAGE_SIZE);
3025
3026	for (unit = 0; unit < num_possible_cpus(); unit++) {
3027		unsigned long unit_addr =
3028			(unsigned long)vm.addr + unit * ai->unit_size;
3029
3030		for (i = 0; i < unit_pages; i++)
3031			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
3032
3033		/* pte already populated, the following shouldn't fail */
3034		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3035				      unit_pages);
3036		if (rc < 0)
3037			panic("failed to map percpu area, err=%d\n", rc);
3038
3039		/*
3040		 * FIXME: Archs with virtual cache should flush local
3041		 * cache for the linear mapping here - something
3042		 * equivalent to flush_cache_vmap() on the local cpu.
3043		 * flush_cache_vmap() can't be used as most supporting
3044		 * data structures are not set up yet.
3045		 */
3046
3047		/* copy static data */
3048		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3049	}
3050
3051	/* we're ready, commit */
3052	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
3053		unit_pages, psize_str, ai->static_size,
3054		ai->reserved_size, ai->dyn_size);
3055
3056	pcpu_setup_first_chunk(ai, vm.addr);
3057	goto out_free_ar;
3058
3059enomem:
3060	while (--j >= 0)
3061		free_fn(page_address(pages[j]), PAGE_SIZE);
3062	rc = -ENOMEM;
3063out_free_ar:
3064	memblock_free_early(__pa(pages), pages_size);
3065	pcpu_free_alloc_info(ai);
3066	return rc;
3067}
3068#endif /* BUILD_PAGE_FIRST_CHUNK */
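/*
 * Usage sketch (illustrative): an architecture selecting
 * CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK calls the helper above from its
 * setup_per_cpu_areas() with its own bootmem-backed callbacks (the helper
 * names here are hypothetical):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   arch_pcpu_alloc, arch_pcpu_free,
 *				   arch_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */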
3069
3070#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
3071/*
3072 * Generic SMP percpu area setup.
3073 *
3074 * The embedding helper is used because its behavior closely resembles
3075 * the original non-dynamic generic percpu area setup.  This is
3076 * important because many archs have addressing restrictions and might
3077 * fail if the percpu area is located far away from the previous
3078 * location.  As an added bonus, in non-NUMA cases, embedding is
3079 * generally a good idea TLB-wise because percpu area can piggy back
3080 * on the physical linear memory mapping which uses large page
3081 * mappings on applicable archs.
3082 */
3083unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3084EXPORT_SYMBOL(__per_cpu_offset);
3085
3086static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
3087				       size_t align)
3088{
3089	return  memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
3090}
3091
3092static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
3093{
3094	memblock_free_early(__pa(ptr), size);
3095}
3096
3097void __init setup_per_cpu_areas(void)
3098{
3099	unsigned long delta;
3100	unsigned int cpu;
3101	int rc;
3102
3103	/*
3104	 * Always reserve area for module percpu variables.  That's
3105	 * what the legacy allocator did.
3106	 */
3107	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
3108				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
3109				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
3110	if (rc < 0)
3111		panic("Failed to initialize percpu areas.");
3112
3113	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3114	for_each_possible_cpu(cpu)
3115		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3116}
3117#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3118
3119#else	/* CONFIG_SMP */
3120
3121/*
3122 * UP percpu area setup.
3123 *
3124 * UP always uses km-based percpu allocator with identity mapping.
3125 * Static percpu variables are indistinguishable from the usual static
3126 * variables and don't require any special preparation.
3127 */
3128void __init setup_per_cpu_areas(void)
3129{
3130	const size_t unit_size =
3131		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3132					 PERCPU_DYNAMIC_RESERVE));
3133	struct pcpu_alloc_info *ai;
3134	void *fc;
3135
3136	ai = pcpu_alloc_alloc_info(1, 1);
3137	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3138	if (!ai || !fc)
3139		panic("Failed to allocate memory for percpu areas.");
3140	/* kmemleak tracks the percpu allocations separately */
3141	kmemleak_free(fc);
3142
3143	ai->dyn_size = unit_size;
3144	ai->unit_size = unit_size;
3145	ai->atom_size = unit_size;
3146	ai->alloc_size = unit_size;
3147	ai->groups[0].nr_units = 1;
3148	ai->groups[0].cpu_map[0] = 0;
3149
3150	pcpu_setup_first_chunk(ai, fc);
3151	pcpu_free_alloc_info(ai);
3152}
3153
3154#endif	/* CONFIG_SMP */
3155
3156/*
3157 * pcpu_nr_pages - calculate total number of populated backing pages
3158 *
3159 * This reflects the number of pages populated to back chunks.  Metadata is
3160 * excluded in the number exposed in meminfo as the number of backing pages
3161 * scales with the number of cpus and can quickly outweigh the memory used for
3162 * metadata.  It also keeps this calculation nice and simple.
3163 *
3164 * RETURNS:
3165 * Total number of populated backing pages in use by the allocator.
3166 */
3167unsigned long pcpu_nr_pages(void)
3168{
3169	return pcpu_nr_populated * pcpu_nr_units;
3170}
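/*
 * Illustrative: this figure is what backs the "Percpu:" line in
 * /proc/meminfo.  E.g. 184 populated pages per unit across 16 units
 * reports 184 * 16 * 4 KiB = 11776 kB.
 */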
3171
3172/*
3173 * The percpu allocator is initialized early during boot, when neither slab nor
3174 * workqueue is available.  Plug async management until everything is up
3175 * and running.
3176 */
3177static int __init percpu_enable_async(void)
3178{
3179	pcpu_async_enabled = true;
3180	return 0;
3181}
3182subsys_initcall(percpu_enable_async);
v4.10.11
 
   1/*
   2 * mm/percpu.c - percpu memory allocator
   3 *
   4 * Copyright (C) 2009		SUSE Linux Products GmbH
   5 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
   6 *
   7 * This file is released under the GPLv2.
 
   8 *
   9 * This is percpu allocator which can handle both static and dynamic
  10 * areas.  Percpu areas are allocated in chunks.  Each chunk is
  11 * consisted of boot-time determined number of units and the first
  12 * chunk is used for static percpu variables in the kernel image
  13 * (special boot time alloc/init handling necessary as these areas
  14 * need to be brought up before allocation services are running).
  15 * Unit grows as necessary and all units grow or shrink in unison.
  16 * When a chunk is filled up, another chunk is allocated.
  17 *
  18 *  c0                           c1                         c2
  19 *  -------------------          -------------------        ------------
  20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  21 *  -------------------  ......  -------------------  ....  ------------
  22 *
  23 * Allocation is done in offset-size areas of single unit space.  Ie,
  24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  25 * c1:u1, c1:u2 and c1:u3.  On UMA, units corresponds directly to
  26 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
  27 * Percpu access can be done by configuring percpu base registers
  28 * according to cpu to unit mapping and pcpu_unit_size.
  29 *
  30 * There are usually many small percpu allocations many of them being
  31 * as small as 4 bytes.  The allocator organizes chunks into lists
  32 * according to free size and tries to allocate from the fullest one.
  33 * Each chunk keeps the maximum contiguous area size hint which is
  34 * guaranteed to be equal to or larger than the maximum contiguous
  35 * area in the chunk.  This helps the allocator not to iterate the
  36 * chunk maps unnecessarily.
  37 *
  38 * Allocation state in each chunk is kept using an array of integers
  39 * on chunk->map.  A positive value in the map represents a free
  40 * region and negative allocated.  Allocation inside a chunk is done
  41 * by scanning this map sequentially and serving the first matching
  42 * entry.  This is mostly copied from the percpu_modalloc() allocator.
  43 * Chunks can be determined from the address using the index field
  44 * in the page struct. The index field contains a pointer to the chunk.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  45 *
  46 * To use this allocator, arch code should do the followings.
  47 *
  48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  49 *   regular address to percpu pointer and back if they need to be
  50 *   different from the default
  51 *
  52 * - use pcpu_setup_first_chunk() during percpu area initialization to
  53 *   setup the first chunk containing the kernel static percpu area
  54 */
  55
  56#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  57
  58#include <linux/bitmap.h>
  59#include <linux/bootmem.h>
  60#include <linux/err.h>
  61#include <linux/list.h>
  62#include <linux/log2.h>
  63#include <linux/mm.h>
  64#include <linux/module.h>
  65#include <linux/mutex.h>
  66#include <linux/percpu.h>
  67#include <linux/pfn.h>
  68#include <linux/slab.h>
  69#include <linux/spinlock.h>
  70#include <linux/vmalloc.h>
  71#include <linux/workqueue.h>
  72#include <linux/kmemleak.h>
  73
  74#include <asm/cacheflush.h>
  75#include <asm/sections.h>
  76#include <asm/tlbflush.h>
  77#include <asm/io.h>
  78
  79#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
  80#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
  81#define PCPU_ATOMIC_MAP_MARGIN_LOW	32
  82#define PCPU_ATOMIC_MAP_MARGIN_HIGH	64
  83#define PCPU_EMPTY_POP_PAGES_LOW	2
  84#define PCPU_EMPTY_POP_PAGES_HIGH	4
  85
  86#ifdef CONFIG_SMP
  87/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
  88#ifndef __addr_to_pcpu_ptr
  89#define __addr_to_pcpu_ptr(addr)					\
  90	(void __percpu *)((unsigned long)(addr) -			\
  91			  (unsigned long)pcpu_base_addr	+		\
  92			  (unsigned long)__per_cpu_start)
  93#endif
  94#ifndef __pcpu_ptr_to_addr
  95#define __pcpu_ptr_to_addr(ptr)						\
  96	(void __force *)((unsigned long)(ptr) +				\
  97			 (unsigned long)pcpu_base_addr -		\
  98			 (unsigned long)__per_cpu_start)
  99#endif
 100#else	/* CONFIG_SMP */
 101/* on UP, it's always identity mapped */
 102#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
 103#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
 104#endif	/* CONFIG_SMP */
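/*
 * A minimal sketch (hypothetical helper, not part of the allocator) of how
 * the translation macros above round-trip: converting an address in the
 * first chunk to a percpu pointer and back yields the original address.
 */
static inline bool __maybe_unused pcpu_ptr_roundtrip_ok(void *addr)
{
	void __percpu *ptr = __addr_to_pcpu_ptr(addr);

	return __pcpu_ptr_to_addr(ptr) == addr;
}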
 105
 106struct pcpu_chunk {
 107	struct list_head	list;		/* linked to pcpu_slot lists */
 108	int			free_size;	/* free bytes in the chunk */
 109	int			contig_hint;	/* max contiguous size hint */
 110	void			*base_addr;	/* base address of this chunk */
 111
 112	int			map_used;	/* # of map entries used before the sentry */
 113	int			map_alloc;	/* # of map entries allocated */
 114	int			*map;		/* allocation map */
 115	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
 116
 117	void			*data;		/* chunk data */
 118	int			first_free;	/* no free below this */
 119	bool			immutable;	/* no [de]population allowed */
 120	int			nr_populated;	/* # of populated pages */
 121	unsigned long		populated[];	/* populated bitmap */
 122};
 123
 124static int pcpu_unit_pages __read_mostly;
 125static int pcpu_unit_size __read_mostly;
 126static int pcpu_nr_units __read_mostly;
 127static int pcpu_atom_size __read_mostly;
 128static int pcpu_nr_slots __read_mostly;
 129static size_t pcpu_chunk_struct_size __read_mostly;
 130
 131/* cpus with the lowest and highest unit addresses */
 132static unsigned int pcpu_low_unit_cpu __read_mostly;
 133static unsigned int pcpu_high_unit_cpu __read_mostly;
 134
 135/* the address of the first chunk which starts with the kernel static area */
 136void *pcpu_base_addr __read_mostly;
 137EXPORT_SYMBOL_GPL(pcpu_base_addr);
 138
 139static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
 140const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
 141
 142/* group information, used for vm allocation */
 143static int pcpu_nr_groups __read_mostly;
 144static const unsigned long *pcpu_group_offsets __read_mostly;
 145static const size_t *pcpu_group_sizes __read_mostly;
 146
 147/*
 148 * The first chunk which always exists.  Note that unlike other
 149 * chunks, this one can be allocated and mapped in several different
 150 * ways and thus often doesn't live in the vmalloc area.
 151 */
 152static struct pcpu_chunk *pcpu_first_chunk;
 153
 154/*
 155 * Optional reserved chunk.  This chunk reserves part of the first
 156 * chunk and serves it for reserved allocations.  The reserved region
 157 * ends at offset pcpu_reserved_chunk_limit.  When the reserved
 158 * area doesn't exist, the following variables contain NULL and 0
 159 * respectively.
 160 */
 161static struct pcpu_chunk *pcpu_reserved_chunk;
 162static int pcpu_reserved_chunk_limit;
 163
 164static DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
 165static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 166
 167static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 168
 169/* chunks which need their map areas extended, protected by pcpu_lock */
 170static LIST_HEAD(pcpu_map_extend_chunks);
 171
 172/*
 173 * The number of empty populated pages, protected by pcpu_lock.  The
 174 * reserved chunk doesn't contribute to the count.
 175 */
 176static int pcpu_nr_empty_pop_pages;
 177
 178/*
 179 * Balance work is used to populate or destroy chunks asynchronously.  We
 180 * try to keep the number of populated free pages between
 181 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 182 * empty chunk.
 183 */
 184static void pcpu_balance_workfn(struct work_struct *work);
 185static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 186static bool pcpu_async_enabled __read_mostly;
 187static bool pcpu_atomic_alloc_failed;
 188
 189static void pcpu_schedule_balance_work(void)
 190{
 191	if (pcpu_async_enabled)
 192		schedule_work(&pcpu_balance_work);
 193}
 194
 195static bool pcpu_addr_in_first_chunk(void *addr)
 196{
 197	void *first_start = pcpu_first_chunk->base_addr;
 198
 199	return addr >= first_start && addr < first_start + pcpu_unit_size;
 200}
 201
 202static bool pcpu_addr_in_reserved_chunk(void *addr)
 203{
 204	void *first_start = pcpu_first_chunk->base_addr;
 205
 206	return addr >= first_start &&
 207		addr < first_start + pcpu_reserved_chunk_limit;
 208}
 209
 210static int __pcpu_size_to_slot(int size)
 211{
 212	int highbit = fls(size);	/* size is in bytes */
 213	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 214}
 215
 216static int pcpu_size_to_slot(int size)
 217{
 218	if (size == pcpu_unit_size)
 219		return pcpu_nr_slots - 1;
 220	return __pcpu_size_to_slot(size);
 221}
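/*
 * A worked example of the slot mapping above (hypothetical sizes; with
 * PCPU_SLOT_BASE_SHIFT == 5):
 *
 *	__pcpu_size_to_slot(32)   == max(fls(32)   - 5 + 2, 1) == 3
 *	__pcpu_size_to_slot(1024) == max(fls(1024) - 5 + 2, 1) == 8
 *
 * and a size equal to pcpu_unit_size always maps to the last slot, which
 * holds only completely free chunks.
 */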
 222
 223static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 224{
 225	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
 226		return 0;
 227
 228	return pcpu_size_to_slot(chunk->free_size);
 229}
 230
 231/* set the pointer to a chunk in a page struct */
 232static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 233{
 234	page->index = (unsigned long)pcpu;
 235}
 236
 237/* obtain pointer to a chunk from a page struct */
 238static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 239{
 240	return (struct pcpu_chunk *)page->index;
 241}
 242
 243static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 244{
 245	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 246}
 247
 248static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 249				     unsigned int cpu, int page_idx)
 250{
 251	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 252		(page_idx << PAGE_SHIFT);
 253}
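/*
 * For example (hypothetical values, 4K pages): page 3 of @cpu's unit in
 * @chunk lives at
 *
 *	chunk->base_addr + pcpu_unit_offsets[cpu] + (3 << PAGE_SHIFT)
 *
 * i.e. the chunk's base address plus that cpu's unit offset plus 12K.
 */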
 254
 255static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
 256					   int *rs, int *re, int end)
 257{
 258	*rs = find_next_zero_bit(chunk->populated, end, *rs);
 259	*re = find_next_bit(chunk->populated, end, *rs + 1);
 260}
 261
 262static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
 263					 int *rs, int *re, int end)
 264{
 265	*rs = find_next_bit(chunk->populated, end, *rs);
 266	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
 267}
 268
 269/*
 270 * (Un)populated page region iterators.  Iterate over (un)populated
 271 * page regions between @start and @end in @chunk.  @rs and @re should
 272 * be integer variables and will be set to start and end page index of
 273 * the current region.
 274 */
 275#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
 276	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
 277	     (rs) < (re);						    \
 278	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
 279
 280#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
 281	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
 282	     (rs) < (re);						    \
 283	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
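/*
 * Typical usage (a sketch mirroring the populate loop in pcpu_alloc()
 * later in this file): walk every unpopulated page run in
 * [page_start, page_end) and back it with pages.
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 *		ret = pcpu_populate_chunk(chunk, rs, re);
 *		if (ret)
 *			break;
 *	}
 */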
 284
 285/**
 286 * pcpu_mem_zalloc - allocate memory
 287 * @size: bytes to allocate
 288 *
 289 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 290 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 291 * memory is always zeroed.
 292 *
 293 * CONTEXT:
 294 * Does GFP_KERNEL allocation.
 295 *
 296 * RETURNS:
 297 * Pointer to the allocated area on success, NULL on failure.
 298 */
 299static void *pcpu_mem_zalloc(size_t size)
 300{
 301	if (WARN_ON_ONCE(!slab_is_available()))
 302		return NULL;
 303
 304	if (size <= PAGE_SIZE)
 305		return kzalloc(size, GFP_KERNEL);
 306	else
 307		return vzalloc(size);
 308}
 309
 310/**
 311 * pcpu_mem_free - free memory
 312 * @ptr: memory to free
 313 *
 314 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 315 */
 316static void pcpu_mem_free(void *ptr)
 317{
 318	kvfree(ptr);
 319}
 320
 321/**
 322 * pcpu_count_occupied_pages - count the number of pages an area occupies
 323 * @chunk: chunk of interest
 324 * @i: index of the area in question
 325 *
 326 * Count the number of pages the chunk's @i'th area occupies.  When the area's
 327 * start and/or end address isn't aligned to a page boundary, the straddled
 328 * page is included in the count iff the rest of the page is free.
 329 */
 330static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
 331{
 332	int off = chunk->map[i] & ~1;
 333	int end = chunk->map[i + 1] & ~1;
 334
 335	if (!PAGE_ALIGNED(off) && i > 0) {
 336		int prev = chunk->map[i - 1];
 337
 338		if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
 339			off = round_down(off, PAGE_SIZE);
 340	}
 341
 342	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
 343		int next = chunk->map[i + 1];
 344		int nend = chunk->map[i + 2] & ~1;
 345
 346		if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
 347			end = round_up(end, PAGE_SIZE);
 348	}
 349
 350	return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
 351}
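/*
 * Worked example (hypothetical offsets, 4K pages): an area spanning
 * [6K, 9K) whose previous neighbour is allocated and whose next neighbour
 * is free up to at least 12K counts as one page:
 *
 *	off stays at 6K (the rest of that page belongs to an allocated area)
 *	end rounds up from 9K to 12K (the rest of that page is free)
 *	PFN_DOWN(12K) - PFN_UP(6K) = 3 - 2 = 1
 */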
 352
 353/**
 354 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 355 * @chunk: chunk of interest
 356 * @oslot: the previous slot it was on
 357 *
 358 * This function is called after an allocation or free changed @chunk.
 359 * New slot according to the changed state is determined and @chunk is
 360 * moved to the slot.  Note that the reserved chunk is never put on
 361 * chunk slots.
 362 *
 363 * CONTEXT:
 364 * pcpu_lock.
 365 */
 366static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 367{
 368	int nslot = pcpu_chunk_slot(chunk);
 369
 370	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
 371		if (oslot < nslot)
 372			list_move(&chunk->list, &pcpu_slot[nslot]);
 373		else
 374			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
 375	}
 376}
 377
 378/**
 379 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 380 * @chunk: chunk of interest
 381 * @is_atomic: the allocation context
 382 *
 383 * Determine whether area map of @chunk needs to be extended.  If
 384 * @is_atomic, only the amount necessary for a new allocation is
 385 * considered; however, async extension is scheduled if the left amount is
 386 * low.  If !@is_atomic, it aims for more empty space.  Combined, this
 387 * ensures that the map is likely to have enough available space to
 388 * accommodate atomic allocations which can't extend maps directly.
 389 *
 390 * CONTEXT:
 391 * pcpu_lock.
 392 *
 393 * RETURNS:
 394 * New target map allocation length if extension is necessary, 0
 395 * otherwise.
 396 */
 397static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 398{
 399	int margin, new_alloc;
 400
 401	lockdep_assert_held(&pcpu_lock);
 402
 403	if (is_atomic) {
 404		margin = 3;
 405
 406		if (chunk->map_alloc <
 407		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
 408			if (list_empty(&chunk->map_extend_list)) {
 409				list_add_tail(&chunk->map_extend_list,
 410					      &pcpu_map_extend_chunks);
 411				pcpu_schedule_balance_work();
 412			}
 413		}
 414	} else {
 415		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
 416	}
 417
 418	if (chunk->map_alloc >= chunk->map_used + margin)
 419		return 0;
 420
 421	new_alloc = PCPU_DFL_MAP_ALLOC;
 422	while (new_alloc < chunk->map_used + margin)
 423		new_alloc *= 2;
 424
 425	return new_alloc;
 426}
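/*
 * For example (hypothetical values): a !is_atomic caller whose chunk has
 * map_used == 50 and map_alloc < 114 needs room for
 * 50 + PCPU_ATOMIC_MAP_MARGIN_HIGH (64) == 114 entries, so new_alloc
 * doubles 16 -> 32 -> 64 -> 128 and 128 is returned.
 */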
 427
 428/**
 429 * pcpu_extend_area_map - extend area map of a chunk
 430 * @chunk: chunk of interest
 431 * @new_alloc: new target allocation length of the area map
 432 *
 433 * Extend area map of @chunk to have @new_alloc entries.
 434 *
 435 * CONTEXT:
 436 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 437 *
 438 * RETURNS:
 439 * 0 on success, -errno on failure.
 440 */
 441static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 442{
 443	int *old = NULL, *new = NULL;
 444	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
 445	unsigned long flags;
 446
 447	lockdep_assert_held(&pcpu_alloc_mutex);
 448
 449	new = pcpu_mem_zalloc(new_size);
 450	if (!new)
 451		return -ENOMEM;
 452
 453	/* acquire pcpu_lock and switch to new area map */
 454	spin_lock_irqsave(&pcpu_lock, flags);
 455
 456	if (new_alloc <= chunk->map_alloc)
 457		goto out_unlock;
 458
 459	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 460	old = chunk->map;
 461
 462	memcpy(new, old, old_size);
 463
 464	chunk->map_alloc = new_alloc;
 465	chunk->map = new;
 466	new = NULL;
 467
 468out_unlock:
 469	spin_unlock_irqrestore(&pcpu_lock, flags);
 470
 471	/*
 472	 * pcpu_mem_free() might end up calling vfree() which uses
 473	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
 474	 */
 475	pcpu_mem_free(old);
 476	pcpu_mem_free(new);
 477
 478	return 0;
 479}
 480
 481/**
 482 * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
 483 * @chunk: chunk the candidate area belongs to
 484 * @off: the offset to the start of the candidate area
 485 * @this_size: the size of the candidate area
 486 * @size: the size of the target allocation
 487 * @align: the alignment of the target allocation
 488 * @pop_only: only allocate from already populated region
 489 *
 490 * We're trying to allocate @size bytes aligned at @align.  @chunk's area
 491 * at @off sized @this_size is a candidate.  This function determines
 492 * whether the target allocation fits in the candidate area and returns the
 493 * number of bytes to pad after @off.  If the target area doesn't fit, -1
 494 * is returned.
 495 *
 496 * If @pop_only is %true, this function only considers the already
 497 * populated part of the candidate area.
 498 */
 499static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
 500			    int size, int align, bool pop_only)
 501{
 502	int cand_off = off;
 503
 504	while (true) {
 505		int head = ALIGN(cand_off, align) - off;
 506		int page_start, page_end, rs, re;
 507
 508		if (this_size < head + size)
 509			return -1;
 510
 511		if (!pop_only)
 512			return head;
 513
 514		/*
 515		 * If the first unpopulated page is beyond the end of the
 516		 * allocation, the whole allocation is populated;
 517		 * otherwise, retry from the end of the unpopulated area.
 518		 */
 519		page_start = PFN_DOWN(head + off);
 520		page_end = PFN_UP(head + off + size);
 521
 522		rs = page_start;
 523		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
 524		if (rs >= page_end)
 525			return head;
 526		cand_off = re * PAGE_SIZE;
 527	}
 528}
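/*
 * For example (hypothetical numbers): a candidate area at @off == 36 with
 * @align == 64 needs head = ALIGN(36, 64) - 36 = 28 bytes of padding; if
 * @this_size covers 28 + @size bytes (and @pop_only is false), 28 is
 * returned.
 */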
 529
 530/**
 531 * pcpu_alloc_area - allocate area from a pcpu_chunk
 532 * @chunk: chunk of interest
 533 * @size: wanted size in bytes
 534 * @align: wanted align
 535 * @pop_only: allocate only from the populated area
 536 * @occ_pages_p: out param for the number of pages the area occupies
 537 *
 538 * Try to allocate @size bytes area aligned at @align from @chunk.
 539 * Note that this function only allocates the offset.  It doesn't
 540 * populate or map the area.
 541 *
 542 * @chunk->map must have at least two free slots.
 543 *
 544 * CONTEXT:
 545 * pcpu_lock.
 546 *
 547 * RETURNS:
 548 * Allocated offset in @chunk on success, -1 if no matching area is
 549 * found.
 550 */
 551static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
 552			   bool pop_only, int *occ_pages_p)
 553{
 554	int oslot = pcpu_chunk_slot(chunk);
 555	int max_contig = 0;
 556	int i, off;
 557	bool seen_free = false;
 558	int *p;
 559
 560	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
 561		int head, tail;
 562		int this_size;
 563
 564		off = *p;
 565		if (off & 1)
 566			continue;
 567
 568		this_size = (p[1] & ~1) - off;
 569
 570		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
 571					pop_only);
 572		if (head < 0) {
 573			if (!seen_free) {
 574				chunk->first_free = i;
 575				seen_free = true;
 576			}
 577			max_contig = max(this_size, max_contig);
 578			continue;
 579		}
 580
 581		/*
 582		 * If head is small or the previous block is free,
 583		 * merge'em.  Note that 'small' is defined as smaller
 584		 * than sizeof(int), which is very small but isn't too
 585		 * uncommon for percpu allocations.
 586		 */
 587		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
 588			*p = off += head;
 589			if (p[-1] & 1)
 590				chunk->free_size -= head;
 591			else
 592				max_contig = max(*p - p[-1], max_contig);
 593			this_size -= head;
 594			head = 0;
 595		}
 596
 597		/* if tail is small, just keep it around */
 598		tail = this_size - head - size;
 599		if (tail < sizeof(int)) {
 600			tail = 0;
 601			size = this_size - head;
 602		}
 603
 604		/* split if warranted */
 605		if (head || tail) {
 606			int nr_extra = !!head + !!tail;
 607
 608			/* insert new subblocks */
 609			memmove(p + nr_extra + 1, p + 1,
 610				sizeof(chunk->map[0]) * (chunk->map_used - i));
 611			chunk->map_used += nr_extra;
 612
 613			if (head) {
 614				if (!seen_free) {
 615					chunk->first_free = i;
 616					seen_free = true;
 617				}
 618				*++p = off += head;
 619				++i;
 620				max_contig = max(head, max_contig);
 621			}
 622			if (tail) {
 623				p[1] = off + size;
 624				max_contig = max(tail, max_contig);
 625			}
 626		}
 627
 628		if (!seen_free)
 629			chunk->first_free = i + 1;
 630
 631		/* update hint and mark allocated */
 632		if (i + 1 == chunk->map_used)
 633			chunk->contig_hint = max_contig; /* fully scanned */
 634		else
 635			chunk->contig_hint = max(chunk->contig_hint,
 636						 max_contig);
 637
 638		chunk->free_size -= size;
 639		*p |= 1;
 640
 641		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
 642		pcpu_chunk_relocate(chunk, oslot);
 643		return off;
 644	}
 645
 646	chunk->contig_hint = max_contig;	/* fully scanned */
 647	pcpu_chunk_relocate(chunk, oslot);
 648
 649	/* tell the upper layer that this chunk has no matching area */
 650	return -1;
 651}
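/*
 * A worked example of the map encoding used above (hypothetical sizes):
 * allocating 512 bytes from a fresh chunk turns
 *
 *	map[] = { 0, unit_size | 1 },           map_used = 1
 * into
 *	map[] = { 0 | 1, 512, unit_size | 1 },  map_used = 2
 *
 * i.e. [0, 512) is now in use (low bit set), [512, unit_size) is free, and
 * the final entry remains the in-use sentry terminating the map.
 */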
 652
 653/**
 654 * pcpu_free_area - free area to a pcpu_chunk
 655 * @chunk: chunk of interest
 656 * @freeme: offset of area to free
 657 * @occ_pages_p: out param for the number of pages the area occupies
 658 *
 659 * Free area starting from @freeme to @chunk.  Note that this function
 660 * only modifies the allocation map.  It doesn't depopulate or unmap
 661 * the area.
 662 *
 663 * CONTEXT:
 664 * pcpu_lock.
 665 */
 666static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
 667			   int *occ_pages_p)
 668{
 669	int oslot = pcpu_chunk_slot(chunk);
 670	int off = 0;
 671	unsigned i, j;
 672	int to_free = 0;
 673	int *p;
 674
 675	freeme |= 1;	/* we are searching for <given offset, in use> pair */
 676
 677	i = 0;
 678	j = chunk->map_used;
 679	while (i != j) {
 680		unsigned k = (i + j) / 2;
 681		off = chunk->map[k];
 682		if (off < freeme)
 683			i = k + 1;
 684		else if (off > freeme)
 685			j = k;
 686		else
 687			i = j = k;
 688	}
 689	BUG_ON(off != freeme);
 690
 691	if (i < chunk->first_free)
 692		chunk->first_free = i;
 693
 694	p = chunk->map + i;
 695	*p = off &= ~1;
 696	chunk->free_size += (p[1] & ~1) - off;
 697
 698	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
 699
 700	/* merge with next? */
 701	if (!(p[1] & 1))
 702		to_free++;
 703	/* merge with previous? */
 704	if (i > 0 && !(p[-1] & 1)) {
 705		to_free++;
 706		i--;
 707		p--;
 708	}
 709	if (to_free) {
 710		chunk->map_used -= to_free;
 711		memmove(p + 1, p + 1 + to_free,
 712			(chunk->map_used - i) * sizeof(chunk->map[0]));
 713	}
 714
 715	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
 716	pcpu_chunk_relocate(chunk, oslot);
 717}
 718
 719static struct pcpu_chunk *pcpu_alloc_chunk(void)
 720{
 721	struct pcpu_chunk *chunk;
 722
 723	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
 724	if (!chunk)
 725		return NULL;
 726
 727	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
 728						sizeof(chunk->map[0]));
 729	if (!chunk->map) {
 730		pcpu_mem_free(chunk);
 731		return NULL;
 732	}
 733
 734	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 735	chunk->map[0] = 0;
 736	chunk->map[1] = pcpu_unit_size | 1;
 737	chunk->map_used = 1;
 738
 739	INIT_LIST_HEAD(&chunk->list);
 740	INIT_LIST_HEAD(&chunk->map_extend_list);
 741	chunk->free_size = pcpu_unit_size;
 742	chunk->contig_hint = pcpu_unit_size;
 743
 744	return chunk;
 745}
 746
 747static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 748{
 749	if (!chunk)
 750		return;
 751	pcpu_mem_free(chunk->map);
 752	pcpu_mem_free(chunk);
 753}
 754
 755/**
 756 * pcpu_chunk_populated - post-population bookkeeping
 757 * @chunk: pcpu_chunk which got populated
 758 * @page_start: the start page
 759 * @page_end: the end page
 760 *
 761 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
 762 * the bookkeeping information accordingly.  Must be called after each
 763 * successful population.
 764 */
 765static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
 766				 int page_start, int page_end)
 767{
 768	int nr = page_end - page_start;
 769
 770	lockdep_assert_held(&pcpu_lock);
 771
 772	bitmap_set(chunk->populated, page_start, nr);
 773	chunk->nr_populated += nr;
 774	pcpu_nr_empty_pop_pages += nr;
 775}
 776
 777/**
 778 * pcpu_chunk_depopulated - post-depopulation bookkeeping
 779 * @chunk: pcpu_chunk which got depopulated
 780 * @page_start: the start page
 781 * @page_end: the end page
 782 *
 783 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
 784 * Update the bookkeeping information accordingly.  Must be called after
 785 * each successful depopulation.
 786 */
 787static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
 788				   int page_start, int page_end)
 789{
 790	int nr = page_end - page_start;
 791
 792	lockdep_assert_held(&pcpu_lock);
 793
 794	bitmap_clear(chunk->populated, page_start, nr);
 795	chunk->nr_populated -= nr;
 796	pcpu_nr_empty_pop_pages -= nr;
 797}
 798
 799/*
 800 * Chunk management implementation.
 801 *
 802 * To allow different implementations, chunk alloc/free and
 803 * [de]population are implemented in a separate file which is pulled
 804 * into this file and compiled together.  The following functions
 805 * should be implemented.
 806 *
 807 * pcpu_populate_chunk		- populate the specified range of a chunk
 808 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 809 * pcpu_create_chunk		- create a new chunk
 810 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 811 * pcpu_addr_to_page		- translate address to the corresponding struct page
 812 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 813 */
 814static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
 815static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
 816static struct pcpu_chunk *pcpu_create_chunk(void);
 817static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 818static struct page *pcpu_addr_to_page(void *addr);
 819static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
 820
 821#ifdef CONFIG_NEED_PER_CPU_KM
 822#include "percpu-km.c"
 823#else
 824#include "percpu-vm.c"
 825#endif
 826
 827/**
 828 * pcpu_chunk_addr_search - determine chunk containing specified address
 829 * @addr: address for which the chunk needs to be determined.
 830 *
 831 * RETURNS:
 832 * The address of the found chunk.
 833 */
 834static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 835{
 836	/* is it in the first chunk? */
 837	if (pcpu_addr_in_first_chunk(addr)) {
 838		/* is it in the reserved area? */
 839		if (pcpu_addr_in_reserved_chunk(addr))
 840			return pcpu_reserved_chunk;
 841		return pcpu_first_chunk;
 842	}
 843
 844	/*
 845	 * The address is relative to unit0 which might be unused and
 846	 * thus unmapped.  Offset the address to the unit space of the
 847	 * current processor before looking it up in the vmalloc
 848	 * space.  Note that any possible cpu id can be used here, so
 849	 * there's no need to worry about preemption or cpu hotplug.
 850	 */
 851	addr += pcpu_unit_offsets[raw_smp_processor_id()];
 852	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
 853}
 854
 855/**
 856 * pcpu_alloc - the percpu allocator
 857 * @size: size of area to allocate in bytes
 858 * @align: alignment of area (max PAGE_SIZE)
 859 * @reserved: allocate from the reserved chunk if available
 860 * @gfp: allocation flags
 861 *
 862 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
 863 * contain %GFP_KERNEL, the allocation is atomic.
 864 *
 865 * RETURNS:
 866 * Percpu pointer to the allocated area on success, NULL on failure.
 867 */
 868static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
 869				 gfp_t gfp)
 870{
 871	static int warn_limit = 10;
 872	struct pcpu_chunk *chunk;
 873	const char *err;
 874	bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
 875	int occ_pages = 0;
 876	int slot, off, new_alloc, cpu, ret;
 877	unsigned long flags;
 878	void __percpu *ptr;
 879
 880	/*
 881	 * We want the lowest bit of offset available for in-use/free
 882	 * indicator, so force >= 16bit alignment and make size even.
 883	 */
 884	if (unlikely(align < 2))
 885		align = 2;
 886
 887	size = ALIGN(size, 2);
 888
 889	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
 890		     !is_power_of_2(align))) {
 891		WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
 892		     size, align);
 893		return NULL;
 894	}
 895
 896	if (!is_atomic)
 897		mutex_lock(&pcpu_alloc_mutex);
 898
 899	spin_lock_irqsave(&pcpu_lock, flags);
 900
 901	/* serve reserved allocations from the reserved chunk if available */
 902	if (reserved && pcpu_reserved_chunk) {
 903		chunk = pcpu_reserved_chunk;
 904
 905		if (size > chunk->contig_hint) {
 906			err = "alloc from reserved chunk failed";
 907			goto fail_unlock;
 908		}
 909
 910		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
 911			spin_unlock_irqrestore(&pcpu_lock, flags);
 912			if (is_atomic ||
 913			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
 914				err = "failed to extend area map of reserved chunk";
 915				goto fail;
 916			}
 917			spin_lock_irqsave(&pcpu_lock, flags);
 918		}
 919
 920		off = pcpu_alloc_area(chunk, size, align, is_atomic,
 921				      &occ_pages);
 922		if (off >= 0)
 923			goto area_found;
 924
 925		err = "alloc from reserved chunk failed";
 926		goto fail_unlock;
 927	}
 928
 929restart:
 930	/* search through normal chunks */
 931	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
 932		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
 933			if (size > chunk->contig_hint)
 934				continue;
 935
 936			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
 937			if (new_alloc) {
 938				if (is_atomic)
 939					continue;
 940				spin_unlock_irqrestore(&pcpu_lock, flags);
 941				if (pcpu_extend_area_map(chunk,
 942							 new_alloc) < 0) {
 943					err = "failed to extend area map";
 944					goto fail;
 945				}
 946				spin_lock_irqsave(&pcpu_lock, flags);
 947				/*
 948				 * pcpu_lock has been dropped, need to
 949				 * restart cpu_slot list walking.
 950				 */
 951				goto restart;
 952			}
 953
 954			off = pcpu_alloc_area(chunk, size, align, is_atomic,
 955					      &occ_pages);
 956			if (off >= 0)
 957				goto area_found;
 958		}
 959	}
 960
 961	spin_unlock_irqrestore(&pcpu_lock, flags);
 962
 963	/*
 964	 * No space left.  Create a new chunk.  We don't want multiple
 965	 * tasks to create chunks simultaneously.  Serialize and create iff
 966	 * there's still no empty chunk after grabbing the mutex.
 967	 */
 968	if (is_atomic)
 969		goto fail;
 970
 971	if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
 972		chunk = pcpu_create_chunk();
 973		if (!chunk) {
 974			err = "failed to allocate new chunk";
 975			goto fail;
 976		}
 977
 978		spin_lock_irqsave(&pcpu_lock, flags);
 979		pcpu_chunk_relocate(chunk, -1);
 980	} else {
 981		spin_lock_irqsave(&pcpu_lock, flags);
 982	}
 983
 984	goto restart;
 985
 986area_found:
 987	spin_unlock_irqrestore(&pcpu_lock, flags);
 988
 989	/* populate if not all pages are already there */
 990	if (!is_atomic) {
 991		int page_start, page_end, rs, re;
 992
 993		page_start = PFN_DOWN(off);
 994		page_end = PFN_UP(off + size);
 995
 996		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
 997			WARN_ON(chunk->immutable);
 998
 999			ret = pcpu_populate_chunk(chunk, rs, re);
1000
1001			spin_lock_irqsave(&pcpu_lock, flags);
1002			if (ret) {
1003				pcpu_free_area(chunk, off, &occ_pages);
1004				err = "failed to populate";
1005				goto fail_unlock;
1006			}
1007			pcpu_chunk_populated(chunk, rs, re);
1008			spin_unlock_irqrestore(&pcpu_lock, flags);
1009		}
1010
1011		mutex_unlock(&pcpu_alloc_mutex);
1012	}
1013
1014	if (chunk != pcpu_reserved_chunk) {
1015		spin_lock_irqsave(&pcpu_lock, flags);
1016		pcpu_nr_empty_pop_pages -= occ_pages;
1017		spin_unlock_irqrestore(&pcpu_lock, flags);
1018	}
1019
1020	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1021		pcpu_schedule_balance_work();
1022
1023	/* clear the areas and return address relative to base address */
1024	for_each_possible_cpu(cpu)
1025		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1026
1027	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1028	kmemleak_alloc_percpu(ptr, size, gfp);
1029	return ptr;
1030
1031fail_unlock:
1032	spin_unlock_irqrestore(&pcpu_lock, flags);
1033fail:
1034	if (!is_atomic && warn_limit) {
1035		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1036			size, align, is_atomic, err);
1037		dump_stack();
1038		if (!--warn_limit)
1039			pr_info("limit reached, disable warning\n");
1040	}
1041	if (is_atomic) {
1042		/* see the flag handling in pcpu_balance_workfn() */
1043		pcpu_atomic_alloc_failed = true;
1044		pcpu_schedule_balance_work();
1045	} else {
1046		mutex_unlock(&pcpu_alloc_mutex);
1047	}
1048	return NULL;
1049}
1050
1051/**
1052 * __alloc_percpu_gfp - allocate dynamic percpu area
1053 * @size: size of area to allocate in bytes
1054 * @align: alignment of area (max PAGE_SIZE)
1055 * @gfp: allocation flags
1056 *
1057 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
1058 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1059 * be called from any context but is a lot more likely to fail.
1060 *
1061 * RETURNS:
1062 * Percpu pointer to the allocated area on success, NULL on failure.
1063 */
1064void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1065{
1066	return pcpu_alloc(size, align, false, gfp);
1067}
1068EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
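/*
 * Typical usage elsewhere in the kernel (a sketch, with a hypothetical
 * counter variable): allocate a percpu int, bump this cpu's copy, free it.
 *
 *	int __percpu *cnt;
 *
 *	cnt = __alloc_percpu_gfp(sizeof(*cnt), __alignof__(*cnt), GFP_KERNEL);
 *	if (cnt) {
 *		this_cpu_inc(*cnt);
 *		free_percpu(cnt);
 *	}
 */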
1069
1070/**
1071 * __alloc_percpu - allocate dynamic percpu area
1072 * @size: size of area to allocate in bytes
1073 * @align: alignment of area (max PAGE_SIZE)
1074 *
1075 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1076 */
1077void __percpu *__alloc_percpu(size_t size, size_t align)
1078{
1079	return pcpu_alloc(size, align, false, GFP_KERNEL);
1080}
1081EXPORT_SYMBOL_GPL(__alloc_percpu);
1082
1083/**
1084 * __alloc_reserved_percpu - allocate reserved percpu area
1085 * @size: size of area to allocate in bytes
1086 * @align: alignment of area (max PAGE_SIZE)
1087 *
1088 * Allocate zero-filled percpu area of @size bytes aligned at @align
1089 * from reserved percpu area if arch has set it up; otherwise,
1090 * allocation is served from the same dynamic area.  Might sleep.
1091 * Might trigger writeouts.
1092 *
1093 * CONTEXT:
1094 * Does GFP_KERNEL allocation.
1095 *
1096 * RETURNS:
1097 * Percpu pointer to the allocated area on success, NULL on failure.
1098 */
1099void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1100{
1101	return pcpu_alloc(size, align, true, GFP_KERNEL);
1102}
1103
1104/**
1105 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
1106 * @work: unused
1107 *
1108 * Reclaim all fully free chunks except for the first one.
1109 */
1110static void pcpu_balance_workfn(struct work_struct *work)
1111{
1112	LIST_HEAD(to_free);
1113	struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
1114	struct pcpu_chunk *chunk, *next;
1115	int slot, nr_to_pop, ret;
1116
1117	/*
1118	 * There's no reason to keep around multiple unused chunks and VM
1119	 * areas can be scarce.  Destroy all free chunks except for one.
1120	 */
1121	mutex_lock(&pcpu_alloc_mutex);
1122	spin_lock_irq(&pcpu_lock);
1123
1124	list_for_each_entry_safe(chunk, next, free_head, list) {
1125		WARN_ON(chunk->immutable);
1126
1127		/* spare the first one */
1128		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1129			continue;
1130
1131		list_del_init(&chunk->map_extend_list);
1132		list_move(&chunk->list, &to_free);
1133	}
1134
1135	spin_unlock_irq(&pcpu_lock);
1136
1137	list_for_each_entry_safe(chunk, next, &to_free, list) {
1138		int rs, re;
1139
1140		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1141			pcpu_depopulate_chunk(chunk, rs, re);
1142			spin_lock_irq(&pcpu_lock);
1143			pcpu_chunk_depopulated(chunk, rs, re);
1144			spin_unlock_irq(&pcpu_lock);
1145		}
1146		pcpu_destroy_chunk(chunk);
1147	}
1148
1149	/* service chunks which requested async area map extension */
1150	do {
1151		int new_alloc = 0;
1152
1153		spin_lock_irq(&pcpu_lock);
1154
1155		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
1156					struct pcpu_chunk, map_extend_list);
1157		if (chunk) {
1158			list_del_init(&chunk->map_extend_list);
1159			new_alloc = pcpu_need_to_extend(chunk, false);
1160		}
1161
1162		spin_unlock_irq(&pcpu_lock);
1163
1164		if (new_alloc)
1165			pcpu_extend_area_map(chunk, new_alloc);
1166	} while (chunk);
1167
1168	/*
1169	 * Ensure there are certain number of free populated pages for
1170	 * atomic allocs.  Fill up from the most packed so that atomic
1171	 * allocs don't increase fragmentation.  If atomic allocation
1172	 * failed previously, always populate the maximum amount.  This
1173	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
1174	 * failing indefinitely; however, large atomic allocs are not
1175	 * something we support properly and can be highly unreliable and
1176	 * inefficient.
1177	 */
1178retry_pop:
1179	if (pcpu_atomic_alloc_failed) {
1180		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
1181		/* best effort anyway, don't worry about synchronization */
1182		pcpu_atomic_alloc_failed = false;
1183	} else {
1184		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
1185				  pcpu_nr_empty_pop_pages,
1186				  0, PCPU_EMPTY_POP_PAGES_HIGH);
1187	}
1188
1189	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
1190		int nr_unpop = 0, rs, re;
1191
1192		if (!nr_to_pop)
1193			break;
1194
1195		spin_lock_irq(&pcpu_lock);
1196		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1197			nr_unpop = pcpu_unit_pages - chunk->nr_populated;
1198			if (nr_unpop)
1199				break;
1200		}
1201		spin_unlock_irq(&pcpu_lock);
1202
1203		if (!nr_unpop)
1204			continue;
1205
1206		/* @chunk can't go away while pcpu_alloc_mutex is held */
1207		pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1208			int nr = min(re - rs, nr_to_pop);
1209
1210			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
1211			if (!ret) {
1212				nr_to_pop -= nr;
1213				spin_lock_irq(&pcpu_lock);
1214				pcpu_chunk_populated(chunk, rs, rs + nr);
1215				spin_unlock_irq(&pcpu_lock);
1216			} else {
1217				nr_to_pop = 0;
1218			}
1219
1220			if (!nr_to_pop)
1221				break;
1222		}
1223	}
1224
1225	if (nr_to_pop) {
1226		/* ran out of chunks to populate, create a new one and retry */
1227		chunk = pcpu_create_chunk();
1228		if (chunk) {
1229			spin_lock_irq(&pcpu_lock);
1230			pcpu_chunk_relocate(chunk, -1);
1231			spin_unlock_irq(&pcpu_lock);
1232			goto retry_pop;
1233		}
1234	}
1235
1236	mutex_unlock(&pcpu_alloc_mutex);
1237}
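/*
 * For example (hypothetical state): with one empty populated page left and
 * no recent atomic allocation failure, the population pass above aims for
 * clamp(PCPU_EMPTY_POP_PAGES_HIGH - 1, 0, PCPU_EMPTY_POP_PAGES_HIGH) == 3
 * more pages, filling the most packed chunks first.
 */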
1238
1239/**
1240 * free_percpu - free percpu area
1241 * @ptr: pointer to area to free
1242 *
1243 * Free percpu area @ptr.
1244 *
1245 * CONTEXT:
1246 * Can be called from atomic context.
1247 */
1248void free_percpu(void __percpu *ptr)
1249{
1250	void *addr;
1251	struct pcpu_chunk *chunk;
1252	unsigned long flags;
1253	int off, occ_pages;
1254
1255	if (!ptr)
1256		return;
1257
1258	kmemleak_free_percpu(ptr);
1259
1260	addr = __pcpu_ptr_to_addr(ptr);
1261
1262	spin_lock_irqsave(&pcpu_lock, flags);
1263
1264	chunk = pcpu_chunk_addr_search(addr);
1265	off = addr - chunk->base_addr;
1266
1267	pcpu_free_area(chunk, off, &occ_pages);
1268
1269	if (chunk != pcpu_reserved_chunk)
1270		pcpu_nr_empty_pop_pages += occ_pages;
1271
1272	/* if there is more than one fully free chunk, wake up the grim reaper */
1273	if (chunk->free_size == pcpu_unit_size) {
1274		struct pcpu_chunk *pos;
1275
1276		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
1277			if (pos != chunk) {
1278				pcpu_schedule_balance_work();
1279				break;
1280			}
1281	}
1282
1283	spin_unlock_irqrestore(&pcpu_lock, flags);
1284}
1285EXPORT_SYMBOL_GPL(free_percpu);
1286
1287/**
1288 * is_kernel_percpu_address - test whether address is from static percpu area
1289 * @addr: address to test
1290 *
1291 * Test whether @addr belongs to in-kernel static percpu area.  Module
1292 * static percpu areas are not considered.  For those, use
1293 * is_module_percpu_address().
1294 *
1295 * RETURNS:
1296 * %true if @addr is from in-kernel static percpu area, %false otherwise.
1297 */
1298bool is_kernel_percpu_address(unsigned long addr)
1299{
1300#ifdef CONFIG_SMP
1301	const size_t static_size = __per_cpu_end - __per_cpu_start;
1302	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1303	unsigned int cpu;
1304
1305	for_each_possible_cpu(cpu) {
1306		void *start = per_cpu_ptr(base, cpu);
1307
1308		if ((void *)addr >= start && (void *)addr < start + static_size)
1309			return true;
1310        }
1311#endif
1312	/* on UP, can't distinguish from other static vars, always false */
1313	return false;
1314}
1315
1316/**
1317 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
1318 * @addr: the address to be converted to physical address
1319 *
1320 * Given @addr, which is a dereferenceable address obtained via one of
1321 * percpu access macros, this function translates it into its physical
1322 * address.  The caller is responsible for ensuring @addr stays valid
1323 * until this function finishes.
1324 *
1325 * The percpu allocator has a special setup for the first chunk, which
1326 * currently supports either embedding in the linear address space or a
1327 * vmalloc mapping, and, from the second chunk on, the backing allocator
1328 * (currently either vm or km) provides translation.
1329 *
1330 * The addr could be translated simply without checking if it falls into the
1331 * first chunk, but the current code better reflects how the percpu allocator
1332 * actually works, and the verification can discover bugs both in the percpu
1333 * allocator itself and in per_cpu_ptr_to_phys() callers.  So we keep the
1334 * current code.
1335 *
1336 * RETURNS:
1337 * The physical address for @addr.
1338 */
1339phys_addr_t per_cpu_ptr_to_phys(void *addr)
1340{
1341	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1342	bool in_first_chunk = false;
1343	unsigned long first_low, first_high;
1344	unsigned int cpu;
1345
1346	/*
1347	 * The following test on unit_low/high isn't strictly
1348	 * necessary but will speed up lookups of addresses which
1349	 * aren't in the first chunk.
1350	 */
1351	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1352	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1353				     pcpu_unit_pages);
1354	if ((unsigned long)addr >= first_low &&
1355	    (unsigned long)addr < first_high) {
1356		for_each_possible_cpu(cpu) {
1357			void *start = per_cpu_ptr(base, cpu);
1358
1359			if (addr >= start && addr < start + pcpu_unit_size) {
1360				in_first_chunk = true;
1361				break;
1362			}
1363		}
1364	}
1365
1366	if (in_first_chunk) {
1367		if (!is_vmalloc_addr(addr))
1368			return __pa(addr);
1369		else
1370			return page_to_phys(vmalloc_to_page(addr)) +
1371			       offset_in_page(addr);
1372	} else
1373		return page_to_phys(pcpu_addr_to_page(addr)) +
1374		       offset_in_page(addr);
1375}
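/*
 * For example (a sketch): the physical address of @cpu's copy behind a
 * dynamically allocated percpu pointer @p is
 *
 *	phys_addr_t phys = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 */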
1376
1377/**
1378 * pcpu_alloc_alloc_info - allocate percpu allocation info
1379 * @nr_groups: the number of groups
1380 * @nr_units: the number of units
1381 *
1382 * Allocate ai which is large enough for @nr_groups groups containing
1383 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1384 * cpu_map array which is long enough for @nr_units and filled with
1385 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1386 * pointer of other groups.
1387 *
1388 * RETURNS:
1389 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1390 * failure.
1391 */
1392struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1393						      int nr_units)
1394{
1395	struct pcpu_alloc_info *ai;
1396	size_t base_size, ai_size;
1397	void *ptr;
1398	int unit;
1399
1400	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1401			  __alignof__(ai->groups[0].cpu_map[0]));
1402	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1403
1404	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1405	if (!ptr)
1406		return NULL;
1407	ai = ptr;
1408	ptr += base_size;
1409
1410	ai->groups[0].cpu_map = ptr;
1411
1412	for (unit = 0; unit < nr_units; unit++)
1413		ai->groups[0].cpu_map[unit] = NR_CPUS;
1414
1415	ai->nr_groups = nr_groups;
1416	ai->__ai_size = PFN_ALIGN(ai_size);
1417
1418	return ai;
1419}
1420
1421/**
1422 * pcpu_free_alloc_info - free percpu allocation info
1423 * @ai: pcpu_alloc_info to free
1424 *
1425 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1426 */
1427void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1428{
1429	memblock_free_early(__pa(ai), ai->__ai_size);
1430}
1431
1432/**
1433 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1434 * @lvl: loglevel
1435 * @ai: allocation info to dump
1436 *
1437 * Print out information about @ai using loglevel @lvl.
1438 */
1439static void pcpu_dump_alloc_info(const char *lvl,
1440				 const struct pcpu_alloc_info *ai)
1441{
1442	int group_width = 1, cpu_width = 1, width;
1443	char empty_str[] = "--------";
1444	int alloc = 0, alloc_end = 0;
1445	int group, v;
1446	int upa, apl;	/* units per alloc, allocs per line */
1447
1448	v = ai->nr_groups;
1449	while (v /= 10)
1450		group_width++;
1451
1452	v = num_possible_cpus();
1453	while (v /= 10)
1454		cpu_width++;
1455	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1456
1457	upa = ai->alloc_size / ai->unit_size;
1458	width = upa * (cpu_width + 1) + group_width + 3;
1459	apl = rounddown_pow_of_two(max(60 / width, 1));
1460
1461	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1462	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1463	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1464
1465	for (group = 0; group < ai->nr_groups; group++) {
1466		const struct pcpu_group_info *gi = &ai->groups[group];
1467		int unit = 0, unit_end = 0;
1468
1469		BUG_ON(gi->nr_units % upa);
1470		for (alloc_end += gi->nr_units / upa;
1471		     alloc < alloc_end; alloc++) {
1472			if (!(alloc % apl)) {
1473				pr_cont("\n");
1474				printk("%spcpu-alloc: ", lvl);
1475			}
1476			pr_cont("[%0*d] ", group_width, group);
1477
1478			for (unit_end += upa; unit < unit_end; unit++)
1479				if (gi->cpu_map[unit] != NR_CPUS)
1480					pr_cont("%0*d ",
1481						cpu_width, gi->cpu_map[unit]);
1482				else
1483					pr_cont("%s ", empty_str);
1484		}
1485	}
1486	pr_cont("\n");
1487}
1488
1489/**
1490 * pcpu_setup_first_chunk - initialize the first percpu chunk
1491 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1492 * @base_addr: mapped address
1493 *
1494 * Initialize the first percpu chunk which contains the kernel static
1495 * percpu area.  This function is to be called from arch percpu area
1496 * setup path.
1497 *
1498 * @ai contains all information necessary to initialize the first
1499 * chunk and prime the dynamic percpu allocator.
1500 *
1501 * @ai->static_size is the size of static percpu area.
1502 *
1503 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1504 * reserve after the static area in the first chunk.  This reserves
1505 * the first chunk such that it's available only through reserved
1506 * percpu allocation.  This is primarily used to serve module percpu
1507 * static areas on architectures where the addressing model has
1508 * limited offset range for symbol relocations to guarantee module
1509 * percpu symbols fall inside the relocatable range.
1510 *
1511 * @ai->dyn_size determines the number of bytes available for dynamic
1512 * allocation in the first chunk.  The area between @ai->static_size +
1513 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1514 *
1515 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1516 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1517 * @ai->dyn_size.
1518 *
1519 * @ai->atom_size is the allocation atom size and used as alignment
1520 * for vm areas.
1521 *
1522 * @ai->alloc_size is the allocation size and always multiple of
1523 * @ai->atom_size.  This is larger than @ai->atom_size if
1524 * @ai->unit_size is larger than @ai->atom_size.
1525 *
1526 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1527 * percpu areas.  Units which should be colocated are put into the
1528 * same group.  Dynamic VM areas will be allocated according to these
1529 * groupings.  If @ai->nr_groups is zero, a single group containing
1530 * all units is assumed.
1531 *
1532 * The caller should have mapped the first chunk at @base_addr and
1533 * copied static data to each unit.
1534 *
1535 * If the first chunk ends up with both reserved and dynamic areas, it
1536 * is served by two chunks - one to serve the core static and reserved
1537 * areas and the other for the dynamic area.  They share the same vm
1538 * and page map but use different area allocation maps to stay away
1539 * from each other.  The latter chunk is circulated in the chunk slots
1540 * and is available for dynamic allocation like any other chunk.
1541 *
1542 * RETURNS:
1543 * 0 on success, -errno on failure.
1544 */
1545int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1546				  void *base_addr)
1547{
1548	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1549	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1550	size_t dyn_size = ai->dyn_size;
1551	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1552	struct pcpu_chunk *schunk, *dchunk = NULL;
1553	unsigned long *group_offsets;
1554	size_t *group_sizes;
1555	unsigned long *unit_off;
1556	unsigned int cpu;
1557	int *unit_map;
1558	int group, unit, i;
1559
1560#define PCPU_SETUP_BUG_ON(cond)	do {					\
1561	if (unlikely(cond)) {						\
1562		pr_emerg("failed to initialize, %s\n", #cond);		\
1563		pr_emerg("cpu_possible_mask=%*pb\n",			\
1564			 cpumask_pr_args(cpu_possible_mask));		\
1565		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1566		BUG();							\
1567	}								\
1568} while (0)
1569
1570	/* sanity checks */
1571	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1572#ifdef CONFIG_SMP
1573	PCPU_SETUP_BUG_ON(!ai->static_size);
1574	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
1575#endif
1576	PCPU_SETUP_BUG_ON(!base_addr);
1577	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
1578	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1579	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
1580	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1581	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1582	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1583
1584	/* process group information and build config tables accordingly */
1585	group_offsets = memblock_virt_alloc(ai->nr_groups *
1586					     sizeof(group_offsets[0]), 0);
1587	group_sizes = memblock_virt_alloc(ai->nr_groups *
1588					   sizeof(group_sizes[0]), 0);
1589	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1590	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
1591
1592	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1593		unit_map[cpu] = UINT_MAX;
1594
1595	pcpu_low_unit_cpu = NR_CPUS;
1596	pcpu_high_unit_cpu = NR_CPUS;
1597
1598	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1599		const struct pcpu_group_info *gi = &ai->groups[group];
1600
1601		group_offsets[group] = gi->base_offset;
1602		group_sizes[group] = gi->nr_units * ai->unit_size;
1603
1604		for (i = 0; i < gi->nr_units; i++) {
1605			cpu = gi->cpu_map[i];
1606			if (cpu == NR_CPUS)
1607				continue;
1608
1609			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1610			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1611			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1612
1613			unit_map[cpu] = unit + i;
1614			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1615
1616			/* determine low/high unit_cpu */
1617			if (pcpu_low_unit_cpu == NR_CPUS ||
1618			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1619				pcpu_low_unit_cpu = cpu;
1620			if (pcpu_high_unit_cpu == NR_CPUS ||
1621			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1622				pcpu_high_unit_cpu = cpu;
1623		}
1624	}
1625	pcpu_nr_units = unit;
1626
1627	for_each_possible_cpu(cpu)
1628		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1629
1630	/* we're done parsing the input, undefine BUG macro and dump config */
1631#undef PCPU_SETUP_BUG_ON
1632	pcpu_dump_alloc_info(KERN_DEBUG, ai);
1633
1634	pcpu_nr_groups = ai->nr_groups;
1635	pcpu_group_offsets = group_offsets;
1636	pcpu_group_sizes = group_sizes;
1637	pcpu_unit_map = unit_map;
1638	pcpu_unit_offsets = unit_off;
1639
1640	/* determine basic parameters */
1641	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1642	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1643	pcpu_atom_size = ai->atom_size;
1644	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1645		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1646
1647	/*
1648	 * Allocate chunk slots.  The additional last slot is for
1649	 * empty chunks.
1650	 */
1651	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1652	pcpu_slot = memblock_virt_alloc(
1653			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1654	for (i = 0; i < pcpu_nr_slots; i++)
1655		INIT_LIST_HEAD(&pcpu_slot[i]);
1656
1657	/*
1658	 * Initialize static chunk.  If reserved_size is zero, the
1659	 * static chunk covers static area + dynamic allocation area
1660	 * in the first chunk.  If reserved_size is not zero, it
1661	 * covers static area + reserved area (mostly used for module
1662	 * static percpu allocation).
1663	 */
1664	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1665	INIT_LIST_HEAD(&schunk->list);
1666	INIT_LIST_HEAD(&schunk->map_extend_list);
1667	schunk->base_addr = base_addr;
1668	schunk->map = smap;
1669	schunk->map_alloc = ARRAY_SIZE(smap);
1670	schunk->immutable = true;
1671	bitmap_fill(schunk->populated, pcpu_unit_pages);
1672	schunk->nr_populated = pcpu_unit_pages;
1673
1674	if (ai->reserved_size) {
1675		schunk->free_size = ai->reserved_size;
1676		pcpu_reserved_chunk = schunk;
1677		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1678	} else {
1679		schunk->free_size = dyn_size;
1680		dyn_size = 0;			/* dynamic area covered */
1681	}
1682	schunk->contig_hint = schunk->free_size;
1683
1684	schunk->map[0] = 1;
1685	schunk->map[1] = ai->static_size;
1686	schunk->map_used = 1;
1687	if (schunk->free_size)
1688		schunk->map[++schunk->map_used] = ai->static_size + schunk->free_size;
1689	schunk->map[schunk->map_used] |= 1;
1690
1691	/* init dynamic chunk if necessary */
1692	if (dyn_size) {
1693		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1694		INIT_LIST_HEAD(&dchunk->list);
1695		INIT_LIST_HEAD(&dchunk->map_extend_list);
1696		dchunk->base_addr = base_addr;
1697		dchunk->map = dmap;
1698		dchunk->map_alloc = ARRAY_SIZE(dmap);
1699		dchunk->immutable = true;
1700		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1701		dchunk->nr_populated = pcpu_unit_pages;
1702
1703		dchunk->contig_hint = dchunk->free_size = dyn_size;
1704		dchunk->map[0] = 1;
1705		dchunk->map[1] = pcpu_reserved_chunk_limit;
1706		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
1707		dchunk->map_used = 2;
1708	}
1709
1710	/* link the first chunk in */
1711	pcpu_first_chunk = dchunk ?: schunk;
1712	pcpu_nr_empty_pop_pages +=
1713		pcpu_count_occupied_pages(pcpu_first_chunk, 1);
1714	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1715
1716	/* we're done */
1717	pcpu_base_addr = base_addr;
1718	return 0;
1719}
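/*
 * A worked example (hypothetical sizes) of the first chunk layout set up
 * above: with static_size = 64K, reserved_size = 8K and dyn_size = 20K,
 * schunk covers [0, 72K) and serves only reserved allocations from
 * [64K, 72K), dchunk serves dynamic allocations from [72K, 92K), and
 * [92K, unit_size) is left unused.
 */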
1720
1721#ifdef CONFIG_SMP
1722
1723const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1724	[PCPU_FC_AUTO]	= "auto",
1725	[PCPU_FC_EMBED]	= "embed",
1726	[PCPU_FC_PAGE]	= "page",
1727};
1728
1729enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1730
1731static int __init percpu_alloc_setup(char *str)
1732{
1733	if (!str)
1734		return -EINVAL;
1735
1736	if (0)
1737		/* nada */;
1738#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1739	else if (!strcmp(str, "embed"))
1740		pcpu_chosen_fc = PCPU_FC_EMBED;
1741#endif
1742#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1743	else if (!strcmp(str, "page"))
1744		pcpu_chosen_fc = PCPU_FC_PAGE;
1745#endif
1746	else
1747		pr_warn("unknown allocator %s specified\n", str);
1748
1749	return 0;
1750}
1751early_param("percpu_alloc", percpu_alloc_setup);
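/*
 * Usage note (example command line only): the first chunk allocator can
 * be overridden at boot, e.g.
 *
 *	percpu_alloc=page
 *
 * requests pcpu_page_first_chunk() on configs that support it, while
 * "percpu_alloc=embed" requests the embedding allocator.  An unrecognized
 * value only triggers the warning above and leaves the auto selection.
 */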
1752
1753/*
1754 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1755 * Build it if the arch config needs it or if the generic setup is
1756 * going to be used.
1757 */
1758#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1759	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1760#define BUILD_EMBED_FIRST_CHUNK
1761#endif
1762
1763/* build pcpu_page_first_chunk() iff needed by the arch config */
1764#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1765#define BUILD_PAGE_FIRST_CHUNK
1766#endif
1767
1768/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1769#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1770/**
1771 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1772 * @reserved_size: the size of reserved percpu area in bytes
1773 * @dyn_size: minimum free size for dynamic allocation in bytes
1774 * @atom_size: allocation atom size
1775 * @cpu_distance_fn: callback to determine distance between cpus, optional
1776 *
1777 * This function determines grouping of units, their mappings to cpus
1778 * and other parameters considering needed percpu size, allocation
1779 * atom size and distances between CPUs.
1780 *
1781 * Groups are always multiples of atom size, and CPUs which are within
1782 * LOCAL_DISTANCE of each other in both directions are grouped together
1783 * and share space for units in the same group.  The returned
1784 * configuration is guaranteed to put CPUs on different nodes in
1785 * different groups and to use >=75% of the allocated virtual address space.
1786 *
1787 * RETURNS:
1788 * On success, pointer to the new allocation_info is returned.  On
1789 * failure, ERR_PTR value is returned.
1790 */
1791static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1792				size_t reserved_size, size_t dyn_size,
1793				size_t atom_size,
1794				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1795{
1796	static int group_map[NR_CPUS] __initdata;
1797	static int group_cnt[NR_CPUS] __initdata;
1798	const size_t static_size = __per_cpu_end - __per_cpu_start;
1799	int nr_groups = 1, nr_units = 0;
1800	size_t size_sum, min_unit_size, alloc_size;
1801	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1802	int last_allocs, group, unit;
1803	unsigned int cpu, tcpu;
1804	struct pcpu_alloc_info *ai;
1805	unsigned int *cpu_map;
1806
1807	/* this function may be called multiple times */
1808	memset(group_map, 0, sizeof(group_map));
1809	memset(group_cnt, 0, sizeof(group_cnt));
1810
1811	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1812	size_sum = PFN_ALIGN(static_size + reserved_size +
1813			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1814	dyn_size = size_sum - static_size - reserved_size;
1815
1816	/*
1817	 * Determine min_unit_size, alloc_size and max_upa such that
1818	 * alloc_size is multiple of atom_size and is the smallest
1819	 * which can accommodate 4k aligned segments which are equal to
1820	 * or larger than min_unit_size.
1821	 */
1822	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1823
1824	alloc_size = roundup(min_unit_size, atom_size);
1825	upa = alloc_size / min_unit_size;
1826	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1827		upa--;
1828	max_upa = upa;
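	/*
	 * Worked example with made-up numbers: for size_sum == 640 KiB and a
	 * 2 MiB atom (e.g. a PMD sized huge page),
	 *
	 *	min_unit_size == 640 KiB
	 *	alloc_size    == roundup(640 KiB, 2 MiB) == 2 MiB
	 *	upa           == 2 MiB / 640 KiB         == 3
	 *
	 * 2 MiB is not divisible by 3, so upa is walked down to 2: max_upa
	 * ends up 2 and at most two 1 MiB, page-aligned units share each
	 * 2 MiB allocation.
	 */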
1829
1830	/* group cpus according to their proximity */
1831	for_each_possible_cpu(cpu) {
1832		group = 0;
1833	next_group:
1834		for_each_possible_cpu(tcpu) {
1835			if (cpu == tcpu)
1836				break;
1837			if (group_map[tcpu] == group && cpu_distance_fn &&
1838			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1839			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1840				group++;
1841				nr_groups = max(nr_groups, group + 1);
1842				goto next_group;
1843			}
1844		}
1845		group_map[cpu] = group;
1846		group_cnt[group]++;
1847	}
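	/*
	 * Hypothetical example: on a two-node machine with cpus 0-1 on node 0,
	 * cpus 2-3 on node 1 and a cpu_distance_fn reporting more than
	 * LOCAL_DISTANCE between the nodes, the loop above ends with
	 *
	 *	group_map[] == { 0, 0, 1, 1 }
	 *	group_cnt[] == { 2, 2 }
	 *	nr_groups   == 2
	 *
	 * so each node's cpus later get their own group of units.
	 */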
1848
1849	/*
1850	 * Expand unit size until address space usage goes over 75%
1851	 * and then as much as possible without using more address
1852	 * space.
1853	 */
1854	last_allocs = INT_MAX;
1855	for (upa = max_upa; upa; upa--) {
1856		int allocs = 0, wasted = 0;
1857
1858		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
1859			continue;
1860
1861		for (group = 0; group < nr_groups; group++) {
1862			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1863			allocs += this_allocs;
1864			wasted += this_allocs * upa - group_cnt[group];
1865		}
1866
1867		/*
1868		 * Don't accept if wastage is over 1/3.  The
1869		 * greater-than comparison ensures upa==1 always
1870		 * passes the following check.
1871		 */
1872		if (wasted > num_possible_cpus() / 3)
1873			continue;
1874
1875		/* and then don't consume more memory */
1876		if (allocs > last_allocs)
1877			break;
1878		last_allocs = allocs;
1879		best_upa = upa;
1880	}
1881	upa = best_upa;
1882
1883	/* allocate and fill alloc_info */
1884	for (group = 0; group < nr_groups; group++)
1885		nr_units += roundup(group_cnt[group], upa);
1886
1887	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1888	if (!ai)
1889		return ERR_PTR(-ENOMEM);
1890	cpu_map = ai->groups[0].cpu_map;
1891
1892	for (group = 0; group < nr_groups; group++) {
1893		ai->groups[group].cpu_map = cpu_map;
1894		cpu_map += roundup(group_cnt[group], upa);
1895	}
1896
1897	ai->static_size = static_size;
1898	ai->reserved_size = reserved_size;
1899	ai->dyn_size = dyn_size;
1900	ai->unit_size = alloc_size / upa;
1901	ai->atom_size = atom_size;
1902	ai->alloc_size = alloc_size;
1903
1904	for (group = 0, unit = 0; group_cnt[group]; group++) {
1905		struct pcpu_group_info *gi = &ai->groups[group];
1906
1907		/*
1908		 * Initialize base_offset as if all groups are located
1909		 * back-to-back.  The caller should update this to
1910		 * reflect actual allocation.
1911		 */
1912		gi->base_offset = unit * ai->unit_size;
1913
1914		for_each_possible_cpu(cpu)
1915			if (group_map[cpu] == group)
1916				gi->cpu_map[gi->nr_units++] = cpu;
1917		gi->nr_units = roundup(gi->nr_units, upa);
1918		unit += gi->nr_units;
1919	}
1920	BUG_ON(unit != nr_units);
1921
1922	return ai;
1923}
1924#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1925
1926#if defined(BUILD_EMBED_FIRST_CHUNK)
1927/**
1928 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1929 * @reserved_size: the size of reserved percpu area in bytes
1930 * @dyn_size: minimum free size for dynamic allocation in bytes
1931 * @atom_size: allocation atom size
1932 * @cpu_distance_fn: callback to determine distance between cpus, optional
1933 * @alloc_fn: function to allocate percpu page
1934 * @free_fn: function to free percpu page
1935 *
1936 * This is a helper to ease setting up the embedded first percpu chunk
1937 * and can be called where pcpu_setup_first_chunk() is expected.
1938 *
1939 * If this function is used to setup the first chunk, it is allocated
1940 * by calling @alloc_fn and used as-is without being mapped into
1941 * vmalloc area.  Allocations are always whole multiples of @atom_size
1942 * aligned to @atom_size.
1943 *
1944 * This enables the first chunk to piggy back on the linear physical
1945 * mapping which often uses larger page size.  Please note that this
1946 * can result in very sparse cpu->unit mapping on NUMA machines thus
1947 * requiring large vmalloc address space.  Don't use this allocator if
1948 * vmalloc space is not orders of magnitude larger than distances
1949 * between node memory addresses (ie. 32bit NUMA machines).
1950 *
1951 * @dyn_size specifies the minimum dynamic area size.
1952 *
1953 * If the needed size is smaller than the minimum or specified unit
1954 * size, the leftover is returned using @free_fn.
1955 *
1956 * RETURNS:
1957 * 0 on success, -errno on failure.
1958 */
1959int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1960				  size_t atom_size,
1961				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1962				  pcpu_fc_alloc_fn_t alloc_fn,
1963				  pcpu_fc_free_fn_t free_fn)
1964{
1965	void *base = (void *)ULONG_MAX;
1966	void **areas = NULL;
1967	struct pcpu_alloc_info *ai;
1968	size_t size_sum, areas_size;
1969	unsigned long max_distance;
1970	int group, i, highest_group, rc;
1971
1972	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1973				   cpu_distance_fn);
1974	if (IS_ERR(ai))
1975		return PTR_ERR(ai);
1976
1977	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1978	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1979
1980	areas = memblock_virt_alloc_nopanic(areas_size, 0);
1981	if (!areas) {
1982		rc = -ENOMEM;
1983		goto out_free;
1984	}
1985
1986	/* allocate, copy and determine base address & max_distance */
1987	highest_group = 0;
1988	for (group = 0; group < ai->nr_groups; group++) {
1989		struct pcpu_group_info *gi = &ai->groups[group];
1990		unsigned int cpu = NR_CPUS;
1991		void *ptr;
1992
1993		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1994			cpu = gi->cpu_map[i];
1995		BUG_ON(cpu == NR_CPUS);
1996
1997		/* allocate space for the whole group */
1998		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1999		if (!ptr) {
2000			rc = -ENOMEM;
2001			goto out_free_areas;
2002		}
2003		/* kmemleak tracks the percpu allocations separately */
2004		kmemleak_free(ptr);
2005		areas[group] = ptr;
2006
2007		base = min(ptr, base);
2008		if (ptr > areas[highest_group])
2009			highest_group = group;
2010	}
2011	max_distance = areas[highest_group] - base;
2012	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
2013
2014	/* warn if maximum distance is further than 75% of vmalloc space */
2015	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
2016		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
2017				max_distance, VMALLOC_TOTAL);
2018#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2019		/* and fail if we have fallback */
2020		rc = -EINVAL;
2021		goto out_free_areas;
2022#endif
2023	}
2024
2025	/*
2026	 * Copy data and free unused parts.  This should happen after all
2027	 * allocations are complete; otherwise, we may end up with
2028	 * overlapping groups.
2029	 */
2030	for (group = 0; group < ai->nr_groups; group++) {
2031		struct pcpu_group_info *gi = &ai->groups[group];
2032		void *ptr = areas[group];
2033
2034		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
2035			if (gi->cpu_map[i] == NR_CPUS) {
2036				/* unused unit, free whole */
2037				free_fn(ptr, ai->unit_size);
2038				continue;
2039			}
2040			/* copy and return the unused part */
2041			memcpy(ptr, __per_cpu_load, ai->static_size);
2042			free_fn(ptr + size_sum, ai->unit_size - size_sum);
2043		}
2044	}
2045
2046	/* base address is now known, determine group base offsets */
2047	for (group = 0; group < ai->nr_groups; group++) {
2048		ai->groups[group].base_offset = areas[group] - base;
2049	}
2050
2051	pr_info("Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
2052		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
2053		ai->dyn_size, ai->unit_size);
2054
2055	rc = pcpu_setup_first_chunk(ai, base);
2056	goto out_free;
2057
2058out_free_areas:
2059	for (group = 0; group < ai->nr_groups; group++)
2060		if (areas[group])
2061			free_fn(areas[group],
2062				ai->groups[group].nr_units * ai->unit_size);
2063out_free:
2064	pcpu_free_alloc_info(ai);
2065	if (areas)
2066		memblock_free_early(__pa(areas), areas_size);
2067	return rc;
2068}
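
/*
 * Usage sketch with hypothetical callback names: an arch wanting NUMA
 * aware embedding typically wires this helper into its own
 * setup_per_cpu_areas(), roughly as
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    my_cpu_distance, my_node_alloc,
 *				    my_node_free);
 *
 * where my_node_alloc() returns node-local memory for the requesting cpu,
 * my_node_free() returns the leftover, and my_cpu_distance() reports node
 * distances so that remote cpus end up in separate groups.
 */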
2069#endif /* BUILD_EMBED_FIRST_CHUNK */
2070
2071#ifdef BUILD_PAGE_FIRST_CHUNK
2072/**
2073 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
2074 * @reserved_size: the size of reserved percpu area in bytes
2075 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
2076 * @free_fn: function to free percpu page, always called with PAGE_SIZE
2077 * @populate_pte_fn: function to populate pte
2078 *
2079 * This is a helper to ease setting up page-remapped first percpu
2080 * chunk and can be called where pcpu_setup_first_chunk() is expected.
2081 *
2082 * This is the basic allocator.  Static percpu area is allocated
2083 * page-by-page into vmalloc area.
2084 *
2085 * RETURNS:
2086 * 0 on success, -errno on failure.
2087 */
2088int __init pcpu_page_first_chunk(size_t reserved_size,
2089				 pcpu_fc_alloc_fn_t alloc_fn,
2090				 pcpu_fc_free_fn_t free_fn,
2091				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
2092{
2093	static struct vm_struct vm;
2094	struct pcpu_alloc_info *ai;
2095	char psize_str[16];
2096	int unit_pages;
2097	size_t pages_size;
2098	struct page **pages;
2099	int unit, i, j, rc;
2100	int upa;
2101	int nr_g0_units;
2102
2103	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
2104
2105	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
2106	if (IS_ERR(ai))
2107		return PTR_ERR(ai);
2108	BUG_ON(ai->nr_groups != 1);
2109	upa = ai->alloc_size/ai->unit_size;
2110	nr_g0_units = roundup(num_possible_cpus(), upa);
2111	if (unlikely(WARN_ON(ai->groups[0].nr_units != nr_g0_units))) {
2112		pcpu_free_alloc_info(ai);
2113		return -EINVAL;
2114	}
2115
2116	unit_pages = ai->unit_size >> PAGE_SHIFT;
2117
2118	/* unaligned allocations can't be freed, round up to page size */
2119	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
2120			       sizeof(pages[0]));
2121	pages = memblock_virt_alloc(pages_size, 0);
2122
2123	/* allocate pages */
2124	j = 0;
2125	for (unit = 0; unit < num_possible_cpus(); unit++) {
2126		unsigned int cpu = ai->groups[0].cpu_map[unit];
2127		for (i = 0; i < unit_pages; i++) {
2128			void *ptr;
2129
2130			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
2131			if (!ptr) {
2132				pr_warn("failed to allocate %s page for cpu%u\n",
2133						psize_str, cpu);
2134				goto enomem;
2135			}
2136			/* kmemleak tracks the percpu allocations separately */
2137			kmemleak_free(ptr);
2138			pages[j++] = virt_to_page(ptr);
2139		}
2140	}
2141
2142	/* allocate vm area, map the pages and copy static data */
2143	vm.flags = VM_ALLOC;
2144	vm.size = num_possible_cpus() * ai->unit_size;
2145	vm_area_register_early(&vm, PAGE_SIZE);
2146
2147	for (unit = 0; unit < num_possible_cpus(); unit++) {
2148		unsigned long unit_addr =
2149			(unsigned long)vm.addr + unit * ai->unit_size;
2150
2151		for (i = 0; i < unit_pages; i++)
2152			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
2153
2154		/* pte already populated, the following shouldn't fail */
2155		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
2156				      unit_pages);
2157		if (rc < 0)
2158			panic("failed to map percpu area, err=%d\n", rc);
2159
2160		/*
2161		 * FIXME: Archs with virtual cache should flush local
2162		 * cache for the linear mapping here - something
2163		 * equivalent to flush_cache_vmap() on the local cpu.
2164		 * flush_cache_vmap() can't be used as most supporting
2165		 * data structures are not set up yet.
2166		 */
2167
2168		/* copy static data */
2169		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
2170	}
2171
2172	/* we're ready, commit */
2173	pr_info("%d %s pages/cpu @%p s%zu r%zu d%zu\n",
2174		unit_pages, psize_str, vm.addr, ai->static_size,
2175		ai->reserved_size, ai->dyn_size);
2176
2177	rc = pcpu_setup_first_chunk(ai, vm.addr);
2178	goto out_free_ar;
2179
2180enomem:
2181	while (--j >= 0)
2182		free_fn(page_address(pages[j]), PAGE_SIZE);
2183	rc = -ENOMEM;
2184out_free_ar:
2185	memblock_free_early(__pa(pages), pages_size);
2186	pcpu_free_alloc_info(ai);
2187	return rc;
2188}
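
/*
 * Usage sketch with hypothetical callback names: archs that cannot (or do
 * not want to) reuse the linear mapping call this from their
 * setup_per_cpu_areas() roughly as
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   my_page_alloc, my_page_free,
 *				   my_populate_pte);
 *
 * where my_populate_pte() must make sure page tables exist for the given
 * address before __pcpu_map_pages() installs the PAGE_SIZE mappings.
 */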
2189#endif /* BUILD_PAGE_FIRST_CHUNK */
2190
2191#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
2192/*
2193 * Generic SMP percpu area setup.
2194 *
2195 * The embedding helper is used because its behavior closely resembles
2196 * the original non-dynamic generic percpu area setup.  This is
2197 * important because many archs have addressing restrictions and might
2198 * fail if the percpu area is located far away from the previous
2199 * location.  As an added bonus, in non-NUMA cases, embedding is
2200 * generally a good idea TLB-wise because percpu area can piggy back
2201 * on the physical linear memory mapping which uses large page
2202 * mappings on applicable archs.
2203 */
2204unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
2205EXPORT_SYMBOL(__per_cpu_offset);
2206
2207static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
2208				       size_t align)
2209{
2210	return  memblock_virt_alloc_from_nopanic(
2211			size, align, __pa(MAX_DMA_ADDRESS));
2212}
2213
2214static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
2215{
2216	memblock_free_early(__pa(ptr), size);
2217}
2218
2219void __init setup_per_cpu_areas(void)
2220{
2221	unsigned long delta;
2222	unsigned int cpu;
2223	int rc;
2224
2225	/*
2226	 * Always reserve area for module percpu variables.  That's
2227	 * what the legacy allocator did.
2228	 */
2229	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
2230				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
2231				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
2232	if (rc < 0)
2233		panic("Failed to initialize percpu areas.");
2234
2235	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
2236	for_each_possible_cpu(cpu)
2237		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
2238}
2239#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
2240
2241#else	/* CONFIG_SMP */
2242
2243/*
2244 * UP percpu area setup.
2245 *
2246 * UP always uses km-based percpu allocator with identity mapping.
2247 * Static percpu variables are indistinguishable from the usual static
2248 * variables and don't require any special preparation.
2249 */
2250void __init setup_per_cpu_areas(void)
2251{
2252	const size_t unit_size =
2253		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
2254					 PERCPU_DYNAMIC_RESERVE));
2255	struct pcpu_alloc_info *ai;
2256	void *fc;
2257
2258	ai = pcpu_alloc_alloc_info(1, 1);
2259	fc = memblock_virt_alloc_from_nopanic(unit_size,
2260					      PAGE_SIZE,
2261					      __pa(MAX_DMA_ADDRESS));
2262	if (!ai || !fc)
2263		panic("Failed to allocate memory for percpu areas.");
2264	/* kmemleak tracks the percpu allocations separately */
2265	kmemleak_free(fc);
2266
2267	ai->dyn_size = unit_size;
2268	ai->unit_size = unit_size;
2269	ai->atom_size = unit_size;
2270	ai->alloc_size = unit_size;
2271	ai->groups[0].nr_units = 1;
2272	ai->groups[0].cpu_map[0] = 0;
2273
2274	if (pcpu_setup_first_chunk(ai, fc) < 0)
2275		panic("Failed to initialize percpu areas.");
2276}
2277
2278#endif	/* CONFIG_SMP */
2279
2280/*
2281 * First and reserved chunks are initialized with temporary allocation
2282 * map in initdata so that they can be used before slab is online.
2283 * This function is called after slab is brought up and replaces those
2284 * with properly allocated maps.
2285 */
2286void __init percpu_init_late(void)
2287{
2288	struct pcpu_chunk *target_chunks[] =
2289		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
2290	struct pcpu_chunk *chunk;
2291	unsigned long flags;
2292	int i;
2293
2294	for (i = 0; (chunk = target_chunks[i]); i++) {
2295		int *map;
2296		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
2297
2298		BUILD_BUG_ON(size > PAGE_SIZE);
2299
2300		map = pcpu_mem_zalloc(size);
2301		BUG_ON(!map);
2302
2303		spin_lock_irqsave(&pcpu_lock, flags);
2304		memcpy(map, chunk->map, size);
2305		chunk->map = map;
2306		spin_unlock_irqrestore(&pcpu_lock, flags);
2307	}
2308}
2309
2310/*
2311 * Percpu allocator is initialized early during boot when neither slab nor
2312 * workqueue is available.  Plug async management until everything is up
2313 * and running.
2314 */
2315static int __init percpu_enable_async(void)
2316{
2317	pcpu_async_enabled = true;
2318	return 0;
2319}
2320subsys_initcall(percpu_enable_async);