   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/percpu.c - percpu memory allocator
   4 *
   5 * Copyright (C) 2009		SUSE Linux Products GmbH
   6 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
   7 *
   8 * Copyright (C) 2017		Facebook Inc.
   9 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
  10 *
  11 * The percpu allocator handles both static and dynamic areas.  Percpu
  12 * areas are allocated in chunks which are divided into units.  There is
  13 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
  14 * based on NUMA properties of the machine.
  15 *
  16 *  c0                           c1                         c2
  17 *  -------------------          -------------------        ------------
  18 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  19 *  -------------------  ......  -------------------  ....  ------------
  20 *
   21 * Allocation is done by offsets into a unit's address space.  I.e., an
  22 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
  23 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
  24 * and even sparse.  Access is handled by configuring percpu base
  25 * registers according to the cpu to unit mappings and offsetting the
  26 * base address using pcpu_unit_size.
  27 *
  28 * There is special consideration for the first chunk which must handle
  29 * the static percpu variables in the kernel image as allocation services
  30 * are not online yet.  In short, the first chunk is structured like so:
  31 *
  32 *                  <Static | [Reserved] | Dynamic>
  33 *
  34 * The static data is copied from the original section managed by the
  35 * linker.  The reserved section, if non-zero, primarily manages static
  36 * percpu variables from kernel modules.  Finally, the dynamic section
  37 * takes care of normal allocations.
  38 *
  39 * The allocator organizes chunks into lists according to free size and
  40 * memcg-awareness.  To make a percpu allocation memcg-aware the __GFP_ACCOUNT
   41 * flag should be passed.  All memcg-aware allocations share one set of
   42 * chunks, while unaccounted allocations and allocations performed by
   43 * processes belonging to the root memory cgroup use the second set.
  44 *
  45 * The allocator tries to allocate from the fullest chunk first. Each chunk
  46 * is managed by a bitmap with metadata blocks.  The allocation map is updated
  47 * on every allocation and free to reflect the current state while the boundary
  48 * map is only updated on allocation.  Each metadata block contains
  49 * information to help mitigate the need to iterate over large portions
  50 * of the bitmap.  The reverse mapping from page to chunk is stored in
  51 * the page's index.  Lastly, units are lazily backed and grow in unison.
  52 *
  53 * There is a unique conversion that goes on here between bytes and bits.
  54 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
  55 * tracks the number of pages it is responsible for in nr_pages.  Helper
   56 * functions are used to convert between bytes, bits, and blocks.
  57 * All hints are managed in bits unless explicitly stated.
  58 *
  59 * To use this allocator, arch code should do the following:
  60 *
  61 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  62 *   regular address to percpu pointer and back if they need to be
  63 *   different from the default
  64 *
  65 * - use pcpu_setup_first_chunk() during percpu area initialization to
  66 *   setup the first chunk containing the kernel static percpu area
  67 */
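/*
 * Editorial sketch (not part of the original file): typical use of the
 * interfaces this allocator backs.  The percpu accessors named below are
 * the real kernel API; the surrounding code is illustrative only.
 *
 *	DEFINE_PER_CPU(unsigned long, hits);	// static percpu variable
 *
 *	void record_hit(void)
 *	{
 *		this_cpu_inc(hits);		// update this cpu's copy
 *	}
 *
 *	// dynamic percpu memory, served by the allocator in this file
 *	unsigned long __percpu *ctrs = alloc_percpu(unsigned long);
 *	unsigned long total = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		total += *per_cpu_ptr(ctrs, cpu);	// read every unit
 *	free_percpu(ctrs);
 */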
  68
  69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  70
  71#include <linux/bitmap.h>
  72#include <linux/cpumask.h>
  73#include <linux/memblock.h>
  74#include <linux/err.h>
  75#include <linux/list.h>
  76#include <linux/log2.h>
  77#include <linux/mm.h>
  78#include <linux/module.h>
  79#include <linux/mutex.h>
  80#include <linux/percpu.h>
  81#include <linux/pfn.h>
  82#include <linux/slab.h>
  83#include <linux/spinlock.h>
  84#include <linux/vmalloc.h>
  85#include <linux/workqueue.h>
  86#include <linux/kmemleak.h>
  87#include <linux/sched.h>
  88#include <linux/sched/mm.h>
  89#include <linux/memcontrol.h>
  90
  91#include <asm/cacheflush.h>
  92#include <asm/sections.h>
  93#include <asm/tlbflush.h>
  94#include <asm/io.h>
  95
  96#define CREATE_TRACE_POINTS
  97#include <trace/events/percpu.h>
  98
  99#include "percpu-internal.h"
 100
 101/*
 102 * The slots are sorted by the size of the biggest continuous free area.
  103 * 1-15 bytes share the same slot.
 104 */
 105#define PCPU_SLOT_BASE_SHIFT		5
 106/* chunks in slots below this are subject to being sidelined on failed alloc */
 107#define PCPU_SLOT_FAIL_THRESHOLD	3
 108
 109#define PCPU_EMPTY_POP_PAGES_LOW	2
 110#define PCPU_EMPTY_POP_PAGES_HIGH	4
 111
 112#ifdef CONFIG_SMP
 113/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 114#ifndef __addr_to_pcpu_ptr
 115#define __addr_to_pcpu_ptr(addr)					\
 116	(void __percpu *)((unsigned long)(addr) -			\
 117			  (unsigned long)pcpu_base_addr	+		\
 118			  (unsigned long)__per_cpu_start)
 119#endif
 120#ifndef __pcpu_ptr_to_addr
 121#define __pcpu_ptr_to_addr(ptr)						\
 122	(void __force *)((unsigned long)(ptr) +				\
 123			 (unsigned long)pcpu_base_addr -		\
 124			 (unsigned long)__per_cpu_start)
 125#endif
 126#else	/* CONFIG_SMP */
 127/* on UP, it's always identity mapped */
 128#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
 129#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
 130#endif	/* CONFIG_SMP */
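/*
 * Illustration (editorial): with the default mapping above the two macros
 * are exact inverses.  For an address @off bytes into unit 0:
 *
 *	addr = pcpu_base_addr + off
 *	ptr  = __addr_to_pcpu_ptr(addr)		== __per_cpu_start + off
 *	__pcpu_ptr_to_addr(ptr)			== pcpu_base_addr + off
 *
 * per_cpu_ptr() later adds a cpu's unit offset to @ptr to reach that
 * cpu's copy.
 */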
 131
 132static int pcpu_unit_pages __ro_after_init;
 133static int pcpu_unit_size __ro_after_init;
 134static int pcpu_nr_units __ro_after_init;
 135static int pcpu_atom_size __ro_after_init;
 136int pcpu_nr_slots __ro_after_init;
 137static int pcpu_free_slot __ro_after_init;
 138int pcpu_sidelined_slot __ro_after_init;
 139int pcpu_to_depopulate_slot __ro_after_init;
 140static size_t pcpu_chunk_struct_size __ro_after_init;
 141
 142/* cpus with the lowest and highest unit addresses */
 143static unsigned int pcpu_low_unit_cpu __ro_after_init;
 144static unsigned int pcpu_high_unit_cpu __ro_after_init;
 145
 146/* the address of the first chunk which starts with the kernel static area */
 147void *pcpu_base_addr __ro_after_init;
 148
 149static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
 150const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */
 151
 152/* group information, used for vm allocation */
 153static int pcpu_nr_groups __ro_after_init;
 154static const unsigned long *pcpu_group_offsets __ro_after_init;
 155static const size_t *pcpu_group_sizes __ro_after_init;
 156
 157/*
 158 * The first chunk which always exists.  Note that unlike other
 159 * chunks, this one can be allocated and mapped in several different
 160 * ways and thus often doesn't live in the vmalloc area.
 161 */
 162struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
 163
 164/*
 165 * Optional reserved chunk.  This chunk reserves part of the first
 166 * chunk and serves it for reserved allocations.  When the reserved
 167 * region doesn't exist, the following variable is NULL.
 168 */
 169struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
 170
 171DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
 172static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 173
 174struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
 175
 176/*
 177 * The number of empty populated pages, protected by pcpu_lock.
 178 * The reserved chunk doesn't contribute to the count.
 179 */
 180int pcpu_nr_empty_pop_pages;
 181
 182/*
 183 * The number of populated pages in use by the allocator, protected by
  184 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 185 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 186 * and increments/decrements this count by 1).
 187 */
 188static unsigned long pcpu_nr_populated;
 189
 190/*
 191 * Balance work is used to populate or destroy chunks asynchronously.  We
 192 * try to keep the number of populated free pages between
 193 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 194 * empty chunk.
 195 */
 196static void pcpu_balance_workfn(struct work_struct *work);
 197static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 198static bool pcpu_async_enabled __read_mostly;
 199static bool pcpu_atomic_alloc_failed;
 200
 201static void pcpu_schedule_balance_work(void)
 202{
 203	if (pcpu_async_enabled)
 204		schedule_work(&pcpu_balance_work);
 205}
 206
 207/**
 208 * pcpu_addr_in_chunk - check if the address is served from this chunk
 209 * @chunk: chunk of interest
 210 * @addr: percpu address
 211 *
 212 * RETURNS:
 213 * True if the address is served from this chunk.
 214 */
 215static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
 216{
 217	void *start_addr, *end_addr;
 218
 219	if (!chunk)
 220		return false;
 221
 222	start_addr = chunk->base_addr + chunk->start_offset;
 223	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
 224		   chunk->end_offset;
 225
 226	return addr >= start_addr && addr < end_addr;
 227}
 228
 229static int __pcpu_size_to_slot(int size)
 230{
 231	int highbit = fls(size);	/* size is in bytes */
 232	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 233}
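/*
 * Worked example (editorial): with PCPU_SLOT_BASE_SHIFT == 5,
 *
 *	__pcpu_size_to_slot(32)   = max(fls(32)   - 5 + 2, 1) = max(3, 1) = 3
 *	__pcpu_size_to_slot(1024) = max(fls(1024) - 5 + 2, 1) = max(8, 1) = 8
 *
 * i.e. every doubling of the largest contiguous free area moves a chunk
 * up one slot.
 */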
 234
 235static int pcpu_size_to_slot(int size)
 236{
 237	if (size == pcpu_unit_size)
 238		return pcpu_free_slot;
 239	return __pcpu_size_to_slot(size);
 240}
 241
 242static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 243{
 244	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 245
 246	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
 247	    chunk_md->contig_hint == 0)
 248		return 0;
 249
 250	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
 251}
 252
 253/* set the pointer to a chunk in a page struct */
 254static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 255{
 256	page->index = (unsigned long)pcpu;
 257}
 258
 259/* obtain pointer to a chunk from a page struct */
 260static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 261{
 262	return (struct pcpu_chunk *)page->index;
 263}
 264
 265static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 266{
 267	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 268}
 269
 270static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
 271{
 272	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
 273}
 274
 275static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 276				     unsigned int cpu, int page_idx)
 277{
 278	return (unsigned long)chunk->base_addr +
 279	       pcpu_unit_page_offset(cpu, page_idx);
 280}
 281
 282/*
 283 * The following are helper functions to help access bitmaps and convert
  284 * between bitmap offsets and address offsets.
 285 */
 286static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
 287{
 288	return chunk->alloc_map +
 289	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
 290}
 291
 292static unsigned long pcpu_off_to_block_index(int off)
 293{
 294	return off / PCPU_BITMAP_BLOCK_BITS;
 295}
 296
 297static unsigned long pcpu_off_to_block_off(int off)
 298{
 299	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
 300}
 301
 302static unsigned long pcpu_block_off_to_off(int index, int off)
 303{
 304	return index * PCPU_BITMAP_BLOCK_BITS + off;
 305}
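/*
 * Worked example (editorial): assuming 4K pages and a 4-byte
 * PCPU_MIN_ALLOC_SIZE, PCPU_BITMAP_BLOCK_BITS is 1024 and chunk offset
 * 2500 converts as:
 *
 *	pcpu_off_to_block_index(2500) = 2500 / 1024    = 2
 *	pcpu_off_to_block_off(2500)   = 2500 & 1023    = 452
 *	pcpu_block_off_to_off(2, 452) = 2 * 1024 + 452 = 2500
 */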
 306
 307/**
 308 * pcpu_check_block_hint - check against the contig hint
 309 * @block: block of interest
 310 * @bits: size of allocation
 311 * @align: alignment of area (max PAGE_SIZE)
 312 *
 313 * Check to see if the allocation can fit in the block's contig hint.
 314 * Note, a chunk uses the same hints as a block so this can also check against
 315 * the chunk's contig hint.
 316 */
 317static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
 318				  size_t align)
 319{
 320	int bit_off = ALIGN(block->contig_hint_start, align) -
 321		block->contig_hint_start;
 322
 323	return bit_off + bits <= block->contig_hint;
 324}
 325
 326/*
 327 * pcpu_next_hint - determine which hint to use
 328 * @block: block of interest
 329 * @alloc_bits: size of allocation
 330 *
 331 * This determines if we should scan based on the scan_hint or first_free.
 332 * In general, we want to scan from first_free to fulfill allocations by
 333 * first fit.  However, if we know a scan_hint at position scan_hint_start
 334 * cannot fulfill an allocation, we can begin scanning from there knowing
 335 * the contig_hint will be our fallback.
 336 */
 337static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
 338{
 339	/*
 340	 * The three conditions below determine if we can skip past the
 341	 * scan_hint.  First, does the scan hint exist.  Second, is the
 342	 * contig_hint after the scan_hint (possibly not true iff
  343	 * contig_hint after the scan_hint (this can be false only when
 344	 * larger than the scan_hint.
 345	 */
 346	if (block->scan_hint &&
 347	    block->contig_hint_start > block->scan_hint_start &&
 348	    alloc_bits > block->scan_hint)
 349		return block->scan_hint_start + block->scan_hint;
 350
 351	return block->first_free;
 352}
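/*
 * Worked example (editorial): with scan_hint = 4 at scan_hint_start = 10
 * and contig_hint_start = 20, a request for 6 bits cannot fit in the scan
 * hint, so scanning starts at 10 + 4 = 14 instead of first_free, leaving
 * the contig hint as the fallback.
 */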
 353
 354/**
 355 * pcpu_next_md_free_region - finds the next hint free area
 356 * @chunk: chunk of interest
 357 * @bit_off: chunk offset
 358 * @bits: size of free area
 359 *
 360 * Helper function for pcpu_for_each_md_free_region.  It checks
 361 * block->contig_hint and performs aggregation across blocks to find the
 362 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 363 * loop.
 364 */
 365static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
 366				     int *bits)
 367{
 368	int i = pcpu_off_to_block_index(*bit_off);
 369	int block_off = pcpu_off_to_block_off(*bit_off);
 370	struct pcpu_block_md *block;
 371
 372	*bits = 0;
 373	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
 374	     block++, i++) {
 375		/* handles contig area across blocks */
 376		if (*bits) {
 377			*bits += block->left_free;
 378			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
 379				continue;
 380			return;
 381		}
 382
 383		/*
  384		 * This checks three things.  First, is there a contig_hint to
 385		 * check.  Second, have we checked this hint before by
 386		 * comparing the block_off.  Third, is this the same as the
 387		 * right contig hint.  In the last case, it spills over into
 388		 * the next block and should be handled by the contig area
 389		 * across blocks code.
 390		 */
 391		*bits = block->contig_hint;
 392		if (*bits && block->contig_hint_start >= block_off &&
 393		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
 394			*bit_off = pcpu_block_off_to_off(i,
 395					block->contig_hint_start);
 396			return;
 397		}
 398		/* reset to satisfy the second predicate above */
 399		block_off = 0;
 400
 401		*bits = block->right_free;
 402		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
 403	}
 404}
 405
 406/**
 407 * pcpu_next_fit_region - finds fit areas for a given allocation request
 408 * @chunk: chunk of interest
 409 * @alloc_bits: size of allocation
 410 * @align: alignment of area (max PAGE_SIZE)
 411 * @bit_off: chunk offset
 412 * @bits: size of free area
 413 *
 414 * Finds the next free region that is viable for use with a given size and
 415 * alignment.  This only returns if there is a valid area to be used for this
 416 * allocation.  block->first_free is returned if the allocation request fits
  417 * within the block, so the request may be fulfilled prior to the contig
 418 * hint.
 419 */
 420static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
 421				 int align, int *bit_off, int *bits)
 422{
 423	int i = pcpu_off_to_block_index(*bit_off);
 424	int block_off = pcpu_off_to_block_off(*bit_off);
 425	struct pcpu_block_md *block;
 426
 427	*bits = 0;
 428	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
 429	     block++, i++) {
 430		/* handles contig area across blocks */
 431		if (*bits) {
 432			*bits += block->left_free;
 433			if (*bits >= alloc_bits)
 434				return;
 435			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
 436				continue;
 437		}
 438
 439		/* check block->contig_hint */
 440		*bits = ALIGN(block->contig_hint_start, align) -
 441			block->contig_hint_start;
 442		/*
 443		 * This uses the block offset to determine if this has been
 444		 * checked in the prior iteration.
 445		 */
 446		if (block->contig_hint &&
 447		    block->contig_hint_start >= block_off &&
 448		    block->contig_hint >= *bits + alloc_bits) {
 449			int start = pcpu_next_hint(block, alloc_bits);
 450
 451			*bits += alloc_bits + block->contig_hint_start -
 452				 start;
 453			*bit_off = pcpu_block_off_to_off(i, start);
 454			return;
 455		}
 456		/* reset to satisfy the second predicate above */
 457		block_off = 0;
 458
 459		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
 460				 align);
 461		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
 462		*bit_off = pcpu_block_off_to_off(i, *bit_off);
 463		if (*bits >= alloc_bits)
 464			return;
 465	}
 466
 467	/* no valid offsets were found - fail condition */
 468	*bit_off = pcpu_chunk_map_bits(chunk);
 469}
 470
 471/*
 472 * Metadata free area iterators.  These perform aggregation of free areas
 473 * based on the metadata blocks and return the offset @bit_off and size in
 474 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 475 * a fit is found for the allocation request.
 476 */
 477#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
 478	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
 479	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
 480	     (bit_off) += (bits) + 1,					\
 481	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
 482
 483#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
 484	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
 485				  &(bits));				      \
 486	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
 487	     (bit_off) += (bits),					      \
 488	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
 489				  &(bits)))
 490
 491/**
 492 * pcpu_mem_zalloc - allocate memory
 493 * @size: bytes to allocate
 494 * @gfp: allocation flags
 495 *
 496 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 497 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 498 * This is to facilitate passing through whitelisted flags.  The
 499 * returned memory is always zeroed.
 500 *
 501 * RETURNS:
 502 * Pointer to the allocated area on success, NULL on failure.
 503 */
 504static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 505{
 506	if (WARN_ON_ONCE(!slab_is_available()))
 507		return NULL;
 508
 509	if (size <= PAGE_SIZE)
 510		return kzalloc(size, gfp);
 511	else
 512		return __vmalloc(size, gfp | __GFP_ZERO);
 513}
 514
 515/**
 516 * pcpu_mem_free - free memory
 517 * @ptr: memory to free
 518 *
 519 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 520 */
 521static void pcpu_mem_free(void *ptr)
 522{
 523	kvfree(ptr);
 524}
 525
 526static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
 527			      bool move_front)
 528{
 529	if (chunk != pcpu_reserved_chunk) {
 530		if (move_front)
 531			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
 532		else
 533			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
 534	}
 535}
 536
 537static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
 538{
 539	__pcpu_chunk_move(chunk, slot, true);
 540}
 541
 542/**
 543 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 544 * @chunk: chunk of interest
 545 * @oslot: the previous slot it was on
 546 *
 547 * This function is called after an allocation or free changed @chunk.
 548 * New slot according to the changed state is determined and @chunk is
 549 * moved to the slot.  Note that the reserved chunk is never put on
 550 * chunk slots.
 551 *
 552 * CONTEXT:
 553 * pcpu_lock.
 554 */
 555static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 556{
 557	int nslot = pcpu_chunk_slot(chunk);
 558
 559	/* leave isolated chunks in-place */
 560	if (chunk->isolated)
 561		return;
 562
 563	if (oslot != nslot)
 564		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
 565}
 566
 567static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
 568{
 569	lockdep_assert_held(&pcpu_lock);
 570
 571	if (!chunk->isolated) {
 572		chunk->isolated = true;
 573		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
 574	}
 575	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
 576}
 577
 578static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
 579{
 580	lockdep_assert_held(&pcpu_lock);
 581
 582	if (chunk->isolated) {
 583		chunk->isolated = false;
 584		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
 585		pcpu_chunk_relocate(chunk, -1);
 586	}
 587}
 588
 589/*
 590 * pcpu_update_empty_pages - update empty page counters
 591 * @chunk: chunk of interest
 592 * @nr: nr of empty pages
 593 *
 594 * This is used to keep track of the empty pages now based on the premise
 595 * a md_block covers a page.  The hint update functions recognize if a block
 596 * is made full or broken to calculate deltas for keeping track of free pages.
 597 */
 598static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
 599{
 600	chunk->nr_empty_pop_pages += nr;
 601	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
 602		pcpu_nr_empty_pop_pages += nr;
 603}
 604
 605/*
 606 * pcpu_region_overlap - determines if two regions overlap
 607 * @a: start of first region, inclusive
 608 * @b: end of first region, exclusive
 609 * @x: start of second region, inclusive
 610 * @y: end of second region, exclusive
 611 *
 612 * This is used to determine if the hint region [a, b) overlaps with the
 613 * allocated region [x, y).
 614 */
 615static inline bool pcpu_region_overlap(int a, int b, int x, int y)
 616{
 617	return (a < y) && (x < b);
 618}
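/*
 * Example (editorial): [4, 10) and [8, 12) overlap since 4 < 12 && 8 < 10,
 * while [4, 8) and [8, 12) do not because 8 < 8 fails.
 */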
 619
 620/**
 621 * pcpu_block_update - updates a block given a free area
 622 * @block: block of interest
 623 * @start: start offset in block
 624 * @end: end offset in block
 625 *
 626 * Updates a block given a known free area.  The region [start, end) is
 627 * expected to be the entirety of the free area within a block.  Chooses
 628 * the best starting offset if the contig hints are equal.
 629 */
 630static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
 631{
 632	int contig = end - start;
 633
 634	block->first_free = min(block->first_free, start);
 635	if (start == 0)
 636		block->left_free = contig;
 637
 638	if (end == block->nr_bits)
 639		block->right_free = contig;
 640
 641	if (contig > block->contig_hint) {
 642		/* promote the old contig_hint to be the new scan_hint */
 643		if (start > block->contig_hint_start) {
 644			if (block->contig_hint > block->scan_hint) {
 645				block->scan_hint_start =
 646					block->contig_hint_start;
 647				block->scan_hint = block->contig_hint;
 648			} else if (start < block->scan_hint_start) {
 649				/*
 650				 * The old contig_hint == scan_hint.  But, the
 651				 * new contig is larger so hold the invariant
 652				 * scan_hint_start < contig_hint_start.
 653				 */
 654				block->scan_hint = 0;
 655			}
 656		} else {
 657			block->scan_hint = 0;
 658		}
 659		block->contig_hint_start = start;
 660		block->contig_hint = contig;
 661	} else if (contig == block->contig_hint) {
 662		if (block->contig_hint_start &&
 663		    (!start ||
 664		     __ffs(start) > __ffs(block->contig_hint_start))) {
 665			/* start has a better alignment so use it */
 666			block->contig_hint_start = start;
 667			if (start < block->scan_hint_start &&
 668			    block->contig_hint > block->scan_hint)
 669				block->scan_hint = 0;
 670		} else if (start > block->scan_hint_start ||
 671			   block->contig_hint > block->scan_hint) {
 672			/*
 673			 * Knowing contig == contig_hint, update the scan_hint
 674			 * if it is farther than or larger than the current
 675			 * scan_hint.
 676			 */
 677			block->scan_hint_start = start;
 678			block->scan_hint = contig;
 679		}
 680	} else {
 681		/*
 682		 * The region is smaller than the contig_hint.  So only update
 683		 * the scan_hint if it is larger than or equal and farther than
 684		 * the current scan_hint.
 685		 */
 686		if ((start < block->contig_hint_start &&
 687		     (contig > block->scan_hint ||
 688		      (contig == block->scan_hint &&
 689		       start > block->scan_hint_start)))) {
 690			block->scan_hint_start = start;
 691			block->scan_hint = contig;
 692		}
 693	}
 694}
 695
 696/*
 697 * pcpu_block_update_scan - update a block given a free area from a scan
 698 * @chunk: chunk of interest
 699 * @bit_off: chunk offset
 700 * @bits: size of free area
 701 *
 702 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 703 * to find a block that can hold the allocation and then pcpu_alloc_area()
 704 * where a scan is used.  When allocations require specific alignments,
 705 * we can inadvertently create holes which will not be seen in the alloc
 706 * or free paths.
 707 *
 708 * This takes a given free area hole and updates a block as it may change the
 709 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 710 * from alignment.
 711 */
 712static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
 713				   int bits)
 714{
 715	int s_off = pcpu_off_to_block_off(bit_off);
 716	int e_off = s_off + bits;
 717	int s_index, l_bit;
 718	struct pcpu_block_md *block;
 719
 720	if (e_off > PCPU_BITMAP_BLOCK_BITS)
 721		return;
 722
 723	s_index = pcpu_off_to_block_index(bit_off);
 724	block = chunk->md_blocks + s_index;
 725
 726	/* scan backwards in case of alignment skipping free bits */
 727	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
 728	s_off = (s_off == l_bit) ? 0 : l_bit + 1;
 729
 730	pcpu_block_update(block, s_off, e_off);
 731}
 732
 733/**
 734 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 735 * @chunk: chunk of interest
 736 * @full_scan: if we should scan from the beginning
 737 *
 738 * Iterates over the metadata blocks to find the largest contig area.
 739 * A full scan can be avoided on the allocation path as this is triggered
 740 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 741 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 742 * be prevented on freeing as we want to find the largest area possibly
 743 * spanning blocks.
 744 */
 745static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
 746{
 747	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 748	int bit_off, bits;
 749
 750	/* promote scan_hint to contig_hint */
 751	if (!full_scan && chunk_md->scan_hint) {
 752		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
 753		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
 754		chunk_md->contig_hint = chunk_md->scan_hint;
 755		chunk_md->scan_hint = 0;
 756	} else {
 757		bit_off = chunk_md->first_free;
 758		chunk_md->contig_hint = 0;
 759	}
 760
 761	bits = 0;
 762	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 763		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
 764}
 765
 766/**
 767 * pcpu_block_refresh_hint
 768 * @chunk: chunk of interest
 769 * @index: index of the metadata block
 770 *
 771 * Scans over the block beginning at first_free and updates the block
 772 * metadata accordingly.
 773 */
 774static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
 775{
 776	struct pcpu_block_md *block = chunk->md_blocks + index;
 777	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
 778	unsigned int start, end;	/* region start, region end */
 779
 780	/* promote scan_hint to contig_hint */
 781	if (block->scan_hint) {
 782		start = block->scan_hint_start + block->scan_hint;
 783		block->contig_hint_start = block->scan_hint_start;
 784		block->contig_hint = block->scan_hint;
 785		block->scan_hint = 0;
 786	} else {
 787		start = block->first_free;
 788		block->contig_hint = 0;
 789	}
 790
 791	block->right_free = 0;
 792
 793	/* iterate over free areas and update the contig hints */
 794	for_each_clear_bitrange_from(start, end, alloc_map, PCPU_BITMAP_BLOCK_BITS)
 795		pcpu_block_update(block, start, end);
 796}
 797
 798/**
 799 * pcpu_block_update_hint_alloc - update hint on allocation path
 800 * @chunk: chunk of interest
 801 * @bit_off: chunk offset
 802 * @bits: size of request
 803 *
 804 * Updates metadata for the allocation path.  The metadata only has to be
 805 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 806 * scans are required if the block's contig hint is broken.
 807 */
 808static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
 809					 int bits)
 810{
 811	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 812	int nr_empty_pages = 0;
 813	struct pcpu_block_md *s_block, *e_block, *block;
 814	int s_index, e_index;	/* block indexes of the freed allocation */
 815	int s_off, e_off;	/* block offsets of the freed allocation */
 816
 817	/*
 818	 * Calculate per block offsets.
 819	 * The calculation uses an inclusive range, but the resulting offsets
 820	 * are [start, end).  e_index always points to the last block in the
 821	 * range.
 822	 */
 823	s_index = pcpu_off_to_block_index(bit_off);
 824	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
 825	s_off = pcpu_off_to_block_off(bit_off);
 826	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
 827
 828	s_block = chunk->md_blocks + s_index;
 829	e_block = chunk->md_blocks + e_index;
 830
 831	/*
 832	 * Update s_block.
 833	 */
 834	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
 835		nr_empty_pages++;
 836
 837	/*
 838	 * block->first_free must be updated if the allocation takes its place.
 839	 * If the allocation breaks the contig_hint, a scan is required to
 840	 * restore this hint.
 841	 */
 842	if (s_off == s_block->first_free)
 843		s_block->first_free = find_next_zero_bit(
 844					pcpu_index_alloc_map(chunk, s_index),
 845					PCPU_BITMAP_BLOCK_BITS,
 846					s_off + bits);
 847
 848	if (pcpu_region_overlap(s_block->scan_hint_start,
 849				s_block->scan_hint_start + s_block->scan_hint,
 850				s_off,
 851				s_off + bits))
 852		s_block->scan_hint = 0;
 853
 854	if (pcpu_region_overlap(s_block->contig_hint_start,
 855				s_block->contig_hint_start +
 856				s_block->contig_hint,
 857				s_off,
 858				s_off + bits)) {
 859		/* block contig hint is broken - scan to fix it */
 860		if (!s_off)
 861			s_block->left_free = 0;
 862		pcpu_block_refresh_hint(chunk, s_index);
 863	} else {
 864		/* update left and right contig manually */
 865		s_block->left_free = min(s_block->left_free, s_off);
 866		if (s_index == e_index)
 867			s_block->right_free = min_t(int, s_block->right_free,
 868					PCPU_BITMAP_BLOCK_BITS - e_off);
 869		else
 870			s_block->right_free = 0;
 871	}
 872
 873	/*
 874	 * Update e_block.
 875	 */
 876	if (s_index != e_index) {
 877		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
 878			nr_empty_pages++;
 879
 880		/*
 881		 * When the allocation is across blocks, the end is along
 882		 * the left part of the e_block.
 883		 */
 884		e_block->first_free = find_next_zero_bit(
 885				pcpu_index_alloc_map(chunk, e_index),
 886				PCPU_BITMAP_BLOCK_BITS, e_off);
 887
 888		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
 889			/* reset the block */
 890			e_block++;
 891		} else {
 892			if (e_off > e_block->scan_hint_start)
 893				e_block->scan_hint = 0;
 894
 895			e_block->left_free = 0;
 896			if (e_off > e_block->contig_hint_start) {
 897				/* contig hint is broken - scan to fix it */
 898				pcpu_block_refresh_hint(chunk, e_index);
 899			} else {
 900				e_block->right_free =
 901					min_t(int, e_block->right_free,
 902					      PCPU_BITMAP_BLOCK_BITS - e_off);
 903			}
 904		}
 905
 906		/* update in-between md_blocks */
 907		nr_empty_pages += (e_index - s_index - 1);
 908		for (block = s_block + 1; block < e_block; block++) {
 909			block->scan_hint = 0;
 910			block->contig_hint = 0;
 911			block->left_free = 0;
 912			block->right_free = 0;
 913		}
 914	}
 915
 916	/*
 917	 * If the allocation is not atomic, some blocks may not be
 918	 * populated with pages, while we account it here.  The number
 919	 * of pages will be added back with pcpu_chunk_populated()
 920	 * when populating pages.
 921	 */
 922	if (nr_empty_pages)
 923		pcpu_update_empty_pages(chunk, -nr_empty_pages);
 924
 925	if (pcpu_region_overlap(chunk_md->scan_hint_start,
 926				chunk_md->scan_hint_start +
 927				chunk_md->scan_hint,
 928				bit_off,
 929				bit_off + bits))
 930		chunk_md->scan_hint = 0;
 931
 932	/*
 933	 * The only time a full chunk scan is required is if the chunk
 934	 * contig hint is broken.  Otherwise, it means a smaller space
 935	 * was used and therefore the chunk contig hint is still correct.
 936	 */
 937	if (pcpu_region_overlap(chunk_md->contig_hint_start,
 938				chunk_md->contig_hint_start +
 939				chunk_md->contig_hint,
 940				bit_off,
 941				bit_off + bits))
 942		pcpu_chunk_refresh_hint(chunk, false);
 943}
 944
 945/**
 946 * pcpu_block_update_hint_free - updates the block hints on the free path
 947 * @chunk: chunk of interest
 948 * @bit_off: chunk offset
 949 * @bits: size of request
 950 *
  951 * Updates metadata for the free path.  This avoids a blind block
 952 * refresh by making use of the block contig hints.  If this fails, it scans
 953 * forward and backward to determine the extent of the free area.  This is
 954 * capped at the boundary of blocks.
 955 *
 956 * A chunk update is triggered if a page becomes free, a block becomes free,
 957 * or the free spans across blocks.  This tradeoff is to minimize iterating
 958 * over the block metadata to update chunk_md->contig_hint.
 959 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 960 * than the available space.  If the contig hint is contained in one block, it
 961 * will be accurate.
 962 */
 963static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
 964					int bits)
 965{
 966	int nr_empty_pages = 0;
 967	struct pcpu_block_md *s_block, *e_block, *block;
 968	int s_index, e_index;	/* block indexes of the freed allocation */
 969	int s_off, e_off;	/* block offsets of the freed allocation */
 970	int start, end;		/* start and end of the whole free area */
 971
 972	/*
 973	 * Calculate per block offsets.
 974	 * The calculation uses an inclusive range, but the resulting offsets
 975	 * are [start, end).  e_index always points to the last block in the
 976	 * range.
 977	 */
 978	s_index = pcpu_off_to_block_index(bit_off);
 979	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
 980	s_off = pcpu_off_to_block_off(bit_off);
 981	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
 982
 983	s_block = chunk->md_blocks + s_index;
 984	e_block = chunk->md_blocks + e_index;
 985
 986	/*
 987	 * Check if the freed area aligns with the block->contig_hint.
 988	 * If it does, then the scan to find the beginning/end of the
 989	 * larger free area can be avoided.
 990	 *
 991	 * start and end refer to beginning and end of the free area
  992	 * within their respective blocks.  This is not necessarily
 993	 * the entire free area as it may span blocks past the beginning
 994	 * or end of the block.
 995	 */
 996	start = s_off;
 997	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
 998		start = s_block->contig_hint_start;
 999	} else {
1000		/*
1001		 * Scan backwards to find the extent of the free area.
1002		 * find_last_bit returns the starting bit, so if the start bit
1003		 * is returned, that means there was no last bit and the
1004		 * remainder of the chunk is free.
1005		 */
1006		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1007					  start);
1008		start = (start == l_bit) ? 0 : l_bit + 1;
1009	}
1010
1011	end = e_off;
1012	if (e_off == e_block->contig_hint_start)
1013		end = e_block->contig_hint_start + e_block->contig_hint;
1014	else
1015		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1016				    PCPU_BITMAP_BLOCK_BITS, end);
1017
1018	/* update s_block */
1019	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
1020	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1021		nr_empty_pages++;
1022	pcpu_block_update(s_block, start, e_off);
1023
 1024	/* the free spans multiple blocks */
1025	if (s_index != e_index) {
1026		/* update e_block */
1027		if (end == PCPU_BITMAP_BLOCK_BITS)
1028			nr_empty_pages++;
1029		pcpu_block_update(e_block, 0, end);
1030
1031		/* reset md_blocks in the middle */
1032		nr_empty_pages += (e_index - s_index - 1);
1033		for (block = s_block + 1; block < e_block; block++) {
1034			block->first_free = 0;
1035			block->scan_hint = 0;
1036			block->contig_hint_start = 0;
1037			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1038			block->left_free = PCPU_BITMAP_BLOCK_BITS;
1039			block->right_free = PCPU_BITMAP_BLOCK_BITS;
1040		}
1041	}
1042
1043	if (nr_empty_pages)
1044		pcpu_update_empty_pages(chunk, nr_empty_pages);
1045
1046	/*
1047	 * Refresh chunk metadata when the free makes a block free or spans
1048	 * across blocks.  The contig_hint may be off by up to a page, but if
1049	 * the contig_hint is contained in a block, it will be accurate with
1050	 * the else condition below.
1051	 */
1052	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1053		pcpu_chunk_refresh_hint(chunk, true);
1054	else
1055		pcpu_block_update(&chunk->chunk_md,
1056				  pcpu_block_off_to_off(s_index, start),
1057				  end);
1058}
1059
1060/**
1061 * pcpu_is_populated - determines if the region is populated
1062 * @chunk: chunk of interest
1063 * @bit_off: chunk offset
1064 * @bits: size of area
1065 * @next_off: return value for the next offset to start searching
1066 *
1067 * For atomic allocations, check if the backing pages are populated.
1068 *
1069 * RETURNS:
1070 * Bool if the backing pages are populated.
 1071 * next_off is used to skip over unpopulated blocks in pcpu_find_block_fit.
1072 */
1073static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1074			      int *next_off)
1075{
1076	unsigned int start, end;
1077
1078	start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1079	end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
1080
1081	start = find_next_zero_bit(chunk->populated, end, start);
1082	if (start >= end)
1083		return true;
1084
1085	end = find_next_bit(chunk->populated, end, start + 1);
1086
1087	*next_off = end * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
1088	return false;
1089}
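/*
 * Worked example (editorial): assuming 4K pages and a 4-byte
 * PCPU_MIN_ALLOC_SIZE, bit_off = 1000 and bits = 100 cover bytes
 * [4000, 4400), so start = PFN_DOWN(4000) = 0 and end = PFN_UP(4400) = 2:
 * pages 0 and 1 of the chunk must be populated for an atomic allocation
 * to use this area.
 */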
1090
1091/**
1092 * pcpu_find_block_fit - finds the block index to start searching
1093 * @chunk: chunk of interest
1094 * @alloc_bits: size of request in allocation units
1095 * @align: alignment of area (max PAGE_SIZE bytes)
1096 * @pop_only: use populated regions only
1097 *
1098 * Given a chunk and an allocation spec, find the offset to begin searching
1099 * for a free region.  This iterates over the bitmap metadata blocks to
1100 * find an offset that will be guaranteed to fit the requirements.  It is
 1101 * not quite first fit: if the allocation does not fit in the contig hint
 1102 * of a block or chunk, it is skipped.  This errs on the side of caution
1103 * to prevent excess iteration.  Poor alignment can cause the allocator to
1104 * skip over blocks and chunks that have valid free areas.
1105 *
1106 * RETURNS:
1107 * The offset in the bitmap to begin searching.
1108 * -1 if no offset is found.
1109 */
1110static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1111			       size_t align, bool pop_only)
1112{
1113	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1114	int bit_off, bits, next_off;
1115
1116	/*
1117	 * This is an optimization to prevent scanning by assuming if the
1118	 * allocation cannot fit in the global hint, there is memory pressure
1119	 * and creating a new chunk would happen soon.
1120	 */
1121	if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
1122		return -1;
1123
1124	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1125	bits = 0;
1126	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1127		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1128						   &next_off))
1129			break;
1130
1131		bit_off = next_off;
1132		bits = 0;
1133	}
1134
1135	if (bit_off == pcpu_chunk_map_bits(chunk))
1136		return -1;
1137
1138	return bit_off;
1139}
1140
1141/*
1142 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1143 * @map: the address to base the search on
1144 * @size: the bitmap size in bits
1145 * @start: the bitnumber to start searching at
1146 * @nr: the number of zeroed bits we're looking for
1147 * @align_mask: alignment mask for zero area
1148 * @largest_off: offset of the largest area skipped
1149 * @largest_bits: size of the largest area skipped
1150 *
1151 * The @align_mask should be one less than a power of 2.
1152 *
1153 * This is a modified version of bitmap_find_next_zero_area_off() to remember
1154 * the largest area that was skipped.  This is imperfect, but in general is
1155 * good enough.  The largest remembered region is the largest failed region
1156 * seen.  This does not include anything we possibly skipped due to alignment.
1157 * pcpu_block_update_scan() does scan backwards to try and recover what was
1158 * lost to alignment.  While this can cause scanning to miss earlier possible
1159 * free areas, smaller allocations will eventually fill those holes.
1160 */
1161static unsigned long pcpu_find_zero_area(unsigned long *map,
1162					 unsigned long size,
1163					 unsigned long start,
1164					 unsigned long nr,
1165					 unsigned long align_mask,
1166					 unsigned long *largest_off,
1167					 unsigned long *largest_bits)
1168{
1169	unsigned long index, end, i, area_off, area_bits;
1170again:
1171	index = find_next_zero_bit(map, size, start);
1172
1173	/* Align allocation */
1174	index = __ALIGN_MASK(index, align_mask);
1175	area_off = index;
1176
1177	end = index + nr;
1178	if (end > size)
1179		return end;
1180	i = find_next_bit(map, end, index);
1181	if (i < end) {
1182		area_bits = i - area_off;
1183		/* remember largest unused area with best alignment */
1184		if (area_bits > *largest_bits ||
1185		    (area_bits == *largest_bits && *largest_off &&
1186		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1187			*largest_off = area_off;
1188			*largest_bits = area_bits;
1189		}
1190
1191		start = i + 1;
1192		goto again;
1193	}
1194	return index;
1195}
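/*
 * Worked example (editorial): with align_mask = 0, nr = 4, and a map at
 * least 9 bits long whose bits 0 and 4 are set (all others clear):
 *
 *	pass 1: index = 1, end = 5, find_next_bit() = 4, so the 3-bit hole
 *		at offset 1 is recorded in *largest_off/*largest_bits and
 *		the search restarts past bit 4;
 *	pass 2: index = 5, end = 9, no set bit below 9, so 5 is returned.
 */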
1196
1197/**
1198 * pcpu_alloc_area - allocates an area from a pcpu_chunk
1199 * @chunk: chunk of interest
1200 * @alloc_bits: size of request in allocation units
1201 * @align: alignment of area (max PAGE_SIZE)
1202 * @start: bit_off to start searching
1203 *
1204 * This function takes in a @start offset to begin searching to fit an
1205 * allocation of @alloc_bits with alignment @align.  It needs to scan
1206 * the allocation map because if it fits within the block's contig hint,
1207 * @start will be block->first_free. This is an attempt to fill the
1208 * allocation prior to breaking the contig hint.  The allocation and
1209 * boundary maps are updated accordingly if it confirms a valid
1210 * free area.
1211 *
1212 * RETURNS:
1213 * Allocated addr offset in @chunk on success.
1214 * -1 if no matching area is found.
1215 */
1216static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1217			   size_t align, int start)
1218{
1219	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1220	size_t align_mask = (align) ? (align - 1) : 0;
1221	unsigned long area_off = 0, area_bits = 0;
1222	int bit_off, end, oslot;
1223
1224	lockdep_assert_held(&pcpu_lock);
1225
1226	oslot = pcpu_chunk_slot(chunk);
1227
1228	/*
1229	 * Search to find a fit.
1230	 */
1231	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1232		    pcpu_chunk_map_bits(chunk));
1233	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1234				      align_mask, &area_off, &area_bits);
1235	if (bit_off >= end)
1236		return -1;
1237
1238	if (area_bits)
1239		pcpu_block_update_scan(chunk, area_off, area_bits);
1240
1241	/* update alloc map */
1242	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1243
1244	/* update boundary map */
1245	set_bit(bit_off, chunk->bound_map);
1246	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1247	set_bit(bit_off + alloc_bits, chunk->bound_map);
1248
1249	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1250
1251	/* update first free bit */
1252	if (bit_off == chunk_md->first_free)
1253		chunk_md->first_free = find_next_zero_bit(
1254					chunk->alloc_map,
1255					pcpu_chunk_map_bits(chunk),
1256					bit_off + alloc_bits);
1257
1258	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1259
1260	pcpu_chunk_relocate(chunk, oslot);
1261
1262	return bit_off * PCPU_MIN_ALLOC_SIZE;
1263}
1264
1265/**
1266 * pcpu_free_area - frees the corresponding offset
1267 * @chunk: chunk of interest
1268 * @off: addr offset into chunk
1269 *
1270 * This function determines the size of an allocation to free using
1271 * the boundary bitmap and clears the allocation map.
1272 *
1273 * RETURNS:
1274 * Number of freed bytes.
1275 */
1276static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1277{
1278	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1279	int bit_off, bits, end, oslot, freed;
1280
1281	lockdep_assert_held(&pcpu_lock);
1282	pcpu_stats_area_dealloc(chunk);
1283
1284	oslot = pcpu_chunk_slot(chunk);
1285
1286	bit_off = off / PCPU_MIN_ALLOC_SIZE;
1287
1288	/* find end index */
1289	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1290			    bit_off + 1);
1291	bits = end - bit_off;
1292	bitmap_clear(chunk->alloc_map, bit_off, bits);
1293
1294	freed = bits * PCPU_MIN_ALLOC_SIZE;
1295
1296	/* update metadata */
1297	chunk->free_bytes += freed;
1298
1299	/* update first free bit */
1300	chunk_md->first_free = min(chunk_md->first_free, bit_off);
1301
1302	pcpu_block_update_hint_free(chunk, bit_off, bits);
1303
1304	pcpu_chunk_relocate(chunk, oslot);
1305
1306	return freed;
1307}
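/*
 * Worked example (editorial): after pcpu_alloc_area() places a 3-bit
 * allocation at bit 8, alloc_map has bits 8-10 set and bound_map has bits
 * 8 and 11 set with 9-10 clear.  Freeing offset 8 * PCPU_MIN_ALLOC_SIZE
 * later finds end = 11 in bound_map, clears alloc_map bits 8-10, and
 * reports 3 * PCPU_MIN_ALLOC_SIZE bytes freed.
 */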
1308
1309static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1310{
1311	block->scan_hint = 0;
1312	block->contig_hint = nr_bits;
1313	block->left_free = nr_bits;
1314	block->right_free = nr_bits;
1315	block->first_free = 0;
1316	block->nr_bits = nr_bits;
1317}
1318
1319static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1320{
1321	struct pcpu_block_md *md_block;
1322
1323	/* init the chunk's block */
1324	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1325
1326	for (md_block = chunk->md_blocks;
1327	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1328	     md_block++)
1329		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1330}
1331
1332/**
1333 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1334 * @tmp_addr: the start of the region served
1335 * @map_size: size of the region served
1336 *
1337 * This is responsible for creating the chunks that serve the first chunk.  The
 1338 * base_addr is @tmp_addr rounded down to a page boundary while the region
 1339 * end is rounded up.  Offsets are tracked to determine the region served.  All
 1340 * this is done to appease the bitmap allocator by avoiding partial blocks.
1341 *
1342 * RETURNS:
1343 * Chunk serving the region at @tmp_addr of @map_size.
1344 */
1345static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1346							 int map_size)
1347{
1348	struct pcpu_chunk *chunk;
1349	unsigned long aligned_addr;
1350	int start_offset, offset_bits, region_size, region_bits;
1351	size_t alloc_size;
1352
1353	/* region calculations */
1354	aligned_addr = tmp_addr & PAGE_MASK;
1355
1356	start_offset = tmp_addr - aligned_addr;
1357	region_size = ALIGN(start_offset + map_size, PAGE_SIZE);
1358
1359	/* allocate chunk */
1360	alloc_size = struct_size(chunk, populated,
1361				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1362	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1363	if (!chunk)
1364		panic("%s: Failed to allocate %zu bytes\n", __func__,
1365		      alloc_size);
1366
1367	INIT_LIST_HEAD(&chunk->list);
1368
1369	chunk->base_addr = (void *)aligned_addr;
1370	chunk->start_offset = start_offset;
1371	chunk->end_offset = region_size - chunk->start_offset - map_size;
1372
1373	chunk->nr_pages = region_size >> PAGE_SHIFT;
1374	region_bits = pcpu_chunk_map_bits(chunk);
1375
1376	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1377	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1378	if (!chunk->alloc_map)
1379		panic("%s: Failed to allocate %zu bytes\n", __func__,
1380		      alloc_size);
1381
1382	alloc_size =
1383		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1384	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1385	if (!chunk->bound_map)
1386		panic("%s: Failed to allocate %zu bytes\n", __func__,
1387		      alloc_size);
1388
1389	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1390	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1391	if (!chunk->md_blocks)
1392		panic("%s: Failed to allocate %zu bytes\n", __func__,
1393		      alloc_size);
1394
1395#ifdef CONFIG_MEMCG_KMEM
1396	/* first chunk is free to use */
1397	chunk->obj_cgroups = NULL;
1398#endif
1399	pcpu_init_md_blocks(chunk);
1400
1401	/* manage populated page bitmap */
1402	chunk->immutable = true;
1403	bitmap_fill(chunk->populated, chunk->nr_pages);
1404	chunk->nr_populated = chunk->nr_pages;
1405	chunk->nr_empty_pop_pages = chunk->nr_pages;
1406
1407	chunk->free_bytes = map_size;
1408
1409	if (chunk->start_offset) {
1410		/* hide the beginning of the bitmap */
1411		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1412		bitmap_set(chunk->alloc_map, 0, offset_bits);
1413		set_bit(0, chunk->bound_map);
1414		set_bit(offset_bits, chunk->bound_map);
1415
1416		chunk->chunk_md.first_free = offset_bits;
1417
1418		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1419	}
1420
1421	if (chunk->end_offset) {
1422		/* hide the end of the bitmap */
1423		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1424		bitmap_set(chunk->alloc_map,
1425			   pcpu_chunk_map_bits(chunk) - offset_bits,
1426			   offset_bits);
1427		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1428			chunk->bound_map);
1429		set_bit(region_bits, chunk->bound_map);
1430
1431		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1432					     - offset_bits, offset_bits);
1433	}
1434
1435	return chunk;
1436}
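/*
 * Worked example (editorial): assuming 4K pages, a tmp_addr ending in
 * 0x640 with map_size = 0x2000 gives
 *
 *	start_offset = 0x640
 *	region_size  = ALIGN(0x640 + 0x2000, 0x1000) = 0x3000
 *	end_offset   = 0x3000 - 0x640 - 0x2000       = 0x9c0
 *
 * so the chunk manages three pages, with the leading and trailing
 * partial pages hidden from the allocation bitmap by the offsets.
 */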
1437
1438static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1439{
1440	struct pcpu_chunk *chunk;
1441	int region_bits;
1442
1443	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1444	if (!chunk)
1445		return NULL;
1446
1447	INIT_LIST_HEAD(&chunk->list);
1448	chunk->nr_pages = pcpu_unit_pages;
1449	region_bits = pcpu_chunk_map_bits(chunk);
1450
1451	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1452					   sizeof(chunk->alloc_map[0]), gfp);
1453	if (!chunk->alloc_map)
1454		goto alloc_map_fail;
1455
1456	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1457					   sizeof(chunk->bound_map[0]), gfp);
1458	if (!chunk->bound_map)
1459		goto bound_map_fail;
1460
1461	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1462					   sizeof(chunk->md_blocks[0]), gfp);
1463	if (!chunk->md_blocks)
1464		goto md_blocks_fail;
1465
1466#ifdef CONFIG_MEMCG_KMEM
1467	if (!mem_cgroup_kmem_disabled()) {
1468		chunk->obj_cgroups =
1469			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1470					sizeof(struct obj_cgroup *), gfp);
1471		if (!chunk->obj_cgroups)
1472			goto objcg_fail;
1473	}
1474#endif
1475
1476	pcpu_init_md_blocks(chunk);
1477
1478	/* init metadata */
1479	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1480
1481	return chunk;
1482
1483#ifdef CONFIG_MEMCG_KMEM
1484objcg_fail:
1485	pcpu_mem_free(chunk->md_blocks);
1486#endif
1487md_blocks_fail:
1488	pcpu_mem_free(chunk->bound_map);
1489bound_map_fail:
1490	pcpu_mem_free(chunk->alloc_map);
1491alloc_map_fail:
1492	pcpu_mem_free(chunk);
1493
1494	return NULL;
1495}
1496
1497static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1498{
1499	if (!chunk)
1500		return;
1501#ifdef CONFIG_MEMCG_KMEM
1502	pcpu_mem_free(chunk->obj_cgroups);
1503#endif
1504	pcpu_mem_free(chunk->md_blocks);
1505	pcpu_mem_free(chunk->bound_map);
1506	pcpu_mem_free(chunk->alloc_map);
1507	pcpu_mem_free(chunk);
1508}
1509
1510/**
1511 * pcpu_chunk_populated - post-population bookkeeping
1512 * @chunk: pcpu_chunk which got populated
1513 * @page_start: the start page
1514 * @page_end: the end page
1515 *
1516 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1517 * the bookkeeping information accordingly.  Must be called after each
1518 * successful population.
1519 */
1520static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1521				 int page_end)
1522{
1523	int nr = page_end - page_start;
1524
1525	lockdep_assert_held(&pcpu_lock);
1526
1527	bitmap_set(chunk->populated, page_start, nr);
1528	chunk->nr_populated += nr;
1529	pcpu_nr_populated += nr;
1530
1531	pcpu_update_empty_pages(chunk, nr);
1532}
1533
1534/**
1535 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1536 * @chunk: pcpu_chunk which got depopulated
1537 * @page_start: the start page
1538 * @page_end: the end page
1539 *
1540 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1541 * Update the bookkeeping information accordingly.  Must be called after
1542 * each successful depopulation.
1543 */
1544static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1545				   int page_start, int page_end)
1546{
1547	int nr = page_end - page_start;
1548
1549	lockdep_assert_held(&pcpu_lock);
1550
1551	bitmap_clear(chunk->populated, page_start, nr);
1552	chunk->nr_populated -= nr;
1553	pcpu_nr_populated -= nr;
1554
1555	pcpu_update_empty_pages(chunk, -nr);
1556}
1557
1558/*
1559 * Chunk management implementation.
1560 *
1561 * To allow different implementations, chunk alloc/free and
1562 * [de]population are implemented in a separate file which is pulled
1563 * into this file and compiled together.  The following functions
1564 * should be implemented.
1565 *
1566 * pcpu_populate_chunk		- populate the specified range of a chunk
1567 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
1568 * pcpu_post_unmap_tlb_flush	- flush tlb for the specified range of a chunk
1569 * pcpu_create_chunk		- create a new chunk
1570 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
 1571 * pcpu_addr_to_page		- translate address to the backing struct page
1572 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
1573 */
1574static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1575			       int page_start, int page_end, gfp_t gfp);
1576static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1577				  int page_start, int page_end);
1578static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1579				      int page_start, int page_end);
1580static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1581static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1582static struct page *pcpu_addr_to_page(void *addr);
1583static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1584
1585#ifdef CONFIG_NEED_PER_CPU_KM
1586#include "percpu-km.c"
1587#else
1588#include "percpu-vm.c"
1589#endif
1590
1591/**
1592 * pcpu_chunk_addr_search - determine chunk containing specified address
1593 * @addr: address for which the chunk needs to be determined.
1594 *
1595 * This is an internal function that handles all but static allocations.
1596 * Static percpu address values should never be passed into the allocator.
1597 *
1598 * RETURNS:
1599 * The address of the found chunk.
1600 */
1601static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1602{
1603	/* is it in the dynamic region (first chunk)? */
1604	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1605		return pcpu_first_chunk;
1606
1607	/* is it in the reserved region? */
1608	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1609		return pcpu_reserved_chunk;
1610
1611	/*
1612	 * The address is relative to unit0 which might be unused and
1613	 * thus unmapped.  Offset the address to the unit space of the
1614	 * current processor before looking it up in the vmalloc
1615	 * space.  Note that any possible cpu id can be used here, so
1616	 * there's no need to worry about preemption or cpu hotplug.
1617	 */
1618	addr += pcpu_unit_offsets[raw_smp_processor_id()];
1619	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1620}
1621
1622#ifdef CONFIG_MEMCG_KMEM
1623static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1624				      struct obj_cgroup **objcgp)
1625{
1626	struct obj_cgroup *objcg;
1627
1628	if (!memcg_kmem_online() || !(gfp & __GFP_ACCOUNT))
1629		return true;
1630
1631	objcg = current_obj_cgroup();
1632	if (!objcg)
1633		return true;
1634
1635	if (obj_cgroup_charge(objcg, gfp, pcpu_obj_full_size(size)))
1636		return false;
1637
1638	*objcgp = objcg;
1639	return true;
1640}
1641
1642static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1643				       struct pcpu_chunk *chunk, int off,
1644				       size_t size)
1645{
1646	if (!objcg)
1647		return;
1648
1649	if (likely(chunk && chunk->obj_cgroups)) {
1650		obj_cgroup_get(objcg);
1651		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1652
1653		rcu_read_lock();
1654		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1655				pcpu_obj_full_size(size));
1656		rcu_read_unlock();
1657	} else {
1658		obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
1659	}
1660}
1661
1662static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1663{
1664	struct obj_cgroup *objcg;
1665
1666	if (unlikely(!chunk->obj_cgroups))
1667		return;
1668
1669	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1670	if (!objcg)
1671		return;
1672	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1673
1674	obj_cgroup_uncharge(objcg, pcpu_obj_full_size(size));
1675
1676	rcu_read_lock();
1677	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1678			-pcpu_obj_full_size(size));
1679	rcu_read_unlock();
1680
1681	obj_cgroup_put(objcg);
1682}
1683
1684#else /* CONFIG_MEMCG_KMEM */
1685static bool
1686pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1687{
1688	return true;
1689}
1690
1691static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1692				       struct pcpu_chunk *chunk, int off,
1693				       size_t size)
1694{
1695}
1696
1697static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1698{
1699}
1700#endif /* CONFIG_MEMCG_KMEM */
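/*
 * Illustrative sketch (not part of the allocator): making a dynamic percpu
 * allocation memcg-aware only requires __GFP_ACCOUNT; the hooks above then
 * charge the current task's obj_cgroup and record it per area so that the
 * matching free can uncharge it.  @stats below is hypothetical.
 *
 *	u64 __percpu *stats;
 *
 *	stats = alloc_percpu_gfp(u64, GFP_KERNEL | __GFP_ACCOUNT);
 *	...
 *	free_percpu(stats);		// pcpu_memcg_free_hook() uncharges
 */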
1701
1702/**
1703 * pcpu_alloc - the percpu allocator
1704 * @size: size of area to allocate in bytes
1705 * @align: alignment of area (max PAGE_SIZE)
1706 * @reserved: allocate from the reserved chunk if available
1707 * @gfp: allocation flags
1708 *
1709 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
1710 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1711 * then no warning will be triggered on invalid or failed allocation
1712 * requests.
1713 *
1714 * RETURNS:
1715 * Percpu pointer to the allocated area on success, NULL on failure.
1716 */
1717static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1718				 gfp_t gfp)
1719{
1720	gfp_t pcpu_gfp;
1721	bool is_atomic;
1722	bool do_warn;
1723	struct obj_cgroup *objcg = NULL;
1724	static int warn_limit = 10;
1725	struct pcpu_chunk *chunk, *next;
1726	const char *err;
1727	int slot, off, cpu, ret;
1728	unsigned long flags;
1729	void __percpu *ptr;
1730	size_t bits, bit_align;
1731
1732	gfp = current_gfp_context(gfp);
1733	/* whitelisted flags that can be passed to the backing allocators */
1734	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1735	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1736	do_warn = !(gfp & __GFP_NOWARN);
1737
1738	/*
1739	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1740	 * therefore alignment must be a minimum of that many bytes.
1741	 * An allocation may incur internal fragmentation of up to
1742	 * PCPU_MIN_ALLOC_SIZE - 1 bytes from this rounding.
1743	 */
1744	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1745		align = PCPU_MIN_ALLOC_SIZE;
1746
1747	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1748	bits = size >> PCPU_MIN_ALLOC_SHIFT;
1749	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
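	/*
	 * Worked example, assuming PCPU_MIN_ALLOC_SIZE == 4: a request of
	 * size = 10, align = 8 becomes size = 12, bits = 3 and bit_align = 2,
	 * i.e. three 4-byte fragments starting on an 8-byte boundary.
	 */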
1750
1751	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1752		     !is_power_of_2(align))) {
1753		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1754		     size, align);
1755		return NULL;
1756	}
1757
1758	if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
1759		return NULL;
1760
1761	if (!is_atomic) {
1762		/*
1763		 * pcpu_balance_workfn() allocates memory under this mutex,
1764		 * and it may wait for memory reclaim. Allow current task
1765		 * to become OOM victim, in case of memory pressure.
1766		 */
1767		if (gfp & __GFP_NOFAIL) {
1768			mutex_lock(&pcpu_alloc_mutex);
1769		} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1770			pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1771			return NULL;
1772		}
1773	}
1774
1775	spin_lock_irqsave(&pcpu_lock, flags);
1776
1777	/* serve reserved allocations from the reserved chunk if available */
1778	if (reserved && pcpu_reserved_chunk) {
1779		chunk = pcpu_reserved_chunk;
1780
1781		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1782		if (off < 0) {
1783			err = "alloc from reserved chunk failed";
1784			goto fail_unlock;
1785		}
1786
1787		off = pcpu_alloc_area(chunk, bits, bit_align, off);
1788		if (off >= 0)
1789			goto area_found;
1790
1791		err = "alloc from reserved chunk failed";
1792		goto fail_unlock;
1793	}
1794
1795restart:
1796	/* search through normal chunks */
1797	for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
1798		list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1799					 list) {
1800			off = pcpu_find_block_fit(chunk, bits, bit_align,
1801						  is_atomic);
1802			if (off < 0) {
1803				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1804					pcpu_chunk_move(chunk, 0);
1805				continue;
1806			}
1807
1808			off = pcpu_alloc_area(chunk, bits, bit_align, off);
1809			if (off >= 0) {
1810				pcpu_reintegrate_chunk(chunk);
1811				goto area_found;
1812			}
1813		}
1814	}
1815
1816	spin_unlock_irqrestore(&pcpu_lock, flags);
1817
1818	if (is_atomic) {
1819		err = "atomic alloc failed, no space left";
1820		goto fail;
1821	}
1822
1823	/* No space left.  Create a new chunk. */
1824	if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1825		chunk = pcpu_create_chunk(pcpu_gfp);
1826		if (!chunk) {
1827			err = "failed to allocate new chunk";
1828			goto fail;
1829		}
1830
1831		spin_lock_irqsave(&pcpu_lock, flags);
1832		pcpu_chunk_relocate(chunk, -1);
1833	} else {
1834		spin_lock_irqsave(&pcpu_lock, flags);
1835	}
1836
1837	goto restart;
1838
1839area_found:
1840	pcpu_stats_area_alloc(chunk, size);
1841	spin_unlock_irqrestore(&pcpu_lock, flags);
1842
1843	/* populate if not all pages are already there */
1844	if (!is_atomic) {
1845		unsigned int page_end, rs, re;
1846
1847		rs = PFN_DOWN(off);
1848		page_end = PFN_UP(off + size);
1849
1850		for_each_clear_bitrange_from(rs, re, chunk->populated, page_end) {
1851			WARN_ON(chunk->immutable);
1852
1853			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1854
1855			spin_lock_irqsave(&pcpu_lock, flags);
1856			if (ret) {
1857				pcpu_free_area(chunk, off);
1858				err = "failed to populate";
1859				goto fail_unlock;
1860			}
1861			pcpu_chunk_populated(chunk, rs, re);
1862			spin_unlock_irqrestore(&pcpu_lock, flags);
1863		}
1864
1865		mutex_unlock(&pcpu_alloc_mutex);
1866	}
1867
1868	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1869		pcpu_schedule_balance_work();
1870
1871	/* clear the areas and return address relative to base address */
1872	for_each_possible_cpu(cpu)
1873		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1874
1875	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1876	kmemleak_alloc_percpu(ptr, size, gfp);
1877
1878	trace_percpu_alloc_percpu(_RET_IP_, reserved, is_atomic, size, align,
1879				  chunk->base_addr, off, ptr,
1880				  pcpu_obj_full_size(size), gfp);
1881
1882	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1883
1884	return ptr;
1885
1886fail_unlock:
1887	spin_unlock_irqrestore(&pcpu_lock, flags);
1888fail:
1889	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1890
1891	if (do_warn && warn_limit) {
1892		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1893			size, align, is_atomic, err);
1894		if (!is_atomic)
1895			dump_stack();
1896		if (!--warn_limit)
1897			pr_info("limit reached, disable warning\n");
1898	}
1899
1900	if (is_atomic) {
1901		/* see the flag handling in pcpu_balance_workfn() */
1902		pcpu_atomic_alloc_failed = true;
1903		pcpu_schedule_balance_work();
1904	} else {
1905		mutex_unlock(&pcpu_alloc_mutex);
1906	}
1907
1908	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1909
1910	return NULL;
1911}
1912
1913/**
1914 * __alloc_percpu_gfp - allocate dynamic percpu area
1915 * @size: size of area to allocate in bytes
1916 * @align: alignment of area (max PAGE_SIZE)
1917 * @gfp: allocation flags
1918 *
1919 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
1920 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1921 * be called from any context but is a lot more likely to fail. If @gfp
1922 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1923 * allocation requests.
1924 *
1925 * RETURNS:
1926 * Percpu pointer to the allocated area on success, NULL on failure.
1927 */
1928void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1929{
1930	return pcpu_alloc(size, align, false, gfp);
1931}
1932EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
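/*
 * Illustrative sketch: a caller that cannot sleep passes a gfp mask without
 * GFP_KERNEL, making the allocation atomic, i.e. served only from already
 * populated pages.  @p and struct foo are hypothetical.
 *
 *	struct foo __percpu *p;
 *
 *	p = alloc_percpu_gfp(struct foo, GFP_NOWAIT | __GFP_NOWARN);
 *	if (!p)
 *		return -ENOMEM;
 */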
1933
1934/**
1935 * __alloc_percpu - allocate dynamic percpu area
1936 * @size: size of area to allocate in bytes
1937 * @align: alignment of area (max PAGE_SIZE)
1938 *
1939 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1940 */
1941void __percpu *__alloc_percpu(size_t size, size_t align)
1942{
1943	return pcpu_alloc(size, align, false, GFP_KERNEL);
1944}
1945EXPORT_SYMBOL_GPL(__alloc_percpu);
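/*
 * Illustrative sketch: the typical lifetime of a dynamic percpu counter
 * built on __alloc_percpu() via the type-safe alloc_percpu() wrapper.
 * @hits is hypothetical.
 *
 *	unsigned long __percpu *hits = alloc_percpu(unsigned long);
 *	unsigned long sum = 0;
 *	int cpu;
 *
 *	this_cpu_inc(*hits);			// lockless fast path
 *	for_each_possible_cpu(cpu)		// fold counters on read
 *		sum += *per_cpu_ptr(hits, cpu);
 *	free_percpu(hits);
 */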
1946
1947/**
1948 * __alloc_reserved_percpu - allocate reserved percpu area
1949 * @size: size of area to allocate in bytes
1950 * @align: alignment of area (max PAGE_SIZE)
1951 *
1952 * Allocate zero-filled percpu area of @size bytes aligned at @align
1953 * from reserved percpu area if arch has set it up; otherwise,
1954 * allocation is served from the same dynamic area.  Might sleep.
1955 * Might trigger writeouts.
1956 *
1957 * CONTEXT:
1958 * Does GFP_KERNEL allocation.
1959 *
1960 * RETURNS:
1961 * Percpu pointer to the allocated area on success, NULL on failure.
1962 */
1963void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1964{
1965	return pcpu_alloc(size, align, true, GFP_KERNEL);
1966}
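/*
 * Illustrative sketch: the module loader is effectively the only user;
 * percpu_modalloc() does roughly the following for a module's static
 * percpu section.
 *
 *	mod->percpu = __alloc_reserved_percpu(pcpusize, align);
 */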
1967
1968/**
1969 * pcpu_balance_free - manage the amount of free chunks
1970 * @empty_only: free chunks only if there are no populated pages
1971 *
1972 * If empty_only is %false, reclaim all fully free chunks regardless of the
1973 * number of populated pages.  Otherwise, only reclaim chunks that have no
1974 * populated pages.
1975 *
1976 * CONTEXT:
1977 * pcpu_lock (can be dropped temporarily)
1978 */
1979static void pcpu_balance_free(bool empty_only)
1980{
1981	LIST_HEAD(to_free);
1982	struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
1983	struct pcpu_chunk *chunk, *next;
1984
1985	lockdep_assert_held(&pcpu_lock);
1986
1987	/*
1988	 * There's no reason to keep around multiple unused chunks and VM
1989	 * areas can be scarce.  Destroy all free chunks except for one.
1990	 */
1991	list_for_each_entry_safe(chunk, next, free_head, list) {
1992		WARN_ON(chunk->immutable);
1993
1994		/* spare the first one */
1995		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1996			continue;
1997
1998		if (!empty_only || chunk->nr_empty_pop_pages == 0)
1999			list_move(&chunk->list, &to_free);
2000	}
2001
2002	if (list_empty(&to_free))
2003		return;
2004
2005	spin_unlock_irq(&pcpu_lock);
2006	list_for_each_entry_safe(chunk, next, &to_free, list) {
2007		unsigned int rs, re;
2008
2009		for_each_set_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
2010			pcpu_depopulate_chunk(chunk, rs, re);
2011			spin_lock_irq(&pcpu_lock);
2012			pcpu_chunk_depopulated(chunk, rs, re);
2013			spin_unlock_irq(&pcpu_lock);
2014		}
2015		pcpu_destroy_chunk(chunk);
2016		cond_resched();
2017	}
2018	spin_lock_irq(&pcpu_lock);
2019}
2020
2021/**
2022 * pcpu_balance_populated - manage the amount of populated pages
2023 *
2024 * Maintain a certain amount of populated pages to satisfy atomic allocations.
2025 * It is possible that this is called when physical memory is scarce,
2026 * causing the OOM killer to be triggered.  We should avoid doing so until
2027 * an actual allocation causes the failure, as it is possible that requests
2028 * can be serviced from already backed regions.
2029 *
2030 * CONTEXT:
2031 * pcpu_lock (can be dropped temporarily)
2032 */
2033static void pcpu_balance_populated(void)
2034{
2035	/* gfp flags passed to underlying allocators */
2036	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
2037	struct pcpu_chunk *chunk;
2038	int slot, nr_to_pop, ret;
2039
2040	lockdep_assert_held(&pcpu_lock);
2041
2042	/*
2043	 * Ensure there are certain number of free populated pages for
2044	 * atomic allocs.  Fill up from the most packed so that atomic
2045	 * allocs don't increase fragmentation.  If atomic allocation
2046	 * failed previously, always populate the maximum amount.  This
2047	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
2048	 * failing indefinitely; however, large atomic allocs are not
2049	 * something we support properly and can be highly unreliable and
2050	 * inefficient.
2051	 */
2052retry_pop:
2053	if (pcpu_atomic_alloc_failed) {
2054		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
2055		/* best effort anyway, don't worry about synchronization */
2056		pcpu_atomic_alloc_failed = false;
2057	} else {
2058		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2059				  pcpu_nr_empty_pop_pages,
2060				  0, PCPU_EMPTY_POP_PAGES_HIGH);
2061	}
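	/*
	 * Worked example, assuming PCPU_EMPTY_POP_PAGES_HIGH == 4: with 1
	 * empty populated page left, nr_to_pop = clamp(4 - 1, 0, 4) = 3;
	 * after an atomic allocation failure the full 4 are repopulated.
	 */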
2062
2063	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
2064		unsigned int nr_unpop = 0, rs, re;
2065
2066		if (!nr_to_pop)
2067			break;
2068
2069		list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
2070			nr_unpop = chunk->nr_pages - chunk->nr_populated;
2071			if (nr_unpop)
2072				break;
2073		}
2074
2075		if (!nr_unpop)
2076			continue;
2077
2078		/* @chunk can't go away while pcpu_alloc_mutex is held */
2079		for_each_clear_bitrange(rs, re, chunk->populated, chunk->nr_pages) {
2080			int nr = min_t(int, re - rs, nr_to_pop);
2081
2082			spin_unlock_irq(&pcpu_lock);
2083			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2084			cond_resched();
2085			spin_lock_irq(&pcpu_lock);
2086			if (!ret) {
2087				nr_to_pop -= nr;
2088				pcpu_chunk_populated(chunk, rs, rs + nr);
2089			} else {
2090				nr_to_pop = 0;
2091			}
2092
2093			if (!nr_to_pop)
2094				break;
2095		}
2096	}
2097
2098	if (nr_to_pop) {
2099		/* ran out of chunks to populate, create a new one and retry */
2100		spin_unlock_irq(&pcpu_lock);
2101		chunk = pcpu_create_chunk(gfp);
2102		cond_resched();
2103		spin_lock_irq(&pcpu_lock);
2104		if (chunk) {
2105			pcpu_chunk_relocate(chunk, -1);
2106			goto retry_pop;
2107		}
2108	}
2109}
2110
2111/**
2112 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
2113 *
2114 * Scan over chunks in the depopulate list and try to release unused populated
2115 * pages back to the system.  Depopulated chunks are sidelined to prevent
2116 * repopulating these pages unless required.  Fully free chunks are reintegrated
2117 * and freed accordingly (1 is kept around).  If we drop below the empty
2118 * populated pages threshold, reintegrate the chunk if it has empty free pages.
2119 * Each chunk is scanned in the reverse order to keep populated pages close to
2120 * the beginning of the chunk.
2121 *
2122 * CONTEXT:
2123 * pcpu_lock (can be dropped temporarily)
2124 *
2125 */
2126static void pcpu_reclaim_populated(void)
2127{
2128	struct pcpu_chunk *chunk;
2129	struct pcpu_block_md *block;
2130	int freed_page_start, freed_page_end;
2131	int i, end;
2132	bool reintegrate;
2133
2134	lockdep_assert_held(&pcpu_lock);
2135
2136	/*
2137	 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2138	 * longer discoverable to allocations which may populate pages.  The only
2139	 * other accessor is the free path, which only returns an area back to the
2140	 * allocator without touching the populated bitmap.
2141	 */
2142	while ((chunk = list_first_entry_or_null(
2143			&pcpu_chunk_lists[pcpu_to_depopulate_slot],
2144			struct pcpu_chunk, list))) {
2145		WARN_ON(chunk->immutable);
2146
2147		/*
2148		 * Scan chunk's pages in the reverse order to keep populated
2149		 * pages close to the beginning of the chunk.
2150		 */
2151		freed_page_start = chunk->nr_pages;
2152		freed_page_end = 0;
2153		reintegrate = false;
2154		for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2155			/* no more work to do */
2156			if (chunk->nr_empty_pop_pages == 0)
2157				break;
2158
2159			/* reintegrate chunk to prevent atomic alloc failures */
2160			if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
2161				reintegrate = true;
2162				break;
2163			}
2164
2165			/*
2166			 * If the page is empty and populated, start or
2167			 * extend the (i, end) range.  If i == 0, decrease
2168			 * i and perform the depopulation to cover the last
2169			 * (first) page in the chunk.
2170			 */
2171			block = chunk->md_blocks + i;
2172			if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2173			    test_bit(i, chunk->populated)) {
2174				if (end == -1)
2175					end = i;
2176				if (i > 0)
2177					continue;
2178				i--;
2179			}
2180
2181			/* depopulate if there is an active range */
2182			if (end == -1)
2183				continue;
2184
2185			spin_unlock_irq(&pcpu_lock);
2186			pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2187			cond_resched();
2188			spin_lock_irq(&pcpu_lock);
2189
2190			pcpu_chunk_depopulated(chunk, i + 1, end + 1);
2191			freed_page_start = min(freed_page_start, i + 1);
2192			freed_page_end = max(freed_page_end, end + 1);
2193
2194			/* reset the range and continue */
2195			end = -1;
2196		}
2197
2198		/* batch tlb flush per chunk to amortize cost */
2199		if (freed_page_start < freed_page_end) {
2200			spin_unlock_irq(&pcpu_lock);
2201			pcpu_post_unmap_tlb_flush(chunk,
2202						  freed_page_start,
2203						  freed_page_end);
2204			cond_resched();
2205			spin_lock_irq(&pcpu_lock);
2206		}
2207
2208		if (reintegrate || chunk->free_bytes == pcpu_unit_size)
2209			pcpu_reintegrate_chunk(chunk);
2210		else
2211			list_move_tail(&chunk->list,
2212				       &pcpu_chunk_lists[pcpu_sidelined_slot]);
2213	}
2214}
2215
2216/**
2217 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
2218 * @work: unused
2219 *
2220 * For each chunk type, manage the number of fully free chunks and the number of
2221 * populated pages.  An important thing to consider is when pages are freed and
2222 * how they contribute to the global counts.
2223 */
2224static void pcpu_balance_workfn(struct work_struct *work)
2225{
2226	/*
2227	 * pcpu_balance_free() is called twice because the first time we may
2228	 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2229	 * to grow other chunks.  This then gives pcpu_reclaim_populated() time
2230	 * to move fully free chunks to the active list to be freed if
2231	 * appropriate.
2232	 */
2233	mutex_lock(&pcpu_alloc_mutex);
2234	spin_lock_irq(&pcpu_lock);
2235
2236	pcpu_balance_free(false);
2237	pcpu_reclaim_populated();
2238	pcpu_balance_populated();
2239	pcpu_balance_free(true);
2240
2241	spin_unlock_irq(&pcpu_lock);
2242	mutex_unlock(&pcpu_alloc_mutex);
2243}
2244
2245/**
2246 * pcpu_alloc_size - the size of the dynamic percpu area
2247 * @ptr: pointer to the dynamic percpu area
2248 *
2249 * Returns the size of the @ptr allocation.  This is undefined for statically
2250 * defined percpu variables as there is no corresponding chunk->bound_map.
2251 *
2252 * RETURNS:
2253 * The size of the dynamic percpu area.
2254 *
2255 * CONTEXT:
2256 * Can be called from atomic context.
2257 */
2258size_t pcpu_alloc_size(void __percpu *ptr)
2259{
2260	struct pcpu_chunk *chunk;
2261	unsigned long bit_off, end;
2262	void *addr;
2263
2264	if (!ptr)
2265		return 0;
2266
2267	addr = __pcpu_ptr_to_addr(ptr);
2268	/* No pcpu_lock here: ptr has not been freed, so chunk is still alive */
2269	chunk = pcpu_chunk_addr_search(addr);
2270	bit_off = (addr - chunk->base_addr) / PCPU_MIN_ALLOC_SIZE;
2271	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
2272			    bit_off + 1);
2273	return (end - bit_off) * PCPU_MIN_ALLOC_SIZE;
2274}
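/*
 * Illustrative sketch: pcpu_alloc_size() reports the size an allocation was
 * rounded up to, always a multiple of PCPU_MIN_ALLOC_SIZE.  @v is
 * hypothetical.
 *
 *	u32 __percpu *v = alloc_percpu(u32);
 *	size_t sz = pcpu_alloc_size(v);	// one 4-byte fragment here
 */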
2275
2276/**
2277 * free_percpu - free percpu area
2278 * @ptr: pointer to area to free
2279 *
2280 * Free percpu area @ptr.
2281 *
2282 * CONTEXT:
2283 * Can be called from atomic context.
2284 */
2285void free_percpu(void __percpu *ptr)
2286{
2287	void *addr;
2288	struct pcpu_chunk *chunk;
2289	unsigned long flags;
2290	int size, off;
2291	bool need_balance = false;
2292
2293	if (!ptr)
2294		return;
2295
2296	kmemleak_free_percpu(ptr);
2297
2298	addr = __pcpu_ptr_to_addr(ptr);
2299	chunk = pcpu_chunk_addr_search(addr);
2300	off = addr - chunk->base_addr;
2301
2302	spin_lock_irqsave(&pcpu_lock, flags);
2303	size = pcpu_free_area(chunk, off);
2304
2305	pcpu_memcg_free_hook(chunk, off, size);
2306
2307	/*
2308	 * If there is more than one fully free chunk, wake up the grim reaper.
2309	 * If the chunk is isolated, it may be in the process of being
2310	 * reclaimed.  Let reclaim manage cleaning up of that chunk.
2311	 */
2312	if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
2313		struct pcpu_chunk *pos;
2314
2315		list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
2316			if (pos != chunk) {
2317				need_balance = true;
2318				break;
2319			}
2320	} else if (pcpu_should_reclaim_chunk(chunk)) {
2321		pcpu_isolate_chunk(chunk);
2322		need_balance = true;
2323	}
2324
2325	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2326
2327	spin_unlock_irqrestore(&pcpu_lock, flags);
2328
2329	if (need_balance)
2330		pcpu_schedule_balance_work();
2331}
2332EXPORT_SYMBOL_GPL(free_percpu);
2333
2334bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2335{
2336#ifdef CONFIG_SMP
2337	const size_t static_size = __per_cpu_end - __per_cpu_start;
2338	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2339	unsigned int cpu;
2340
2341	for_each_possible_cpu(cpu) {
2342		void *start = per_cpu_ptr(base, cpu);
2343		void *va = (void *)addr;
2344
2345		if (va >= start && va < start + static_size) {
2346			if (can_addr) {
2347				*can_addr = (unsigned long) (va - start);
2348				*can_addr += (unsigned long)
2349					per_cpu_ptr(base, get_boot_cpu_id());
2350			}
2351			return true;
2352		}
2353	}
2354#endif
2355	/* on UP, can't distinguish from other static vars, always false */
2356	return false;
2357}
2358
2359/**
2360 * is_kernel_percpu_address - test whether address is from static percpu area
2361 * @addr: address to test
2362 *
2363 * Test whether @addr belongs to in-kernel static percpu area.  Module
2364 * static percpu areas are not considered.  For those, use
2365 * is_module_percpu_address().
2366 *
2367 * RETURNS:
2368 * %true if @addr is from in-kernel static percpu area, %false otherwise.
2369 */
2370bool is_kernel_percpu_address(unsigned long addr)
2371{
2372	return __is_kernel_percpu_address(addr, NULL);
2373}
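/*
 * Illustrative sketch: debug code (e.g. lockdep's static-object check) uses
 * this predicate before treating an address as a static percpu variable:
 *
 *	if (is_kernel_percpu_address(addr))
 *		// @addr falls within some cpu's copy of the kernel's
 *		// static percpu area
 */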
2374
2375/**
2376 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2377 * @addr: the address to be converted to physical address
2378 *
2379 * Given @addr, a dereferenceable address obtained via one of the percpu
2380 * access macros, this function translates it into its physical address.
2381 * The caller is responsible for ensuring @addr stays valid until this
2382 * function finishes.
2383 *
2384 * The percpu allocator has a special setup for the first chunk, which
2385 * currently supports either embedding in the linear address space or a
2386 * vmalloc mapping; from the second chunk on, the backing allocator
2387 * (currently either vm or km) provides the translation.
2388 *
2389 * The address could be translated directly without checking whether it
2390 * falls into the first chunk, but the current code better reflects how the
2391 * percpu allocator actually works, and the verification can discover bugs
2392 * both in the percpu allocator itself and in per_cpu_ptr_to_phys()
2393 * callers.  So we keep the current code.
2394 *
2395 * RETURNS:
2396 * The physical address for @addr.
2397 */
2398phys_addr_t per_cpu_ptr_to_phys(void *addr)
2399{
2400	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2401	bool in_first_chunk = false;
2402	unsigned long first_low, first_high;
2403	unsigned int cpu;
2404
2405	/*
2406	 * The following test on unit_low/high isn't strictly
2407	 * necessary but will speed up lookups of addresses which
2408	 * aren't in the first chunk.
2409	 *
2410	 * The address check is against full chunk sizes.  pcpu_base_addr
2411	 * points to the beginning of the first chunk including the
2412	 * static region.  Assumes good intent as the first chunk may
2413	 * not be full (ie. < pcpu_unit_pages in size).
2414	 */
2415	first_low = (unsigned long)pcpu_base_addr +
2416		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2417	first_high = (unsigned long)pcpu_base_addr +
2418		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2419	if ((unsigned long)addr >= first_low &&
2420	    (unsigned long)addr < first_high) {
2421		for_each_possible_cpu(cpu) {
2422			void *start = per_cpu_ptr(base, cpu);
2423
2424			if (addr >= start && addr < start + pcpu_unit_size) {
2425				in_first_chunk = true;
2426				break;
2427			}
2428		}
2429	}
2430
2431	if (in_first_chunk) {
2432		if (!is_vmalloc_addr(addr))
2433			return __pa(addr);
2434		else
2435			return page_to_phys(vmalloc_to_page(addr)) +
2436			       offset_in_page(addr);
2437	} else
2438		return page_to_phys(pcpu_addr_to_page(addr)) +
2439		       offset_in_page(addr);
2440}
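/*
 * Illustrative sketch: translating one cpu's copy of a percpu object to a
 * physical address, e.g. for handing to a device or firmware.  @p is a
 * hypothetical percpu pointer.
 *
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 */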
2441
2442/**
2443 * pcpu_alloc_alloc_info - allocate percpu allocation info
2444 * @nr_groups: the number of groups
2445 * @nr_units: the number of units
2446 *
2447 * Allocate ai which is large enough for @nr_groups groups containing
2448 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
2449 * cpu_map array which is long enough for @nr_units and filled with
2450 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
2451 * pointer of other groups.
2452 *
2453 * RETURNS:
2454 * Pointer to the allocated pcpu_alloc_info on success, NULL on
2455 * failure.
2456 */
2457struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2458						      int nr_units)
2459{
2460	struct pcpu_alloc_info *ai;
2461	size_t base_size, ai_size;
2462	void *ptr;
2463	int unit;
2464
2465	base_size = ALIGN(struct_size(ai, groups, nr_groups),
2466			  __alignof__(ai->groups[0].cpu_map[0]));
2467	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2468
2469	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2470	if (!ptr)
2471		return NULL;
2472	ai = ptr;
2473	ptr += base_size;
2474
2475	ai->groups[0].cpu_map = ptr;
2476
2477	for (unit = 0; unit < nr_units; unit++)
2478		ai->groups[0].cpu_map[unit] = NR_CPUS;
2479
2480	ai->nr_groups = nr_groups;
2481	ai->__ai_size = PFN_ALIGN(ai_size);
2482
2483	return ai;
2484}
2485
2486/**
2487 * pcpu_free_alloc_info - free percpu allocation info
2488 * @ai: pcpu_alloc_info to free
2489 *
2490 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2491 */
2492void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2493{
2494	memblock_free(ai, ai->__ai_size);
2495}
2496
2497/**
2498 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2499 * @lvl: loglevel
2500 * @ai: allocation info to dump
2501 *
2502 * Print out information about @ai using loglevel @lvl.
2503 */
2504static void pcpu_dump_alloc_info(const char *lvl,
2505				 const struct pcpu_alloc_info *ai)
2506{
2507	int group_width = 1, cpu_width = 1, width;
2508	char empty_str[] = "--------";
2509	int alloc = 0, alloc_end = 0;
2510	int group, v;
2511	int upa, apl;	/* units per alloc, allocs per line */
2512
2513	v = ai->nr_groups;
2514	while (v /= 10)
2515		group_width++;
2516
2517	v = num_possible_cpus();
2518	while (v /= 10)
2519		cpu_width++;
2520	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2521
2522	upa = ai->alloc_size / ai->unit_size;
2523	width = upa * (cpu_width + 1) + group_width + 3;
2524	apl = rounddown_pow_of_two(max(60 / width, 1));
2525
2526	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2527	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2528	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2529
2530	for (group = 0; group < ai->nr_groups; group++) {
2531		const struct pcpu_group_info *gi = &ai->groups[group];
2532		int unit = 0, unit_end = 0;
2533
2534		BUG_ON(gi->nr_units % upa);
2535		for (alloc_end += gi->nr_units / upa;
2536		     alloc < alloc_end; alloc++) {
2537			if (!(alloc % apl)) {
2538				pr_cont("\n");
2539				printk("%spcpu-alloc: ", lvl);
2540			}
2541			pr_cont("[%0*d] ", group_width, group);
2542
2543			for (unit_end += upa; unit < unit_end; unit++)
2544				if (gi->cpu_map[unit] != NR_CPUS)
2545					pr_cont("%0*d ",
2546						cpu_width, gi->cpu_map[unit]);
2547				else
2548					pr_cont("%s ", empty_str);
2549		}
2550	}
2551	pr_cont("\n");
2552}
2553
2554/**
2555 * pcpu_setup_first_chunk - initialize the first percpu chunk
2556 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2557 * @base_addr: mapped address
2558 *
2559 * Initialize the first percpu chunk which contains the kernel static
2560 * percpu area.  This function is to be called from arch percpu area
2561 * setup path.
2562 *
2563 * @ai contains all information necessary to initialize the first
2564 * chunk and prime the dynamic percpu allocator.
2565 *
2566 * @ai->static_size is the size of static percpu area.
2567 *
2568 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2569 * reserve after the static area in the first chunk.  This reserves
2570 * the first chunk such that it's available only through reserved
2571 * percpu allocation.  This is primarily used to serve module percpu
2572 * static areas on architectures where the addressing model has
2573 * limited offset range for symbol relocations to guarantee module
2574 * percpu symbols fall inside the relocatable range.
2575 *
2576 * @ai->dyn_size determines the number of bytes available for dynamic
2577 * allocation in the first chunk.  The area between @ai->static_size +
2578 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2579 *
2580 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2581 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2582 * @ai->dyn_size.
2583 *
2584 * @ai->atom_size is the allocation atom size and used as alignment
2585 * for vm areas.
2586 *
2587 * @ai->alloc_size is the allocation size and always multiple of
2588 * @ai->atom_size.  This is larger than @ai->atom_size if
2589 * @ai->unit_size is larger than @ai->atom_size.
2590 *
2591 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2592 * percpu areas.  Units which should be colocated are put into the
2593 * same group.  Dynamic VM areas will be allocated according to these
2594 * groupings.  If @ai->nr_groups is zero, a single group containing
2595 * all units is assumed.
2596 *
2597 * The caller should have mapped the first chunk at @base_addr and
2598 * copied static data to each unit.
2599 *
2600 * The first chunk will always contain a static and a dynamic region.
2601 * However, the static region is not managed by any chunk.  If the first
2602 * chunk also contains a reserved region, it is served by two chunks -
2603 * one for the reserved region and one for the dynamic region.  They
2604 * share the same vm, but use offset regions in the area allocation map.
2605 * The chunk serving the dynamic region is circulated in the chunk slots
2606 * and available for dynamic allocation like any other chunk.
2607 */
2608void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2609				   void *base_addr)
2610{
2611	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2612	size_t static_size, dyn_size;
2613	unsigned long *group_offsets;
2614	size_t *group_sizes;
2615	unsigned long *unit_off;
2616	unsigned int cpu;
2617	int *unit_map;
2618	int group, unit, i;
2619	unsigned long tmp_addr;
2620	size_t alloc_size;
2621
2622#define PCPU_SETUP_BUG_ON(cond)	do {					\
2623	if (unlikely(cond)) {						\
2624		pr_emerg("failed to initialize, %s\n", #cond);		\
2625		pr_emerg("cpu_possible_mask=%*pb\n",			\
2626			 cpumask_pr_args(cpu_possible_mask));		\
2627		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
2628		BUG();							\
2629	}								\
2630} while (0)
2631
2632	/* sanity checks */
2633	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2634#ifdef CONFIG_SMP
2635	PCPU_SETUP_BUG_ON(!ai->static_size);
2636	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2637#endif
2638	PCPU_SETUP_BUG_ON(!base_addr);
2639	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2640	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2641	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2642	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2643	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2644	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2645	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2646	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2647			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2648	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2649
2650	/* process group information and build config tables accordingly */
2651	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2652	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2653	if (!group_offsets)
2654		panic("%s: Failed to allocate %zu bytes\n", __func__,
2655		      alloc_size);
2656
2657	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2658	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2659	if (!group_sizes)
2660		panic("%s: Failed to allocate %zu bytes\n", __func__,
2661		      alloc_size);
2662
2663	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2664	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2665	if (!unit_map)
2666		panic("%s: Failed to allocate %zu bytes\n", __func__,
2667		      alloc_size);
2668
2669	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2670	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2671	if (!unit_off)
2672		panic("%s: Failed to allocate %zu bytes\n", __func__,
2673		      alloc_size);
2674
2675	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2676		unit_map[cpu] = UINT_MAX;
2677
2678	pcpu_low_unit_cpu = NR_CPUS;
2679	pcpu_high_unit_cpu = NR_CPUS;
2680
2681	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2682		const struct pcpu_group_info *gi = &ai->groups[group];
2683
2684		group_offsets[group] = gi->base_offset;
2685		group_sizes[group] = gi->nr_units * ai->unit_size;
2686
2687		for (i = 0; i < gi->nr_units; i++) {
2688			cpu = gi->cpu_map[i];
2689			if (cpu == NR_CPUS)
2690				continue;
2691
2692			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2693			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2694			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2695
2696			unit_map[cpu] = unit + i;
2697			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2698
2699			/* determine low/high unit_cpu */
2700			if (pcpu_low_unit_cpu == NR_CPUS ||
2701			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2702				pcpu_low_unit_cpu = cpu;
2703			if (pcpu_high_unit_cpu == NR_CPUS ||
2704			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2705				pcpu_high_unit_cpu = cpu;
2706		}
2707	}
2708	pcpu_nr_units = unit;
2709
2710	for_each_possible_cpu(cpu)
2711		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2712
2713	/* we're done parsing the input, undefine BUG macro and dump config */
2714#undef PCPU_SETUP_BUG_ON
2715	pcpu_dump_alloc_info(KERN_DEBUG, ai);
2716
2717	pcpu_nr_groups = ai->nr_groups;
2718	pcpu_group_offsets = group_offsets;
2719	pcpu_group_sizes = group_sizes;
2720	pcpu_unit_map = unit_map;
2721	pcpu_unit_offsets = unit_off;
2722
2723	/* determine basic parameters */
2724	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2725	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2726	pcpu_atom_size = ai->atom_size;
2727	pcpu_chunk_struct_size = struct_size((struct pcpu_chunk *)0, populated,
2728					     BITS_TO_LONGS(pcpu_unit_pages));
2729
2730	pcpu_stats_save_ai(ai);
2731
2732	/*
2733	 * Allocate chunk slots.  The slots after the active slots are:
2734	 *   sidelined_slot - isolated, depopulated chunks
2735	 *   free_slot - fully free chunks
2736	 *   to_depopulate_slot - isolated, chunks to depopulate
2737	 */
2738	pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2739	pcpu_free_slot = pcpu_sidelined_slot + 1;
2740	pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2741	pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
2742	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2743					  sizeof(pcpu_chunk_lists[0]),
2744					  SMP_CACHE_BYTES);
2745	if (!pcpu_chunk_lists)
2746		panic("%s: Failed to allocate %zu bytes\n", __func__,
2747		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
2748
2749	for (i = 0; i < pcpu_nr_slots; i++)
2750		INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
2751
2752	/*
2753	 * The end of the static region needs to be aligned with the
2754	 * minimum allocation size as this offsets the reserved and
2755	 * dynamic region.  The first chunk ends page aligned by
2756	 * expanding the dynamic region, therefore the dynamic region
2757	 * can be shrunk to compensate while still staying above the
2758	 * configured sizes.
2759	 */
2760	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2761	dyn_size = ai->dyn_size - (static_size - ai->static_size);
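	/*
	 * Worked example, assuming PCPU_MIN_ALLOC_SIZE == 4: a 30-byte
	 * static region yields static_size = 32, and dyn_size gives up the
	 * 2 padding bytes so that the first chunk's end stays put.
	 */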
2762
2763	/*
2764	 * Initialize first chunk:
2765	 * This chunk is broken up into 3 parts:
2766	 *		< static | [reserved] | dynamic >
2767	 * - static - there is no backing chunk because these allocations can
2768	 *   never be freed.
2769	 * - reserved (pcpu_reserved_chunk) - exists primarily to serve
2770	 *   allocations from module load.
2771	 * - dynamic (pcpu_first_chunk) - serves the dynamic part of the first
2772	 *   chunk.
2773	 */
2774	tmp_addr = (unsigned long)base_addr + static_size;
2775	if (ai->reserved_size)
2776		pcpu_reserved_chunk = pcpu_alloc_first_chunk(tmp_addr,
2777						ai->reserved_size);
2778	tmp_addr = (unsigned long)base_addr + static_size + ai->reserved_size;
2779	pcpu_first_chunk = pcpu_alloc_first_chunk(tmp_addr, dyn_size);
2780
2781	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2782	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2783
2784	/* include all regions of the first chunk */
2785	pcpu_nr_populated += PFN_DOWN(size_sum);
2786
2787	pcpu_stats_chunk_alloc();
2788	trace_percpu_create_chunk(base_addr);
2789
2790	/* we're done */
2791	pcpu_base_addr = base_addr;
2792}
2793
2794#ifdef CONFIG_SMP
2795
2796const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2797	[PCPU_FC_AUTO]	= "auto",
2798	[PCPU_FC_EMBED]	= "embed",
2799	[PCPU_FC_PAGE]	= "page",
2800};
2801
2802enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2803
2804static int __init percpu_alloc_setup(char *str)
2805{
2806	if (!str)
2807		return -EINVAL;
2808
2809	if (0)
2810		/* nada */;
2811#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2812	else if (!strcmp(str, "embed"))
2813		pcpu_chosen_fc = PCPU_FC_EMBED;
2814#endif
2815#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2816	else if (!strcmp(str, "page"))
2817		pcpu_chosen_fc = PCPU_FC_PAGE;
2818#endif
2819	else
2820		pr_warn("unknown allocator %s specified\n", str);
2821
2822	return 0;
2823}
2824early_param("percpu_alloc", percpu_alloc_setup);
2825
2826/*
2827 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2828 * Build it if it is needed by the arch config or if the generic setup
2829 * is going to be used.
2830 */
2831#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2832	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2833#define BUILD_EMBED_FIRST_CHUNK
2834#endif
2835
2836/* build pcpu_page_first_chunk() iff needed by the arch config */
2837#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2838#define BUILD_PAGE_FIRST_CHUNK
2839#endif
2840
2841/* pcpu_build_alloc_info() is used by both embed and page first chunk */
2842#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2843/**
2844 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2845 * @reserved_size: the size of reserved percpu area in bytes
2846 * @dyn_size: minimum free size for dynamic allocation in bytes
2847 * @atom_size: allocation atom size
2848 * @cpu_distance_fn: callback to determine distance between cpus, optional
2849 *
2850 * This function determines grouping of units, their mappings to cpus
2851 * and other parameters considering needed percpu size, allocation
2852 * atom size and distances between CPUs.
2853 *
2854 * Groups are always multiples of atom size, and CPUs which are
2855 * LOCAL_DISTANCE apart both ways are grouped together and share space for
2856 * units in the same group.  The returned configuration is guaranteed
2857 * to have CPUs on different nodes on different groups and >=75% usage
2858 * of allocated virtual address space.
2859 *
2860 * RETURNS:
2861 * On success, pointer to the new allocation_info is returned.  On
2862 * failure, ERR_PTR value is returned.
2863 */
2864static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
2865				size_t reserved_size, size_t dyn_size,
2866				size_t atom_size,
2867				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2868{
2869	static int group_map[NR_CPUS] __initdata;
2870	static int group_cnt[NR_CPUS] __initdata;
2871	static struct cpumask mask __initdata;
2872	const size_t static_size = __per_cpu_end - __per_cpu_start;
2873	int nr_groups = 1, nr_units = 0;
2874	size_t size_sum, min_unit_size, alloc_size;
2875	int upa, max_upa, best_upa;	/* units_per_alloc */
2876	int last_allocs, group, unit;
2877	unsigned int cpu, tcpu;
2878	struct pcpu_alloc_info *ai;
2879	unsigned int *cpu_map;
2880
2881	/* this function may be called multiple times */
2882	memset(group_map, 0, sizeof(group_map));
2883	memset(group_cnt, 0, sizeof(group_cnt));
2884	cpumask_clear(&mask);
2885
2886	/* calculate size_sum and ensure dyn_size is enough for early alloc */
2887	size_sum = PFN_ALIGN(static_size + reserved_size +
2888			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2889	dyn_size = size_sum - static_size - reserved_size;
2890
2891	/*
2892	 * Determine min_unit_size, alloc_size and max_upa such that
2893	 * alloc_size is multiple of atom_size and is the smallest
2894	 * which can accommodate 4k aligned segments which are equal to
2895	 * or larger than min_unit_size.
2896	 */
2897	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2898
2899	/* determine the maximum # of units that can fit in an allocation */
2900	alloc_size = roundup(min_unit_size, atom_size);
2901	upa = alloc_size / min_unit_size;
2902	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2903		upa--;
2904	max_upa = upa;
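	/*
	 * Worked example: for size_sum = 48k and atom_size = 2M,
	 * alloc_size = 2M and upa starts at 42; it is walked down to 32,
	 * the largest value dividing 2M into page-aligned 64k units, so
	 * max_upa = 32.
	 */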
2905
2906	cpumask_copy(&mask, cpu_possible_mask);
2907
2908	/* group cpus according to their proximity */
2909	for (group = 0; !cpumask_empty(&mask); group++) {
2910		/* pop the group's first cpu */
2911		cpu = cpumask_first(&mask);
2912		group_map[cpu] = group;
2913		group_cnt[group]++;
2914		cpumask_clear_cpu(cpu, &mask);
2915
2916		for_each_cpu(tcpu, &mask) {
2917			if (!cpu_distance_fn ||
2918			    (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2919			     cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2920				group_map[tcpu] = group;
2921				group_cnt[group]++;
2922				cpumask_clear_cpu(tcpu, &mask);
2923			}
2924		}
2925	}
2926	nr_groups = group;
2927
2928	/*
2929	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2930	 * Expand the unit_size until we use >= 75% of the units allocated.
2931	 * This is related to atom_size, which can be much larger than the unit_size.
2932	 */
2933	last_allocs = INT_MAX;
2934	best_upa = 0;
2935	for (upa = max_upa; upa; upa--) {
2936		int allocs = 0, wasted = 0;
2937
2938		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2939			continue;
2940
2941		for (group = 0; group < nr_groups; group++) {
2942			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2943			allocs += this_allocs;
2944			wasted += this_allocs * upa - group_cnt[group];
2945		}
2946
2947		/*
2948		 * Don't accept if wastage is over 1/3.  The
2949		 * greater-than comparison ensures upa==1 always
2950		 * passes the following check.
2951		 */
2952		if (wasted > num_possible_cpus() / 3)
2953			continue;
2954
2955		/* and then don't consume more memory */
2956		if (allocs > last_allocs)
2957			break;
2958		last_allocs = allocs;
2959		best_upa = upa;
2960	}
2961	BUG_ON(!best_upa);
2962	upa = best_upa;
2963
2964	/* allocate and fill alloc_info */
2965	for (group = 0; group < nr_groups; group++)
2966		nr_units += roundup(group_cnt[group], upa);
2967
2968	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2969	if (!ai)
2970		return ERR_PTR(-ENOMEM);
2971	cpu_map = ai->groups[0].cpu_map;
2972
2973	for (group = 0; group < nr_groups; group++) {
2974		ai->groups[group].cpu_map = cpu_map;
2975		cpu_map += roundup(group_cnt[group], upa);
2976	}
2977
2978	ai->static_size = static_size;
2979	ai->reserved_size = reserved_size;
2980	ai->dyn_size = dyn_size;
2981	ai->unit_size = alloc_size / upa;
2982	ai->atom_size = atom_size;
2983	ai->alloc_size = alloc_size;
2984
2985	for (group = 0, unit = 0; group < nr_groups; group++) {
2986		struct pcpu_group_info *gi = &ai->groups[group];
2987
2988		/*
2989		 * Initialize base_offset as if all groups are located
2990		 * back-to-back.  The caller should update this to
2991		 * reflect actual allocation.
2992		 */
2993		gi->base_offset = unit * ai->unit_size;
2994
2995		for_each_possible_cpu(cpu)
2996			if (group_map[cpu] == group)
2997				gi->cpu_map[gi->nr_units++] = cpu;
2998		gi->nr_units = roundup(gi->nr_units, upa);
2999		unit += gi->nr_units;
3000	}
3001	BUG_ON(unit != nr_units);
3002
3003	return ai;
3004}
3005
3006static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align,
3007				   pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3008{
3009	const unsigned long goal = __pa(MAX_DMA_ADDRESS);
3010#ifdef CONFIG_NUMA
3011	int node = NUMA_NO_NODE;
3012	void *ptr;
3013
3014	if (cpu_to_nd_fn)
3015		node = cpu_to_nd_fn(cpu);
3016
3017	if (node == NUMA_NO_NODE || !node_online(node) || !NODE_DATA(node)) {
3018		ptr = memblock_alloc_from(size, align, goal);
3019		pr_info("cpu %d has no node %d or node-local memory\n",
3020			cpu, node);
3021		pr_debug("per cpu data for cpu%d %zu bytes at 0x%llx\n",
3022			 cpu, size, (u64)__pa(ptr));
3023	} else {
3024		ptr = memblock_alloc_try_nid(size, align, goal,
3025					     MEMBLOCK_ALLOC_ACCESSIBLE,
3026					     node);
3027
3028		pr_debug("per cpu data for cpu%d %zu bytes on node%d at 0x%llx\n",
3029			 cpu, size, node, (u64)__pa(ptr));
3030	}
3031	return ptr;
3032#else
3033	return memblock_alloc_from(size, align, goal);
3034#endif
3035}
3036
3037static void __init pcpu_fc_free(void *ptr, size_t size)
3038{
3039	memblock_free(ptr, size);
3040}
3041#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
3042
3043#if defined(BUILD_EMBED_FIRST_CHUNK)
3044/**
3045 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
3046 * @reserved_size: the size of reserved percpu area in bytes
3047 * @dyn_size: minimum free size for dynamic allocation in bytes
3048 * @atom_size: allocation atom size
3049 * @cpu_distance_fn: callback to determine distance between cpus, optional
3050 * @cpu_to_nd_fn: callback to convert cpu to its node, optional
3051 *
3052 * This is a helper to ease setting up embedded first percpu chunk and
3053 * can be called where pcpu_setup_first_chunk() is expected.
3054 *
3055 * If this function is used to set up the first chunk, it is allocated
3056 * by calling pcpu_fc_alloc and used as-is without being mapped into
3057 * vmalloc area.  Allocations are always whole multiples of @atom_size
3058 * aligned to @atom_size.
3059 *
3060 * This enables the first chunk to piggyback on the linear physical
3061 * mapping, which often uses a larger page size.  Please note that this
3062 * can result in very sparse cpu->unit mapping on NUMA machines thus
3063 * requiring large vmalloc address space.  Don't use this allocator if
3064 * vmalloc space is not orders of magnitude larger than distances
3065 * between node memory addresses (ie. 32bit NUMA machines).
3066 *
3067 * @dyn_size specifies the minimum dynamic area size.
3068 *
3069 * If the needed size is smaller than the minimum or specified unit
3070 * size, the leftover is returned using pcpu_fc_free.
3071 *
3072 * RETURNS:
3073 * 0 on success, -errno on failure.
3074 */
3075int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
3076				  size_t atom_size,
3077				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
3078				  pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3079{
3080	void *base = (void *)ULONG_MAX;
3081	void **areas = NULL;
3082	struct pcpu_alloc_info *ai;
3083	size_t size_sum, areas_size;
3084	unsigned long max_distance;
3085	int group, i, highest_group, rc = 0;
3086
3087	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
3088				   cpu_distance_fn);
3089	if (IS_ERR(ai))
3090		return PTR_ERR(ai);
3091
3092	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
3093	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
3094
3095	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
3096	if (!areas) {
3097		rc = -ENOMEM;
3098		goto out_free;
3099	}
3100
3101	/* allocate, copy and determine base address & max_distance */
3102	highest_group = 0;
3103	for (group = 0; group < ai->nr_groups; group++) {
3104		struct pcpu_group_info *gi = &ai->groups[group];
3105		unsigned int cpu = NR_CPUS;
3106		void *ptr;
3107
3108		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
3109			cpu = gi->cpu_map[i];
3110		BUG_ON(cpu == NR_CPUS);
3111
3112		/* allocate space for the whole group */
3113		ptr = pcpu_fc_alloc(cpu, gi->nr_units * ai->unit_size, atom_size, cpu_to_nd_fn);
3114		if (!ptr) {
3115			rc = -ENOMEM;
3116			goto out_free_areas;
3117		}
3118		/* kmemleak tracks the percpu allocations separately */
3119		kmemleak_ignore_phys(__pa(ptr));
3120		areas[group] = ptr;
3121
3122		base = min(ptr, base);
3123		if (ptr > areas[highest_group])
3124			highest_group = group;
3125	}
3126	max_distance = areas[highest_group] - base;
3127	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
3128
3129	/* warn if maximum distance is further than 75% of vmalloc space */
3130	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
3131		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
3132				max_distance, VMALLOC_TOTAL);
3133#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
3134		/* and fail if we have fallback */
3135		rc = -EINVAL;
3136		goto out_free_areas;
3137#endif
3138	}
3139
3140	/*
3141	 * Copy data and free unused parts.  This should happen after all
3142	 * allocations are complete; otherwise, we may end up with
3143	 * overlapping groups.
3144	 */
3145	for (group = 0; group < ai->nr_groups; group++) {
3146		struct pcpu_group_info *gi = &ai->groups[group];
3147		void *ptr = areas[group];
3148
3149		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3150			if (gi->cpu_map[i] == NR_CPUS) {
3151				/* unused unit, free whole */
3152				pcpu_fc_free(ptr, ai->unit_size);
3153				continue;
3154			}
3155			/* copy and return the unused part */
3156			memcpy(ptr, __per_cpu_load, ai->static_size);
3157			pcpu_fc_free(ptr + size_sum, ai->unit_size - size_sum);
3158		}
3159	}
3160
3161	/* base address is now known, determine group base offsets */
3162	for (group = 0; group < ai->nr_groups; group++) {
3163		ai->groups[group].base_offset = areas[group] - base;
3164	}
3165
3166	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
3167		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3168		ai->dyn_size, ai->unit_size);
3169
3170	pcpu_setup_first_chunk(ai, base);
3171	goto out_free;
3172
3173out_free_areas:
3174	for (group = 0; group < ai->nr_groups; group++)
3175		if (areas[group])
3176			pcpu_fc_free(areas[group],
3177				ai->groups[group].nr_units * ai->unit_size);
3178out_free:
3179	pcpu_free_alloc_info(ai);
3180	if (areas)
3181		memblock_free(areas, areas_size);
3182	return rc;
3183}
3184#endif /* BUILD_EMBED_FIRST_CHUNK */
3185
3186#ifdef BUILD_PAGE_FIRST_CHUNK
3187#include <asm/pgalloc.h>
3188
3189#ifndef P4D_TABLE_SIZE
3190#define P4D_TABLE_SIZE PAGE_SIZE
3191#endif
3192
3193#ifndef PUD_TABLE_SIZE
3194#define PUD_TABLE_SIZE PAGE_SIZE
3195#endif
3196
3197#ifndef PMD_TABLE_SIZE
3198#define PMD_TABLE_SIZE PAGE_SIZE
3199#endif
3200
3201#ifndef PTE_TABLE_SIZE
3202#define PTE_TABLE_SIZE PAGE_SIZE
3203#endif
3204void __init __weak pcpu_populate_pte(unsigned long addr)
3205{
3206	pgd_t *pgd = pgd_offset_k(addr);
3207	p4d_t *p4d;
3208	pud_t *pud;
3209	pmd_t *pmd;
3210
3211	if (pgd_none(*pgd)) {
3212		p4d = memblock_alloc(P4D_TABLE_SIZE, P4D_TABLE_SIZE);
3213		if (!p4d)
3214			goto err_alloc;
3215		pgd_populate(&init_mm, pgd, p4d);
3216	}
3217
3218	p4d = p4d_offset(pgd, addr);
3219	if (p4d_none(*p4d)) {
3220		pud = memblock_alloc(PUD_TABLE_SIZE, PUD_TABLE_SIZE);
3221		if (!pud)
3222			goto err_alloc;
3223		p4d_populate(&init_mm, p4d, pud);
3224	}
3225
3226	pud = pud_offset(p4d, addr);
3227	if (pud_none(*pud)) {
3228		pmd = memblock_alloc(PMD_TABLE_SIZE, PMD_TABLE_SIZE);
3229		if (!pmd)
3230			goto err_alloc;
3231		pud_populate(&init_mm, pud, pmd);
3232	}
3233
3234	pmd = pmd_offset(pud, addr);
3235	if (!pmd_present(*pmd)) {
3236		pte_t *new;
3237
3238		new = memblock_alloc(PTE_TABLE_SIZE, PTE_TABLE_SIZE);
3239		if (!new)
3240			goto err_alloc;
3241		pmd_populate_kernel(&init_mm, pmd, new);
3242	}
3243
3244	return;
3245
3246err_alloc:
3247	panic("%s: Failed to allocate memory\n", __func__);
3248}
3249
3250/**
3251 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3252 * @reserved_size: the size of reserved percpu area in bytes
3253 * @cpu_to_nd_fn: callback to convert cpu to its node, optional
3254 *
3255 * This is a helper to ease setting up page-remapped first percpu
3256 * chunk and can be called where pcpu_setup_first_chunk() is expected.
3257 *
3258 * This is the basic allocator.  Static percpu area is allocated
3259 * page-by-page into vmalloc area.
3260 *
3261 * RETURNS:
3262 * 0 on success, -errno on failure.
3263 */
3264int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_cpu_to_node_fn_t cpu_to_nd_fn)
3265{
3266	static struct vm_struct vm;
3267	struct pcpu_alloc_info *ai;
3268	char psize_str[16];
3269	int unit_pages;
3270	size_t pages_size;
3271	struct page **pages;
3272	int unit, i, j, rc = 0;
3273	int upa;
3274	int nr_g0_units;
3275
3276	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
3277
3278	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3279	if (IS_ERR(ai))
3280		return PTR_ERR(ai);
3281	BUG_ON(ai->nr_groups != 1);
3282	upa = ai->alloc_size/ai->unit_size;
3283	nr_g0_units = roundup(num_possible_cpus(), upa);
3284	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
3285		pcpu_free_alloc_info(ai);
3286		return -EINVAL;
3287	}
3288
3289	unit_pages = ai->unit_size >> PAGE_SHIFT;
3290
3291	/* unaligned allocations can't be freed, round up to page size */
3292	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3293			       sizeof(pages[0]));
3294	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3295	if (!pages)
3296		panic("%s: Failed to allocate %zu bytes\n", __func__,
3297		      pages_size);
3298
3299	/* allocate pages */
3300	j = 0;
3301	for (unit = 0; unit < num_possible_cpus(); unit++) {
3302		unsigned int cpu = ai->groups[0].cpu_map[unit];
3303		for (i = 0; i < unit_pages; i++) {
3304			void *ptr;
3305
3306			ptr = pcpu_fc_alloc(cpu, PAGE_SIZE, PAGE_SIZE, cpu_to_nd_fn);
3307			if (!ptr) {
3308				pr_warn("failed to allocate %s page for cpu%u\n",
3309						psize_str, cpu);
3310				goto enomem;
3311			}
3312			/* kmemleak tracks the percpu allocations separately */
3313			kmemleak_ignore_phys(__pa(ptr));
3314			pages[j++] = virt_to_page(ptr);
3315		}
3316	}
3317
3318	/* allocate vm area, map the pages and copy static data */
3319	vm.flags = VM_ALLOC;
3320	vm.size = num_possible_cpus() * ai->unit_size;
3321	vm_area_register_early(&vm, PAGE_SIZE);
3322
3323	for (unit = 0; unit < num_possible_cpus(); unit++) {
3324		unsigned long unit_addr =
3325			(unsigned long)vm.addr + unit * ai->unit_size;
3326
3327		for (i = 0; i < unit_pages; i++)
3328			pcpu_populate_pte(unit_addr + (i << PAGE_SHIFT));
3329
3330		/* pte already populated, the following shouldn't fail */
3331		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3332				      unit_pages);
3333		if (rc < 0)
3334			panic("failed to map percpu area, err=%d\n", rc);
3335
3336		flush_cache_vmap_early(unit_addr, unit_addr + ai->unit_size);
3337
3338		/* copy static data */
3339		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3340	}
3341
3342	/* we're ready, commit */
3343	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
3344		unit_pages, psize_str, ai->static_size,
3345		ai->reserved_size, ai->dyn_size);
3346
3347	pcpu_setup_first_chunk(ai, vm.addr);
3348	goto out_free_ar;
3349
3350enomem:
3351	while (--j >= 0)
3352		pcpu_fc_free(page_address(pages[j]), PAGE_SIZE);
3353	rc = -ENOMEM;
3354out_free_ar:
3355	memblock_free(pages, pages_size);
3356	pcpu_free_alloc_info(ai);
3357	return rc;
3358}
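/*
 * Editor's sketch (hypothetical usage, not part of the original file):
 * an arch selecting this helper would typically invoke it from its
 * setup_per_cpu_areas(), roughly:
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, cpu_to_nd_fn);
 *	if (rc < 0)
 *		panic("Failed to initialize percpu areas.");
 *
 * with cpu_to_nd_fn being the arch's cpu-to-node callback, or NULL if
 * the NUMA placement of the backing pages does not matter.
 */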
3359#endif /* BUILD_PAGE_FIRST_CHUNK */
3360
3361#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
3362/*
3363 * Generic SMP percpu area setup.
3364 *
3365 * The embedding helper is used because its behavior closely resembles
3366 * the original non-dynamic generic percpu area setup.  This is
3367 * important because many archs have addressing restrictions and might
3368 * fail if the percpu area is located far away from the previous
3369 * location.  As an added bonus, in non-NUMA cases, embedding is
3370 * generally a good idea TLB-wise because the percpu area can piggyback
3371 * on the physical linear memory mapping which uses large page
3372 * mappings on applicable archs.
3373 */
3374unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3375EXPORT_SYMBOL(__per_cpu_offset);
3376
3377void __init setup_per_cpu_areas(void)
3378{
3379	unsigned long delta;
3380	unsigned int cpu;
3381	int rc;
3382
3383	/*
3384	 * Always reserve area for module percpu variables.  That's
3385	 * what the legacy allocator did.
3386	 */
3387	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, PERCPU_DYNAMIC_RESERVE,
3388				    PAGE_SIZE, NULL, NULL);
3389	if (rc < 0)
3390		panic("Failed to initialize percpu areas.");
3391
3392	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3393	for_each_possible_cpu(cpu)
3394		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3395}
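/*
 * Editor's note: a worked sketch of the offset math above.  For a
 * static percpu variable "var" linked into [__per_cpu_start, ...), the
 * address seen by a given cpu becomes:
 *
 *	&var + delta + pcpu_unit_offsets[cpu] == &var + __per_cpu_offset[cpu]
 *
 * which, in the generic case, is the shift per_cpu_ptr() applies.
 */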
3396#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3397
3398#else	/* CONFIG_SMP */
3399
3400/*
3401 * UP percpu area setup.
3402 *
3403 * UP always uses km-based percpu allocator with identity mapping.
3404 * Static percpu variables are indistinguishable from the usual static
3405 * variables and don't require any special preparation.
3406 */
3407void __init setup_per_cpu_areas(void)
3408{
3409	const size_t unit_size =
3410		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3411					 PERCPU_DYNAMIC_RESERVE));
3412	struct pcpu_alloc_info *ai;
3413	void *fc;
3414
3415	ai = pcpu_alloc_alloc_info(1, 1);
3416	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3417	if (!ai || !fc)
3418		panic("Failed to allocate memory for percpu areas.");
3419	/* kmemleak tracks the percpu allocations separately */
3420	kmemleak_ignore_phys(__pa(fc));
3421
3422	ai->dyn_size = unit_size;
3423	ai->unit_size = unit_size;
3424	ai->atom_size = unit_size;
3425	ai->alloc_size = unit_size;
3426	ai->groups[0].nr_units = 1;
3427	ai->groups[0].cpu_map[0] = 0;
3428
3429	pcpu_setup_first_chunk(ai, fc);
3430	pcpu_free_alloc_info(ai);
3431}
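/*
 * Editor's example (values are configuration dependent): if
 * PCPU_MIN_UNIT_SIZE were 32K and PERCPU_DYNAMIC_RESERVE 28K, unit_size
 * above would be roundup_pow_of_two(max(32K, 28K)) == 32K, i.e. the UP
 * "first chunk" is a single identity-mapped 32K block.
 */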
3432
3433#endif	/* CONFIG_SMP */
3434
3435/*
3436 * pcpu_nr_pages - calculate total number of populated backing pages
3437 *
3438 * This reflects the number of pages populated to back chunks.  Metadata is
3439 * excluded in the number exposed in meminfo as the number of backing pages
3440 * scales with the number of cpus and can quickly outweigh the memory used for
3441 * metadata.  It also keeps this calculation nice and simple.
3442 *
3443 * RETURNS:
3444 * Total number of populated backing pages in use by the allocator.
3445 */
3446unsigned long pcpu_nr_pages(void)
3447{
3448	return pcpu_nr_populated * pcpu_nr_units;
3449}
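/*
 * Editor's example: 8 possible cpus (pcpu_nr_units == 8) with 64
 * populated pages per unit yields 8 * 64 == 512 backing pages, which is
 * the figure surfaced in /proc/meminfo's "Percpu" line.
 */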
3450
3451/*
3452 * Percpu allocator is initialized early during boot when neither slab nor
3453 * workqueue is available.  Plug async management until everything is up
3454 * and running.
3455 */
3456static int __init percpu_enable_async(void)
3457{
3458	pcpu_async_enabled = true;
3459	return 0;
3460}
3461subsys_initcall(percpu_enable_async);
v5.14.15
  69#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  70
  71#include <linux/bitmap.h>
  72#include <linux/cpumask.h>
  73#include <linux/memblock.h>
  74#include <linux/err.h>
  75#include <linux/lcm.h>
  76#include <linux/list.h>
  77#include <linux/log2.h>
  78#include <linux/mm.h>
  79#include <linux/module.h>
  80#include <linux/mutex.h>
  81#include <linux/percpu.h>
  82#include <linux/pfn.h>
  83#include <linux/slab.h>
  84#include <linux/spinlock.h>
  85#include <linux/vmalloc.h>
  86#include <linux/workqueue.h>
  87#include <linux/kmemleak.h>
  88#include <linux/sched.h>
  89#include <linux/sched/mm.h>
  90#include <linux/memcontrol.h>
  91
  92#include <asm/cacheflush.h>
  93#include <asm/sections.h>
  94#include <asm/tlbflush.h>
  95#include <asm/io.h>
  96
  97#define CREATE_TRACE_POINTS
  98#include <trace/events/percpu.h>
  99
 100#include "percpu-internal.h"
 101
 102/*
 103 * The slots are sorted by the size of the biggest continuous free area.
 104 * 1-31 bytes share the same slot.
 105 */
 106#define PCPU_SLOT_BASE_SHIFT		5
 107/* chunks in slots below this are subject to being sidelined on failed alloc */
 108#define PCPU_SLOT_FAIL_THRESHOLD	3
 109
 110#define PCPU_EMPTY_POP_PAGES_LOW	2
 111#define PCPU_EMPTY_POP_PAGES_HIGH	4
 112
 113#ifdef CONFIG_SMP
 114/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 115#ifndef __addr_to_pcpu_ptr
 116#define __addr_to_pcpu_ptr(addr)					\
 117	(void __percpu *)((unsigned long)(addr) -			\
 118			  (unsigned long)pcpu_base_addr	+		\
 119			  (unsigned long)__per_cpu_start)
 120#endif
 121#ifndef __pcpu_ptr_to_addr
 122#define __pcpu_ptr_to_addr(ptr)						\
 123	(void __force *)((unsigned long)(ptr) +				\
 124			 (unsigned long)pcpu_base_addr -		\
 125			 (unsigned long)__per_cpu_start)
 126#endif
 127#else	/* CONFIG_SMP */
 128/* on UP, it's always identity mapped */
 129#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
 130#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
 131#endif	/* CONFIG_SMP */
 132
 133static int pcpu_unit_pages __ro_after_init;
 134static int pcpu_unit_size __ro_after_init;
 135static int pcpu_nr_units __ro_after_init;
 136static int pcpu_atom_size __ro_after_init;
 137int pcpu_nr_slots __ro_after_init;
 138static int pcpu_free_slot __ro_after_init;
 139int pcpu_sidelined_slot __ro_after_init;
 140int pcpu_to_depopulate_slot __ro_after_init;
 141static size_t pcpu_chunk_struct_size __ro_after_init;
 142
 143/* cpus with the lowest and highest unit addresses */
 144static unsigned int pcpu_low_unit_cpu __ro_after_init;
 145static unsigned int pcpu_high_unit_cpu __ro_after_init;
 146
 147/* the address of the first chunk which starts with the kernel static area */
 148void *pcpu_base_addr __ro_after_init;
 149EXPORT_SYMBOL_GPL(pcpu_base_addr);
 150
 151static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
 152const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */
 153
 154/* group information, used for vm allocation */
 155static int pcpu_nr_groups __ro_after_init;
 156static const unsigned long *pcpu_group_offsets __ro_after_init;
 157static const size_t *pcpu_group_sizes __ro_after_init;
 158
 159/*
 160 * The first chunk which always exists.  Note that unlike other
 161 * chunks, this one can be allocated and mapped in several different
 162 * ways and thus often doesn't live in the vmalloc area.
 163 */
 164struct pcpu_chunk *pcpu_first_chunk __ro_after_init;
 165
 166/*
 167 * Optional reserved chunk.  This chunk reserves part of the first
 168 * chunk and serves it for reserved allocations.  When the reserved
 169 * region doesn't exist, the following variable is NULL.
 170 */
 171struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;
 172
 173DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
 174static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */
 175
 176struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */
 177
 178/* chunks which need their map areas extended, protected by pcpu_lock */
 179static LIST_HEAD(pcpu_map_extend_chunks);
 180
 181/*
 182 * The number of empty populated pages, protected by pcpu_lock.
 183 * The reserved chunk doesn't contribute to the count.
 184 */
 185int pcpu_nr_empty_pop_pages;
 186
 187/*
 188 * The number of populated pages in use by the allocator, protected by
 189 * pcpu_lock.  This number is kept per a unit per chunk (i.e. when a page gets
 190 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 191 * and increments/decrements this count by 1).
 192 */
 193static unsigned long pcpu_nr_populated;
 194
 195/*
 196 * Balance work is used to populate or destroy chunks asynchronously.  We
 197 * try to keep the number of populated free pages between
 198 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 199 * empty chunk.
 200 */
 201static void pcpu_balance_workfn(struct work_struct *work);
 202static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
 203static bool pcpu_async_enabled __read_mostly;
 204static bool pcpu_atomic_alloc_failed;
 205
 206static void pcpu_schedule_balance_work(void)
 207{
 208	if (pcpu_async_enabled)
 209		schedule_work(&pcpu_balance_work);
 210}
 211
 212/**
 213 * pcpu_addr_in_chunk - check if the address is served from this chunk
 214 * @chunk: chunk of interest
 215 * @addr: percpu address
 216 *
 217 * RETURNS:
 218 * True if the address is served from this chunk.
 219 */
 220static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
 221{
 222	void *start_addr, *end_addr;
 223
 224	if (!chunk)
 225		return false;
 226
 227	start_addr = chunk->base_addr + chunk->start_offset;
 228	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
 229		   chunk->end_offset;
 230
 231	return addr >= start_addr && addr < end_addr;
 232}
 233
 234static int __pcpu_size_to_slot(int size)
 235{
 236	int highbit = fls(size);	/* size is in bytes */
 237	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 238}
 239
 240static int pcpu_size_to_slot(int size)
 241{
 242	if (size == pcpu_unit_size)
 243		return pcpu_free_slot;
 244	return __pcpu_size_to_slot(size);
 245}
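/*
 * Editor's worked example: a chunk whose largest contiguous free area
 * is 1024 bytes has fls(1024) == 11, so it lands in slot
 * max(11 - PCPU_SLOT_BASE_SHIFT + 2, 1) == 8.  A fully free chunk
 * (size == pcpu_unit_size) goes to the dedicated pcpu_free_slot instead.
 */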
 246
 247static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 248{
 249	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 250
 251	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
 252	    chunk_md->contig_hint == 0)
 253		return 0;
 254
 255	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
 256}
 257
 258/* set the pointer to a chunk in a page struct */
 259static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 260{
 261	page->index = (unsigned long)pcpu;
 262}
 263
 264/* obtain pointer to a chunk from a page struct */
 265static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 266{
 267	return (struct pcpu_chunk *)page->index;
 268}
 269
 270static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 271{
 272	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 273}
 274
 275static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
 276{
 277	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
 278}
 279
 280static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 281				     unsigned int cpu, int page_idx)
 282{
 283	return (unsigned long)chunk->base_addr +
 284	       pcpu_unit_page_offset(cpu, page_idx);
 285}
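/*
 * Editor's example, assuming 4K pages: a cpu whose unit starts at
 * offset 0x20000 within the chunk has page_idx 3 at
 * base_addr + 0x20000 + (3 << 12) == base_addr + 0x23000.
 */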
 286
 287/*
 288 * The following are helper functions to help access bitmaps and convert
 289 * between bitmap offsets to address offsets.
 290 */
 291static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
 292{
 293	return chunk->alloc_map +
 294	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
 295}
 296
 297static unsigned long pcpu_off_to_block_index(int off)
 298{
 299	return off / PCPU_BITMAP_BLOCK_BITS;
 300}
 301
 302static unsigned long pcpu_off_to_block_off(int off)
 303{
 304	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
 305}
 306
 307static unsigned long pcpu_block_off_to_off(int index, int off)
 308{
 309	return index * PCPU_BITMAP_BLOCK_BITS + off;
 310}
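/*
 * Editor's example of the conversions, assuming 4K pages:
 * PCPU_BITMAP_BLOCK_BITS == PAGE_SIZE / PCPU_MIN_ALLOC_SIZE == 1024, so
 * chunk offset 2500 maps to block index 2500 / 1024 == 2 and block
 * offset 2500 & 1023 == 452; pcpu_block_off_to_off(2, 452) == 2500.
 */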
 311
 312/**
 313 * pcpu_check_block_hint - check against the contig hint
 314 * @block: block of interest
 315 * @bits: size of allocation
 316 * @align: alignment of area (max PAGE_SIZE)
 317 *
 318 * Check to see if the allocation can fit in the block's contig hint.
 319 * Note, a chunk uses the same hints as a block so this can also check against
 320 * the chunk's contig hint.
 321 */
 322static bool pcpu_check_block_hint(struct pcpu_block_md *block, int bits,
 323				  size_t align)
 324{
 325	int bit_off = ALIGN(block->contig_hint_start, align) -
 326		block->contig_hint_start;
 327
 328	return bit_off + bits <= block->contig_hint;
 329}
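/*
 * Editor's worked example: with contig_hint_start == 6, align == 4 and
 * contig_hint == 10, aligning costs bit_off == ALIGN(6, 4) - 6 == 2
 * bits, so a request of up to 8 bits still fits (2 + 8 <= 10).
 */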
 330
 331/*
 332 * pcpu_next_hint - determine which hint to use
 333 * @block: block of interest
 334 * @alloc_bits: size of allocation
 335 *
 336 * This determines if we should scan based on the scan_hint or first_free.
 337 * In general, we want to scan from first_free to fulfill allocations by
 338 * first fit.  However, if we know a scan_hint at position scan_hint_start
 339 * cannot fulfill an allocation, we can begin scanning from there knowing
 340 * the contig_hint will be our fallback.
 341 */
 342static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
 343{
 344	/*
 345	 * The three conditions below determine if we can skip past the
 346	 * scan_hint.  First, does the scan hint exist.  Second, is the
 347	 * contig_hint after the scan_hint (possibly not true iff
 348	 * contig_hint == scan_hint).  Third, is the allocation request
 349	 * larger than the scan_hint.
 350	 */
 351	if (block->scan_hint &&
 352	    block->contig_hint_start > block->scan_hint_start &&
 353	    alloc_bits > block->scan_hint)
 354		return block->scan_hint_start + block->scan_hint;
 355
 356	return block->first_free;
 357}
 358
 359/**
 360 * pcpu_next_md_free_region - finds the next hint free area
 361 * @chunk: chunk of interest
 362 * @bit_off: chunk offset
 363 * @bits: size of free area
 364 *
 365 * Helper function for pcpu_for_each_md_free_region.  It checks
 366 * block->contig_hint and performs aggregation across blocks to find the
 367 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 368 * loop.
 369 */
 370static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
 371				     int *bits)
 372{
 373	int i = pcpu_off_to_block_index(*bit_off);
 374	int block_off = pcpu_off_to_block_off(*bit_off);
 375	struct pcpu_block_md *block;
 376
 377	*bits = 0;
 378	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
 379	     block++, i++) {
 380		/* handles contig area across blocks */
 381		if (*bits) {
 382			*bits += block->left_free;
 383			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
 384				continue;
 385			return;
 386		}
 387
 388		/*
 389		 * This checks three things.  First is there a contig_hint to
 390		 * check.  Second, have we checked this hint before by
 391		 * comparing the block_off.  Third, is this the same as the
 392		 * right contig hint.  In the last case, it spills over into
 393		 * the next block and should be handled by the contig area
 394		 * across blocks code.
 395		 */
 396		*bits = block->contig_hint;
 397		if (*bits && block->contig_hint_start >= block_off &&
 398		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
 399			*bit_off = pcpu_block_off_to_off(i,
 400					block->contig_hint_start);
 401			return;
 402		}
 403		/* reset to satisfy the second predicate above */
 404		block_off = 0;
 405
 406		*bits = block->right_free;
 407		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
 408	}
 409}
 410
 411/**
 412 * pcpu_next_fit_region - finds fit areas for a given allocation request
 413 * @chunk: chunk of interest
 414 * @alloc_bits: size of allocation
 415 * @align: alignment of area (max PAGE_SIZE)
 416 * @bit_off: chunk offset
 417 * @bits: size of free area
 418 *
 419 * Finds the next free region that is viable for use with a given size and
 420 * alignment.  This only returns if there is a valid area to be used for this
 421 * allocation.  block->first_free is returned if the allocation request fits
 422 * within the block to see if the request can be fulfilled prior to the contig
 423 * hint.
 424 */
 425static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
 426				 int align, int *bit_off, int *bits)
 427{
 428	int i = pcpu_off_to_block_index(*bit_off);
 429	int block_off = pcpu_off_to_block_off(*bit_off);
 430	struct pcpu_block_md *block;
 431
 432	*bits = 0;
 433	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
 434	     block++, i++) {
 435		/* handles contig area across blocks */
 436		if (*bits) {
 437			*bits += block->left_free;
 438			if (*bits >= alloc_bits)
 439				return;
 440			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
 441				continue;
 442		}
 443
 444		/* check block->contig_hint */
 445		*bits = ALIGN(block->contig_hint_start, align) -
 446			block->contig_hint_start;
 447		/*
 448		 * This uses the block offset to determine if this has been
 449		 * checked in the prior iteration.
 450		 */
 451		if (block->contig_hint &&
 452		    block->contig_hint_start >= block_off &&
 453		    block->contig_hint >= *bits + alloc_bits) {
 454			int start = pcpu_next_hint(block, alloc_bits);
 455
 456			*bits += alloc_bits + block->contig_hint_start -
 457				 start;
 458			*bit_off = pcpu_block_off_to_off(i, start);
 459			return;
 460		}
 461		/* reset to satisfy the second predicate above */
 462		block_off = 0;
 463
 464		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
 465				 align);
 466		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
 467		*bit_off = pcpu_block_off_to_off(i, *bit_off);
 468		if (*bits >= alloc_bits)
 469			return;
 470	}
 471
 472	/* no valid offsets were found - fail condition */
 473	*bit_off = pcpu_chunk_map_bits(chunk);
 474}
 475
 476/*
 477 * Metadata free area iterators.  These perform aggregation of free areas
 478 * based on the metadata blocks and return the offset @bit_off and size in
 479 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 480 * a fit is found for the allocation request.
 481 */
 482#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
 483	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
 484	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
 485	     (bit_off) += (bits) + 1,					\
 486	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))
 487
 488#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
 489	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
 490				  &(bits));				      \
 491	     (bit_off) < pcpu_chunk_map_bits((chunk));			      \
 492	     (bit_off) += (bits),					      \
 493	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
 494				  &(bits)))
 495
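/*
 * Editor's sketch of how these iterators are consumed (the same pattern
 * pcpu_chunk_refresh_hint() uses further below):
 *
 *	int bit_off = chunk->chunk_md.first_free, bits = 0;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		pcpu_block_update(&chunk->chunk_md, bit_off, bit_off + bits);
 */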
 496/**
 497 * pcpu_mem_zalloc - allocate memory
 498 * @size: bytes to allocate
 499 * @gfp: allocation flags
 500 *
 501 * Allocate @size bytes.  If @size is no larger than PAGE_SIZE,
 502 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 503 * This is to facilitate passing through whitelisted flags.  The
 504 * returned memory is always zeroed.
 505 *
 506 * RETURNS:
 507 * Pointer to the allocated area on success, NULL on failure.
 508 */
 509static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
 510{
 511	if (WARN_ON_ONCE(!slab_is_available()))
 512		return NULL;
 513
 514	if (size <= PAGE_SIZE)
 515		return kzalloc(size, gfp);
 516	else
 517		return __vmalloc(size, gfp | __GFP_ZERO);
 518}
 519
 520/**
 521 * pcpu_mem_free - free memory
 522 * @ptr: memory to free
 523 *
 524 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 525 */
 526static void pcpu_mem_free(void *ptr)
 527{
 528	kvfree(ptr);
 529}
 530
 531static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
 532			      bool move_front)
 533{
 534	if (chunk != pcpu_reserved_chunk) {
 535		if (move_front)
 536			list_move(&chunk->list, &pcpu_chunk_lists[slot]);
 537		else
 538			list_move_tail(&chunk->list, &pcpu_chunk_lists[slot]);
 539	}
 540}
 541
 542static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
 543{
 544	__pcpu_chunk_move(chunk, slot, true);
 545}
 546
 547/**
 548 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 549 * @chunk: chunk of interest
 550 * @oslot: the previous slot it was on
 551 *
 552 * This function is called after an allocation or free changed @chunk.
 553 * New slot according to the changed state is determined and @chunk is
 554 * moved to the slot.  Note that the reserved chunk is never put on
 555 * chunk slots.
 556 *
 557 * CONTEXT:
 558 * pcpu_lock.
 559 */
 560static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 561{
 562	int nslot = pcpu_chunk_slot(chunk);
 563
 564	/* leave isolated chunks in-place */
 565	if (chunk->isolated)
 566		return;
 567
 568	if (oslot != nslot)
 569		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
 570}
 571
 572static void pcpu_isolate_chunk(struct pcpu_chunk *chunk)
 573{
 574	lockdep_assert_held(&pcpu_lock);
 575
 576	if (!chunk->isolated) {
 577		chunk->isolated = true;
 578		pcpu_nr_empty_pop_pages -= chunk->nr_empty_pop_pages;
 579	}
 580	list_move(&chunk->list, &pcpu_chunk_lists[pcpu_to_depopulate_slot]);
 581}
 582
 583static void pcpu_reintegrate_chunk(struct pcpu_chunk *chunk)
 584{
 585	lockdep_assert_held(&pcpu_lock);
 586
 587	if (chunk->isolated) {
 588		chunk->isolated = false;
 589		pcpu_nr_empty_pop_pages += chunk->nr_empty_pop_pages;
 590		pcpu_chunk_relocate(chunk, -1);
 591	}
 592}
 593
 594/*
 595 * pcpu_update_empty_pages - update empty page counters
 596 * @chunk: chunk of interest
 597 * @nr: nr of empty pages
 598 *
 599 * This is used to keep track of empty populated pages based on the premise
 600 * that an md_block covers a page.  The hint update functions recognize when a
 601 * block becomes full or is broken up and compute deltas to track free pages.
 602 */
 603static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
 604{
 605	chunk->nr_empty_pop_pages += nr;
 606	if (chunk != pcpu_reserved_chunk && !chunk->isolated)
 607		pcpu_nr_empty_pop_pages += nr;
 608}
 609
 610/*
 611 * pcpu_region_overlap - determines if two regions overlap
 612 * @a: start of first region, inclusive
 613 * @b: end of first region, exclusive
 614 * @x: start of second region, inclusive
 615 * @y: end of second region, exclusive
 616 *
 617 * This is used to determine if the hint region [a, b) overlaps with the
 618 * allocated region [x, y).
 619 */
 620static inline bool pcpu_region_overlap(int a, int b, int x, int y)
 621{
 622	return (a < y) && (x < b);
 623}
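/*
 * Editor's example: the half-open check means [4, 10) and [8, 12)
 * overlap (4 < 12 && 8 < 10), while the merely adjacent [4, 8) and
 * [8, 12) do not (8 < 8 is false).
 */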
 624
 625/**
 626 * pcpu_block_update - updates a block given a free area
 627 * @block: block of interest
 628 * @start: start offset in block
 629 * @end: end offset in block
 630 *
 631 * Updates a block given a known free area.  The region [start, end) is
 632 * expected to be the entirety of the free area within a block.  Chooses
 633 * the best starting offset if the contig hints are equal.
 634 */
 635static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
 636{
 637	int contig = end - start;
 638
 639	block->first_free = min(block->first_free, start);
 640	if (start == 0)
 641		block->left_free = contig;
 642
 643	if (end == block->nr_bits)
 644		block->right_free = contig;
 645
 646	if (contig > block->contig_hint) {
 647		/* promote the old contig_hint to be the new scan_hint */
 648		if (start > block->contig_hint_start) {
 649			if (block->contig_hint > block->scan_hint) {
 650				block->scan_hint_start =
 651					block->contig_hint_start;
 652				block->scan_hint = block->contig_hint;
 653			} else if (start < block->scan_hint_start) {
 654				/*
 655				 * The old contig_hint == scan_hint.  But, the
 656				 * new contig is larger so hold the invariant
 657				 * scan_hint_start < contig_hint_start.
 658				 */
 659				block->scan_hint = 0;
 660			}
 661		} else {
 662			block->scan_hint = 0;
 663		}
 664		block->contig_hint_start = start;
 665		block->contig_hint = contig;
 666	} else if (contig == block->contig_hint) {
 667		if (block->contig_hint_start &&
 668		    (!start ||
 669		     __ffs(start) > __ffs(block->contig_hint_start))) {
 670			/* start has a better alignment so use it */
 671			block->contig_hint_start = start;
 672			if (start < block->scan_hint_start &&
 673			    block->contig_hint > block->scan_hint)
 674				block->scan_hint = 0;
 675		} else if (start > block->scan_hint_start ||
 676			   block->contig_hint > block->scan_hint) {
 677			/*
 678			 * Knowing contig == contig_hint, update the scan_hint
 679			 * if it is farther than or larger than the current
 680			 * scan_hint.
 681			 */
 682			block->scan_hint_start = start;
 683			block->scan_hint = contig;
 684		}
 685	} else {
 686		/*
 687		 * The region is smaller than the contig_hint.  So only update
 688		 * the scan_hint if it is larger than or equal and farther than
 689		 * the current scan_hint.
 690		 */
 691		if ((start < block->contig_hint_start &&
 692		     (contig > block->scan_hint ||
 693		      (contig == block->scan_hint &&
 694		       start > block->scan_hint_start)))) {
 695			block->scan_hint_start = start;
 696			block->scan_hint = contig;
 697		}
 698	}
 699}
 700
 701/*
 702 * pcpu_block_update_scan - update a block given a free area from a scan
 703 * @chunk: chunk of interest
 704 * @bit_off: chunk offset
 705 * @bits: size of free area
 706 *
 707 * Finding the final allocation spot first goes through pcpu_find_block_fit()
 708 * to find a block that can hold the allocation and then pcpu_alloc_area()
 709 * where a scan is used.  When allocations require specific alignments,
 710 * we can inadvertently create holes which will not be seen in the alloc
 711 * or free paths.
 712 *
 713 * This takes a given free area hole and updates a block as it may change the
 714 * scan_hint.  We need to scan backwards to ensure we don't miss free bits
 715 * from alignment.
 716 */
 717static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
 718				   int bits)
 719{
 720	int s_off = pcpu_off_to_block_off(bit_off);
 721	int e_off = s_off + bits;
 722	int s_index, l_bit;
 723	struct pcpu_block_md *block;
 724
 725	if (e_off > PCPU_BITMAP_BLOCK_BITS)
 726		return;
 727
 728	s_index = pcpu_off_to_block_index(bit_off);
 729	block = chunk->md_blocks + s_index;
 730
 731	/* scan backwards in case of alignment skipping free bits */
 732	l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
 733	s_off = (s_off == l_bit) ? 0 : l_bit + 1;
 734
 735	pcpu_block_update(block, s_off, e_off);
 736}
 737
 738/**
 739 * pcpu_chunk_refresh_hint - updates metadata about a chunk
 740 * @chunk: chunk of interest
 741 * @full_scan: if we should scan from the beginning
 742 *
 743 * Iterates over the metadata blocks to find the largest contig area.
 744 * A full scan can be avoided on the allocation path as this is triggered
 745 * if we broke the contig_hint.  In doing so, the scan_hint will be before
 746 * the contig_hint or after if the scan_hint == contig_hint.  This cannot
 747 * be prevented on freeing as we want to find the largest area possibly
 748 * spanning blocks.
 749 */
 750static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
 751{
 752	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 753	int bit_off, bits;
 754
 755	/* promote scan_hint to contig_hint */
 756	if (!full_scan && chunk_md->scan_hint) {
 757		bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
 758		chunk_md->contig_hint_start = chunk_md->scan_hint_start;
 759		chunk_md->contig_hint = chunk_md->scan_hint;
 760		chunk_md->scan_hint = 0;
 761	} else {
 762		bit_off = chunk_md->first_free;
 763		chunk_md->contig_hint = 0;
 764	}
 765
 766	bits = 0;
 767	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 768		pcpu_block_update(chunk_md, bit_off, bit_off + bits);
 769}
 770
 771/**
 772 * pcpu_block_refresh_hint
 773 * @chunk: chunk of interest
 774 * @index: index of the metadata block
 775 *
 776 * Scans over the block beginning at first_free and updates the block
 777 * metadata accordingly.
 778 */
 779static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
 780{
 781	struct pcpu_block_md *block = chunk->md_blocks + index;
 782	unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
 783	unsigned int rs, re, start;	/* region start, region end */
 784
 785	/* promote scan_hint to contig_hint */
 786	if (block->scan_hint) {
 787		start = block->scan_hint_start + block->scan_hint;
 788		block->contig_hint_start = block->scan_hint_start;
 789		block->contig_hint = block->scan_hint;
 790		block->scan_hint = 0;
 791	} else {
 792		start = block->first_free;
 793		block->contig_hint = 0;
 794	}
 795
 796	block->right_free = 0;
 797
 798	/* iterate over free areas and update the contig hints */
 799	bitmap_for_each_clear_region(alloc_map, rs, re, start,
 800				     PCPU_BITMAP_BLOCK_BITS)
 801		pcpu_block_update(block, rs, re);
 802}
 803
 804/**
 805 * pcpu_block_update_hint_alloc - update hint on allocation path
 806 * @chunk: chunk of interest
 807 * @bit_off: chunk offset
 808 * @bits: size of request
 809 *
 810 * Updates metadata for the allocation path.  The metadata only has to be
 811 * refreshed by a full scan iff the chunk's contig hint is broken.  Block level
 812 * scans are required if the block's contig hint is broken.
 813 */
 814static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
 815					 int bits)
 816{
 817	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
 818	int nr_empty_pages = 0;
 819	struct pcpu_block_md *s_block, *e_block, *block;
 820	int s_index, e_index;	/* block indexes of the freed allocation */
 821	int s_off, e_off;	/* block offsets of the freed allocation */
 822
 823	/*
 824	 * Calculate per block offsets.
 825	 * The calculation uses an inclusive range, but the resulting offsets
 826	 * are [start, end).  e_index always points to the last block in the
 827	 * range.
 828	 */
 829	s_index = pcpu_off_to_block_index(bit_off);
 830	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
 831	s_off = pcpu_off_to_block_off(bit_off);
 832	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
 833
 834	s_block = chunk->md_blocks + s_index;
 835	e_block = chunk->md_blocks + e_index;
 836
 837	/*
 838	 * Update s_block.
 839	 * block->first_free must be updated if the allocation takes its place.
 840	 * If the allocation breaks the contig_hint, a scan is required to
 841	 * restore this hint.
 842	 */
 843	if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
 844		nr_empty_pages++;
 845
 846	if (s_off == s_block->first_free)
 847		s_block->first_free = find_next_zero_bit(
 848					pcpu_index_alloc_map(chunk, s_index),
 849					PCPU_BITMAP_BLOCK_BITS,
 850					s_off + bits);
 851
 852	if (pcpu_region_overlap(s_block->scan_hint_start,
 853				s_block->scan_hint_start + s_block->scan_hint,
 854				s_off,
 855				s_off + bits))
 856		s_block->scan_hint = 0;
 857
 858	if (pcpu_region_overlap(s_block->contig_hint_start,
 859				s_block->contig_hint_start +
 860				s_block->contig_hint,
 861				s_off,
 862				s_off + bits)) {
 863		/* block contig hint is broken - scan to fix it */
 864		if (!s_off)
 865			s_block->left_free = 0;
 866		pcpu_block_refresh_hint(chunk, s_index);
 867	} else {
 868		/* update left and right contig manually */
 869		s_block->left_free = min(s_block->left_free, s_off);
 870		if (s_index == e_index)
 871			s_block->right_free = min_t(int, s_block->right_free,
 872					PCPU_BITMAP_BLOCK_BITS - e_off);
 873		else
 874			s_block->right_free = 0;
 875	}
 876
 877	/*
 878	 * Update e_block.
 879	 */
 880	if (s_index != e_index) {
 881		if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
 882			nr_empty_pages++;
 883
 884		/*
 885		 * When the allocation is across blocks, the end is along
 886		 * the left part of the e_block.
 887		 */
 888		e_block->first_free = find_next_zero_bit(
 889				pcpu_index_alloc_map(chunk, e_index),
 890				PCPU_BITMAP_BLOCK_BITS, e_off);
 891
 892		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
 893			/* reset the block */
 894			e_block++;
 895		} else {
 896			if (e_off > e_block->scan_hint_start)
 897				e_block->scan_hint = 0;
 898
 899			e_block->left_free = 0;
 900			if (e_off > e_block->contig_hint_start) {
 901				/* contig hint is broken - scan to fix it */
 902				pcpu_block_refresh_hint(chunk, e_index);
 903			} else {
 904				e_block->right_free =
 905					min_t(int, e_block->right_free,
 906					      PCPU_BITMAP_BLOCK_BITS - e_off);
 907			}
 908		}
 909
 910		/* update in-between md_blocks */
 911		nr_empty_pages += (e_index - s_index - 1);
 912		for (block = s_block + 1; block < e_block; block++) {
 913			block->scan_hint = 0;
 914			block->contig_hint = 0;
 915			block->left_free = 0;
 916			block->right_free = 0;
 917		}
 918	}
 919
 920	if (nr_empty_pages)
 921		pcpu_update_empty_pages(chunk, -nr_empty_pages);
 922
 923	if (pcpu_region_overlap(chunk_md->scan_hint_start,
 924				chunk_md->scan_hint_start +
 925				chunk_md->scan_hint,
 926				bit_off,
 927				bit_off + bits))
 928		chunk_md->scan_hint = 0;
 929
 930	/*
 931	 * The only time a full chunk scan is required is if the chunk
 932	 * contig hint is broken.  Otherwise, it means a smaller space
 933	 * was used and therefore the chunk contig hint is still correct.
 934	 */
 935	if (pcpu_region_overlap(chunk_md->contig_hint_start,
 936				chunk_md->contig_hint_start +
 937				chunk_md->contig_hint,
 938				bit_off,
 939				bit_off + bits))
 940		pcpu_chunk_refresh_hint(chunk, false);
 941}
 942
 943/**
 944 * pcpu_block_update_hint_free - updates the block hints on the free path
 945 * @chunk: chunk of interest
 946 * @bit_off: chunk offset
 947 * @bits: size of request
 948 *
 949 * Updates metadata for the free path.  This avoids a blind block
 950 * refresh by making use of the block contig hints.  If this fails, it scans
 951 * forward and backward to determine the extent of the free area.  This is
 952 * capped at the boundary of blocks.
 953 *
 954 * A chunk update is triggered if a page becomes free, a block becomes free,
 955 * or the free spans across blocks.  This tradeoff is to minimize iterating
 956 * over the block metadata to update chunk_md->contig_hint.
 957 * chunk_md->contig_hint may be off by up to a page, but it will never be more
 958 * than the available space.  If the contig hint is contained in one block, it
 959 * will be accurate.
 960 */
 961static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
 962					int bits)
 963{
 964	int nr_empty_pages = 0;
 965	struct pcpu_block_md *s_block, *e_block, *block;
 966	int s_index, e_index;	/* block indexes of the freed allocation */
 967	int s_off, e_off;	/* block offsets of the freed allocation */
 968	int start, end;		/* start and end of the whole free area */
 969
 970	/*
 971	 * Calculate per block offsets.
 972	 * The calculation uses an inclusive range, but the resulting offsets
 973	 * are [start, end).  e_index always points to the last block in the
 974	 * range.
 975	 */
 976	s_index = pcpu_off_to_block_index(bit_off);
 977	e_index = pcpu_off_to_block_index(bit_off + bits - 1);
 978	s_off = pcpu_off_to_block_off(bit_off);
 979	e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
 980
 981	s_block = chunk->md_blocks + s_index;
 982	e_block = chunk->md_blocks + e_index;
 983
 984	/*
 985	 * Check if the freed area aligns with the block->contig_hint.
 986	 * If it does, then the scan to find the beginning/end of the
 987	 * larger free area can be avoided.
 988	 *
 989	 * start and end refer to beginning and end of the free area
 990	 * within each their respective blocks.  This is not necessarily
 991	 * the entire free area as it may span blocks past the beginning
 992	 * or end of the block.
 993	 */
 994	start = s_off;
 995	if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
 996		start = s_block->contig_hint_start;
 997	} else {
 998		/*
 999		 * Scan backwards to find the extent of the free area.
1000		 * find_last_bit returns the starting bit, so if the start bit
1001		 * is returned, that means there was no last bit and the
1002		 * remainder of the chunk is free.
1003		 */
1004		int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
1005					  start);
1006		start = (start == l_bit) ? 0 : l_bit + 1;
1007	}
1008
1009	end = e_off;
1010	if (e_off == e_block->contig_hint_start)
1011		end = e_block->contig_hint_start + e_block->contig_hint;
1012	else
1013		end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
1014				    PCPU_BITMAP_BLOCK_BITS, end);
1015
1016	/* update s_block */
1017	e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
1018	if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
1019		nr_empty_pages++;
1020	pcpu_block_update(s_block, start, e_off);
1021
1022	/* the free spans multiple blocks */
1023	if (s_index != e_index) {
1024		/* update e_block */
1025		if (end == PCPU_BITMAP_BLOCK_BITS)
1026			nr_empty_pages++;
1027		pcpu_block_update(e_block, 0, end);
1028
1029		/* reset md_blocks in the middle */
1030		nr_empty_pages += (e_index - s_index - 1);
1031		for (block = s_block + 1; block < e_block; block++) {
1032			block->first_free = 0;
1033			block->scan_hint = 0;
1034			block->contig_hint_start = 0;
1035			block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
1036			block->left_free = PCPU_BITMAP_BLOCK_BITS;
1037			block->right_free = PCPU_BITMAP_BLOCK_BITS;
1038		}
1039	}
1040
1041	if (nr_empty_pages)
1042		pcpu_update_empty_pages(chunk, nr_empty_pages);
1043
1044	/*
1045	 * Refresh chunk metadata when the free makes a block free or spans
1046	 * across blocks.  The contig_hint may be off by up to a page, but if
1047	 * the contig_hint is contained in a block, it will be accurate with
1048	 * the else condition below.
1049	 */
1050	if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
1051		pcpu_chunk_refresh_hint(chunk, true);
1052	else
1053		pcpu_block_update(&chunk->chunk_md,
1054				  pcpu_block_off_to_off(s_index, start),
1055				  end);
1056}
1057
1058/**
1059 * pcpu_is_populated - determines if the region is populated
1060 * @chunk: chunk of interest
1061 * @bit_off: chunk offset
1062 * @bits: size of area
1063 * @next_off: return value for the next offset to start searching
1064 *
1065 * For atomic allocations, check if the backing pages are populated.
1066 *
1067 * RETURNS:
1068 * true if the backing pages are populated, false otherwise.
1069 * @next_off is set to skip over unpopulated blocks in pcpu_find_block_fit().
1070 */
1071static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
1072			      int *next_off)
1073{
1074	unsigned int page_start, page_end, rs, re;
1075
1076	page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
1077	page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
1078
1079	rs = page_start;
1080	bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
1081	if (rs >= page_end)
1082		return true;
1083
1084	*next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
1085	return false;
1086}
1087
1088/**
1089 * pcpu_find_block_fit - finds the block index to start searching
1090 * @chunk: chunk of interest
1091 * @alloc_bits: size of request in allocation units
1092 * @align: alignment of area (max PAGE_SIZE bytes)
1093 * @pop_only: use populated regions only
1094 *
1095 * Given a chunk and an allocation spec, find the offset to begin searching
1096 * for a free region.  This iterates over the bitmap metadata blocks to
1097 * find an offset that will be guaranteed to fit the requirements.  It is
1098 * not quite first fit: if the allocation does not fit in the contig hint
1099 * of a block or chunk, that block or chunk is skipped.  This errs on the side of caution
1100 * to prevent excess iteration.  Poor alignment can cause the allocator to
1101 * skip over blocks and chunks that have valid free areas.
1102 *
1103 * RETURNS:
1104 * The offset in the bitmap to begin searching.
1105 * -1 if no offset is found.
1106 */
1107static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
1108			       size_t align, bool pop_only)
1109{
1110	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1111	int bit_off, bits, next_off;
1112
1113	/*
1114	 * This is an optimization to prevent scanning by assuming if the
1115	 * allocation cannot fit in the global hint, there is memory pressure
1116	 * and creating a new chunk would happen soon.
1117	 */
1118	if (!pcpu_check_block_hint(chunk_md, alloc_bits, align))
1119		return -1;
1120
1121	bit_off = pcpu_next_hint(chunk_md, alloc_bits);
1122	bits = 0;
1123	pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
1124		if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
1125						   &next_off))
1126			break;
1127
1128		bit_off = next_off;
1129		bits = 0;
1130	}
1131
1132	if (bit_off == pcpu_chunk_map_bits(chunk))
1133		return -1;
1134
1135	return bit_off;
1136}
1137
1138/*
1139 * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
1140 * @map: the address to base the search on
1141 * @size: the bitmap size in bits
1142 * @start: the bitnumber to start searching at
1143 * @nr: the number of zeroed bits we're looking for
1144 * @align_mask: alignment mask for zero area
1145 * @largest_off: offset of the largest area skipped
1146 * @largest_bits: size of the largest area skipped
1147 *
1148 * The @align_mask should be one less than a power of 2.
1149 *
1150 * This is a modified version of bitmap_find_next_zero_area_off() to remember
1151 * the largest area that was skipped.  This is imperfect, but in general is
1152 * good enough.  The largest remembered region is the largest failed region
1153 * seen.  This does not include anything we possibly skipped due to alignment.
1154 * pcpu_block_update_scan() does scan backwards to try and recover what was
1155 * lost to alignment.  While this can cause scanning to miss earlier possible
1156 * free areas, smaller allocations will eventually fill those holes.
1157 */
1158static unsigned long pcpu_find_zero_area(unsigned long *map,
1159					 unsigned long size,
1160					 unsigned long start,
1161					 unsigned long nr,
1162					 unsigned long align_mask,
1163					 unsigned long *largest_off,
1164					 unsigned long *largest_bits)
1165{
1166	unsigned long index, end, i, area_off, area_bits;
1167again:
1168	index = find_next_zero_bit(map, size, start);
1169
1170	/* Align allocation */
1171	index = __ALIGN_MASK(index, align_mask);
1172	area_off = index;
1173
1174	end = index + nr;
1175	if (end > size)
1176		return end;
1177	i = find_next_bit(map, end, index);
1178	if (i < end) {
1179		area_bits = i - area_off;
1180		/* remember largest unused area with best alignment */
1181		if (area_bits > *largest_bits ||
1182		    (area_bits == *largest_bits && *largest_off &&
1183		     (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
1184			*largest_off = area_off;
1185			*largest_bits = area_bits;
1186		}
1187
1188		start = i + 1;
1189		goto again;
1190	}
1191	return index;
1192}
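/*
 * Editor's walk-through: for map == 0b11001 (bits 0, 3 and 4 set),
 * size == 12, start == 0, nr == 3 and align_mask == 0, the first zero
 * run starts at bit 1 but spans only 2 bits, so it is remembered as
 * *largest_off == 1 / *largest_bits == 2 and the search restarts at
 * bit 4; the clear run beginning at bit 5 fits and 5 is returned.
 */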
1193
1194/**
1195 * pcpu_alloc_area - allocates an area from a pcpu_chunk
1196 * @chunk: chunk of interest
1197 * @alloc_bits: size of request in allocation units
1198 * @align: alignment of area (max PAGE_SIZE)
1199 * @start: bit_off to start searching
1200 *
1201 * This function takes in a @start offset to begin searching to fit an
1202 * allocation of @alloc_bits with alignment @align.  It needs to scan
1203 * the allocation map because if it fits within the block's contig hint,
1204 * @start will be block->first_free. This is an attempt to fill the
1205 * allocation prior to breaking the contig hint.  The allocation and
1206 * boundary maps are updated accordingly if it confirms a valid
1207 * free area.
1208 *
1209 * RETURNS:
1210 * Allocated addr offset in @chunk on success.
1211 * -1 if no matching area is found.
1212 */
1213static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
1214			   size_t align, int start)
1215{
1216	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1217	size_t align_mask = (align) ? (align - 1) : 0;
1218	unsigned long area_off = 0, area_bits = 0;
1219	int bit_off, end, oslot;
1220
1221	lockdep_assert_held(&pcpu_lock);
1222
1223	oslot = pcpu_chunk_slot(chunk);
1224
1225	/*
1226	 * Search to find a fit.
1227	 */
1228	end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
1229		    pcpu_chunk_map_bits(chunk));
1230	bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
1231				      align_mask, &area_off, &area_bits);
1232	if (bit_off >= end)
1233		return -1;
1234
1235	if (area_bits)
1236		pcpu_block_update_scan(chunk, area_off, area_bits);
1237
1238	/* update alloc map */
1239	bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
1240
1241	/* update boundary map */
1242	set_bit(bit_off, chunk->bound_map);
1243	bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
1244	set_bit(bit_off + alloc_bits, chunk->bound_map);
1245
1246	chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
1247
1248	/* update first free bit */
1249	if (bit_off == chunk_md->first_free)
1250		chunk_md->first_free = find_next_zero_bit(
1251					chunk->alloc_map,
1252					pcpu_chunk_map_bits(chunk),
1253					bit_off + alloc_bits);
1254
1255	pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
1256
1257	pcpu_chunk_relocate(chunk, oslot);
1258
1259	return bit_off * PCPU_MIN_ALLOC_SIZE;
1260}
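/*
 * Editor's example of the boundary bookkeeping above, assuming the
 * usual 4-byte PCPU_MIN_ALLOC_SIZE: an allocation of alloc_bits == 4 at
 * bit_off == 8 sets bound_map bits 8 and 12, clears bits 9-11, and
 * returns byte offset 8 * 4 == 32.
 */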
1261
1262/**
1263 * pcpu_free_area - frees the corresponding offset
1264 * @chunk: chunk of interest
1265 * @off: addr offset into chunk
1266 *
1267 * This function determines the size of an allocation to free using
1268 * the boundary bitmap and clears the allocation map.
1269 *
1270 * RETURNS:
1271 * Number of freed bytes.
1272 */
1273static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
1274{
1275	struct pcpu_block_md *chunk_md = &chunk->chunk_md;
1276	int bit_off, bits, end, oslot, freed;
1277
1278	lockdep_assert_held(&pcpu_lock);
1279	pcpu_stats_area_dealloc(chunk);
1280
1281	oslot = pcpu_chunk_slot(chunk);
1282
1283	bit_off = off / PCPU_MIN_ALLOC_SIZE;
1284
1285	/* find end index */
1286	end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
1287			    bit_off + 1);
1288	bits = end - bit_off;
1289	bitmap_clear(chunk->alloc_map, bit_off, bits);
1290
1291	freed = bits * PCPU_MIN_ALLOC_SIZE;
1292
1293	/* update metadata */
1294	chunk->free_bytes += freed;
1295
1296	/* update first free bit */
1297	chunk_md->first_free = min(chunk_md->first_free, bit_off);
1298
1299	pcpu_block_update_hint_free(chunk, bit_off, bits);
1300
1301	pcpu_chunk_relocate(chunk, oslot);
1302
1303	return freed;
1304}
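/*
 * Editor's example, continuing the one after pcpu_alloc_area(): freeing
 * byte offset 32 gives bit_off == 8; the next bound_map bit after 9 is
 * 12, so bits == 4 and 4 * PCPU_MIN_ALLOC_SIZE == 16 bytes are freed.
 */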
1305
1306static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
1307{
1308	block->scan_hint = 0;
1309	block->contig_hint = nr_bits;
1310	block->left_free = nr_bits;
1311	block->right_free = nr_bits;
1312	block->first_free = 0;
1313	block->nr_bits = nr_bits;
1314}
1315
1316static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
1317{
1318	struct pcpu_block_md *md_block;
1319
1320	/* init the chunk's block */
1321	pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
1322
1323	for (md_block = chunk->md_blocks;
1324	     md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
1325	     md_block++)
1326		pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
1327}
1328
1329/**
1330 * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
1331 * @tmp_addr: the start of the region served
1332 * @map_size: size of the region served
1333 *
1334 * This is responsible for creating the chunks that serve the first chunk.  The
1335 * base_addr is @tmp_addr rounded down to a page boundary while the region
1336 * end is rounded up.  Offsets are tracked to determine the region served.
1337 * All this is done so the bitmap allocator never sees partial blocks.
1338 *
1339 * RETURNS:
1340 * Chunk serving the region at @tmp_addr of @map_size.
1341 */
1342static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
1343							 int map_size)
1344{
1345	struct pcpu_chunk *chunk;
1346	unsigned long aligned_addr, lcm_align;
1347	int start_offset, offset_bits, region_size, region_bits;
1348	size_t alloc_size;
1349
1350	/* region calculations */
1351	aligned_addr = tmp_addr & PAGE_MASK;
1352
1353	start_offset = tmp_addr - aligned_addr;
1354
1355	/*
1356	 * Align the end of the region with the LCM of PAGE_SIZE and
1357	 * PCPU_BITMAP_BLOCK_SIZE.  One of these constants is a multiple of
1358	 * the other.
1359	 */
1360	lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
1361	region_size = ALIGN(start_offset + map_size, lcm_align);
1362
1363	/* allocate chunk */
1364	alloc_size = struct_size(chunk, populated,
1365				 BITS_TO_LONGS(region_size >> PAGE_SHIFT));
1366	chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1367	if (!chunk)
1368		panic("%s: Failed to allocate %zu bytes\n", __func__,
1369		      alloc_size);
1370
1371	INIT_LIST_HEAD(&chunk->list);
1372
1373	chunk->base_addr = (void *)aligned_addr;
1374	chunk->start_offset = start_offset;
1375	chunk->end_offset = region_size - chunk->start_offset - map_size;
1376
1377	chunk->nr_pages = region_size >> PAGE_SHIFT;
1378	region_bits = pcpu_chunk_map_bits(chunk);
1379
1380	alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
1381	chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1382	if (!chunk->alloc_map)
1383		panic("%s: Failed to allocate %zu bytes\n", __func__,
1384		      alloc_size);
1385
1386	alloc_size =
1387		BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
1388	chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1389	if (!chunk->bound_map)
1390		panic("%s: Failed to allocate %zu bytes\n", __func__,
1391		      alloc_size);
1392
1393	alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
1394	chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
1395	if (!chunk->md_blocks)
1396		panic("%s: Failed to allocate %zu bytes\n", __func__,
1397		      alloc_size);
1398
1399#ifdef CONFIG_MEMCG_KMEM
1400	/* first chunk is free to use */
1401	chunk->obj_cgroups = NULL;
1402#endif
1403	pcpu_init_md_blocks(chunk);
1404
1405	/* manage populated page bitmap */
1406	chunk->immutable = true;
1407	bitmap_fill(chunk->populated, chunk->nr_pages);
1408	chunk->nr_populated = chunk->nr_pages;
1409	chunk->nr_empty_pop_pages = chunk->nr_pages;
1410
1411	chunk->free_bytes = map_size;
1412
1413	if (chunk->start_offset) {
1414		/* hide the beginning of the bitmap */
1415		offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
1416		bitmap_set(chunk->alloc_map, 0, offset_bits);
1417		set_bit(0, chunk->bound_map);
1418		set_bit(offset_bits, chunk->bound_map);
1419
1420		chunk->chunk_md.first_free = offset_bits;
1421
1422		pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
1423	}
1424
1425	if (chunk->end_offset) {
1426		/* hide the end of the bitmap */
1427		offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
1428		bitmap_set(chunk->alloc_map,
1429			   pcpu_chunk_map_bits(chunk) - offset_bits,
1430			   offset_bits);
1431		set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
1432			chunk->bound_map);
1433		set_bit(region_bits, chunk->bound_map);
1434
1435		pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
1436					     - offset_bits, offset_bits);
1437	}
1438
1439	return chunk;
1440}
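/*
 * Editor's worked example, assuming 4K pages: a tmp_addr ending in
 * 0x800 with map_size == 0x3000 gives start_offset == 0x800,
 * region_size == ALIGN(0x800 + 0x3000, 0x1000) == 0x4000, hence
 * end_offset == 0x4000 - 0x800 - 0x3000 == 0x800 and nr_pages == 4.
 */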
1441
1442static struct pcpu_chunk *pcpu_alloc_chunk(gfp_t gfp)
1443{
1444	struct pcpu_chunk *chunk;
1445	int region_bits;
1446
1447	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
1448	if (!chunk)
1449		return NULL;
1450
1451	INIT_LIST_HEAD(&chunk->list);
1452	chunk->nr_pages = pcpu_unit_pages;
1453	region_bits = pcpu_chunk_map_bits(chunk);
1454
1455	chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
1456					   sizeof(chunk->alloc_map[0]), gfp);
1457	if (!chunk->alloc_map)
1458		goto alloc_map_fail;
1459
1460	chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
1461					   sizeof(chunk->bound_map[0]), gfp);
1462	if (!chunk->bound_map)
1463		goto bound_map_fail;
1464
1465	chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
1466					   sizeof(chunk->md_blocks[0]), gfp);
1467	if (!chunk->md_blocks)
1468		goto md_blocks_fail;
1469
1470#ifdef CONFIG_MEMCG_KMEM
1471	if (!mem_cgroup_kmem_disabled()) {
1472		chunk->obj_cgroups =
1473			pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
1474					sizeof(struct obj_cgroup *), gfp);
1475		if (!chunk->obj_cgroups)
1476			goto objcg_fail;
1477	}
1478#endif
1479
1480	pcpu_init_md_blocks(chunk);
1481
1482	/* init metadata */
1483	chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
1484
1485	return chunk;
1486
1487#ifdef CONFIG_MEMCG_KMEM
1488objcg_fail:
1489	pcpu_mem_free(chunk->md_blocks);
1490#endif
1491md_blocks_fail:
1492	pcpu_mem_free(chunk->bound_map);
1493bound_map_fail:
1494	pcpu_mem_free(chunk->alloc_map);
1495alloc_map_fail:
1496	pcpu_mem_free(chunk);
1497
1498	return NULL;
1499}
1500
1501static void pcpu_free_chunk(struct pcpu_chunk *chunk)
1502{
1503	if (!chunk)
1504		return;
1505#ifdef CONFIG_MEMCG_KMEM
1506	pcpu_mem_free(chunk->obj_cgroups);
1507#endif
1508	pcpu_mem_free(chunk->md_blocks);
1509	pcpu_mem_free(chunk->bound_map);
1510	pcpu_mem_free(chunk->alloc_map);
1511	pcpu_mem_free(chunk);
1512}
1513
1514/**
1515 * pcpu_chunk_populated - post-population bookkeeping
1516 * @chunk: pcpu_chunk which got populated
1517 * @page_start: the start page
1518 * @page_end: the end page
1519 *
1520 * Pages in [@page_start,@page_end) have been populated to @chunk.  Update
1521 * the bookkeeping information accordingly.  Must be called after each
1522 * successful population.
1526 */
1527static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
1528				 int page_end)
1529{
1530	int nr = page_end - page_start;
1531
1532	lockdep_assert_held(&pcpu_lock);
1533
1534	bitmap_set(chunk->populated, page_start, nr);
1535	chunk->nr_populated += nr;
1536	pcpu_nr_populated += nr;
1537
1538	pcpu_update_empty_pages(chunk, nr);
1539}
1540
1541/**
1542 * pcpu_chunk_depopulated - post-depopulation bookkeeping
1543 * @chunk: pcpu_chunk which got depopulated
1544 * @page_start: the start page
1545 * @page_end: the end page
1546 *
1547 * Pages in [@page_start,@page_end) have been depopulated from @chunk.
1548 * Update the bookkeeping information accordingly.  Must be called after
1549 * each successful depopulation.
1550 */
1551static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
1552				   int page_start, int page_end)
1553{
1554	int nr = page_end - page_start;
1555
1556	lockdep_assert_held(&pcpu_lock);
1557
1558	bitmap_clear(chunk->populated, page_start, nr);
1559	chunk->nr_populated -= nr;
1560	pcpu_nr_populated -= nr;
1561
1562	pcpu_update_empty_pages(chunk, -nr);
1563}
1564
1565/*
1566 * Chunk management implementation.
1567 *
1568 * To allow different implementations, chunk alloc/free and
1569 * [de]population are implemented in a separate file which is pulled
1570 * into this file and compiled together.  The following functions
1571 * should be implemented.
1572 *
1573 * pcpu_populate_chunk		- populate the specified range of a chunk
1574 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
1575 * pcpu_post_unmap_tlb_flush	- flush tlb for the specified range of a chunk
1576 * pcpu_create_chunk		- create a new chunk
1577 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
1578 * pcpu_addr_to_page		- translate address to the corresponding struct page
1579 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
1580 */
1581static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
1582			       int page_start, int page_end, gfp_t gfp);
1583static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
1584				  int page_start, int page_end);
1585static void pcpu_post_unmap_tlb_flush(struct pcpu_chunk *chunk,
1586				      int page_start, int page_end);
1587static struct pcpu_chunk *pcpu_create_chunk(gfp_t gfp);
1588static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
1589static struct page *pcpu_addr_to_page(void *addr);
1590static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
1591
1592#ifdef CONFIG_NEED_PER_CPU_KM
1593#include "percpu-km.c"
1594#else
1595#include "percpu-vm.c"
1596#endif
1597
1598/**
1599 * pcpu_chunk_addr_search - determine chunk containing specified address
1600 * @addr: address for which the chunk needs to be determined.
1601 *
1602 * This is an internal function that handles all but static allocations.
1603 * Static percpu address values should never be passed into the allocator.
1604 *
1605 * RETURNS:
1606 * The address of the found chunk.
1607 */
1608static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
1609{
1610	/* is it in the dynamic region (first chunk)? */
1611	if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
1612		return pcpu_first_chunk;
1613
1614	/* is it in the reserved region? */
1615	if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
1616		return pcpu_reserved_chunk;
1617
1618	/*
1619	 * The address is relative to unit0 which might be unused and
1620	 * thus unmapped.  Offset the address to the unit space of the
1621	 * current processor before looking it up in the vmalloc
1622	 * space.  Note that any possible cpu id can be used here, so
1623	 * there's no need to worry about preemption or cpu hotplug.
1624	 */
1625	addr += pcpu_unit_offsets[raw_smp_processor_id()];
1626	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
1627}
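/*
 * Sketch of what the lookup above amounts to for a vmalloc-backed chunk
 * (illustrative; pcpu_addr_to_page() is vmalloc_to_page() in percpu-vm.c
 * and pcpu_get_page_chunk() reads the mapping stored in page->index):
 *
 *	addr += pcpu_unit_offsets[raw_smp_processor_id()];
 *	page  = vmalloc_to_page(addr);
 *	chunk = (struct pcpu_chunk *)page->index;
 */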
1628
1629#ifdef CONFIG_MEMCG_KMEM
1630static bool pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
1631				      struct obj_cgroup **objcgp)
1632{
1633	struct obj_cgroup *objcg;
1634
1635	if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
1636		return true;
1637
1638	objcg = get_obj_cgroup_from_current();
1639	if (!objcg)
1640		return true;
1641
1642	if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
1643		obj_cgroup_put(objcg);
1644		return false;
1645	}
1646
1647	*objcgp = objcg;
1648	return true;
1649}
1650
1651static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1652				       struct pcpu_chunk *chunk, int off,
1653				       size_t size)
1654{
1655	if (!objcg)
1656		return;
1657
1658	if (likely(chunk && chunk->obj_cgroups)) {
1659		chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
1660
1661		rcu_read_lock();
1662		mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1663				size * num_possible_cpus());
1664		rcu_read_unlock();
1665	} else {
1666		obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1667		obj_cgroup_put(objcg);
1668	}
1669}
1670
1671static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1672{
1673	struct obj_cgroup *objcg;
1674
1675	if (unlikely(!chunk->obj_cgroups))
1676		return;
1677
1678	objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
1679	if (!objcg)
1680		return;
1681	chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
1682
1683	obj_cgroup_uncharge(objcg, size * num_possible_cpus());
1684
1685	rcu_read_lock();
1686	mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
1687			-(size * num_possible_cpus()));
1688	rcu_read_unlock();
1689
1690	obj_cgroup_put(objcg);
1691}
1692
1693#else /* CONFIG_MEMCG_KMEM */
1694static bool
1695pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
1696{
1697	return true;
1698}
1699
1700static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
1701				       struct pcpu_chunk *chunk, int off,
1702				       size_t size)
1703{
1704}
1705
1706static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
1707{
1708}
1709#endif /* CONFIG_MEMCG_KMEM */
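/*
 * Usage sketch for the hooks above (the variable is hypothetical): a
 * memcg-aware percpu allocation just adds __GFP_ACCOUNT,
 *
 *	u64 __percpu *counter;
 *
 *	counter = __alloc_percpu_gfp(sizeof(u64), sizeof(u64),
 *				     GFP_KERNEL | __GFP_ACCOUNT);
 *
 * which charges size * num_possible_cpus() to the current task's objcg
 * in the pre-alloc hook and uncharges it again in the free hook.
 */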
1710
1711/**
1712 * pcpu_alloc - the percpu allocator
1713 * @size: size of area to allocate in bytes
1714 * @align: alignment of area (max PAGE_SIZE)
1715 * @reserved: allocate from the reserved chunk if available
1716 * @gfp: allocation flags
1717 *
1718 * Allocate percpu area of @size bytes aligned at @align.  If @gfp doesn't
1719 * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
1720 * then no warning will be triggered on invalid or failed allocation
1721 * requests.
1722 *
1723 * RETURNS:
1724 * Percpu pointer to the allocated area on success, NULL on failure.
1725 */
1726static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
1727				 gfp_t gfp)
1728{
1729	gfp_t pcpu_gfp;
1730	bool is_atomic;
1731	bool do_warn;
1732	struct obj_cgroup *objcg = NULL;
1733	static int warn_limit = 10;
1734	struct pcpu_chunk *chunk, *next;
1735	const char *err;
1736	int slot, off, cpu, ret;
1737	unsigned long flags;
1738	void __percpu *ptr;
1739	size_t bits, bit_align;
1740
1741	gfp = current_gfp_context(gfp);
1742	/* whitelisted flags that can be passed to the backing allocators */
1743	pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
1744	is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
1745	do_warn = !(gfp & __GFP_NOWARN);
1746
1747	/*
1748	 * There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
1749	 * therefore alignment must be a minimum of that many bytes.
1750	 * An allocation may have internal fragmentation of up to
1751	 * PCPU_MIN_ALLOC_SIZE - 1 bytes from rounding the size up.
1752	 */
1753	if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
1754		align = PCPU_MIN_ALLOC_SIZE;
1755
1756	size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
1757	bits = size >> PCPU_MIN_ALLOC_SHIFT;
1758	bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
1759
1760	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
1761		     !is_power_of_2(align))) {
1762		WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
1763		     size, align);
1764		return NULL;
1765	}
1766
1767	if (unlikely(!pcpu_memcg_pre_alloc_hook(size, gfp, &objcg)))
1768		return NULL;
1769
1770	if (!is_atomic) {
1771		/*
1772		 * pcpu_balance_workfn() allocates memory under this mutex,
1773		 * and it may wait for memory reclaim. Allow current task
1774		 * to become OOM victim, in case of memory pressure.
1775		 */
1776		if (gfp & __GFP_NOFAIL) {
1777			mutex_lock(&pcpu_alloc_mutex);
1778		} else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
1779			pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1780			return NULL;
1781		}
1782	}
1783
1784	spin_lock_irqsave(&pcpu_lock, flags);
1785
1786	/* serve reserved allocations from the reserved chunk if available */
1787	if (reserved && pcpu_reserved_chunk) {
1788		chunk = pcpu_reserved_chunk;
1789
1790		off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
1791		if (off < 0) {
1792			err = "alloc from reserved chunk failed";
1793			goto fail_unlock;
1794		}
1795
1796		off = pcpu_alloc_area(chunk, bits, bit_align, off);
1797		if (off >= 0)
1798			goto area_found;
1799
1800		err = "alloc from reserved chunk failed";
1801		goto fail_unlock;
1802	}
1803
1804restart:
1805	/* search through normal chunks */
1806	for (slot = pcpu_size_to_slot(size); slot <= pcpu_free_slot; slot++) {
1807		list_for_each_entry_safe(chunk, next, &pcpu_chunk_lists[slot],
1808					 list) {
1809			off = pcpu_find_block_fit(chunk, bits, bit_align,
1810						  is_atomic);
1811			if (off < 0) {
1812				if (slot < PCPU_SLOT_FAIL_THRESHOLD)
1813					pcpu_chunk_move(chunk, 0);
1814				continue;
1815			}
1816
1817			off = pcpu_alloc_area(chunk, bits, bit_align, off);
1818			if (off >= 0) {
1819				pcpu_reintegrate_chunk(chunk);
1820				goto area_found;
1821			}
1822		}
1823	}
1824
1825	spin_unlock_irqrestore(&pcpu_lock, flags);
1826
1827	/*
1828	 * No space left.  Create a new chunk.  We don't want multiple
1829	 * tasks to create chunks simultaneously.  Serialize and create iff
1830	 * there's still no empty chunk after grabbing the mutex.
1831	 */
1832	if (is_atomic) {
1833		err = "atomic alloc failed, no space left";
1834		goto fail;
1835	}
1836
1837	if (list_empty(&pcpu_chunk_lists[pcpu_free_slot])) {
1838		chunk = pcpu_create_chunk(pcpu_gfp);
1839		if (!chunk) {
1840			err = "failed to allocate new chunk";
1841			goto fail;
1842		}
1843
1844		spin_lock_irqsave(&pcpu_lock, flags);
1845		pcpu_chunk_relocate(chunk, -1);
1846	} else {
1847		spin_lock_irqsave(&pcpu_lock, flags);
1848	}
1849
1850	goto restart;
1851
1852area_found:
1853	pcpu_stats_area_alloc(chunk, size);
1854	spin_unlock_irqrestore(&pcpu_lock, flags);
1855
1856	/* populate if not all pages are already there */
1857	if (!is_atomic) {
1858		unsigned int page_start, page_end, rs, re;
1859
1860		page_start = PFN_DOWN(off);
1861		page_end = PFN_UP(off + size);
1862
1863		bitmap_for_each_clear_region(chunk->populated, rs, re,
1864					     page_start, page_end) {
1865			WARN_ON(chunk->immutable);
1866
1867			ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
1868
1869			spin_lock_irqsave(&pcpu_lock, flags);
1870			if (ret) {
1871				pcpu_free_area(chunk, off);
1872				err = "failed to populate";
1873				goto fail_unlock;
1874			}
1875			pcpu_chunk_populated(chunk, rs, re);
1876			spin_unlock_irqrestore(&pcpu_lock, flags);
1877		}
1878
1879		mutex_unlock(&pcpu_alloc_mutex);
1880	}
1881
1882	if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_LOW)
1883		pcpu_schedule_balance_work();
1884
1885	/* clear the areas and return address relative to base address */
1886	for_each_possible_cpu(cpu)
1887		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1888
1889	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
1890	kmemleak_alloc_percpu(ptr, size, gfp);
1891
1892	trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
1893			chunk->base_addr, off, ptr);
1894
1895	pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
1896
1897	return ptr;
1898
1899fail_unlock:
1900	spin_unlock_irqrestore(&pcpu_lock, flags);
1901fail:
1902	trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
1903
1904	if (!is_atomic && do_warn && warn_limit) {
1905		pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
1906			size, align, is_atomic, err);
1907		dump_stack();
1908		if (!--warn_limit)
1909			pr_info("limit reached, disabling warnings\n");
1910	}
1911	if (is_atomic) {
1912		/* see the flag handling in pcpu_balance_workfn() */
1913		pcpu_atomic_alloc_failed = true;
1914		pcpu_schedule_balance_work();
1915	} else {
1916		mutex_unlock(&pcpu_alloc_mutex);
1917	}
1918
1919	pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
1920
1921	return NULL;
1922}
1923
1924/**
1925 * __alloc_percpu_gfp - allocate dynamic percpu area
1926 * @size: size of area to allocate in bytes
1927 * @align: alignment of area (max PAGE_SIZE)
1928 * @gfp: allocation flags
1929 *
1930 * Allocate zero-filled percpu area of @size bytes aligned at @align.  If
1931 * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
1932 * be called from any context but is a lot more likely to fail. If @gfp
1933 * has __GFP_NOWARN then no warning will be triggered on invalid or failed
1934 * allocation requests.
1935 *
1936 * RETURNS:
1937 * Percpu pointer to the allocated area on success, NULL on failure.
1938 */
1939void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
1940{
1941	return pcpu_alloc(size, align, false, gfp);
1942}
1943EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
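/*
 * Example of an atomic request (illustrative): any @gfp missing part of
 * GFP_KERNEL takes the non-blocking path, e.g.
 *
 *	ptr = __alloc_percpu_gfp(size, align, GFP_NOWAIT | __GFP_NOWARN);
 *
 * is safe in atomic context but can only be served from pages that are
 * already populated.
 */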
1944
1945/**
1946 * __alloc_percpu - allocate dynamic percpu area
1947 * @size: size of area to allocate in bytes
1948 * @align: alignment of area (max PAGE_SIZE)
1949 *
1950 * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
1951 */
1952void __percpu *__alloc_percpu(size_t size, size_t align)
1953{
1954	return pcpu_alloc(size, align, false, GFP_KERNEL);
1955}
1956EXPORT_SYMBOL_GPL(__alloc_percpu);
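/*
 * Typical usage sketch (the struct is hypothetical; alloc_percpu() in
 * percpu.h is the type-safe wrapper around this function):
 *
 *	struct foo_stats {
 *		u64 packets;
 *		u64 bytes;
 *	};
 *	struct foo_stats __percpu *stats;
 *	int cpu;
 *
 *	stats = alloc_percpu(struct foo_stats);
 *	if (!stats)
 *		return -ENOMEM;
 *	for_each_possible_cpu(cpu)
 *		per_cpu_ptr(stats, cpu)->packets = 0;
 *	...
 *	free_percpu(stats);
 *
 * The area comes back zero-filled, so the explicit clear is redundant
 * and only demonstrates per_cpu_ptr().
 */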
1957
1958/**
1959 * __alloc_reserved_percpu - allocate reserved percpu area
1960 * @size: size of area to allocate in bytes
1961 * @align: alignment of area (max PAGE_SIZE)
1962 *
1963 * Allocate zero-filled percpu area of @size bytes aligned at @align
1964 * from reserved percpu area if arch has set it up; otherwise,
1965 * allocation is served from the same dynamic area.  Might sleep.
1966 * Might trigger writeouts.
1967 *
1968 * CONTEXT:
1969 * Does GFP_KERNEL allocation.
1970 *
1971 * RETURNS:
1972 * Percpu pointer to the allocated area on success, NULL on failure.
1973 */
1974void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
1975{
1976	return pcpu_alloc(size, align, true, GFP_KERNEL);
1977}
1978
1979/**
1980 * pcpu_balance_free - manage the amount of free chunks
1981 * @empty_only: free chunks only if there are no populated pages
1982 *
1983 * If empty_only is %false, reclaim all fully free chunks regardless of the
1984 * number of populated pages.  Otherwise, only reclaim chunks that have no
1985 * populated pages.
1986 *
1987 * CONTEXT:
1988 * pcpu_lock (can be dropped temporarily)
1989 */
1990static void pcpu_balance_free(bool empty_only)
1991{
1992	LIST_HEAD(to_free);
1993	struct list_head *free_head = &pcpu_chunk_lists[pcpu_free_slot];
1994	struct pcpu_chunk *chunk, *next;
1995
1996	lockdep_assert_held(&pcpu_lock);
1997
1998	/*
1999	 * There's no reason to keep around multiple unused chunks and VM
2000	 * areas can be scarce.  Destroy all free chunks except for one.
2001	 */
2002	list_for_each_entry_safe(chunk, next, free_head, list) {
2003		WARN_ON(chunk->immutable);
2004
2005		/* spare the first one */
2006		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
2007			continue;
2008
2009		if (!empty_only || chunk->nr_empty_pop_pages == 0)
2010			list_move(&chunk->list, &to_free);
2011	}
2012
2013	if (list_empty(&to_free))
2014		return;
2015
2016	spin_unlock_irq(&pcpu_lock);
2017	list_for_each_entry_safe(chunk, next, &to_free, list) {
2018		unsigned int rs, re;
2019
2020		bitmap_for_each_set_region(chunk->populated, rs, re, 0,
2021					   chunk->nr_pages) {
2022			pcpu_depopulate_chunk(chunk, rs, re);
2023			spin_lock_irq(&pcpu_lock);
2024			pcpu_chunk_depopulated(chunk, rs, re);
2025			spin_unlock_irq(&pcpu_lock);
2026		}
2027		pcpu_destroy_chunk(chunk);
2028		cond_resched();
2029	}
2030	spin_lock_irq(&pcpu_lock);
2031}
2032
2033/**
2034 * pcpu_balance_populated - manage the amount of populated pages
2035 *
2036 * Maintain a certain amount of populated pages to satisfy atomic allocations.
2037 * It is possible that this is called when physical memory is scarce causing
2038 * OOM killer to be triggered.  We should avoid doing so until an actual
2039 * allocation causes the failure as it is possible that requests can be
2040 * serviced from already backed regions.
2041 *
2042 * CONTEXT:
2043 * pcpu_lock (can be dropped temporarily)
2044 */
2045static void pcpu_balance_populated(void)
2046{
2047	/* gfp flags passed to underlying allocators */
2048	const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
2049	struct pcpu_chunk *chunk;
2050	int slot, nr_to_pop, ret;
2051
2052	lockdep_assert_held(&pcpu_lock);
2053
2054	/*
2055	 * Ensure there are certain number of free populated pages for
2056	 * atomic allocs.  Fill up from the most packed so that atomic
2057	 * allocs don't increase fragmentation.  If atomic allocation
2058	 * failed previously, always populate the maximum amount.  This
2059	 * should prevent atomic allocs larger than PAGE_SIZE from keeping
2060	 * failing indefinitely; however, large atomic allocs are not
2061	 * something we support properly and can be highly unreliable and
2062	 * inefficient.
2063	 */
2064retry_pop:
2065	if (pcpu_atomic_alloc_failed) {
2066		nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
2067		/* best effort anyway, don't worry about synchronization */
2068		pcpu_atomic_alloc_failed = false;
2069	} else {
2070		nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
2071				  pcpu_nr_empty_pop_pages,
2072				  0, PCPU_EMPTY_POP_PAGES_HIGH);
2073	}
2074
2075	for (slot = pcpu_size_to_slot(PAGE_SIZE); slot <= pcpu_free_slot; slot++) {
2076		unsigned int nr_unpop = 0, rs, re;
2077
2078		if (!nr_to_pop)
2079			break;
2080
2081		list_for_each_entry(chunk, &pcpu_chunk_lists[slot], list) {
2082			nr_unpop = chunk->nr_pages - chunk->nr_populated;
2083			if (nr_unpop)
2084				break;
2085		}
2086
2087		if (!nr_unpop)
2088			continue;
2089
2090		/* @chunk can't go away while pcpu_alloc_mutex is held */
2091		bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
2092					     chunk->nr_pages) {
2093			int nr = min_t(int, re - rs, nr_to_pop);
2094
2095			spin_unlock_irq(&pcpu_lock);
2096			ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
2097			cond_resched();
2098			spin_lock_irq(&pcpu_lock);
2099			if (!ret) {
2100				nr_to_pop -= nr;
2101				pcpu_chunk_populated(chunk, rs, rs + nr);
2102			} else {
2103				nr_to_pop = 0;
2104			}
2105
2106			if (!nr_to_pop)
2107				break;
2108		}
2109	}
2110
2111	if (nr_to_pop) {
2112		/* ran out of chunks to populate, create a new one and retry */
2113		spin_unlock_irq(&pcpu_lock);
2114		chunk = pcpu_create_chunk(gfp);
2115		cond_resched();
2116		spin_lock_irq(&pcpu_lock);
2117		if (chunk) {
2118			pcpu_chunk_relocate(chunk, -1);
2119			goto retry_pop;
2120		}
2121	}
2122}
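/*
 * Worked example of the target calculation above, assuming
 * PCPU_EMPTY_POP_PAGES_HIGH == 4 (see the definition earlier in this
 * file for the live value): with pcpu_nr_empty_pop_pages == 1 and no
 * prior atomic failure,
 *
 *	nr_to_pop = clamp(4 - 1, 0, 4) = 3
 *
 * so up to three more pages get populated, starting from the most
 * packed chunks.
 */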
2123
2124/**
2125 * pcpu_reclaim_populated - scan over to_depopulate chunks and free empty pages
2126 *
2127 * Scan over chunks in the depopulate list and try to release unused populated
2128 * pages back to the system.  Depopulated chunks are sidelined to prevent
2129 * repopulating these pages unless required.  Fully free chunks are reintegrated
2130 * and freed accordingly (one is kept around).  If we drop below the empty
2131 * populated pages threshold, reintegrate the chunk if it has empty free pages.
2132 * Each chunk is scanned in reverse order to keep populated pages close to
2133 * the beginning of the chunk.
2134 *
2135 * CONTEXT:
2136 * pcpu_lock (can be dropped temporarily)
2137 *
2138 */
2139static void pcpu_reclaim_populated(void)
2140{
2141	struct pcpu_chunk *chunk;
2142	struct pcpu_block_md *block;
2143	int freed_page_start, freed_page_end;
2144	int i, end;
2145	bool reintegrate;
2146
2147	lockdep_assert_held(&pcpu_lock);
2148
2149	/*
2150	 * Once a chunk is isolated to the to_depopulate list, the chunk is no
2151	 * longer discoverable to allocations which may populate pages.  The only
2152	 * other accessor is the free path, which only returns the area back to
2153	 * the allocator without touching the populated bitmap.
2154	 */
2155	while (!list_empty(&pcpu_chunk_lists[pcpu_to_depopulate_slot])) {
2156		chunk = list_first_entry(&pcpu_chunk_lists[pcpu_to_depopulate_slot],
2157					 struct pcpu_chunk, list);
2158		WARN_ON(chunk->immutable);
2159
2160		/*
2161		 * Scan the chunk's pages in reverse order to keep populated
2162		 * pages close to the beginning of the chunk.
2163		 */
2164		freed_page_start = chunk->nr_pages;
2165		freed_page_end = 0;
2166		reintegrate = false;
2167		for (i = chunk->nr_pages - 1, end = -1; i >= 0; i--) {
2168			/* no more work to do */
2169			if (chunk->nr_empty_pop_pages == 0)
2170				break;
2171
2172			/* reintegrate chunk to prevent atomic alloc failures */
2173			if (pcpu_nr_empty_pop_pages < PCPU_EMPTY_POP_PAGES_HIGH) {
2174				reintegrate = true;
2175				goto end_chunk;
2176			}
2177
2178			/*
2179			 * If the page is empty and populated, start or
2180			 * extend the (i, end) range.  If i == 0, decrease
2181			 * i and perform the depopulation to cover the last
2182			 * (first) page in the chunk.
2183			 */
2184			block = chunk->md_blocks + i;
2185			if (block->contig_hint == PCPU_BITMAP_BLOCK_BITS &&
2186			    test_bit(i, chunk->populated)) {
2187				if (end == -1)
2188					end = i;
2189				if (i > 0)
2190					continue;
2191				i--;
2192			}
2193
2194			/* depopulate if there is an active range */
2195			if (end == -1)
2196				continue;
2197
2198			spin_unlock_irq(&pcpu_lock);
2199			pcpu_depopulate_chunk(chunk, i + 1, end + 1);
2200			cond_resched();
2201			spin_lock_irq(&pcpu_lock);
2202
2203			pcpu_chunk_depopulated(chunk, i + 1, end + 1);
2204			freed_page_start = min(freed_page_start, i + 1);
2205			freed_page_end = max(freed_page_end, end + 1);
2206
2207			/* reset the range and continue */
2208			end = -1;
2209		}
2210
2211end_chunk:
2212		/* batch tlb flush per chunk to amortize cost */
2213		if (freed_page_start < freed_page_end) {
2214			spin_unlock_irq(&pcpu_lock);
2215			pcpu_post_unmap_tlb_flush(chunk,
2216						  freed_page_start,
2217						  freed_page_end);
2218			cond_resched();
2219			spin_lock_irq(&pcpu_lock);
2220		}
2221
2222		if (reintegrate || chunk->free_bytes == pcpu_unit_size)
2223			pcpu_reintegrate_chunk(chunk);
2224		else
2225			list_move_tail(&chunk->list,
2226				       &pcpu_chunk_lists[pcpu_sidelined_slot]);
2227	}
2228}
2229
2230/**
2231 * pcpu_balance_workfn - manage the amount of free chunks and populated pages
2232 * @work: unused
2233 *
2234 * Manage the number of fully free chunks and the number of populated pages.
2235 * An important thing to consider is when pages are freed and how they
2236 * contribute to the global counts.
2237 */
2238static void pcpu_balance_workfn(struct work_struct *work)
2239{
2240	/*
2241	 * pcpu_balance_free() is called twice because the first time we may
2242	 * trim pages in the active pcpu_nr_empty_pop_pages which may cause us
2243	 * to grow other chunks.  This then gives pcpu_reclaim_populated() time
2244	 * to move fully free chunks to the active list to be freed if
2245	 * appropriate.
2246	 */
2247	mutex_lock(&pcpu_alloc_mutex);
2248	spin_lock_irq(&pcpu_lock);
2249
2250	pcpu_balance_free(false);
2251	pcpu_reclaim_populated();
2252	pcpu_balance_populated();
2253	pcpu_balance_free(true);
2254
2255	spin_unlock_irq(&pcpu_lock);
2256	mutex_unlock(&pcpu_alloc_mutex);
2257}
2258
2259/**
2260 * free_percpu - free percpu area
2261 * @ptr: pointer to area to free
2262 *
2263 * Free percpu area @ptr.
2264 *
2265 * CONTEXT:
2266 * Can be called from atomic context.
2267 */
2268void free_percpu(void __percpu *ptr)
2269{
2270	void *addr;
2271	struct pcpu_chunk *chunk;
2272	unsigned long flags;
2273	int size, off;
2274	bool need_balance = false;
2275
2276	if (!ptr)
2277		return;
2278
2279	kmemleak_free_percpu(ptr);
2280
2281	addr = __pcpu_ptr_to_addr(ptr);
2282
2283	spin_lock_irqsave(&pcpu_lock, flags);
2284
2285	chunk = pcpu_chunk_addr_search(addr);
2286	off = addr - chunk->base_addr;
2287
2288	size = pcpu_free_area(chunk, off);
2289
2290	pcpu_memcg_free_hook(chunk, off, size);
2291
2292	/*
2293	 * If there is more than one fully free chunk, wake up the grim reaper.
2294	 * If the chunk is isolated, it may be in the process of being
2295	 * reclaimed.  Let reclaim manage cleaning up of that chunk.
2296	 */
2297	if (!chunk->isolated && chunk->free_bytes == pcpu_unit_size) {
2298		struct pcpu_chunk *pos;
2299
2300		list_for_each_entry(pos, &pcpu_chunk_lists[pcpu_free_slot], list)
2301			if (pos != chunk) {
2302				need_balance = true;
2303				break;
2304			}
2305	} else if (pcpu_should_reclaim_chunk(chunk)) {
2306		pcpu_isolate_chunk(chunk);
2307		need_balance = true;
2308	}
2309
2310	trace_percpu_free_percpu(chunk->base_addr, off, ptr);
2311
2312	spin_unlock_irqrestore(&pcpu_lock, flags);
2313
2314	if (need_balance)
2315		pcpu_schedule_balance_work();
2316}
2317EXPORT_SYMBOL_GPL(free_percpu);
2318
2319bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
2320{
2321#ifdef CONFIG_SMP
2322	const size_t static_size = __per_cpu_end - __per_cpu_start;
2323	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2324	unsigned int cpu;
2325
2326	for_each_possible_cpu(cpu) {
2327		void *start = per_cpu_ptr(base, cpu);
2328		void *va = (void *)addr;
2329
2330		if (va >= start && va < start + static_size) {
2331			if (can_addr) {
2332				*can_addr = (unsigned long) (va - start);
2333				*can_addr += (unsigned long)
2334					per_cpu_ptr(base, get_boot_cpu_id());
2335			}
2336			return true;
2337		}
2338	}
2339#endif
2340	/* on UP, can't distinguish from other static vars, always false */
2341	return false;
2342}
2343
2344/**
2345 * is_kernel_percpu_address - test whether address is from static percpu area
2346 * @addr: address to test
2347 *
2348 * Test whether @addr belongs to in-kernel static percpu area.  Module
2349 * static percpu areas are not considered.  For those, use
2350 * is_module_percpu_address().
2351 *
2352 * RETURNS:
2353 * %true if @addr is from in-kernel static percpu area, %false otherwise.
2354 */
2355bool is_kernel_percpu_address(unsigned long addr)
2356{
2357	return __is_kernel_percpu_address(addr, NULL);
2358}
2359
2360/**
2361 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
2362 * @addr: the address to be converted to physical address
2363 *
2364 * Given @addr, a dereferenceable address obtained via one of the
2365 * percpu access macros, this function translates it into its physical
2366 * address.  The caller is responsible for ensuring @addr stays valid
2367 * until this function finishes.
2368 *
2369 * The percpu allocator has special setup for the first chunk, which
2370 * currently supports either embedding in the linear address space or a
2371 * vmalloc mapping; from the second chunk on, the backing allocator
2372 * (currently either vm or km) provides the translation.
2373 *
2374 * The address could be translated without checking whether it falls in
2375 * the first chunk, but the current code better reflects how the percpu
2376 * allocator actually works, and the verification can discover bugs both
2377 * in the percpu allocator itself and in per_cpu_ptr_to_phys() callers.
2378 * So we keep the current code.
2379 *
2380 * RETURNS:
2381 * The physical address for @addr.
2382 */
2383phys_addr_t per_cpu_ptr_to_phys(void *addr)
2384{
2385	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
2386	bool in_first_chunk = false;
2387	unsigned long first_low, first_high;
2388	unsigned int cpu;
2389
2390	/*
2391	 * The following test on unit_low/high isn't strictly
2392	 * necessary but will speed up lookups of addresses which
2393	 * aren't in the first chunk.
2394	 *
2395	 * The address check is against full chunk sizes.  pcpu_base_addr
2396	 * points to the beginning of the first chunk including the
2397	 * static region.  Assumes good intent as the first chunk may
2398	 * not be full (ie. < pcpu_unit_pages in size).
2399	 */
2400	first_low = (unsigned long)pcpu_base_addr +
2401		    pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
2402	first_high = (unsigned long)pcpu_base_addr +
2403		     pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
2404	if ((unsigned long)addr >= first_low &&
2405	    (unsigned long)addr < first_high) {
2406		for_each_possible_cpu(cpu) {
2407			void *start = per_cpu_ptr(base, cpu);
2408
2409			if (addr >= start && addr < start + pcpu_unit_size) {
2410				in_first_chunk = true;
2411				break;
2412			}
2413		}
2414	}
2415
2416	if (in_first_chunk) {
2417		if (!is_vmalloc_addr(addr))
2418			return __pa(addr);
2419		else
2420			return page_to_phys(vmalloc_to_page(addr)) +
2421			       offset_in_page(addr);
2422	} else
2423		return page_to_phys(pcpu_addr_to_page(addr)) +
2424		       offset_in_page(addr);
2425}
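/*
 * Usage sketch (illustrative): handing one CPU's copy of a dynamic
 * percpu area to hardware or firmware that needs a physical address:
 *
 *	void __percpu *p = __alloc_percpu(size, align);
 *	phys_addr_t pa = per_cpu_ptr_to_phys(per_cpu_ptr(p, cpu));
 *
 * The caller must keep @p allocated for as long as @pa is in use.
 */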
2426
2427/**
2428 * pcpu_alloc_alloc_info - allocate percpu allocation info
2429 * @nr_groups: the number of groups
2430 * @nr_units: the number of units
2431 *
2432 * Allocate ai which is large enough for @nr_groups groups containing
2433 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
2434 * cpu_map array which is long enough for @nr_units and filled with
2435 * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
2436 * pointers of the other groups.
2437 *
2438 * RETURNS:
2439 * Pointer to the allocated pcpu_alloc_info on success, NULL on
2440 * failure.
2441 */
2442struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
2443						      int nr_units)
2444{
2445	struct pcpu_alloc_info *ai;
2446	size_t base_size, ai_size;
2447	void *ptr;
2448	int unit;
2449
2450	base_size = ALIGN(struct_size(ai, groups, nr_groups),
2451			  __alignof__(ai->groups[0].cpu_map[0]));
2452	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
2453
2454	ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
2455	if (!ptr)
2456		return NULL;
2457	ai = ptr;
2458	ptr += base_size;
2459
2460	ai->groups[0].cpu_map = ptr;
2461
2462	for (unit = 0; unit < nr_units; unit++)
2463		ai->groups[0].cpu_map[unit] = NR_CPUS;
2464
2465	ai->nr_groups = nr_groups;
2466	ai->__ai_size = PFN_ALIGN(ai_size);
2467
2468	return ai;
2469}
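/*
 * Resulting layout for, say, nr_groups == 2 and nr_units == 8
 * (illustrative), all in one memblock allocation:
 *
 *	[ ai | groups[0] | groups[1] | pad | cpu_map[0..7] ]
 *
 * base_size provides the padding so the trailing cpu_map array is
 * suitably aligned; groups[0].cpu_map points at it and every entry
 * starts out as NR_CPUS.
 */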
2470
2471/**
2472 * pcpu_free_alloc_info - free percpu allocation info
2473 * @ai: pcpu_alloc_info to free
2474 *
2475 * Free @ai which was allocated by pcpu_alloc_alloc_info().
2476 */
2477void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
2478{
2479	memblock_free_early(__pa(ai), ai->__ai_size);
2480}
2481
2482/**
2483 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
2484 * @lvl: loglevel
2485 * @ai: allocation info to dump
2486 *
2487 * Print out information about @ai using loglevel @lvl.
2488 */
2489static void pcpu_dump_alloc_info(const char *lvl,
2490				 const struct pcpu_alloc_info *ai)
2491{
2492	int group_width = 1, cpu_width = 1, width;
2493	char empty_str[] = "--------";
2494	int alloc = 0, alloc_end = 0;
2495	int group, v;
2496	int upa, apl;	/* units per alloc, allocs per line */
2497
2498	v = ai->nr_groups;
2499	while (v /= 10)
2500		group_width++;
2501
2502	v = num_possible_cpus();
2503	while (v /= 10)
2504		cpu_width++;
2505	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
2506
2507	upa = ai->alloc_size / ai->unit_size;
2508	width = upa * (cpu_width + 1) + group_width + 3;
2509	apl = rounddown_pow_of_two(max(60 / width, 1));
2510
2511	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
2512	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
2513	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
2514
2515	for (group = 0; group < ai->nr_groups; group++) {
2516		const struct pcpu_group_info *gi = &ai->groups[group];
2517		int unit = 0, unit_end = 0;
2518
2519		BUG_ON(gi->nr_units % upa);
2520		for (alloc_end += gi->nr_units / upa;
2521		     alloc < alloc_end; alloc++) {
2522			if (!(alloc % apl)) {
2523				pr_cont("\n");
2524				printk("%spcpu-alloc: ", lvl);
2525			}
2526			pr_cont("[%0*d] ", group_width, group);
2527
2528			for (unit_end += upa; unit < unit_end; unit++)
2529				if (gi->cpu_map[unit] != NR_CPUS)
2530					pr_cont("%0*d ",
2531						cpu_width, gi->cpu_map[unit]);
2532				else
2533					pr_cont("%s ", empty_str);
2534		}
2535	}
2536	pr_cont("\n");
2537}
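/*
 * Sample output (hypothetical 4-cpu machine, one group, 2M atoms; the
 * numbers vary with the configuration):
 *
 *	pcpu-alloc: s196608 r8192 d28672 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3
 */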
2538
2539/**
2540 * pcpu_setup_first_chunk - initialize the first percpu chunk
2541 * @ai: pcpu_alloc_info describing how the percpu area is shaped
2542 * @base_addr: mapped address
2543 *
2544 * Initialize the first percpu chunk which contains the kernel static
2545 * percpu area.  This function is to be called from arch percpu area
2546 * setup path.
2547 *
2548 * @ai contains all information necessary to initialize the first
2549 * chunk and prime the dynamic percpu allocator.
2550 *
2551 * @ai->static_size is the size of static percpu area.
2552 *
2553 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
2554 * reserve after the static area in the first chunk.  This reserves
2555 * the first chunk such that it's available only through reserved
2556 * percpu allocation.  This is primarily used to serve module percpu
2557 * static areas on architectures where the addressing model has
2558 * limited offset range for symbol relocations to guarantee module
2559 * percpu symbols fall inside the relocatable range.
2560 *
2561 * @ai->dyn_size determines the number of bytes available for dynamic
2562 * allocation in the first chunk.  The area between @ai->static_size +
2563 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
2564 *
2565 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
2566 * and equal to or larger than @ai->static_size + @ai->reserved_size +
2567 * @ai->dyn_size.
2568 *
2569 * @ai->atom_size is the allocation atom size and used as alignment
2570 * for vm areas.
2571 *
2572 * @ai->alloc_size is the allocation size and always multiple of
2573 * @ai->atom_size.  This is larger than @ai->atom_size if
2574 * @ai->unit_size is larger than @ai->atom_size.
2575 *
2576 * @ai->nr_groups and @ai->groups describe virtual memory layout of
2577 * percpu areas.  Units which should be colocated are put into the
2578 * same group.  Dynamic VM areas will be allocated according to these
2579 * groupings.  If @ai->nr_groups is zero, a single group containing
2580 * all units is assumed.
2581 *
2582 * The caller should have mapped the first chunk at @base_addr and
2583 * copied static data to each unit.
2584 *
2585 * The first chunk will always contain a static and a dynamic region.
2586 * However, the static region is not managed by any chunk.  If the first
2587 * chunk also contains a reserved region, it is served by two chunks -
2588 * one for the reserved region and one for the dynamic region.  They
2589 * share the same vm, but use offset regions in the area allocation map.
2590 * The chunk serving the dynamic region is circulated in the chunk slots
2591 * and available for dynamic allocation like any other chunk.
2592 */
2593void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
2594				   void *base_addr)
2595{
2596	size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
2597	size_t static_size, dyn_size;
2598	struct pcpu_chunk *chunk;
2599	unsigned long *group_offsets;
2600	size_t *group_sizes;
2601	unsigned long *unit_off;
2602	unsigned int cpu;
2603	int *unit_map;
2604	int group, unit, i;
2605	int map_size;
2606	unsigned long tmp_addr;
2607	size_t alloc_size;
2608
2609#define PCPU_SETUP_BUG_ON(cond)	do {					\
2610	if (unlikely(cond)) {						\
2611		pr_emerg("failed to initialize, %s\n", #cond);		\
2612		pr_emerg("cpu_possible_mask=%*pb\n",			\
2613			 cpumask_pr_args(cpu_possible_mask));		\
2614		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
2615		BUG();							\
2616	}								\
2617} while (0)
2618
2619	/* sanity checks */
2620	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
2621#ifdef CONFIG_SMP
2622	PCPU_SETUP_BUG_ON(!ai->static_size);
2623	PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
2624#endif
2625	PCPU_SETUP_BUG_ON(!base_addr);
2626	PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
2627	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
2628	PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
2629	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
2630	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
2631	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
2632	PCPU_SETUP_BUG_ON(!ai->dyn_size);
2633	PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
2634	PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
2635			    IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
2636	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
2637
2638	/* process group information and build config tables accordingly */
2639	alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
2640	group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2641	if (!group_offsets)
2642		panic("%s: Failed to allocate %zu bytes\n", __func__,
2643		      alloc_size);
2644
2645	alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
2646	group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2647	if (!group_sizes)
2648		panic("%s: Failed to allocate %zu bytes\n", __func__,
2649		      alloc_size);
2650
2651	alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
2652	unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2653	if (!unit_map)
2654		panic("%s: Failed to allocate %zu bytes\n", __func__,
2655		      alloc_size);
2656
2657	alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
2658	unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
2659	if (!unit_off)
2660		panic("%s: Failed to allocate %zu bytes\n", __func__,
2661		      alloc_size);
2662
2663	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
2664		unit_map[cpu] = UINT_MAX;
2665
2666	pcpu_low_unit_cpu = NR_CPUS;
2667	pcpu_high_unit_cpu = NR_CPUS;
2668
2669	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
2670		const struct pcpu_group_info *gi = &ai->groups[group];
2671
2672		group_offsets[group] = gi->base_offset;
2673		group_sizes[group] = gi->nr_units * ai->unit_size;
2674
2675		for (i = 0; i < gi->nr_units; i++) {
2676			cpu = gi->cpu_map[i];
2677			if (cpu == NR_CPUS)
2678				continue;
2679
2680			PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
2681			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
2682			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
2683
2684			unit_map[cpu] = unit + i;
2685			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
2686
2687			/* determine low/high unit_cpu */
2688			if (pcpu_low_unit_cpu == NR_CPUS ||
2689			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
2690				pcpu_low_unit_cpu = cpu;
2691			if (pcpu_high_unit_cpu == NR_CPUS ||
2692			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
2693				pcpu_high_unit_cpu = cpu;
2694		}
2695	}
2696	pcpu_nr_units = unit;
2697
2698	for_each_possible_cpu(cpu)
2699		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
2700
2701	/* we're done parsing the input, undefine BUG macro and dump config */
2702#undef PCPU_SETUP_BUG_ON
2703	pcpu_dump_alloc_info(KERN_DEBUG, ai);
2704
2705	pcpu_nr_groups = ai->nr_groups;
2706	pcpu_group_offsets = group_offsets;
2707	pcpu_group_sizes = group_sizes;
2708	pcpu_unit_map = unit_map;
2709	pcpu_unit_offsets = unit_off;
2710
2711	/* determine basic parameters */
2712	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
2713	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
2714	pcpu_atom_size = ai->atom_size;
2715	pcpu_chunk_struct_size = struct_size(chunk, populated,
2716					     BITS_TO_LONGS(pcpu_unit_pages));
2717
2718	pcpu_stats_save_ai(ai);
2719
2720	/*
2721	 * Allocate chunk slots.  The slots after the active slots are:
2722	 *   sidelined_slot - isolated, depopulated chunks
2723	 *   free_slot - fully free chunks
2724	 *   to_depopulate_slot - isolated, chunks to depopulate
2725	 */
2726	pcpu_sidelined_slot = __pcpu_size_to_slot(pcpu_unit_size) + 1;
2727	pcpu_free_slot = pcpu_sidelined_slot + 1;
2728	pcpu_to_depopulate_slot = pcpu_free_slot + 1;
2729	pcpu_nr_slots = pcpu_to_depopulate_slot + 1;
2730	pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
2731					  sizeof(pcpu_chunk_lists[0]),
2732					  SMP_CACHE_BYTES);
2733	if (!pcpu_chunk_lists)
2734		panic("%s: Failed to allocate %zu bytes\n", __func__,
2735		      pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]));
2736
2737	for (i = 0; i < pcpu_nr_slots; i++)
2738		INIT_LIST_HEAD(&pcpu_chunk_lists[i]);
2739
2740	/*
2741	 * The end of the static region needs to be aligned with the
2742	 * minimum allocation size as this offsets the reserved and
2743	 * dynamic region.  The first chunk ends page aligned by
2744	 * expanding the dynamic region, therefore the dynamic region
2745	 * can be shrunk to compensate while still staying above the
2746	 * configured sizes.
2747	 */
2748	static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
2749	dyn_size = ai->dyn_size - (static_size - ai->static_size);
2750
2751	/*
2752	 * Initialize first chunk.
2753	 * If the reserved_size is non-zero, this initializes the reserved
2754	 * chunk.  If the reserved_size is zero, the reserved chunk is NULL
2755	 * and the dynamic region is initialized here.  The first chunk,
2756	 * pcpu_first_chunk, will always point to the chunk that serves
2757	 * the dynamic region.
2758	 */
2759	tmp_addr = (unsigned long)base_addr + static_size;
2760	map_size = ai->reserved_size ?: dyn_size;
2761	chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2762
2763	/* init dynamic chunk if necessary */
2764	if (ai->reserved_size) {
2765		pcpu_reserved_chunk = chunk;
2766
2767		tmp_addr = (unsigned long)base_addr + static_size +
2768			   ai->reserved_size;
2769		map_size = dyn_size;
2770		chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
2771	}
2772
2773	/* link the first chunk in */
2774	pcpu_first_chunk = chunk;
2775	pcpu_nr_empty_pop_pages = pcpu_first_chunk->nr_empty_pop_pages;
2776	pcpu_chunk_relocate(pcpu_first_chunk, -1);
2777
2778	/* include all regions of the first chunk */
2779	pcpu_nr_populated += PFN_DOWN(size_sum);
2780
2781	pcpu_stats_chunk_alloc();
2782	trace_percpu_create_chunk(base_addr);
2783
2784	/* we're done */
2785	pcpu_base_addr = base_addr;
2786}
2787
2788#ifdef CONFIG_SMP
2789
2790const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
2791	[PCPU_FC_AUTO]	= "auto",
2792	[PCPU_FC_EMBED]	= "embed",
2793	[PCPU_FC_PAGE]	= "page",
2794};
2795
2796enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
2797
2798static int __init percpu_alloc_setup(char *str)
2799{
2800	if (!str)
2801		return -EINVAL;
2802
2803	if (0)
2804		/* nada */;
2805#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
2806	else if (!strcmp(str, "embed"))
2807		pcpu_chosen_fc = PCPU_FC_EMBED;
2808#endif
2809#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
2810	else if (!strcmp(str, "page"))
2811		pcpu_chosen_fc = PCPU_FC_PAGE;
2812#endif
2813	else
2814		pr_warn("unknown allocator %s specified\n", str);
2815
2816	return 0;
2817}
2818early_param("percpu_alloc", percpu_alloc_setup);
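/*
 * E.g. booting with "percpu_alloc=page" on the kernel command line
 * forces the page-by-page first chunk allocator where the embed
 * allocator would otherwise have been picked by PCPU_FC_AUTO.
 */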
2819
2820/*
2821 * pcpu_embed_first_chunk() is used by the generic percpu setup.
2822 * Build it if the arch config needs it or the generic setup is going
2823 * to be used.
2824 */
2825#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
2826	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
2827#define BUILD_EMBED_FIRST_CHUNK
2828#endif
2829
2830/* build pcpu_page_first_chunk() iff needed by the arch config */
2831#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
2832#define BUILD_PAGE_FIRST_CHUNK
2833#endif
2834
2835/* pcpu_build_alloc_info() is used by both embed and page first chunk */
2836#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
2837/**
2838 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
2839 * @reserved_size: the size of reserved percpu area in bytes
2840 * @dyn_size: minimum free size for dynamic allocation in bytes
2841 * @atom_size: allocation atom size
2842 * @cpu_distance_fn: callback to determine distance between cpus, optional
2843 *
2844 * This function determines grouping of units, their mappings to cpus
2845 * and other parameters considering needed percpu size, allocation
2846 * atom size and distances between CPUs.
2847 *
2848 * Groups are always multiples of atom size and CPUs which are of
2849 * LOCAL_DISTANCE both ways are grouped together and share space for
2850 * units in the same group.  The returned configuration is guaranteed
2851 * to have CPUs on different nodes on different groups and >=75% usage
2852 * of allocated virtual address space.
2853 *
2854 * RETURNS:
2855 * On success, pointer to the new allocation_info is returned.  On
2856 * failure, ERR_PTR value is returned.
2857 */
2858static struct pcpu_alloc_info * __init __flatten pcpu_build_alloc_info(
2859				size_t reserved_size, size_t dyn_size,
2860				size_t atom_size,
2861				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
2862{
2863	static int group_map[NR_CPUS] __initdata;
2864	static int group_cnt[NR_CPUS] __initdata;
2865	static struct cpumask mask __initdata;
2866	const size_t static_size = __per_cpu_end - __per_cpu_start;
2867	int nr_groups = 1, nr_units = 0;
2868	size_t size_sum, min_unit_size, alloc_size;
2869	int upa, max_upa, best_upa;	/* units_per_alloc */
2870	int last_allocs, group, unit;
2871	unsigned int cpu, tcpu;
2872	struct pcpu_alloc_info *ai;
2873	unsigned int *cpu_map;
2874
2875	/* this function may be called multiple times */
2876	memset(group_map, 0, sizeof(group_map));
2877	memset(group_cnt, 0, sizeof(group_cnt));
2878	cpumask_clear(&mask);
2879
2880	/* calculate size_sum and ensure dyn_size is enough for early alloc */
2881	size_sum = PFN_ALIGN(static_size + reserved_size +
2882			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
2883	dyn_size = size_sum - static_size - reserved_size;
2884
2885	/*
2886	 * Determine min_unit_size, alloc_size and max_upa such that
2887	 * alloc_size is multiple of atom_size and is the smallest
2888	 * which can accommodate 4k aligned segments which are equal to
2889	 * or larger than min_unit_size.
2890	 */
2891	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
2892
2893	/* determine the maximum # of units that can fit in an allocation */
2894	alloc_size = roundup(min_unit_size, atom_size);
2895	upa = alloc_size / min_unit_size;
2896	while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2897		upa--;
2898	max_upa = upa;
2899
2900	cpumask_copy(&mask, cpu_possible_mask);
2901
2902	/* group cpus according to their proximity */
2903	for (group = 0; !cpumask_empty(&mask); group++) {
2904		/* pop the group's first cpu */
2905		cpu = cpumask_first(&mask);
2906		group_map[cpu] = group;
2907		group_cnt[group]++;
2908		cpumask_clear_cpu(cpu, &mask);
2909
2910		for_each_cpu(tcpu, &mask) {
2911			if (!cpu_distance_fn ||
2912			    (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
2913			     cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
2914				group_map[tcpu] = group;
2915				group_cnt[group]++;
2916				cpumask_clear_cpu(tcpu, &mask);
2917			}
2918		}
2919	}
2920	nr_groups = group;
2921
2922	/*
2923	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
2924	 * Expand the unit_size until we use >= 75% of the units allocated.
2925	 * Related to atom_size, which could be much larger than the unit_size.
2926	 */
2927	last_allocs = INT_MAX;
2928	best_upa = 0;
2929	for (upa = max_upa; upa; upa--) {
2930		int allocs = 0, wasted = 0;
2931
2932		if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
2933			continue;
2934
2935		for (group = 0; group < nr_groups; group++) {
2936			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
2937			allocs += this_allocs;
2938			wasted += this_allocs * upa - group_cnt[group];
2939		}
2940
2941		/*
2942		 * Don't accept if wastage is over 1/3.  The
2943		 * greater-than comparison ensures upa==1 always
2944		 * passes the following check.
2945		 */
2946		if (wasted > num_possible_cpus() / 3)
2947			continue;
2948
2949		/* and then don't consume more memory */
2950		if (allocs > last_allocs)
2951			break;
2952		last_allocs = allocs;
2953		best_upa = upa;
2954	}
2955	BUG_ON(!best_upa);
2956	upa = best_upa;
2957
2958	/* allocate and fill alloc_info */
2959	for (group = 0; group < nr_groups; group++)
2960		nr_units += roundup(group_cnt[group], upa);
2961
2962	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
2963	if (!ai)
2964		return ERR_PTR(-ENOMEM);
2965	cpu_map = ai->groups[0].cpu_map;
2966
2967	for (group = 0; group < nr_groups; group++) {
2968		ai->groups[group].cpu_map = cpu_map;
2969		cpu_map += roundup(group_cnt[group], upa);
2970	}
2971
2972	ai->static_size = static_size;
2973	ai->reserved_size = reserved_size;
2974	ai->dyn_size = dyn_size;
2975	ai->unit_size = alloc_size / upa;
2976	ai->atom_size = atom_size;
2977	ai->alloc_size = alloc_size;
2978
2979	for (group = 0, unit = 0; group < nr_groups; group++) {
2980		struct pcpu_group_info *gi = &ai->groups[group];
2981
2982		/*
2983		 * Initialize base_offset as if all groups are located
2984		 * back-to-back.  The caller should update this to
2985		 * reflect actual allocation.
2986		 */
2987		gi->base_offset = unit * ai->unit_size;
2988
2989		for_each_possible_cpu(cpu)
2990			if (group_map[cpu] == group)
2991				gi->cpu_map[gi->nr_units++] = cpu;
2992		gi->nr_units = roundup(gi->nr_units, upa);
2993		unit += gi->nr_units;
2994	}
2995	BUG_ON(unit != nr_units);
2996
2997	return ai;
2998}
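/*
 * Worked example of the upa sizing above (numbers hypothetical):
 * size_sum == 300K with atom_size == 2M gives
 *
 *	min_unit_size = max(300K, PCPU_MIN_UNIT_SIZE) = 300K
 *	alloc_size    = roundup(300K, 2M)             = 2M
 *	upa           = 2M / 300K                     = 6
 *
 * 2M % 6 and 2M % 5 are non-zero, while 2M % 4 == 0 and 2M / 4 == 512K
 * is page aligned, so max_upa == 4 and each unit is 512K going into the
 * >= 75% usage pass.
 */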
2999#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
3000
3001#if defined(BUILD_EMBED_FIRST_CHUNK)
3002/**
3003 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
3004 * @reserved_size: the size of reserved percpu area in bytes
3005 * @dyn_size: minimum free size for dynamic allocation in bytes
3006 * @atom_size: allocation atom size
3007 * @cpu_distance_fn: callback to determine distance between cpus, optional
3008 * @alloc_fn: function to allocate percpu page
3009 * @free_fn: function to free percpu page
3010 *
3011 * This is a helper to ease setting up embedded first percpu chunk and
3012 * can be called where pcpu_setup_first_chunk() is expected.
3013 *
3014 * If this function is used to setup the first chunk, it is allocated
3015 * by calling @alloc_fn and used as-is without being mapped into
3016 * vmalloc area.  Allocations are always whole multiples of @atom_size
3017 * aligned to @atom_size.
3018 *
3019 * This enables the first chunk to piggy back on the linear physical
3020 * mapping which often uses larger page size.  Please note that this
3021 * can result in very sparse cpu->unit mapping on NUMA machines thus
3022 * requiring large vmalloc address space.  Don't use this allocator if
3023 * vmalloc space is not orders of magnitude larger than distances
3024 * between node memory addresses (ie. 32bit NUMA machines).
3025 *
3026 * @dyn_size specifies the minimum dynamic area size.
3027 *
3028 * If the needed size is smaller than the minimum or specified unit
3029 * size, the leftover is returned using @free_fn.
3030 *
3031 * RETURNS:
3032 * 0 on success, -errno on failure.
3033 */
3034int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
3035				  size_t atom_size,
3036				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
3037				  pcpu_fc_alloc_fn_t alloc_fn,
3038				  pcpu_fc_free_fn_t free_fn)
3039{
3040	void *base = (void *)ULONG_MAX;
3041	void **areas = NULL;
3042	struct pcpu_alloc_info *ai;
3043	size_t size_sum, areas_size;
3044	unsigned long max_distance;
3045	int group, i, highest_group, rc = 0;
3046
3047	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
3048				   cpu_distance_fn);
3049	if (IS_ERR(ai))
3050		return PTR_ERR(ai);
3051
3052	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
3053	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
3054
3055	areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
3056	if (!areas) {
3057		rc = -ENOMEM;
3058		goto out_free;
3059	}
3060
3061	/* allocate, copy and determine base address & max_distance */
3062	highest_group = 0;
3063	for (group = 0; group < ai->nr_groups; group++) {
3064		struct pcpu_group_info *gi = &ai->groups[group];
3065		unsigned int cpu = NR_CPUS;
3066		void *ptr;
3067
3068		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
3069			cpu = gi->cpu_map[i];
3070		BUG_ON(cpu == NR_CPUS);
3071
3072		/* allocate space for the whole group */
3073		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
3074		if (!ptr) {
3075			rc = -ENOMEM;
3076			goto out_free_areas;
3077		}
3078		/* kmemleak tracks the percpu allocations separately */
3079		kmemleak_free(ptr);
3080		areas[group] = ptr;
3081
3082		base = min(ptr, base);
3083		if (ptr > areas[highest_group])
3084			highest_group = group;
3085	}
3086	max_distance = areas[highest_group] - base;
3087	max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
3088
3089	/* warn if maximum distance is further than 75% of vmalloc space */
3090	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
3091		pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
3092				max_distance, VMALLOC_TOTAL);
3093#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
3094		/* and fail if we have fallback */
3095		rc = -EINVAL;
3096		goto out_free_areas;
3097#endif
3098	}
3099
3100	/*
3101	 * Copy data and free unused parts.  This should happen after all
3102	 * allocations are complete; otherwise, we may end up with
3103	 * overlapping groups.
3104	 */
3105	for (group = 0; group < ai->nr_groups; group++) {
3106		struct pcpu_group_info *gi = &ai->groups[group];
3107		void *ptr = areas[group];
3108
3109		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
3110			if (gi->cpu_map[i] == NR_CPUS) {
3111				/* unused unit, free whole */
3112				free_fn(ptr, ai->unit_size);
3113				continue;
3114			}
3115			/* copy and return the unused part */
3116			memcpy(ptr, __per_cpu_load, ai->static_size);
3117			free_fn(ptr + size_sum, ai->unit_size - size_sum);
3118		}
3119	}
3120
3121	/* base address is now known, determine group base offsets */
3122	for (group = 0; group < ai->nr_groups; group++) {
3123		ai->groups[group].base_offset = areas[group] - base;
3124	}
3125
3126	pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
3127		PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
3128		ai->dyn_size, ai->unit_size);
3129
3130	pcpu_setup_first_chunk(ai, base);
3131	goto out_free;
3132
3133out_free_areas:
3134	for (group = 0; group < ai->nr_groups; group++)
3135		if (areas[group])
3136			free_fn(areas[group],
3137				ai->groups[group].nr_units * ai->unit_size);
3138out_free:
3139	pcpu_free_alloc_info(ai);
3140	if (areas)
3141		memblock_free_early(__pa(areas), areas_size);
3142	return rc;
3143}
3144#endif /* BUILD_EMBED_FIRST_CHUNK */
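
/*
 * Illustrative sketch (not part of this file): a minimal NUMA-aware
 * caller of pcpu_embed_first_chunk().  All example_* names are
 * hypothetical, and early_cpu_to_node() stands in for whatever early
 * cpu->node lookup the arch provides (cf. the x86 implementation).
 */
static int __init example_cpu_distance(unsigned int from, unsigned int to)
{
	/* cpus on the same node are near, everything else is far */
	if (early_cpu_to_node(from) == early_cpu_to_node(to))
		return LOCAL_DISTANCE;
	return REMOTE_DISTANCE;
}

static void * __init example_fc_alloc(unsigned int cpu, size_t size,
				      size_t align)
{
	/* a real arch would try to allocate from @cpu's node first */
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init example_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

static void __init example_setup_per_cpu_areas(void)
{
	if (pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				   PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				   example_cpu_distance,
				   example_fc_alloc, example_fc_free) < 0)
		panic("Failed to initialize percpu areas.");
}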
3145
3146#ifdef BUILD_PAGE_FIRST_CHUNK
3147/**
3148 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
3149 * @reserved_size: the size of reserved percpu area in bytes
3150 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
3151 * @free_fn: function to free percpu page, always called with PAGE_SIZE
3152 * @populate_pte_fn: function to populate pte
3153 *
3154 * This is a helper to ease setting up a page-remapped first percpu
3155 * chunk and can be called where pcpu_setup_first_chunk() is expected.
3156 *
3157 * This is the basic allocator.  The static percpu area is allocated
3158 * page-by-page into the vmalloc area.
3159 *
3160 * RETURNS:
3161 * 0 on success, -errno on failure.
3162 */
3163int __init pcpu_page_first_chunk(size_t reserved_size,
3164				 pcpu_fc_alloc_fn_t alloc_fn,
3165				 pcpu_fc_free_fn_t free_fn,
3166				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
3167{
3168	static struct vm_struct vm;
3169	struct pcpu_alloc_info *ai;
3170	char psize_str[16];
3171	int unit_pages;
3172	size_t pages_size;
3173	struct page **pages;
3174	int unit, i, j, rc = 0;
3175	int upa;
3176	int nr_g0_units;
3177
3178	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
3179
3180	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
3181	if (IS_ERR(ai))
3182		return PTR_ERR(ai);
3183	BUG_ON(ai->nr_groups != 1);
3184	upa = ai->alloc_size / ai->unit_size;
3185	nr_g0_units = roundup(num_possible_cpus(), upa);
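	/*
	 * E.g. (hypothetical numbers): alloc_size = 256KiB and
	 * unit_size = 64KiB give upa = 4, so a machine with 6 possible
	 * cpus gets nr_g0_units = roundup(6, 4) = 8, two of them unused.
	 */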
3186	if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
3187		pcpu_free_alloc_info(ai);
3188		return -EINVAL;
3189	}
3190
3191	unit_pages = ai->unit_size >> PAGE_SHIFT;
3192
3193	/* unaligned allocations can't be freed, round up to page size */
3194	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
3195			       sizeof(pages[0]));
3196	pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
3197	if (!pages)
3198		panic("%s: Failed to allocate %zu bytes\n", __func__,
3199		      pages_size);
3200
3201	/* allocate pages */
3202	j = 0;
3203	for (unit = 0; unit < num_possible_cpus(); unit++) {
3204		unsigned int cpu = ai->groups[0].cpu_map[unit];
3205		for (i = 0; i < unit_pages; i++) {
3206			void *ptr;
3207
3208			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
3209			if (!ptr) {
3210				pr_warn("failed to allocate %s page for cpu%u\n",
3211						psize_str, cpu);
3212				goto enomem;
3213			}
3214			/* kmemleak tracks the percpu allocations separately */
3215			kmemleak_free(ptr);
3216			pages[j++] = virt_to_page(ptr);
3217		}
3218	}
3219
3220	/* allocate vm area, map the pages and copy static data */
3221	vm.flags = VM_ALLOC;
3222	vm.size = num_possible_cpus() * ai->unit_size;
3223	vm_area_register_early(&vm, PAGE_SIZE);
3224
3225	for (unit = 0; unit < num_possible_cpus(); unit++) {
3226		unsigned long unit_addr =
3227			(unsigned long)vm.addr + unit * ai->unit_size;
3228
3229		for (i = 0; i < unit_pages; i++)
3230			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
3231
3232		/* pte already populated, the following shouldn't fail */
3233		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
3234				      unit_pages);
3235		if (rc < 0)
3236			panic("failed to map percpu area, err=%d\n", rc);
3237
3238		/*
3239		 * FIXME: Archs with virtual cache should flush local
3240		 * cache for the linear mapping here - something
3241		 * equivalent to flush_cache_vmap() on the local cpu.
3242		 * flush_cache_vmap() can't be used as most supporting
3243		 * data structures are not set up yet.
3244		 */
3245
3246		/* copy static data */
3247		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
3248	}
3249
3250	/* we're ready, commit */
3251	pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
3252		unit_pages, psize_str, ai->static_size,
3253		ai->reserved_size, ai->dyn_size);
3254
3255	pcpu_setup_first_chunk(ai, vm.addr);
3256	goto out_free_ar;
3257
3258enomem:
3259	while (--j >= 0)
3260		free_fn(page_address(pages[j]), PAGE_SIZE);
3261	rc = -ENOMEM;
3262out_free_ar:
3263	memblock_free_early(__pa(pages), pages_size);
3264	pcpu_free_alloc_info(ai);
3265	return rc;
3266}
3267#endif /* BUILD_PAGE_FIRST_CHUNK */
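
/*
 * Illustrative sketch (not part of this file) of a @populate_pte_fn for
 * pcpu_page_first_chunk(), loosely modeled on arch implementations.  It
 * only needs to make sure the page table levels down to the pte exist
 * for @addr; the actual mapping is installed by __pcpu_map_pages().
 * Real code would size the tables with the proper *_TABLE_SIZE macros
 * rather than PAGE_SIZE.
 */
static void __init example_populate_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		pgd_populate(&init_mm, pgd,
			     memblock_alloc(PAGE_SIZE, PAGE_SIZE));

	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		p4d_populate(&init_mm, p4d,
			     memblock_alloc(PAGE_SIZE, PAGE_SIZE));

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		pud_populate(&init_mm, pud,
			     memblock_alloc(PAGE_SIZE, PAGE_SIZE));

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd))
		pmd_populate_kernel(&init_mm, pmd,
				    memblock_alloc(PAGE_SIZE, PAGE_SIZE));
}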
3268
3269#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
3270/*
3271 * Generic SMP percpu area setup.
3272 *
3273 * The embedding helper is used because its behavior closely resembles
3274 * the original non-dynamic generic percpu area setup.  This is
3275 * important because many archs have addressing restrictions and might
3276 * fail if the percpu area is located far away from the previous
3277 * location.  As an added bonus, in non-NUMA cases, embedding is
3278 * generally a good idea TLB-wise because the percpu area can piggyback
3279 * on the physical linear memory mapping, which uses large page
3280 * mappings on applicable archs.
3281 */
3282unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
3283EXPORT_SYMBOL(__per_cpu_offset);
3284
3285static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
3286				       size_t align)
3287{
3288	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
3289}
3290
3291static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
3292{
3293	memblock_free_early(__pa(ptr), size);
3294}
3295
3296void __init setup_per_cpu_areas(void)
3297{
3298	unsigned long delta;
3299	unsigned int cpu;
3300	int rc;
3301
3302	/*
3303	 * Always reserve area for module percpu variables.  That's
3304	 * what the legacy allocator did.
3305	 */
3306	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
3307				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
3308				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
3309	if (rc < 0)
3310		panic("Failed to initialize percpu areas.");
3311
3312	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
3313	for_each_possible_cpu(cpu)
3314		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
3315}
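
/*
 * With the offsets above in place, the generic accessors in
 * include/asm-generic/percpu.h can turn a static percpu address into
 * the address of a given cpu's copy, conceptually:
 *
 *	#define per_cpu_offset(x) (__per_cpu_offset[x])
 *
 *	per_cpu_ptr(ptr, cpu) is then roughly
 *	(typeof(ptr))((unsigned long)(ptr) + per_cpu_offset(cpu))
 */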
3316#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
3317
3318#else	/* CONFIG_SMP */
3319
3320/*
3321 * UP percpu area setup.
3322 *
3323 * UP always uses the km-based percpu allocator with identity mapping.
3324 * Static percpu variables are indistinguishable from the usual static
3325 * variables and don't require any special preparation.
3326 */
3327void __init setup_per_cpu_areas(void)
3328{
3329	const size_t unit_size =
3330		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
3331					 PERCPU_DYNAMIC_RESERVE));
3332	struct pcpu_alloc_info *ai;
3333	void *fc;
3334
3335	ai = pcpu_alloc_alloc_info(1, 1);
3336	fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
3337	if (!ai || !fc)
3338		panic("Failed to allocate memory for percpu areas.");
3339	/* kmemleak tracks the percpu allocations separately */
3340	kmemleak_free(fc);
3341
3342	ai->dyn_size = unit_size;
3343	ai->unit_size = unit_size;
3344	ai->atom_size = unit_size;
3345	ai->alloc_size = unit_size;
3346	ai->groups[0].nr_units = 1;
3347	ai->groups[0].cpu_map[0] = 0;
3348
3349	pcpu_setup_first_chunk(ai, fc);
3350	pcpu_free_alloc_info(ai);
3351}
3352
3353#endif	/* CONFIG_SMP */
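
/*
 * Illustrative note: whichever setup path ran above, the accessor API is
 * the same for users, e.g. (hypothetical variable):
 *
 *	static DEFINE_PER_CPU(int, example_counter);
 *
 *	this_cpu_inc(example_counter);
 *
 * On SMP this resolves through the unit offsets configured above; on UP
 * it degenerates into a plain access to the static variable.
 */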
3354
3355/*
3356 * pcpu_nr_pages - calculate total number of populated backing pages
3357 *
3358 * This reflects the number of pages populated to back chunks.  Metadata is
3359 * excluded from the number exposed in meminfo because the number of backing
3360 * pages scales with the number of cpus and can quickly outweigh the memory
3361 * used for metadata.  Excluding it also keeps this calculation nice and simple.
3362 *
3363 * RETURNS:
3364 * Total number of populated backing pages in use by the allocator.
3365 */
3366unsigned long pcpu_nr_pages(void)
3367{
3368	return pcpu_nr_populated * pcpu_nr_units;
3369}
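
/*
 * For reference, fs/proc/meminfo.c exposes this value as the "Percpu:"
 * line, along the lines of:
 *
 *	show_val_kb(m, "Percpu: ", pcpu_nr_pages());
 */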
3370
3371/*
3372 * The percpu allocator is initialized early during boot, when neither slab
3373 * nor workqueue is available.  Plug async management until everything is up
3374 * and running.
3375 */
3376static int __init percpu_enable_async(void)
3377{
3378	pcpu_async_enabled = true;
3379	return 0;
3380}
3381subsys_initcall(percpu_enable_async);
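
/*
 * Once async management is enabled, the hot alloc/free paths in this
 * file can defer chunk maintenance to the balance worker via the
 * pcpu_schedule_balance_work() helper defined earlier, essentially:
 *
 *	if (pcpu_async_enabled)
 *		schedule_work(&pcpu_balance_work);
 */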