   1/*
   2 * mm/percpu.c - percpu memory allocator
   3 *
   4 * Copyright (C) 2009		SUSE Linux Products GmbH
   5 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
   6 *
   7 * This file is released under the GPLv2.
   8 *
   9 * This is the percpu allocator which can handle both static and dynamic
  10 * areas.  Percpu areas are allocated in chunks.  Each chunk consists of
  11 * a boot-time determined number of units and the first chunk is used
  12 * for static percpu variables in the kernel image (special boot time
  13 * alloc/init handling is necessary as these areas need to be brought up
  14 * before allocation services are running).  Units grow as necessary and
  15 * all units grow or shrink in unison.
  16 * When a chunk is filled up, another chunk is allocated.
  17 *
  18 *  c0                           c1                         c2
  19 *  -------------------          -------------------        ------------
  20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  21 *  -------------------  ......  -------------------  ....  ------------
  22 *
  23 * Allocation is done in offset-size areas of single unit space.  Ie,
  24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  25 * c1:u1, c1:u2 and c1:u3.  On UMA, units correspond directly to
  26 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
  27 * Percpu access can be done by configuring percpu base registers
  28 * according to cpu to unit mapping and pcpu_unit_size.
  29 *
  30 * There are usually many small percpu allocations, many of them being
  31 * as small as 4 bytes.  The allocator organizes chunks into lists
  32 * according to free size and tries to allocate from the fullest one.
  33 * Each chunk keeps the maximum contiguous area size hint which is
  34 * guaranteed to be equal to or larger than the maximum contiguous
  35 * area in the chunk.  This helps the allocator not to iterate the
  36 * chunk maps unnecessarily.
  37 *
  38 * Allocation state in each chunk is kept using an array of integers
  39 * on chunk->map.  A positive value in the map represents a free
  40 * region and negative allocated.  Allocation inside a chunk is done
  41 * by scanning this map sequentially and serving the first matching
  42 * entry.  This is mostly copied from the percpu_modalloc() allocator.
  43 * Chunks can be determined from the address using the index field
  44 * in the page struct. The index field contains a pointer to the chunk.
  45 *
  46 * To use this allocator, arch code should do the following.
  47 *
  48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  49 *   regular address to percpu pointer and back if they need to be
  50 *   different from the default
  51 *
  52 * - use pcpu_setup_first_chunk() during percpu area initialization to
  53 *   setup the first chunk containing the kernel static percpu area
  54 */
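
/*
 * As a concrete (hypothetical) example of the area map encoding described
 * above: a chunk with a 4k unit that has served a 128 byte and a 64 byte
 * allocation at offsets 0 and 256 might look like
 *
 *	chunk->map[]    = { -128, 128, -64, 3776 };
 *	chunk->map_used = 4;
 *
 * ie. 128 bytes allocated, 128 bytes free, 64 bytes allocated and 3776
 * bytes free.  An entry's offset is implicit - it is the sum of the
 * absolute values of the entries preceding it.
 */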
  55
  56#include <linux/bitmap.h>
  57#include <linux/bootmem.h>
  58#include <linux/err.h>
  59#include <linux/list.h>
  60#include <linux/log2.h>
  61#include <linux/mm.h>
  62#include <linux/module.h>
  63#include <linux/mutex.h>
  64#include <linux/percpu.h>
  65#include <linux/pfn.h>
  66#include <linux/slab.h>
  67#include <linux/spinlock.h>
  68#include <linux/vmalloc.h>
  69#include <linux/workqueue.h>
  70
  71#include <asm/cacheflush.h>
  72#include <asm/sections.h>
  73#include <asm/tlbflush.h>
  74#include <asm/io.h>
  75
  76#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
  77#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
  78
  79#ifdef CONFIG_SMP
  80/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
  81#ifndef __addr_to_pcpu_ptr
  82#define __addr_to_pcpu_ptr(addr)					\
  83	(void __percpu *)((unsigned long)(addr) -			\
  84			  (unsigned long)pcpu_base_addr	+		\
  85			  (unsigned long)__per_cpu_start)
  86#endif
  87#ifndef __pcpu_ptr_to_addr
  88#define __pcpu_ptr_to_addr(ptr)						\
  89	(void __force *)((unsigned long)(ptr) +				\
  90			 (unsigned long)pcpu_base_addr -		\
  91			 (unsigned long)__per_cpu_start)
  92#endif
  93#else	/* CONFIG_SMP */
  94/* on UP, it's always identity mapped */
  95#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
  96#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
  97#endif	/* CONFIG_SMP */
  98
  99struct pcpu_chunk {
 100	struct list_head	list;		/* linked to pcpu_slot lists */
 101	int			free_size;	/* free bytes in the chunk */
 102	int			contig_hint;	/* max contiguous size hint */
 103	void			*base_addr;	/* base address of this chunk */
 104	int			map_used;	/* # of map entries used */
 105	int			map_alloc;	/* # of map entries allocated */
 106	int			*map;		/* allocation map */
 107	void			*data;		/* chunk data */
 108	bool			immutable;	/* no [de]population allowed */
 109	unsigned long		populated[];	/* populated bitmap */
 110};
 111
 112static int pcpu_unit_pages __read_mostly;
 113static int pcpu_unit_size __read_mostly;
 114static int pcpu_nr_units __read_mostly;
 115static int pcpu_atom_size __read_mostly;
 116static int pcpu_nr_slots __read_mostly;
 117static size_t pcpu_chunk_struct_size __read_mostly;
 118
 119/* cpus with the lowest and highest unit numbers */
 120static unsigned int pcpu_first_unit_cpu __read_mostly;
 121static unsigned int pcpu_last_unit_cpu __read_mostly;
 122
 123/* the address of the first chunk which starts with the kernel static area */
 124void *pcpu_base_addr __read_mostly;
 125EXPORT_SYMBOL_GPL(pcpu_base_addr);
 126
 127static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
 128const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
 129
 130/* group information, used for vm allocation */
 131static int pcpu_nr_groups __read_mostly;
 132static const unsigned long *pcpu_group_offsets __read_mostly;
 133static const size_t *pcpu_group_sizes __read_mostly;
 134
 135/*
 136 * The first chunk which always exists.  Note that unlike other
 137 * chunks, this one can be allocated and mapped in several different
 138 * ways and thus often doesn't live in the vmalloc area.
 139 */
 140static struct pcpu_chunk *pcpu_first_chunk;
 141
 142/*
 143 * Optional reserved chunk.  This chunk reserves part of the first
  144 * chunk and serves it for reserved allocations.  The chunk is
  145 * reserved up to the offset in pcpu_reserved_chunk_limit.  When the
  146 * reserved area doesn't exist, the following variables contain NULL
  147 * and 0 respectively.
 148 */
 149static struct pcpu_chunk *pcpu_reserved_chunk;
 150static int pcpu_reserved_chunk_limit;
 151
 152/*
 153 * Synchronization rules.
 154 *
 155 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 156 * protects allocation/reclaim paths, chunks, populated bitmap and
 157 * vmalloc mapping.  The latter is a spinlock and protects the index
 158 * data structures - chunk slots, chunks and area maps in chunks.
 159 *
 160 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 161 * pcpu_lock is grabbed and released as necessary.  All actual memory
 162 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 163 * general, percpu memory can't be allocated with irq off but
 164 * irqsave/restore are still used in alloc path so that it can be used
 165 * from early init path - sched_init() specifically.
 166 *
 167 * Free path accesses and alters only the index data structures, so it
 168 * can be safely called from atomic context.  When memory needs to be
 169 * returned to the system, free path schedules reclaim_work which
 170 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
  171 * reclaimed, releases both locks and frees the chunks.  Note that it's
 172 * necessary to grab both locks to remove a chunk from circulation as
 173 * allocation path might be referencing the chunk with only
 174 * pcpu_alloc_mutex locked.
 175 */
 176static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
 177static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
 178
 179static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 180
 181/* reclaim work to release fully free chunks, scheduled from free path */
 182static void pcpu_reclaim(struct work_struct *work);
 183static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
 184
 185static bool pcpu_addr_in_first_chunk(void *addr)
 186{
 187	void *first_start = pcpu_first_chunk->base_addr;
 188
 189	return addr >= first_start && addr < first_start + pcpu_unit_size;
 190}
 191
 192static bool pcpu_addr_in_reserved_chunk(void *addr)
 193{
 194	void *first_start = pcpu_first_chunk->base_addr;
 195
 196	return addr >= first_start &&
 197		addr < first_start + pcpu_reserved_chunk_limit;
 198}
 199
 200static int __pcpu_size_to_slot(int size)
 201{
 202	int highbit = fls(size);	/* size is in bytes */
 203	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 204}
 205
 206static int pcpu_size_to_slot(int size)
 207{
 208	if (size == pcpu_unit_size)
 209		return pcpu_nr_slots - 1;
 210	return __pcpu_size_to_slot(size);
 211}
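
/*
 * For example, with PCPU_SLOT_BASE_SHIFT == 5 a 64 byte free area maps to
 * slot max(fls(64) - 5 + 2, 1) == 4 and a 1k area to slot 8, while a
 * fully free chunk (free_size == pcpu_unit_size) always goes to the last
 * slot, pcpu_nr_slots - 1.
 */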
 212
 213static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 214{
 215	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
 216		return 0;
 217
 218	return pcpu_size_to_slot(chunk->free_size);
 219}
 220
 221/* set the pointer to a chunk in a page struct */
 222static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 223{
 224	page->index = (unsigned long)pcpu;
 225}
 226
 227/* obtain pointer to a chunk from a page struct */
 228static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 229{
 230	return (struct pcpu_chunk *)page->index;
 231}
 232
 233static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 234{
 235	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 236}
 237
 238static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 239				     unsigned int cpu, int page_idx)
 240{
 241	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 242		(page_idx << PAGE_SHIFT);
 243}
 244
 245static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
 246					   int *rs, int *re, int end)
 247{
 248	*rs = find_next_zero_bit(chunk->populated, end, *rs);
 249	*re = find_next_bit(chunk->populated, end, *rs + 1);
 250}
 251
 252static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
 253					 int *rs, int *re, int end)
 254{
 255	*rs = find_next_bit(chunk->populated, end, *rs);
 256	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
 257}
 258
 259/*
 260 * (Un)populated page region iterators.  Iterate over (un)populated
 261 * page regions between @start and @end in @chunk.  @rs and @re should
 262 * be integer variables and will be set to start and end page index of
 263 * the current region.
 264 */
 265#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
 266	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
 267	     (rs) < (re);						    \
 268	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
 269
 270#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
 271	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
 272	     (rs) < (re);						    \
 273	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
 274
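/*
 * For example, a (hypothetical) caller wanting to operate on every
 * unpopulated page region of a chunk would do something like
 *
 *	int rs, re;
 *
 *	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages)
 *		do_something(chunk, rs, re);
 *
 * where each iteration sees a maximal run of unpopulated pages [rs, re)
 * and do_something() is a made-up helper.
 */
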
 275/**
 276 * pcpu_mem_alloc - allocate memory
 277 * @size: bytes to allocate
 278 *
 279 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 280 * kzalloc() is used; otherwise, vmalloc() is used.  The returned
 281 * memory is always zeroed.
 282 *
 283 * CONTEXT:
 284 * Does GFP_KERNEL allocation.
 285 *
 286 * RETURNS:
 287 * Pointer to the allocated area on success, NULL on failure.
 288 */
 289static void *pcpu_mem_alloc(size_t size)
 290{
 291	if (WARN_ON_ONCE(!slab_is_available()))
 292		return NULL;
 293
 294	if (size <= PAGE_SIZE)
 295		return kzalloc(size, GFP_KERNEL);
 296	else
 297		return vzalloc(size);
 298}
 299
 300/**
 301 * pcpu_mem_free - free memory
 302 * @ptr: memory to free
 303 * @size: size of the area
 304 *
 305 * Free @ptr.  @ptr should have been allocated using pcpu_mem_alloc().
 306 */
 307static void pcpu_mem_free(void *ptr, size_t size)
 308{
 309	if (size <= PAGE_SIZE)
 310		kfree(ptr);
 311	else
 312		vfree(ptr);
 313}
 314
 315/**
 316 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 317 * @chunk: chunk of interest
 318 * @oslot: the previous slot it was on
 319 *
 320 * This function is called after an allocation or free changed @chunk.
 321 * New slot according to the changed state is determined and @chunk is
 322 * moved to the slot.  Note that the reserved chunk is never put on
 323 * chunk slots.
 324 *
 325 * CONTEXT:
 326 * pcpu_lock.
 327 */
 328static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 329{
 330	int nslot = pcpu_chunk_slot(chunk);
 331
 332	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
 333		if (oslot < nslot)
 334			list_move(&chunk->list, &pcpu_slot[nslot]);
 335		else
 336			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
 337	}
 338}
 339
 340/**
 341 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 342 * @chunk: chunk of interest
 343 *
 344 * Determine whether area map of @chunk needs to be extended to
 345 * accommodate a new allocation.
 346 *
 347 * CONTEXT:
 348 * pcpu_lock.
 349 *
 350 * RETURNS:
 351 * New target map allocation length if extension is necessary, 0
 352 * otherwise.
 353 */
 354static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 355{
 356	int new_alloc;
 357
 358	if (chunk->map_alloc >= chunk->map_used + 2)
 359		return 0;
 360
 361	new_alloc = PCPU_DFL_MAP_ALLOC;
 362	while (new_alloc < chunk->map_used + 2)
 363		new_alloc *= 2;
 364
 365	return new_alloc;
 366}
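
/*
 * For example, a chunk whose map has map_alloc == 16 and map_used == 15
 * cannot take the up to two new entries an allocation may add, so the
 * function above returns 32 (PCPU_DFL_MAP_ALLOC doubled until it covers
 * map_used + 2).
 */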
 367
 368/**
 369 * pcpu_extend_area_map - extend area map of a chunk
 370 * @chunk: chunk of interest
 371 * @new_alloc: new target allocation length of the area map
 372 *
 373 * Extend area map of @chunk to have @new_alloc entries.
 374 *
 375 * CONTEXT:
 376 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 377 *
 378 * RETURNS:
 379 * 0 on success, -errno on failure.
 380 */
 381static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 382{
 383	int *old = NULL, *new = NULL;
 384	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
 385	unsigned long flags;
 386
 387	new = pcpu_mem_alloc(new_size);
 388	if (!new)
 389		return -ENOMEM;
 390
 391	/* acquire pcpu_lock and switch to new area map */
 392	spin_lock_irqsave(&pcpu_lock, flags);
 393
 394	if (new_alloc <= chunk->map_alloc)
 395		goto out_unlock;
 396
 397	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 398	old = chunk->map;
 399
 400	memcpy(new, old, old_size);
 401
 402	chunk->map_alloc = new_alloc;
 403	chunk->map = new;
 404	new = NULL;
 405
 406out_unlock:
 407	spin_unlock_irqrestore(&pcpu_lock, flags);
 408
 409	/*
 410	 * pcpu_mem_free() might end up calling vfree() which uses
 411	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
 412	 */
 413	pcpu_mem_free(old, old_size);
 414	pcpu_mem_free(new, new_size);
 415
 416	return 0;
 417}
 418
 419/**
 420 * pcpu_split_block - split a map block
 421 * @chunk: chunk of interest
 422 * @i: index of map block to split
 423 * @head: head size in bytes (can be 0)
 424 * @tail: tail size in bytes (can be 0)
 425 *
 426 * Split the @i'th map block into two or three blocks.  If @head is
 427 * non-zero, @head bytes block is inserted before block @i moving it
 428 * to @i+1 and reducing its size by @head bytes.
 429 *
 430 * If @tail is non-zero, the target block, which can be @i or @i+1
 431 * depending on @head, is reduced by @tail bytes and @tail byte block
 432 * is inserted after the target block.
 433 *
 434 * @chunk->map must have enough free slots to accommodate the split.
 435 *
 436 * CONTEXT:
 437 * pcpu_lock.
 438 */
 439static void pcpu_split_block(struct pcpu_chunk *chunk, int i,
 440			     int head, int tail)
 441{
 442	int nr_extra = !!head + !!tail;
 443
 444	BUG_ON(chunk->map_alloc < chunk->map_used + nr_extra);
 445
 446	/* insert new subblocks */
 447	memmove(&chunk->map[i + nr_extra], &chunk->map[i],
 448		sizeof(chunk->map[0]) * (chunk->map_used - i));
 449	chunk->map_used += nr_extra;
 450
 451	if (head) {
 452		chunk->map[i + 1] = chunk->map[i] - head;
 453		chunk->map[i++] = head;
 454	}
 455	if (tail) {
 456		chunk->map[i++] -= tail;
 457		chunk->map[i] = tail;
 458	}
 459}
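
/*
 * For example, splitting a 512 byte free entry with @head == 16 and
 * @tail == 432 (ie. carving out a 64 byte area) turns the single entry
 *
 *	{ 512 }		into	{ 16, 64, 432 }
 *
 * with the 64 byte middle entry left for the caller to mark allocated.
 */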
 460
 461/**
 462 * pcpu_alloc_area - allocate area from a pcpu_chunk
 463 * @chunk: chunk of interest
 464 * @size: wanted size in bytes
 465 * @align: wanted align
 466 *
 467 * Try to allocate @size bytes area aligned at @align from @chunk.
 468 * Note that this function only allocates the offset.  It doesn't
 469 * populate or map the area.
 470 *
 471 * @chunk->map must have at least two free slots.
 472 *
 473 * CONTEXT:
 474 * pcpu_lock.
 475 *
 476 * RETURNS:
 477 * Allocated offset in @chunk on success, -1 if no matching area is
 478 * found.
 479 */
 480static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 481{
 482	int oslot = pcpu_chunk_slot(chunk);
 483	int max_contig = 0;
 484	int i, off;
 485
 486	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++])) {
 487		bool is_last = i + 1 == chunk->map_used;
 488		int head, tail;
 489
 490		/* extra for alignment requirement */
 491		head = ALIGN(off, align) - off;
 492		BUG_ON(i == 0 && head != 0);
 493
 494		if (chunk->map[i] < 0)
 495			continue;
 496		if (chunk->map[i] < head + size) {
 497			max_contig = max(chunk->map[i], max_contig);
 498			continue;
 499		}
 500
 501		/*
 502		 * If head is small or the previous block is free,
 503		 * merge'em.  Note that 'small' is defined as smaller
 504		 * than sizeof(int), which is very small but isn't too
 505		 * uncommon for percpu allocations.
 506		 */
 507		if (head && (head < sizeof(int) || chunk->map[i - 1] > 0)) {
 508			if (chunk->map[i - 1] > 0)
 509				chunk->map[i - 1] += head;
 510			else {
 511				chunk->map[i - 1] -= head;
 512				chunk->free_size -= head;
 513			}
 514			chunk->map[i] -= head;
 515			off += head;
 516			head = 0;
 517		}
 518
 519		/* if tail is small, just keep it around */
 520		tail = chunk->map[i] - head - size;
 521		if (tail < sizeof(int))
 522			tail = 0;
 523
 524		/* split if warranted */
 525		if (head || tail) {
 526			pcpu_split_block(chunk, i, head, tail);
 527			if (head) {
 528				i++;
 529				off += head;
 530				max_contig = max(chunk->map[i - 1], max_contig);
 531			}
 532			if (tail)
 533				max_contig = max(chunk->map[i + 1], max_contig);
 534		}
 535
 536		/* update hint and mark allocated */
 537		if (is_last)
 538			chunk->contig_hint = max_contig; /* fully scanned */
 539		else
 540			chunk->contig_hint = max(chunk->contig_hint,
 541						 max_contig);
 542
 543		chunk->free_size -= chunk->map[i];
 544		chunk->map[i] = -chunk->map[i];
 545
 546		pcpu_chunk_relocate(chunk, oslot);
 547		return off;
 548	}
 549
 550	chunk->contig_hint = max_contig;	/* fully scanned */
 551	pcpu_chunk_relocate(chunk, oslot);
 552
 553	/* tell the upper layer that this chunk has no matching area */
 554	return -1;
 555}
 556
 557/**
 558 * pcpu_free_area - free area to a pcpu_chunk
 559 * @chunk: chunk of interest
 560 * @freeme: offset of area to free
 561 *
 562 * Free area starting from @freeme to @chunk.  Note that this function
 563 * only modifies the allocation map.  It doesn't depopulate or unmap
 564 * the area.
 565 *
 566 * CONTEXT:
 567 * pcpu_lock.
 568 */
 569static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 570{
 571	int oslot = pcpu_chunk_slot(chunk);
 572	int i, off;
 573
 574	for (i = 0, off = 0; i < chunk->map_used; off += abs(chunk->map[i++]))
 575		if (off == freeme)
 576			break;
 577	BUG_ON(off != freeme);
 578	BUG_ON(chunk->map[i] > 0);
 579
 580	chunk->map[i] = -chunk->map[i];
 581	chunk->free_size += chunk->map[i];
 582
 583	/* merge with previous? */
 584	if (i > 0 && chunk->map[i - 1] >= 0) {
 585		chunk->map[i - 1] += chunk->map[i];
 586		chunk->map_used--;
 587		memmove(&chunk->map[i], &chunk->map[i + 1],
 588			(chunk->map_used - i) * sizeof(chunk->map[0]));
 589		i--;
 590	}
 591	/* merge with next? */
 592	if (i + 1 < chunk->map_used && chunk->map[i + 1] >= 0) {
 593		chunk->map[i] += chunk->map[i + 1];
 594		chunk->map_used--;
 595		memmove(&chunk->map[i + 1], &chunk->map[i + 2],
 596			(chunk->map_used - (i + 1)) * sizeof(chunk->map[0]));
 597	}
 598
 599	chunk->contig_hint = max(chunk->map[i], chunk->contig_hint);
 600	pcpu_chunk_relocate(chunk, oslot);
 601}
 602
 603static struct pcpu_chunk *pcpu_alloc_chunk(void)
 604{
 605	struct pcpu_chunk *chunk;
 606
 607	chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
 608	if (!chunk)
 609		return NULL;
 610
 611	chunk->map = pcpu_mem_alloc(PCPU_DFL_MAP_ALLOC * sizeof(chunk->map[0]));
 612	if (!chunk->map) {
 613		kfree(chunk);
 614		return NULL;
 615	}
 616
 617	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 618	chunk->map[chunk->map_used++] = pcpu_unit_size;
 619
 620	INIT_LIST_HEAD(&chunk->list);
 621	chunk->free_size = pcpu_unit_size;
 622	chunk->contig_hint = pcpu_unit_size;
 623
 624	return chunk;
 625}
 626
 627static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 628{
 629	if (!chunk)
 630		return;
 631	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
 632	kfree(chunk);
 633}
 634
 635/*
 636 * Chunk management implementation.
 637 *
 638 * To allow different implementations, chunk alloc/free and
 639 * [de]population are implemented in a separate file which is pulled
 640 * into this file and compiled together.  The following functions
 641 * should be implemented.
 642 *
 643 * pcpu_populate_chunk		- populate the specified range of a chunk
 644 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 645 * pcpu_create_chunk		- create a new chunk
 646 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
  647 * pcpu_addr_to_page		- translate address to the corresponding struct page
 648 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 649 */
 650static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
 651static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
 652static struct pcpu_chunk *pcpu_create_chunk(void);
 653static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 654static struct page *pcpu_addr_to_page(void *addr);
 655static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
 656
 657#ifdef CONFIG_NEED_PER_CPU_KM
 658#include "percpu-km.c"
 659#else
 660#include "percpu-vm.c"
 661#endif
 662
 663/**
 664 * pcpu_chunk_addr_search - determine chunk containing specified address
 665 * @addr: address for which the chunk needs to be determined.
 666 *
 667 * RETURNS:
 668 * The address of the found chunk.
 669 */
 670static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 671{
 672	/* is it in the first chunk? */
 673	if (pcpu_addr_in_first_chunk(addr)) {
 674		/* is it in the reserved area? */
 675		if (pcpu_addr_in_reserved_chunk(addr))
 676			return pcpu_reserved_chunk;
 677		return pcpu_first_chunk;
 678	}
 679
 680	/*
 681	 * The address is relative to unit0 which might be unused and
 682	 * thus unmapped.  Offset the address to the unit space of the
 683	 * current processor before looking it up in the vmalloc
 684	 * space.  Note that any possible cpu id can be used here, so
 685	 * there's no need to worry about preemption or cpu hotplug.
 686	 */
 687	addr += pcpu_unit_offsets[raw_smp_processor_id()];
 688	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
 689}
 690
 691/**
 692 * pcpu_alloc - the percpu allocator
 693 * @size: size of area to allocate in bytes
 694 * @align: alignment of area (max PAGE_SIZE)
 695 * @reserved: allocate from the reserved chunk if available
 696 *
 697 * Allocate percpu area of @size bytes aligned at @align.
 698 *
 699 * CONTEXT:
 700 * Does GFP_KERNEL allocation.
 701 *
 702 * RETURNS:
 703 * Percpu pointer to the allocated area on success, NULL on failure.
 704 */
 705static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 706{
 707	static int warn_limit = 10;
 708	struct pcpu_chunk *chunk;
 709	const char *err;
 710	int slot, off, new_alloc;
 711	unsigned long flags;
 712
 713	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 714		WARN(true, "illegal size (%zu) or align (%zu) for "
 715		     "percpu allocation\n", size, align);
 716		return NULL;
 717	}
 718
 719	mutex_lock(&pcpu_alloc_mutex);
 720	spin_lock_irqsave(&pcpu_lock, flags);
 721
 722	/* serve reserved allocations from the reserved chunk if available */
 723	if (reserved && pcpu_reserved_chunk) {
 724		chunk = pcpu_reserved_chunk;
 725
 726		if (size > chunk->contig_hint) {
 727			err = "alloc from reserved chunk failed";
 728			goto fail_unlock;
 729		}
 730
 731		while ((new_alloc = pcpu_need_to_extend(chunk))) {
 732			spin_unlock_irqrestore(&pcpu_lock, flags);
 733			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
 734				err = "failed to extend area map of reserved chunk";
 735				goto fail_unlock_mutex;
 736			}
 737			spin_lock_irqsave(&pcpu_lock, flags);
 738		}
 739
 740		off = pcpu_alloc_area(chunk, size, align);
 741		if (off >= 0)
 742			goto area_found;
 743
 744		err = "alloc from reserved chunk failed";
 745		goto fail_unlock;
 746	}
 747
 748restart:
 749	/* search through normal chunks */
 750	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
 751		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
 752			if (size > chunk->contig_hint)
 753				continue;
 754
 755			new_alloc = pcpu_need_to_extend(chunk);
 756			if (new_alloc) {
 757				spin_unlock_irqrestore(&pcpu_lock, flags);
 758				if (pcpu_extend_area_map(chunk,
 759							 new_alloc) < 0) {
 760					err = "failed to extend area map";
 761					goto fail_unlock_mutex;
 762				}
 763				spin_lock_irqsave(&pcpu_lock, flags);
 764				/*
 765				 * pcpu_lock has been dropped, need to
  766				 * restart pcpu_slot list walking.
 767				 */
 768				goto restart;
 769			}
 770
 771			off = pcpu_alloc_area(chunk, size, align);
 772			if (off >= 0)
 773				goto area_found;
 774		}
 775	}
 776
 777	/* hmmm... no space left, create a new chunk */
 778	spin_unlock_irqrestore(&pcpu_lock, flags);
 779
 780	chunk = pcpu_create_chunk();
 781	if (!chunk) {
 782		err = "failed to allocate new chunk";
 783		goto fail_unlock_mutex;
 784	}
 785
 786	spin_lock_irqsave(&pcpu_lock, flags);
 787	pcpu_chunk_relocate(chunk, -1);
 788	goto restart;
 789
 790area_found:
 791	spin_unlock_irqrestore(&pcpu_lock, flags);
 792
 793	/* populate, map and clear the area */
 794	if (pcpu_populate_chunk(chunk, off, size)) {
 795		spin_lock_irqsave(&pcpu_lock, flags);
 796		pcpu_free_area(chunk, off);
 797		err = "failed to populate";
 798		goto fail_unlock;
 799	}
 800
 801	mutex_unlock(&pcpu_alloc_mutex);
 802
 803	/* return address relative to base address */
 804	return __addr_to_pcpu_ptr(chunk->base_addr + off);
 805
 806fail_unlock:
 807	spin_unlock_irqrestore(&pcpu_lock, flags);
 808fail_unlock_mutex:
 809	mutex_unlock(&pcpu_alloc_mutex);
 810	if (warn_limit) {
 811		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
 812			   "%s\n", size, align, err);
 813		dump_stack();
 814		if (!--warn_limit)
 815			pr_info("PERCPU: limit reached, disable warning\n");
 816	}
 817	return NULL;
 818}
 819
 820/**
 821 * __alloc_percpu - allocate dynamic percpu area
 822 * @size: size of area to allocate in bytes
 823 * @align: alignment of area (max PAGE_SIZE)
 824 *
 825 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 826 * Might sleep.  Might trigger writeouts.
 827 *
 828 * CONTEXT:
 829 * Does GFP_KERNEL allocation.
 830 *
 831 * RETURNS:
 832 * Percpu pointer to the allocated area on success, NULL on failure.
 833 */
 834void __percpu *__alloc_percpu(size_t size, size_t align)
 835{
 836	return pcpu_alloc(size, align, false);
 837}
 838EXPORT_SYMBOL_GPL(__alloc_percpu);
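
/*
 * Callers normally go through the type-safe alloc_percpu() wrapper; a
 * (hypothetical) user looks roughly like
 *
 *	struct my_stats __percpu *stats = alloc_percpu(struct my_stats);
 *
 *	if (stats)
 *		this_cpu_ptr(stats)->events++;
 *	...
 *	free_percpu(stats);
 *
 * struct my_stats and its events member are made-up names.
 */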
 839
 840/**
 841 * __alloc_reserved_percpu - allocate reserved percpu area
 842 * @size: size of area to allocate in bytes
 843 * @align: alignment of area (max PAGE_SIZE)
 844 *
 845 * Allocate zero-filled percpu area of @size bytes aligned at @align
 846 * from reserved percpu area if arch has set it up; otherwise,
 847 * allocation is served from the same dynamic area.  Might sleep.
 848 * Might trigger writeouts.
 849 *
 850 * CONTEXT:
 851 * Does GFP_KERNEL allocation.
 852 *
 853 * RETURNS:
 854 * Percpu pointer to the allocated area on success, NULL on failure.
 855 */
 856void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 857{
 858	return pcpu_alloc(size, align, true);
 859}
 860
 861/**
 862 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 863 * @work: unused
 864 *
 865 * Reclaim all fully free chunks except for the first one.
 866 *
 867 * CONTEXT:
 868 * workqueue context.
 869 */
 870static void pcpu_reclaim(struct work_struct *work)
 871{
 872	LIST_HEAD(todo);
 873	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
 874	struct pcpu_chunk *chunk, *next;
 875
 876	mutex_lock(&pcpu_alloc_mutex);
 877	spin_lock_irq(&pcpu_lock);
 878
 879	list_for_each_entry_safe(chunk, next, head, list) {
 880		WARN_ON(chunk->immutable);
 881
 882		/* spare the first one */
 883		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
 884			continue;
 885
 886		list_move(&chunk->list, &todo);
 887	}
 888
 889	spin_unlock_irq(&pcpu_lock);
 890
 891	list_for_each_entry_safe(chunk, next, &todo, list) {
 892		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
 893		pcpu_destroy_chunk(chunk);
 894	}
 895
 896	mutex_unlock(&pcpu_alloc_mutex);
 897}
 898
 899/**
 900 * free_percpu - free percpu area
 901 * @ptr: pointer to area to free
 902 *
 903 * Free percpu area @ptr.
 904 *
 905 * CONTEXT:
 906 * Can be called from atomic context.
 907 */
 908void free_percpu(void __percpu *ptr)
 909{
 910	void *addr;
 911	struct pcpu_chunk *chunk;
 912	unsigned long flags;
 913	int off;
 914
 915	if (!ptr)
 916		return;
 917
 918	addr = __pcpu_ptr_to_addr(ptr);
 919
 920	spin_lock_irqsave(&pcpu_lock, flags);
 921
 922	chunk = pcpu_chunk_addr_search(addr);
 923	off = addr - chunk->base_addr;
 924
 925	pcpu_free_area(chunk, off);
 926
  927	/* if there is more than one fully free chunk, wake up the grim reaper */
 928	if (chunk->free_size == pcpu_unit_size) {
 929		struct pcpu_chunk *pos;
 930
 931		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 932			if (pos != chunk) {
 933				schedule_work(&pcpu_reclaim_work);
 934				break;
 935			}
 936	}
 937
 938	spin_unlock_irqrestore(&pcpu_lock, flags);
 939}
 940EXPORT_SYMBOL_GPL(free_percpu);
 941
 942/**
 943 * is_kernel_percpu_address - test whether address is from static percpu area
 944 * @addr: address to test
 945 *
 946 * Test whether @addr belongs to in-kernel static percpu area.  Module
 947 * static percpu areas are not considered.  For those, use
 948 * is_module_percpu_address().
 949 *
 950 * RETURNS:
 951 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 952 */
 953bool is_kernel_percpu_address(unsigned long addr)
 954{
 955#ifdef CONFIG_SMP
 956	const size_t static_size = __per_cpu_end - __per_cpu_start;
 957	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 958	unsigned int cpu;
 959
 960	for_each_possible_cpu(cpu) {
 961		void *start = per_cpu_ptr(base, cpu);
 962
 963		if ((void *)addr >= start && (void *)addr < start + static_size)
 964			return true;
  965	}
 966#endif
 967	/* on UP, can't distinguish from other static vars, always false */
 968	return false;
 969}
 970
 971/**
 972 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 973 * @addr: the address to be converted to physical address
 974 *
  975 * Given @addr which is a dereferenceable address obtained via one of
 976 * percpu access macros, this function translates it into its physical
 977 * address.  The caller is responsible for ensuring @addr stays valid
 978 * until this function finishes.
 979 *
 980 * RETURNS:
 981 * The physical address for @addr.
 982 */
 983phys_addr_t per_cpu_ptr_to_phys(void *addr)
 984{
 985	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 986	bool in_first_chunk = false;
 987	unsigned long first_start, first_end;
 988	unsigned int cpu;
 989
 990	/*
 991	 * The following test on first_start/end isn't strictly
 992	 * necessary but will speed up lookups of addresses which
 993	 * aren't in the first chunk.
 994	 */
 995	first_start = pcpu_chunk_addr(pcpu_first_chunk, pcpu_first_unit_cpu, 0);
 996	first_end = pcpu_chunk_addr(pcpu_first_chunk, pcpu_last_unit_cpu,
 997				    pcpu_unit_pages);
 998	if ((unsigned long)addr >= first_start &&
 999	    (unsigned long)addr < first_end) {
1000		for_each_possible_cpu(cpu) {
1001			void *start = per_cpu_ptr(base, cpu);
1002
1003			if (addr >= start && addr < start + pcpu_unit_size) {
1004				in_first_chunk = true;
1005				break;
1006			}
1007		}
1008	}
1009
1010	if (in_first_chunk) {
1011		if (!is_vmalloc_addr(addr))
1012			return __pa(addr);
1013		else
1014			return page_to_phys(vmalloc_to_page(addr));
1015	} else
1016		return page_to_phys(pcpu_addr_to_page(addr));
1017}
1018
1019/**
1020 * pcpu_alloc_alloc_info - allocate percpu allocation info
1021 * @nr_groups: the number of groups
1022 * @nr_units: the number of units
1023 *
1024 * Allocate ai which is large enough for @nr_groups groups containing
1025 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1026 * cpu_map array which is long enough for @nr_units and filled with
 1027 * NR_CPUS.  It's the caller's responsibility to initialize the cpu_map
 1028 * pointers of the other groups.
1029 *
1030 * RETURNS:
1031 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1032 * failure.
1033 */
1034struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1035						      int nr_units)
1036{
1037	struct pcpu_alloc_info *ai;
1038	size_t base_size, ai_size;
1039	void *ptr;
1040	int unit;
1041
1042	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1043			  __alignof__(ai->groups[0].cpu_map[0]));
1044	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1045
1046	ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size));
1047	if (!ptr)
1048		return NULL;
1049	ai = ptr;
1050	ptr += base_size;
1051
1052	ai->groups[0].cpu_map = ptr;
1053
1054	for (unit = 0; unit < nr_units; unit++)
1055		ai->groups[0].cpu_map[unit] = NR_CPUS;
1056
1057	ai->nr_groups = nr_groups;
1058	ai->__ai_size = PFN_ALIGN(ai_size);
1059
1060	return ai;
1061}
1062
1063/**
1064 * pcpu_free_alloc_info - free percpu allocation info
1065 * @ai: pcpu_alloc_info to free
1066 *
1067 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1068 */
1069void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1070{
1071	free_bootmem(__pa(ai), ai->__ai_size);
1072}
1073
1074/**
1075 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1076 * @lvl: loglevel
1077 * @ai: allocation info to dump
1078 *
1079 * Print out information about @ai using loglevel @lvl.
1080 */
1081static void pcpu_dump_alloc_info(const char *lvl,
1082				 const struct pcpu_alloc_info *ai)
1083{
1084	int group_width = 1, cpu_width = 1, width;
1085	char empty_str[] = "--------";
1086	int alloc = 0, alloc_end = 0;
1087	int group, v;
1088	int upa, apl;	/* units per alloc, allocs per line */
1089
1090	v = ai->nr_groups;
1091	while (v /= 10)
1092		group_width++;
1093
1094	v = num_possible_cpus();
1095	while (v /= 10)
1096		cpu_width++;
1097	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1098
1099	upa = ai->alloc_size / ai->unit_size;
1100	width = upa * (cpu_width + 1) + group_width + 3;
1101	apl = rounddown_pow_of_two(max(60 / width, 1));
1102
1103	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1104	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1105	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1106
1107	for (group = 0; group < ai->nr_groups; group++) {
1108		const struct pcpu_group_info *gi = &ai->groups[group];
1109		int unit = 0, unit_end = 0;
1110
1111		BUG_ON(gi->nr_units % upa);
1112		for (alloc_end += gi->nr_units / upa;
1113		     alloc < alloc_end; alloc++) {
1114			if (!(alloc % apl)) {
1115				printk("\n");
1116				printk("%spcpu-alloc: ", lvl);
1117			}
1118			printk("[%0*d] ", group_width, group);
1119
1120			for (unit_end += upa; unit < unit_end; unit++)
1121				if (gi->cpu_map[unit] != NR_CPUS)
1122					printk("%0*d ", cpu_width,
1123					       gi->cpu_map[unit]);
1124				else
1125					printk("%s ", empty_str);
1126		}
1127	}
1128	printk("\n");
1129}
1130
1131/**
1132 * pcpu_setup_first_chunk - initialize the first percpu chunk
 1133 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1134 * @base_addr: mapped address
1135 *
1136 * Initialize the first percpu chunk which contains the kernel static
 1137 * percpu area.  This function is to be called from the arch percpu area
1138 * setup path.
1139 *
1140 * @ai contains all information necessary to initialize the first
1141 * chunk and prime the dynamic percpu allocator.
1142 *
1143 * @ai->static_size is the size of static percpu area.
1144 *
1145 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1146 * reserve after the static area in the first chunk.  This reserves
 1147 * part of the first chunk so that it's available only through reserved
1148 * percpu allocation.  This is primarily used to serve module percpu
1149 * static areas on architectures where the addressing model has
1150 * limited offset range for symbol relocations to guarantee module
1151 * percpu symbols fall inside the relocatable range.
1152 *
1153 * @ai->dyn_size determines the number of bytes available for dynamic
1154 * allocation in the first chunk.  The area between @ai->static_size +
1155 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1156 *
1157 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1158 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1159 * @ai->dyn_size.
1160 *
1161 * @ai->atom_size is the allocation atom size and used as alignment
1162 * for vm areas.
1163 *
1164 * @ai->alloc_size is the allocation size and always multiple of
1165 * @ai->atom_size.  This is larger than @ai->atom_size if
1166 * @ai->unit_size is larger than @ai->atom_size.
1167 *
1168 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1169 * percpu areas.  Units which should be colocated are put into the
1170 * same group.  Dynamic VM areas will be allocated according to these
1171 * groupings.  If @ai->nr_groups is zero, a single group containing
1172 * all units is assumed.
1173 *
1174 * The caller should have mapped the first chunk at @base_addr and
1175 * copied static data to each unit.
1176 *
1177 * If the first chunk ends up with both reserved and dynamic areas, it
1178 * is served by two chunks - one to serve the core static and reserved
1179 * areas and the other for the dynamic area.  They share the same vm
 1180 * and page map but use different area allocation maps to stay away
1181 * from each other.  The latter chunk is circulated in the chunk slots
1182 * and available for dynamic allocation like any other chunks.
1183 *
1184 * RETURNS:
1185 * 0 on success, -errno on failure.
1186 */
1187int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1188				  void *base_addr)
1189{
1190	static char cpus_buf[4096] __initdata;
1191	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1192	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1193	size_t dyn_size = ai->dyn_size;
1194	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1195	struct pcpu_chunk *schunk, *dchunk = NULL;
1196	unsigned long *group_offsets;
1197	size_t *group_sizes;
1198	unsigned long *unit_off;
1199	unsigned int cpu;
1200	int *unit_map;
1201	int group, unit, i;
1202
1203	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1204
1205#define PCPU_SETUP_BUG_ON(cond)	do {					\
1206	if (unlikely(cond)) {						\
1207		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
1208		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
1209		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1210		BUG();							\
1211	}								\
1212} while (0)
1213
1214	/* sanity checks */
1215	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1216#ifdef CONFIG_SMP
1217	PCPU_SETUP_BUG_ON(!ai->static_size);
1218	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
1219#endif
1220	PCPU_SETUP_BUG_ON(!base_addr);
1221	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
1222	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1223	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1224	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1225	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1226	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1227
1228	/* process group information and build config tables accordingly */
1229	group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0]));
1230	group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0]));
1231	unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0]));
1232	unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0]));
1233
1234	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1235		unit_map[cpu] = UINT_MAX;
1236	pcpu_first_unit_cpu = NR_CPUS;
1237
1238	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1239		const struct pcpu_group_info *gi = &ai->groups[group];
1240
1241		group_offsets[group] = gi->base_offset;
1242		group_sizes[group] = gi->nr_units * ai->unit_size;
1243
1244		for (i = 0; i < gi->nr_units; i++) {
1245			cpu = gi->cpu_map[i];
1246			if (cpu == NR_CPUS)
1247				continue;
1248
1249			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1250			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1251			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1252
1253			unit_map[cpu] = unit + i;
1254			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1255
1256			if (pcpu_first_unit_cpu == NR_CPUS)
1257				pcpu_first_unit_cpu = cpu;
1258			pcpu_last_unit_cpu = cpu;
1259		}
1260	}
1261	pcpu_nr_units = unit;
1262
1263	for_each_possible_cpu(cpu)
1264		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1265
1266	/* we're done parsing the input, undefine BUG macro and dump config */
1267#undef PCPU_SETUP_BUG_ON
1268	pcpu_dump_alloc_info(KERN_DEBUG, ai);
1269
1270	pcpu_nr_groups = ai->nr_groups;
1271	pcpu_group_offsets = group_offsets;
1272	pcpu_group_sizes = group_sizes;
1273	pcpu_unit_map = unit_map;
1274	pcpu_unit_offsets = unit_off;
1275
1276	/* determine basic parameters */
1277	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1278	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1279	pcpu_atom_size = ai->atom_size;
1280	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1281		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1282
1283	/*
1284	 * Allocate chunk slots.  The additional last slot is for
1285	 * empty chunks.
1286	 */
1287	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1288	pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0]));
1289	for (i = 0; i < pcpu_nr_slots; i++)
1290		INIT_LIST_HEAD(&pcpu_slot[i]);
1291
1292	/*
1293	 * Initialize static chunk.  If reserved_size is zero, the
1294	 * static chunk covers static area + dynamic allocation area
1295	 * in the first chunk.  If reserved_size is not zero, it
1296	 * covers static area + reserved area (mostly used for module
1297	 * static percpu allocation).
1298	 */
1299	schunk = alloc_bootmem(pcpu_chunk_struct_size);
1300	INIT_LIST_HEAD(&schunk->list);
1301	schunk->base_addr = base_addr;
1302	schunk->map = smap;
1303	schunk->map_alloc = ARRAY_SIZE(smap);
1304	schunk->immutable = true;
1305	bitmap_fill(schunk->populated, pcpu_unit_pages);
1306
1307	if (ai->reserved_size) {
1308		schunk->free_size = ai->reserved_size;
1309		pcpu_reserved_chunk = schunk;
1310		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1311	} else {
1312		schunk->free_size = dyn_size;
1313		dyn_size = 0;			/* dynamic area covered */
1314	}
1315	schunk->contig_hint = schunk->free_size;
1316
1317	schunk->map[schunk->map_used++] = -ai->static_size;
1318	if (schunk->free_size)
1319		schunk->map[schunk->map_used++] = schunk->free_size;
1320
1321	/* init dynamic chunk if necessary */
1322	if (dyn_size) {
1323		dchunk = alloc_bootmem(pcpu_chunk_struct_size);
1324		INIT_LIST_HEAD(&dchunk->list);
1325		dchunk->base_addr = base_addr;
1326		dchunk->map = dmap;
1327		dchunk->map_alloc = ARRAY_SIZE(dmap);
1328		dchunk->immutable = true;
1329		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1330
1331		dchunk->contig_hint = dchunk->free_size = dyn_size;
1332		dchunk->map[dchunk->map_used++] = -pcpu_reserved_chunk_limit;
1333		dchunk->map[dchunk->map_used++] = dchunk->free_size;
1334	}
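
	/*
	 * For example (hypothetical sizes), with a 28k static area, 8k
	 * reserved area and 20k dynamic area the two chunks end up with
	 *
	 *	schunk->map[] = { -28672, 8192 }
	 *	dchunk->map[] = { -36864, 20480 }
	 *
	 * ie. the reserved chunk serves the 8k right after the static
	 * area and the dynamic chunk serves the 20k after that.
	 */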
1335
1336	/* link the first chunk in */
1337	pcpu_first_chunk = dchunk ?: schunk;
1338	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1339
1340	/* we're done */
1341	pcpu_base_addr = base_addr;
1342	return 0;
1343}
1344
1345#ifdef CONFIG_SMP
1346
1347const char *pcpu_fc_names[PCPU_FC_NR] __initdata = {
1348	[PCPU_FC_AUTO]	= "auto",
1349	[PCPU_FC_EMBED]	= "embed",
1350	[PCPU_FC_PAGE]	= "page",
1351};
1352
1353enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1354
1355static int __init percpu_alloc_setup(char *str)
1356{
1357	if (0)
1358		/* nada */;
1359#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1360	else if (!strcmp(str, "embed"))
1361		pcpu_chosen_fc = PCPU_FC_EMBED;
1362#endif
1363#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1364	else if (!strcmp(str, "page"))
1365		pcpu_chosen_fc = PCPU_FC_PAGE;
1366#endif
1367	else
1368		pr_warning("PERCPU: unknown allocator %s specified\n", str);
1369
1370	return 0;
1371}
1372early_param("percpu_alloc", percpu_alloc_setup);
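
/*
 * eg. booting with "percpu_alloc=page" selects the page-mapped first
 * chunk when CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK is available.
 */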
1373
1374/*
1375 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1376 * Build it if needed by the arch config or the generic setup is going
1377 * to be used.
1378 */
1379#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1380	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1381#define BUILD_EMBED_FIRST_CHUNK
1382#endif
1383
1384/* build pcpu_page_first_chunk() iff needed by the arch config */
1385#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1386#define BUILD_PAGE_FIRST_CHUNK
1387#endif
1388
1389/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1390#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1391/**
1392 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1393 * @reserved_size: the size of reserved percpu area in bytes
1394 * @dyn_size: minimum free size for dynamic allocation in bytes
1395 * @atom_size: allocation atom size
1396 * @cpu_distance_fn: callback to determine distance between cpus, optional
1397 *
1398 * This function determines grouping of units, their mappings to cpus
1399 * and other parameters considering needed percpu size, allocation
1400 * atom size and distances between CPUs.
1401 *
 1402 * Groups are always multiples of atom size and CPUs which are of
1403 * LOCAL_DISTANCE both ways are grouped together and share space for
1404 * units in the same group.  The returned configuration is guaranteed
 1405 * to have CPUs on different nodes in different groups and >=75% usage
1406 * of allocated virtual address space.
1407 *
1408 * RETURNS:
1409 * On success, pointer to the new allocation_info is returned.  On
1410 * failure, ERR_PTR value is returned.
1411 */
1412static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1413				size_t reserved_size, size_t dyn_size,
1414				size_t atom_size,
1415				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1416{
1417	static int group_map[NR_CPUS] __initdata;
1418	static int group_cnt[NR_CPUS] __initdata;
1419	const size_t static_size = __per_cpu_end - __per_cpu_start;
1420	int nr_groups = 1, nr_units = 0;
1421	size_t size_sum, min_unit_size, alloc_size;
1422	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1423	int last_allocs, group, unit;
1424	unsigned int cpu, tcpu;
1425	struct pcpu_alloc_info *ai;
1426	unsigned int *cpu_map;
1427
1428	/* this function may be called multiple times */
1429	memset(group_map, 0, sizeof(group_map));
1430	memset(group_cnt, 0, sizeof(group_cnt));
1431
1432	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1433	size_sum = PFN_ALIGN(static_size + reserved_size +
1434			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1435	dyn_size = size_sum - static_size - reserved_size;
1436
1437	/*
1438	 * Determine min_unit_size, alloc_size and max_upa such that
1439	 * alloc_size is multiple of atom_size and is the smallest
1440	 * which can accommodate 4k aligned segments which are equal to
1441	 * or larger than min_unit_size.
1442	 */
1443	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1444
1445	alloc_size = roundup(min_unit_size, atom_size);
1446	upa = alloc_size / min_unit_size;
1447	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1448		upa--;
1449	max_upa = upa;
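
	/*
	 * eg. (hypothetical numbers) with a 2M atom and a 44k
	 * min_unit_size, alloc_size becomes 2M and upa starts at 46;
	 * the loop above walks it down to 32, the largest count that
	 * divides 2M evenly into page-aligned 64k units.
	 */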
1450
1451	/* group cpus according to their proximity */
1452	for_each_possible_cpu(cpu) {
1453		group = 0;
1454	next_group:
1455		for_each_possible_cpu(tcpu) {
1456			if (cpu == tcpu)
1457				break;
1458			if (group_map[tcpu] == group && cpu_distance_fn &&
1459			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1460			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1461				group++;
1462				nr_groups = max(nr_groups, group + 1);
1463				goto next_group;
1464			}
1465		}
1466		group_map[cpu] = group;
1467		group_cnt[group]++;
1468	}
1469
1470	/*
1471	 * Expand unit size until address space usage goes over 75%
1472	 * and then as much as possible without using more address
1473	 * space.
1474	 */
1475	last_allocs = INT_MAX;
1476	for (upa = max_upa; upa; upa--) {
1477		int allocs = 0, wasted = 0;
1478
1479		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1480			continue;
1481
1482		for (group = 0; group < nr_groups; group++) {
1483			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1484			allocs += this_allocs;
1485			wasted += this_allocs * upa - group_cnt[group];
1486		}
1487
1488		/*
1489		 * Don't accept if wastage is over 1/3.  The
1490		 * greater-than comparison ensures upa==1 always
1491		 * passes the following check.
1492		 */
1493		if (wasted > num_possible_cpus() / 3)
1494			continue;
1495
1496		/* and then don't consume more memory */
1497		if (allocs > last_allocs)
1498			break;
1499		last_allocs = allocs;
1500		best_upa = upa;
1501	}
1502	upa = best_upa;
1503
1504	/* allocate and fill alloc_info */
1505	for (group = 0; group < nr_groups; group++)
1506		nr_units += roundup(group_cnt[group], upa);
1507
1508	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1509	if (!ai)
1510		return ERR_PTR(-ENOMEM);
1511	cpu_map = ai->groups[0].cpu_map;
1512
1513	for (group = 0; group < nr_groups; group++) {
1514		ai->groups[group].cpu_map = cpu_map;
1515		cpu_map += roundup(group_cnt[group], upa);
1516	}
1517
1518	ai->static_size = static_size;
1519	ai->reserved_size = reserved_size;
1520	ai->dyn_size = dyn_size;
1521	ai->unit_size = alloc_size / upa;
1522	ai->atom_size = atom_size;
1523	ai->alloc_size = alloc_size;
1524
1525	for (group = 0, unit = 0; group_cnt[group]; group++) {
1526		struct pcpu_group_info *gi = &ai->groups[group];
1527
1528		/*
1529		 * Initialize base_offset as if all groups are located
1530		 * back-to-back.  The caller should update this to
1531		 * reflect actual allocation.
1532		 */
1533		gi->base_offset = unit * ai->unit_size;
1534
1535		for_each_possible_cpu(cpu)
1536			if (group_map[cpu] == group)
1537				gi->cpu_map[gi->nr_units++] = cpu;
1538		gi->nr_units = roundup(gi->nr_units, upa);
1539		unit += gi->nr_units;
1540	}
1541	BUG_ON(unit != nr_units);
1542
1543	return ai;
1544}
1545#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1546
1547#if defined(BUILD_EMBED_FIRST_CHUNK)
1548/**
1549 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1550 * @reserved_size: the size of reserved percpu area in bytes
1551 * @dyn_size: minimum free size for dynamic allocation in bytes
1552 * @atom_size: allocation atom size
1553 * @cpu_distance_fn: callback to determine distance between cpus, optional
1554 * @alloc_fn: function to allocate percpu page
1555 * @free_fn: function to free percpu page
1556 *
1557 * This is a helper to ease setting up embedded first percpu chunk and
1558 * can be called where pcpu_setup_first_chunk() is expected.
1559 *
1560 * If this function is used to setup the first chunk, it is allocated
1561 * by calling @alloc_fn and used as-is without being mapped into
1562 * vmalloc area.  Allocations are always whole multiples of @atom_size
1563 * aligned to @atom_size.
1564 *
1565 * This enables the first chunk to piggy back on the linear physical
1566 * mapping which often uses larger page size.  Please note that this
1567 * can result in very sparse cpu->unit mapping on NUMA machines thus
1568 * requiring large vmalloc address space.  Don't use this allocator if
1569 * vmalloc space is not orders of magnitude larger than distances
1570 * between node memory addresses (ie. 32bit NUMA machines).
1571 *
1572 * @dyn_size specifies the minimum dynamic area size.
1573 *
1574 * If the needed size is smaller than the minimum or specified unit
1575 * size, the leftover is returned using @free_fn.
1576 *
1577 * RETURNS:
1578 * 0 on success, -errno on failure.
1579 */
1580int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1581				  size_t atom_size,
1582				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1583				  pcpu_fc_alloc_fn_t alloc_fn,
1584				  pcpu_fc_free_fn_t free_fn)
1585{
1586	void *base = (void *)ULONG_MAX;
1587	void **areas = NULL;
1588	struct pcpu_alloc_info *ai;
1589	size_t size_sum, areas_size, max_distance;
1590	int group, i, rc;
1591
1592	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1593				   cpu_distance_fn);
1594	if (IS_ERR(ai))
1595		return PTR_ERR(ai);
1596
1597	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1598	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1599
1600	areas = alloc_bootmem_nopanic(areas_size);
1601	if (!areas) {
1602		rc = -ENOMEM;
1603		goto out_free;
1604	}
1605
1606	/* allocate, copy and determine base address */
1607	for (group = 0; group < ai->nr_groups; group++) {
1608		struct pcpu_group_info *gi = &ai->groups[group];
1609		unsigned int cpu = NR_CPUS;
1610		void *ptr;
1611
1612		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1613			cpu = gi->cpu_map[i];
1614		BUG_ON(cpu == NR_CPUS);
1615
1616		/* allocate space for the whole group */
1617		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1618		if (!ptr) {
1619			rc = -ENOMEM;
1620			goto out_free_areas;
1621		}
1622		areas[group] = ptr;
1623
1624		base = min(ptr, base);
1625
1626		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1627			if (gi->cpu_map[i] == NR_CPUS) {
1628				/* unused unit, free whole */
1629				free_fn(ptr, ai->unit_size);
1630				continue;
1631			}
1632			/* copy and return the unused part */
1633			memcpy(ptr, __per_cpu_load, ai->static_size);
1634			free_fn(ptr + size_sum, ai->unit_size - size_sum);
1635		}
1636	}
1637
1638	/* base address is now known, determine group base offsets */
1639	max_distance = 0;
1640	for (group = 0; group < ai->nr_groups; group++) {
1641		ai->groups[group].base_offset = areas[group] - base;
1642		max_distance = max_t(size_t, max_distance,
1643				     ai->groups[group].base_offset);
1644	}
1645	max_distance += ai->unit_size;
1646
1647	/* warn if maximum distance is further than 75% of vmalloc space */
1648	if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
1649		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1650			   "space 0x%lx\n", max_distance,
1651			   (unsigned long)(VMALLOC_END - VMALLOC_START));
1652#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1653		/* and fail if we have fallback */
1654		rc = -EINVAL;
1655		goto out_free;
1656#endif
1657	}
1658
1659	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1660		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1661		ai->dyn_size, ai->unit_size);
1662
1663	rc = pcpu_setup_first_chunk(ai, base);
1664	goto out_free;
1665
1666out_free_areas:
1667	for (group = 0; group < ai->nr_groups; group++)
1668		free_fn(areas[group],
1669			ai->groups[group].nr_units * ai->unit_size);
1670out_free:
1671	pcpu_free_alloc_info(ai);
1672	if (areas)
1673		free_bootmem(__pa(areas), areas_size);
1674	return rc;
1675}
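/*
 * Illustrative sketch (not part of this file): how a NUMA-aware arch might
 * call pcpu_embed_first_chunk().  The my_* helpers below are hypothetical
 * stand-ins; a real port supplies bootmem allocators that return memory
 * close to @cpu's node and a distance callback derived from its topology
 * (x86 does essentially this in arch/x86/kernel/setup_percpu.c).
 */
#if 0	/* example only, never compiled */
static void * __init my_pcpu_alloc(unsigned int cpu, size_t size,
				   size_t align)
{
	/* node-local bootmem allocation; details are arch specific */
	return __alloc_bootmem_node_nopanic(NODE_DATA(early_cpu_to_node(cpu)),
					    size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init my_pcpu_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static int __init my_cpu_distance(unsigned int from, unsigned int to)
{
	return early_cpu_to_node(from) == early_cpu_to_node(to) ?
		LOCAL_DISTANCE : REMOTE_DISTANCE;
}

static void __init my_setup_percpu(void)
{
	int rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
					PERCPU_DYNAMIC_RESERVE,
					PMD_SIZE,	/* e.g. 2MB atoms */
					my_cpu_distance,
					my_pcpu_alloc, my_pcpu_free);
	if (rc < 0)
		panic("percpu: embed first chunk failed, rc=%d", rc);
}
#endif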
1676#endif /* BUILD_EMBED_FIRST_CHUNK */
1677
1678#ifdef BUILD_PAGE_FIRST_CHUNK
1679/**
1680 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1681 * @reserved_size: the size of reserved percpu area in bytes
1682 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1683 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1684 * @populate_pte_fn: function to populate pte
1685 *
1686 * This is a helper to ease setting up page-remapped first percpu
1687 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1688 *
1689 * This is the basic allocator.  Static percpu area is allocated
1690 * page-by-page into vmalloc area.
1691 *
1692 * RETURNS:
1693 * 0 on success, -errno on failure.
1694 */
1695int __init pcpu_page_first_chunk(size_t reserved_size,
1696				 pcpu_fc_alloc_fn_t alloc_fn,
1697				 pcpu_fc_free_fn_t free_fn,
1698				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1699{
1700	static struct vm_struct vm;
1701	struct pcpu_alloc_info *ai;
1702	char psize_str[16];
1703	int unit_pages;
1704	size_t pages_size;
1705	struct page **pages;
1706	int unit, i, j, rc;
1707
1708	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1709
1710	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1711	if (IS_ERR(ai))
1712		return PTR_ERR(ai);
1713	BUG_ON(ai->nr_groups != 1);
1714	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1715
1716	unit_pages = ai->unit_size >> PAGE_SHIFT;
1717
1718	/* unaligned allocations can't be freed, round up to page size */
1719	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1720			       sizeof(pages[0]));
1721	pages = alloc_bootmem(pages_size);
1722
1723	/* allocate pages */
1724	j = 0;
1725	for (unit = 0; unit < num_possible_cpus(); unit++)
1726		for (i = 0; i < unit_pages; i++) {
1727			unsigned int cpu = ai->groups[0].cpu_map[unit];
1728			void *ptr;
1729
1730			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1731			if (!ptr) {
1732				pr_warning("PERCPU: failed to allocate %s page "
1733					   "for cpu%u\n", psize_str, cpu);
1734				goto enomem;
1735			}
1736			pages[j++] = virt_to_page(ptr);
1737		}
1738
1739	/* allocate vm area, map the pages and copy static data */
1740	vm.flags = VM_ALLOC;
1741	vm.size = num_possible_cpus() * ai->unit_size;
1742	vm_area_register_early(&vm, PAGE_SIZE);
1743
1744	for (unit = 0; unit < num_possible_cpus(); unit++) {
1745		unsigned long unit_addr =
1746			(unsigned long)vm.addr + unit * ai->unit_size;
1747
1748		for (i = 0; i < unit_pages; i++)
1749			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1750
1751		/* pte already populated, the following shouldn't fail */
1752		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1753				      unit_pages);
1754		if (rc < 0)
1755			panic("failed to map percpu area, err=%d\n", rc);
1756
1757		/*
1758		 * FIXME: Archs with virtual cache should flush local
1759		 * cache for the linear mapping here - something
1760		 * equivalent to flush_cache_vmap() on the local cpu.
1761		 * flush_cache_vmap() can't be used as most supporting
1762		 * data structures are not set up yet.
1763		 */
1764
1765		/* copy static data */
1766		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1767	}
1768
1769	/* we're ready, commit */
1770	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1771		unit_pages, psize_str, vm.addr, ai->static_size,
1772		ai->reserved_size, ai->dyn_size);
1773
1774	rc = pcpu_setup_first_chunk(ai, vm.addr);
1775	goto out_free_ar;
1776
1777enomem:
1778	while (--j >= 0)
1779		free_fn(page_address(pages[j]), PAGE_SIZE);
1780	rc = -ENOMEM;
1781out_free_ar:
1782	free_bootmem(__pa(pages), pages_size);
1783	pcpu_free_alloc_info(ai);
1784	return rc;
1785}
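/*
 * Illustrative sketch (not part of this file): a minimal
 * pcpu_page_first_chunk() caller.  my_pte_populate() stands in for an arch
 * helper that makes sure a kernel pte exists for the given address (x86
 * uses populate_extra_pte()); the alloc/free helpers only ever see
 * PAGE_SIZE requests.
 */
#if 0	/* example only, never compiled */
static void * __init my_page_alloc(unsigned int cpu, size_t size, size_t align)
{
	WARN_ON(size != PAGE_SIZE || align != PAGE_SIZE);
	return __alloc_bootmem_nopanic(PAGE_SIZE, PAGE_SIZE,
				       __pa(MAX_DMA_ADDRESS));
}

static void __init my_page_free(void *ptr, size_t size)
{
	free_bootmem(__pa(ptr), size);
}

static void __init my_pte_populate(unsigned long addr)
{
	populate_extra_pte(addr);	/* arch specific */
}

static void __init my_setup_percpu(void)
{
	if (pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, my_page_alloc,
				  my_page_free, my_pte_populate) < 0)
		panic("percpu: page first chunk failed");
}
#endif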
1786#endif /* BUILD_PAGE_FIRST_CHUNK */
1787
1788#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
1789/*
1790 * Generic SMP percpu area setup.
1791 *
1792 * The embedding helper is used because its behavior closely resembles
1793 * the original non-dynamic generic percpu area setup.  This is
1794 * important because many archs have addressing restrictions and might
1795 * fail if the percpu area is located far away from the previous
1796 * location.  As an added bonus, in non-NUMA cases, embedding is
1797 * generally a good idea TLB-wise because percpu area can piggy back
1798 * on the physical linear memory mapping which uses large page
1799 * mappings on applicable archs.
1800 */
1801unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1802EXPORT_SYMBOL(__per_cpu_offset);
1803
1804static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1805				       size_t align)
1806{
1807	return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
1808}
1809
1810static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1811{
1812	free_bootmem(__pa(ptr), size);
1813}
1814
1815void __init setup_per_cpu_areas(void)
1816{
1817	unsigned long delta;
1818	unsigned int cpu;
1819	int rc;
1820
1821	/*
1822	 * Always reserve area for module percpu variables.  That's
1823	 * what the legacy allocator did.
1824	 */
1825	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1826				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1827				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1828	if (rc < 0)
1829		panic("Failed to initialize percpu areas.");
1830
1831	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1832	for_each_possible_cpu(cpu)
1833		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1834}
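/*
 * Illustrative note: once __per_cpu_offset[] is filled in above, the
 * regular accessors boil down to pointer arithmetic with that array.  A
 * rough sketch of what per_cpu_ptr() does for a static variable (the real
 * macros live in linux/percpu.h and asm/percpu.h and are more involved):
 */
#if 0	/* example only, never compiled */
DEFINE_PER_CPU(unsigned long, my_hits);

static void my_reset_hits(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		/* effectively &my_hits + __per_cpu_offset[cpu] */
		unsigned long *p = per_cpu_ptr(&my_hits, cpu);

		*p = 0;
	}
}
#endif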
1835#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1836
1837#else	/* CONFIG_SMP */
1838
1839/*
1840 * UP percpu area setup.
1841 *
1842 * UP always uses km-based percpu allocator with identity mapping.
1843 * Static percpu variables are indistinguishable from the usual static
1844 * variables and don't require any special preparation.
1845 */
1846void __init setup_per_cpu_areas(void)
1847{
1848	const size_t unit_size =
1849		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
1850					 PERCPU_DYNAMIC_RESERVE));
1851	struct pcpu_alloc_info *ai;
1852	void *fc;
1853
1854	ai = pcpu_alloc_alloc_info(1, 1);
1855	fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
1856	if (!ai || !fc)
1857		panic("Failed to allocate memory for percpu areas.");
1858
1859	ai->dyn_size = unit_size;
1860	ai->unit_size = unit_size;
1861	ai->atom_size = unit_size;
1862	ai->alloc_size = unit_size;
1863	ai->groups[0].nr_units = 1;
1864	ai->groups[0].cpu_map[0] = 0;
1865
1866	if (pcpu_setup_first_chunk(ai, fc) < 0)
1867		panic("Failed to initialize percpu areas.");
1868}
1869
1870#endif	/* CONFIG_SMP */
1871
1872/*
1873 * First and reserved chunks are initialized with temporary allocation
1874 * map in initdata so that they can be used before slab is online.
1875 * This function is called after slab is brought up and replaces those
1876 * with properly allocated maps.
1877 */
1878void __init percpu_init_late(void)
1879{
1880	struct pcpu_chunk *target_chunks[] =
1881		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1882	struct pcpu_chunk *chunk;
1883	unsigned long flags;
1884	int i;
1885
1886	for (i = 0; (chunk = target_chunks[i]); i++) {
1887		int *map;
1888		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1889
1890		BUILD_BUG_ON(size > PAGE_SIZE);
1891
1892		map = pcpu_mem_alloc(size);
1893		BUG_ON(!map);
1894
1895		spin_lock_irqsave(&pcpu_lock, flags);
1896		memcpy(map, chunk->map, size);
1897		chunk->map = map;
1898		spin_unlock_irqrestore(&pcpu_lock, flags);
1899	}
1900}
v3.15
   1/*
   2 * mm/percpu.c - percpu memory allocator
   3 *
   4 * Copyright (C) 2009		SUSE Linux Products GmbH
   5 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
   6 *
   7 * This file is released under the GPLv2.
   8 *
   9 * This is percpu allocator which can handle both static and dynamic
  10 * areas.  Percpu areas are allocated in chunks.  Each chunk is
  11 * consisted of boot-time determined number of units and the first
  12 * chunk is used for static percpu variables in the kernel image
  13 * (special boot time alloc/init handling necessary as these areas
  14 * need to be brought up before allocation services are running).
  15 * Unit grows as necessary and all units grow or shrink in unison.
  16 * When a chunk is filled up, another chunk is allocated.
  17 *
  18 *  c0                           c1                         c2
  19 *  -------------------          -------------------        ------------
  20 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
  21 *  -------------------  ......  -------------------  ....  ------------
  22 *
  23 * Allocation is done in offset-size areas of single unit space.  Ie,
  24 * an area of 512 bytes at 6k in c1 occupies 512 bytes at 6k of c1:u0,
  25 * c1:u1, c1:u2 and c1:u3.  On UMA, units corresponds directly to
  26 * cpus.  On NUMA, the mapping can be non-linear and even sparse.
  27 * Percpu access can be done by configuring percpu base registers
  28 * according to cpu to unit mapping and pcpu_unit_size.
  29 *
  30 * There are usually many small percpu allocations many of them being
  31 * as small as 4 bytes.  The allocator organizes chunks into lists
  32 * according to free size and tries to allocate from the fullest one.
  33 * Each chunk keeps the maximum contiguous area size hint which is
  34 * guaranteed to be equal to or larger than the maximum contiguous
  35 * area in the chunk.  This helps the allocator not to iterate the
  36 * chunk maps unnecessarily.
  37 *
  38 * Allocation state in each chunk is kept using an array of integers
  39 * on chunk->map.  A positive value in the map represents a free
  40 * region and negative allocated.  Allocation inside a chunk is done
  41 * by scanning this map sequentially and serving the first matching
  42 * entry.  This is mostly copied from the percpu_modalloc() allocator.
  43 * Chunks can be determined from the address using the index field
  44 * in the page struct. The index field contains a pointer to the chunk.
  45 *
  46 * To use this allocator, arch code should do the followings.
  47 *
  48 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  49 *   regular address to percpu pointer and back if they need to be
  50 *   different from the default
  51 *
  52 * - use pcpu_setup_first_chunk() during percpu area initialization to
  53 *   setup the first chunk containing the kernel static percpu area
  54 */
  55
  56#include <linux/bitmap.h>
  57#include <linux/bootmem.h>
  58#include <linux/err.h>
  59#include <linux/list.h>
  60#include <linux/log2.h>
  61#include <linux/mm.h>
  62#include <linux/module.h>
  63#include <linux/mutex.h>
  64#include <linux/percpu.h>
  65#include <linux/pfn.h>
  66#include <linux/slab.h>
  67#include <linux/spinlock.h>
  68#include <linux/vmalloc.h>
  69#include <linux/workqueue.h>
  70#include <linux/kmemleak.h>
  71
  72#include <asm/cacheflush.h>
  73#include <asm/sections.h>
  74#include <asm/tlbflush.h>
  75#include <asm/io.h>
  76
  77#define PCPU_SLOT_BASE_SHIFT		5	/* 1-31 shares the same slot */
  78#define PCPU_DFL_MAP_ALLOC		16	/* start a map with 16 ents */
  79
  80#ifdef CONFIG_SMP
  81/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
  82#ifndef __addr_to_pcpu_ptr
  83#define __addr_to_pcpu_ptr(addr)					\
  84	(void __percpu *)((unsigned long)(addr) -			\
  85			  (unsigned long)pcpu_base_addr	+		\
  86			  (unsigned long)__per_cpu_start)
  87#endif
  88#ifndef __pcpu_ptr_to_addr
  89#define __pcpu_ptr_to_addr(ptr)						\
  90	(void __force *)((unsigned long)(ptr) +				\
  91			 (unsigned long)pcpu_base_addr -		\
  92			 (unsigned long)__per_cpu_start)
  93#endif
  94#else	/* CONFIG_SMP */
  95/* on UP, it's always identity mapped */
  96#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
  97#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
  98#endif	/* CONFIG_SMP */
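/*
 * Illustrative note: the bias above is what lets the link-time address of
 * a static percpu symbol double as a percpu pointer.  A symbol placed at
 * __per_cpu_start + X translates to pcpu_base_addr + X, i.e. offset X in
 * unit 0 of the first chunk; per_cpu_ptr() then adds the per-cpu unit
 * offset on top.  Dynamic allocations get the same bias applied in
 * pcpu_alloc() below, so both kinds of pointers behave identically.
 */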
  99
 100struct pcpu_chunk {
 101	struct list_head	list;		/* linked to pcpu_slot lists */
 102	int			free_size;	/* free bytes in the chunk */
 103	int			contig_hint;	/* max contiguous size hint */
 104	void			*base_addr;	/* base address of this chunk */
 105	int			map_used;	/* # of map entries used before the sentry */
 106	int			map_alloc;	/* # of map entries allocated */
 107	int			*map;		/* allocation map */
 108	void			*data;		/* chunk data */
 109	int			first_free;	/* no free below this */
 110	bool			immutable;	/* no [de]population allowed */
 111	unsigned long		populated[];	/* populated bitmap */
 112};
 113
 114static int pcpu_unit_pages __read_mostly;
 115static int pcpu_unit_size __read_mostly;
 116static int pcpu_nr_units __read_mostly;
 117static int pcpu_atom_size __read_mostly;
 118static int pcpu_nr_slots __read_mostly;
 119static size_t pcpu_chunk_struct_size __read_mostly;
 120
 121/* cpus with the lowest and highest unit addresses */
 122static unsigned int pcpu_low_unit_cpu __read_mostly;
 123static unsigned int pcpu_high_unit_cpu __read_mostly;
 124
 125/* the address of the first chunk which starts with the kernel static area */
 126void *pcpu_base_addr __read_mostly;
 127EXPORT_SYMBOL_GPL(pcpu_base_addr);
 128
 129static const int *pcpu_unit_map __read_mostly;		/* cpu -> unit */
 130const unsigned long *pcpu_unit_offsets __read_mostly;	/* cpu -> unit offset */
 131
 132/* group information, used for vm allocation */
 133static int pcpu_nr_groups __read_mostly;
 134static const unsigned long *pcpu_group_offsets __read_mostly;
 135static const size_t *pcpu_group_sizes __read_mostly;
 136
 137/*
 138 * The first chunk which always exists.  Note that unlike other
 139 * chunks, this one can be allocated and mapped in several different
 140 * ways and thus often doesn't live in the vmalloc area.
 141 */
 142static struct pcpu_chunk *pcpu_first_chunk;
 143
 144/*
 145 * Optional reserved chunk.  This chunk reserves part of the first
 146 * chunk and serves it for reserved allocations.  The amount of
 147 * reserved offset is in pcpu_reserved_chunk_limit.  When reserved
 148 * area doesn't exist, the following variables contain NULL and 0
 149 * respectively.
 150 */
 151static struct pcpu_chunk *pcpu_reserved_chunk;
 152static int pcpu_reserved_chunk_limit;
 153
 154/*
 155 * Synchronization rules.
 156 *
 157 * There are two locks - pcpu_alloc_mutex and pcpu_lock.  The former
 158 * protects allocation/reclaim paths, chunks, populated bitmap and
 159 * vmalloc mapping.  The latter is a spinlock and protects the index
 160 * data structures - chunk slots, chunks and area maps in chunks.
 161 *
 162 * During allocation, pcpu_alloc_mutex is kept locked all the time and
 163 * pcpu_lock is grabbed and released as necessary.  All actual memory
 164 * allocations are done using GFP_KERNEL with pcpu_lock released.  In
 165 * general, percpu memory can't be allocated with irq off but
 166 * irqsave/restore are still used in alloc path so that it can be used
 167 * from early init path - sched_init() specifically.
 168 *
 169 * Free path accesses and alters only the index data structures, so it
 170 * can be safely called from atomic context.  When memory needs to be
 171 * returned to the system, free path schedules reclaim_work which
 172 * grabs both pcpu_alloc_mutex and pcpu_lock, unlinks chunks to be
 173 * reclaimed, release both locks and frees the chunks.  Note that it's
 174 * necessary to grab both locks to remove a chunk from circulation as
 175 * allocation path might be referencing the chunk with only
 176 * pcpu_alloc_mutex locked.
 177 */
 178static DEFINE_MUTEX(pcpu_alloc_mutex);	/* protects whole alloc and reclaim */
 179static DEFINE_SPINLOCK(pcpu_lock);	/* protects index data structures */
 180
 181static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 182
 183/* reclaim work to release fully free chunks, scheduled from free path */
 184static void pcpu_reclaim(struct work_struct *work);
 185static DECLARE_WORK(pcpu_reclaim_work, pcpu_reclaim);
 186
 187static bool pcpu_addr_in_first_chunk(void *addr)
 188{
 189	void *first_start = pcpu_first_chunk->base_addr;
 190
 191	return addr >= first_start && addr < first_start + pcpu_unit_size;
 192}
 193
 194static bool pcpu_addr_in_reserved_chunk(void *addr)
 195{
 196	void *first_start = pcpu_first_chunk->base_addr;
 197
 198	return addr >= first_start &&
 199		addr < first_start + pcpu_reserved_chunk_limit;
 200}
 201
 202static int __pcpu_size_to_slot(int size)
 203{
 204	int highbit = fls(size);	/* size is in bytes */
 205	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
 206}
 207
 208static int pcpu_size_to_slot(int size)
 209{
 210	if (size == pcpu_unit_size)
 211		return pcpu_nr_slots - 1;
 212	return __pcpu_size_to_slot(size);
 213}
 214
 215static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 216{
 217	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
 218		return 0;
 219
 220	return pcpu_size_to_slot(chunk->free_size);
 221}
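/*
 * Illustrative note: with PCPU_SLOT_BASE_SHIFT == 5, a chunk with 4 bytes
 * of free space sits in slot 1 (fls(4) = 3, 3 - 5 + 2 = 0, clamped to 1),
 * one with 512 free bytes sits in slot 7 (fls(512) = 10, 10 - 5 + 2 = 7),
 * and a fully free chunk (free_size == pcpu_unit_size) always goes to the
 * last slot, pcpu_nr_slots - 1.  Chunks with less than sizeof(int) free
 * or contiguous stay in slot 0 and are never allocated from.
 */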
 222
 223/* set the pointer to a chunk in a page struct */
 224static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
 225{
 226	page->index = (unsigned long)pcpu;
 227}
 228
 229/* obtain pointer to a chunk from a page struct */
 230static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
 231{
 232	return (struct pcpu_chunk *)page->index;
 233}
 234
 235static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
 236{
 237	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
 238}
 239
 240static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 241				     unsigned int cpu, int page_idx)
 242{
 243	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
 244		(page_idx << PAGE_SHIFT);
 245}
 246
 247static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
 248					   int *rs, int *re, int end)
 249{
 250	*rs = find_next_zero_bit(chunk->populated, end, *rs);
 251	*re = find_next_bit(chunk->populated, end, *rs + 1);
 252}
 253
 254static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
 255					 int *rs, int *re, int end)
 256{
 257	*rs = find_next_bit(chunk->populated, end, *rs);
 258	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
 259}
 260
 261/*
 262 * (Un)populated page region iterators.  Iterate over (un)populated
 263 * page regions between @start and @end in @chunk.  @rs and @re should
 264 * be integer variables and will be set to start and end page index of
 265 * the current region.
 266 */
 267#define pcpu_for_each_unpop_region(chunk, rs, re, start, end)		    \
 268	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
 269	     (rs) < (re);						    \
 270	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))
 271
 272#define pcpu_for_each_pop_region(chunk, rs, re, start, end)		    \
 273	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end));   \
 274	     (rs) < (re);						    \
 275	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
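/*
 * Illustrative sketch: this is roughly how the vmalloc backed backend in
 * percpu-vm.c walks a chunk when populating an area; @rs/@re are plain
 * ints receiving the page indexes of each hole.
 */
#if 0	/* example only, never compiled */
static void example_walk(struct pcpu_chunk *chunk, int page_start,
			 int page_end)
{
	int rs, re;

	pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
		/* pages [rs, re) are not populated yet; allocate and map */
	}
}
#endif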
 276
 277/**
 278 * pcpu_mem_zalloc - allocate memory
 279 * @size: bytes to allocate
 280 *
 281 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 282 * kzalloc() is used; otherwise, vzalloc() is used.  The returned
 283 * memory is always zeroed.
 284 *
 285 * CONTEXT:
 286 * Does GFP_KERNEL allocation.
 287 *
 288 * RETURNS:
 289 * Pointer to the allocated area on success, NULL on failure.
 290 */
 291static void *pcpu_mem_zalloc(size_t size)
 292{
 293	if (WARN_ON_ONCE(!slab_is_available()))
 294		return NULL;
 295
 296	if (size <= PAGE_SIZE)
 297		return kzalloc(size, GFP_KERNEL);
 298	else
 299		return vzalloc(size);
 300}
 301
 302/**
 303 * pcpu_mem_free - free memory
 304 * @ptr: memory to free
 305 * @size: size of the area
 306 *
 307 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 308 */
 309static void pcpu_mem_free(void *ptr, size_t size)
 310{
 311	if (size <= PAGE_SIZE)
 312		kfree(ptr);
 313	else
 314		vfree(ptr);
 315}
 316
 317/**
 318 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 319 * @chunk: chunk of interest
 320 * @oslot: the previous slot it was on
 321 *
 322 * This function is called after an allocation or free changed @chunk.
 323 * New slot according to the changed state is determined and @chunk is
 324 * moved to the slot.  Note that the reserved chunk is never put on
 325 * chunk slots.
 326 *
 327 * CONTEXT:
 328 * pcpu_lock.
 329 */
 330static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 331{
 332	int nslot = pcpu_chunk_slot(chunk);
 333
 334	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
 335		if (oslot < nslot)
 336			list_move(&chunk->list, &pcpu_slot[nslot]);
 337		else
 338			list_move_tail(&chunk->list, &pcpu_slot[nslot]);
 339	}
 340}
 341
 342/**
 343 * pcpu_need_to_extend - determine whether chunk area map needs to be extended
 344 * @chunk: chunk of interest
 345 *
 346 * Determine whether area map of @chunk needs to be extended to
 347 * accommodate a new allocation.
 348 *
 349 * CONTEXT:
 350 * pcpu_lock.
 351 *
 352 * RETURNS:
 353 * New target map allocation length if extension is necessary, 0
 354 * otherwise.
 355 */
 356static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 357{
 358	int new_alloc;
 359
 360	if (chunk->map_alloc >= chunk->map_used + 3)
 361		return 0;
 362
 363	new_alloc = PCPU_DFL_MAP_ALLOC;
 364	while (new_alloc < chunk->map_used + 3)
 365		new_alloc *= 2;
 366
 367	return new_alloc;
 368}
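/*
 * Illustrative note: a chunk currently using 30 map entries needs
 * map_alloc >= 33; starting from PCPU_DFL_MAP_ALLOC (16) and doubling
 * yields 64, so the function above returns 64 unless the map already has
 * room for 33 entries.
 */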
 369
 370/**
 371 * pcpu_extend_area_map - extend area map of a chunk
 372 * @chunk: chunk of interest
 373 * @new_alloc: new target allocation length of the area map
 374 *
 375 * Extend area map of @chunk to have @new_alloc entries.
 376 *
 377 * CONTEXT:
 378 * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
 379 *
 380 * RETURNS:
 381 * 0 on success, -errno on failure.
 382 */
 383static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 384{
 385	int *old = NULL, *new = NULL;
 386	size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
 387	unsigned long flags;
 388
 389	new = pcpu_mem_zalloc(new_size);
 390	if (!new)
 391		return -ENOMEM;
 392
 393	/* acquire pcpu_lock and switch to new area map */
 394	spin_lock_irqsave(&pcpu_lock, flags);
 395
 396	if (new_alloc <= chunk->map_alloc)
 397		goto out_unlock;
 398
 399	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 400	old = chunk->map;
 401
 402	memcpy(new, old, old_size);
 403
 404	chunk->map_alloc = new_alloc;
 405	chunk->map = new;
 406	new = NULL;
 407
 408out_unlock:
 409	spin_unlock_irqrestore(&pcpu_lock, flags);
 410
 411	/*
 412	 * pcpu_mem_free() might end up calling vfree() which uses
 413	 * IRQ-unsafe lock and thus can't be called under pcpu_lock.
 414	 */
 415	pcpu_mem_free(old, old_size);
 416	pcpu_mem_free(new, new_size);
 417
 418	return 0;
 419}
 420
 421/**
 422 * pcpu_alloc_area - allocate area from a pcpu_chunk
 423 * @chunk: chunk of interest
 424 * @size: wanted size in bytes
 425 * @align: wanted align
 426 *
 427 * Try to allocate @size bytes area aligned at @align from @chunk.
 428 * Note that this function only allocates the offset.  It doesn't
 429 * populate or map the area.
 430 *
 431 * @chunk->map must have at least two free slots.
 432 *
 433 * CONTEXT:
 434 * pcpu_lock.
 435 *
 436 * RETURNS:
 437 * Allocated offset in @chunk on success, -1 if no matching area is
 438 * found.
 439 */
 440static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align)
 441{
 442	int oslot = pcpu_chunk_slot(chunk);
 443	int max_contig = 0;
 444	int i, off;
 445	bool seen_free = false;
 446	int *p;
 447
 448	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
 449		int head, tail;
 450		int this_size;
 451
 452		off = *p;
 453		if (off & 1)
 454			continue;
 455
 456		/* extra for alignment requirement */
 457		head = ALIGN(off, align) - off;
 458
 459		this_size = (p[1] & ~1) - off;
 460		if (this_size < head + size) {
 461			if (!seen_free) {
 462				chunk->first_free = i;
 463				seen_free = true;
 464			}
 465			max_contig = max(this_size, max_contig);
 466			continue;
 467		}
 468
 469		/*
 470		 * If head is small or the previous block is free,
 471		 * merge'em.  Note that 'small' is defined as smaller
 472		 * than sizeof(int), which is very small but isn't too
 473		 * uncommon for percpu allocations.
 474		 */
 475		if (head && (head < sizeof(int) || !(p[-1] & 1))) {
 476			*p = off += head;
 477			if (p[-1] & 1)
 478				chunk->free_size -= head;
 479			else
 480				max_contig = max(*p - p[-1], max_contig);
 481			this_size -= head;
 482			head = 0;
 483		}
 484
 485		/* if tail is small, just keep it around */
 486		tail = this_size - head - size;
 487		if (tail < sizeof(int)) {
 488			tail = 0;
 489			size = this_size - head;
 490		}
 491
 492		/* split if warranted */
 493		if (head || tail) {
 494			int nr_extra = !!head + !!tail;
 495
 496			/* insert new subblocks */
 497			memmove(p + nr_extra + 1, p + 1,
 498				sizeof(chunk->map[0]) * (chunk->map_used - i));
 499			chunk->map_used += nr_extra;
 500
 501			if (head) {
 502				if (!seen_free) {
 503					chunk->first_free = i;
 504					seen_free = true;
 505				}
 506				*++p = off += head;
 507				++i;
 508				max_contig = max(head, max_contig);
 509			}
 510			if (tail) {
 511				p[1] = off + size;
 512				max_contig = max(tail, max_contig);
 513			}
 514		}
 515
 516		if (!seen_free)
 517			chunk->first_free = i + 1;
 518
 519		/* update hint and mark allocated */
 520		if (i + 1 == chunk->map_used)
 521			chunk->contig_hint = max_contig; /* fully scanned */
 522		else
 523			chunk->contig_hint = max(chunk->contig_hint,
 524						 max_contig);
 525
 526		chunk->free_size -= size;
 527		*p |= 1;
 528
 529		pcpu_chunk_relocate(chunk, oslot);
 530		return off;
 531	}
 532
 533	chunk->contig_hint = max_contig;	/* fully scanned */
 534	pcpu_chunk_relocate(chunk, oslot);
 535
 536	/* tell the upper layer that this chunk has no matching area */
 537	return -1;
 538}
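/*
 * Illustrative note: on a freshly created chunk the map is
 * { 0, pcpu_unit_size | 1 } with map_used == 1, i.e. a single free area
 * covering the whole unit followed by the end sentinel (marked in use).
 * After pcpu_alloc_area(chunk, 512, 4) it becomes
 * { 0 | 1, 512, pcpu_unit_size | 1 } with map_used == 2: bytes [0, 512)
 * are marked allocated via bit 0 and [512, pcpu_unit_size) remain free.
 */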
 539
 540/**
 541 * pcpu_free_area - free area to a pcpu_chunk
 542 * @chunk: chunk of interest
 543 * @freeme: offset of area to free
 544 *
 545 * Free area starting from @freeme to @chunk.  Note that this function
 546 * only modifies the allocation map.  It doesn't depopulate or unmap
 547 * the area.
 548 *
 549 * CONTEXT:
 550 * pcpu_lock.
 551 */
 552static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme)
 553{
 554	int oslot = pcpu_chunk_slot(chunk);
 555	int off = 0;
 556	unsigned i, j;
 557	int to_free = 0;
 558	int *p;
 559
 560	freeme |= 1;	/* we are searching for <given offset, in use> pair */
 561
 562	i = 0;
 563	j = chunk->map_used;
 564	while (i != j) {
 565		unsigned k = (i + j) / 2;
 566		off = chunk->map[k];
 567		if (off < freeme)
 568			i = k + 1;
 569		else if (off > freeme)
 570			j = k;
 571		else
 572			i = j = k;
 573	}
 574	BUG_ON(off != freeme);
 575
 576	if (i < chunk->first_free)
 577		chunk->first_free = i;
 578
 579	p = chunk->map + i;
 580	*p = off &= ~1;
 581	chunk->free_size += (p[1] & ~1) - off;
 582
 583	/* merge with next? */
 584	if (!(p[1] & 1))
 585		to_free++;
 586	/* merge with previous? */
 587	if (i > 0 && !(p[-1] & 1)) {
 588		to_free++;
 589		i--;
 590		p--;
 591	}
 592	if (to_free) {
 593		chunk->map_used -= to_free;
 594		memmove(p + 1, p + 1 + to_free,
 595			(chunk->map_used - i) * sizeof(chunk->map[0]));
 596	}
 597
 598	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1, chunk->contig_hint);
 599	pcpu_chunk_relocate(chunk, oslot);
 600}
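/*
 * Illustrative note: freeing the 512 byte area from the example above
 * clears bit 0 of its map entry and, since the following area is also
 * free, merges the two back into { 0, pcpu_unit_size | 1 } with
 * map_used == 1; the now fully free chunk is moved to the last slot.
 */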
 601
 602static struct pcpu_chunk *pcpu_alloc_chunk(void)
 603{
 604	struct pcpu_chunk *chunk;
 605
 606	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
 607	if (!chunk)
 608		return NULL;
 609
 610	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
 611						sizeof(chunk->map[0]));
 612	if (!chunk->map) {
 613		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
 614		return NULL;
 615	}
 616
 617	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 618	chunk->map[0] = 0;
 619	chunk->map[1] = pcpu_unit_size | 1;
 620	chunk->map_used = 1;
 621
 622	INIT_LIST_HEAD(&chunk->list);
 623	chunk->free_size = pcpu_unit_size;
 624	chunk->contig_hint = pcpu_unit_size;
 625
 626	return chunk;
 627}
 628
 629static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 630{
 631	if (!chunk)
 632		return;
 633	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
 634	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
 635}
 636
 637/*
 638 * Chunk management implementation.
 639 *
 640 * To allow different implementations, chunk alloc/free and
 641 * [de]population are implemented in a separate file which is pulled
 642 * into this file and compiled together.  The following functions
 643 * should be implemented.
 644 *
 645 * pcpu_populate_chunk		- populate the specified range of a chunk
 646 * pcpu_depopulate_chunk	- depopulate the specified range of a chunk
 647 * pcpu_create_chunk		- create a new chunk
 648 * pcpu_destroy_chunk		- destroy a chunk, always preceded by full depop
  649 * pcpu_addr_to_page		- translate address to the backing page
 650 * pcpu_verify_alloc_info	- check alloc_info is acceptable during init
 651 */
 652static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
 653static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
 654static struct pcpu_chunk *pcpu_create_chunk(void);
 655static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
 656static struct page *pcpu_addr_to_page(void *addr);
 657static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
 658
 659#ifdef CONFIG_NEED_PER_CPU_KM
 660#include "percpu-km.c"
 661#else
 662#include "percpu-vm.c"
 663#endif
 664
 665/**
 666 * pcpu_chunk_addr_search - determine chunk containing specified address
 667 * @addr: address for which the chunk needs to be determined.
 668 *
 669 * RETURNS:
 670 * The address of the found chunk.
 671 */
 672static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 673{
 674	/* is it in the first chunk? */
 675	if (pcpu_addr_in_first_chunk(addr)) {
 676		/* is it in the reserved area? */
 677		if (pcpu_addr_in_reserved_chunk(addr))
 678			return pcpu_reserved_chunk;
 679		return pcpu_first_chunk;
 680	}
 681
 682	/*
 683	 * The address is relative to unit0 which might be unused and
 684	 * thus unmapped.  Offset the address to the unit space of the
 685	 * current processor before looking it up in the vmalloc
 686	 * space.  Note that any possible cpu id can be used here, so
 687	 * there's no need to worry about preemption or cpu hotplug.
 688	 */
 689	addr += pcpu_unit_offsets[raw_smp_processor_id()];
 690	return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
 691}
 692
 693/**
 694 * pcpu_alloc - the percpu allocator
 695 * @size: size of area to allocate in bytes
 696 * @align: alignment of area (max PAGE_SIZE)
 697 * @reserved: allocate from the reserved chunk if available
 698 *
 699 * Allocate percpu area of @size bytes aligned at @align.
 700 *
 701 * CONTEXT:
 702 * Does GFP_KERNEL allocation.
 703 *
 704 * RETURNS:
 705 * Percpu pointer to the allocated area on success, NULL on failure.
 706 */
 707static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 708{
 709	static int warn_limit = 10;
 710	struct pcpu_chunk *chunk;
 711	const char *err;
 712	int slot, off, new_alloc;
 713	unsigned long flags;
 714	void __percpu *ptr;
 715
 716	/*
 717	 * We want the lowest bit of offset available for in-use/free
 718	 * indicator, so force >= 16bit alignment and make size even.
 719	 */
 720	if (unlikely(align < 2))
 721		align = 2;
 722
 723	if (unlikely(size & 1))
 724		size++;
 725
 726	if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
 727		WARN(true, "illegal size (%zu) or align (%zu) for "
 728		     "percpu allocation\n", size, align);
 729		return NULL;
 730	}
 731
 732	mutex_lock(&pcpu_alloc_mutex);
 733	spin_lock_irqsave(&pcpu_lock, flags);
 734
 735	/* serve reserved allocations from the reserved chunk if available */
 736	if (reserved && pcpu_reserved_chunk) {
 737		chunk = pcpu_reserved_chunk;
 738
 739		if (size > chunk->contig_hint) {
 740			err = "alloc from reserved chunk failed";
 741			goto fail_unlock;
 742		}
 743
 744		while ((new_alloc = pcpu_need_to_extend(chunk))) {
 745			spin_unlock_irqrestore(&pcpu_lock, flags);
 746			if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
 747				err = "failed to extend area map of reserved chunk";
 748				goto fail_unlock_mutex;
 749			}
 750			spin_lock_irqsave(&pcpu_lock, flags);
 751		}
 752
 753		off = pcpu_alloc_area(chunk, size, align);
 754		if (off >= 0)
 755			goto area_found;
 756
 757		err = "alloc from reserved chunk failed";
 758		goto fail_unlock;
 759	}
 760
 761restart:
 762	/* search through normal chunks */
 763	for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
 764		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
 765			if (size > chunk->contig_hint)
 766				continue;
 767
 768			new_alloc = pcpu_need_to_extend(chunk);
 769			if (new_alloc) {
 770				spin_unlock_irqrestore(&pcpu_lock, flags);
 771				if (pcpu_extend_area_map(chunk,
 772							 new_alloc) < 0) {
 773					err = "failed to extend area map";
 774					goto fail_unlock_mutex;
 775				}
 776				spin_lock_irqsave(&pcpu_lock, flags);
 777				/*
 778				 * pcpu_lock has been dropped, need to
 779				 * restart cpu_slot list walking.
 780				 */
 781				goto restart;
 782			}
 783
 784			off = pcpu_alloc_area(chunk, size, align);
 785			if (off >= 0)
 786				goto area_found;
 787		}
 788	}
 789
 790	/* hmmm... no space left, create a new chunk */
 791	spin_unlock_irqrestore(&pcpu_lock, flags);
 792
 793	chunk = pcpu_create_chunk();
 794	if (!chunk) {
 795		err = "failed to allocate new chunk";
 796		goto fail_unlock_mutex;
 797	}
 798
 799	spin_lock_irqsave(&pcpu_lock, flags);
 800	pcpu_chunk_relocate(chunk, -1);
 801	goto restart;
 802
 803area_found:
 804	spin_unlock_irqrestore(&pcpu_lock, flags);
 805
 806	/* populate, map and clear the area */
 807	if (pcpu_populate_chunk(chunk, off, size)) {
 808		spin_lock_irqsave(&pcpu_lock, flags);
 809		pcpu_free_area(chunk, off);
 810		err = "failed to populate";
 811		goto fail_unlock;
 812	}
 813
 814	mutex_unlock(&pcpu_alloc_mutex);
 815
 816	/* return address relative to base address */
 817	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
 818	kmemleak_alloc_percpu(ptr, size);
 819	return ptr;
 820
 821fail_unlock:
 822	spin_unlock_irqrestore(&pcpu_lock, flags);
 823fail_unlock_mutex:
 824	mutex_unlock(&pcpu_alloc_mutex);
 825	if (warn_limit) {
 826		pr_warning("PERCPU: allocation failed, size=%zu align=%zu, "
 827			   "%s\n", size, align, err);
 828		dump_stack();
 829		if (!--warn_limit)
 830			pr_info("PERCPU: limit reached, disable warning\n");
 831	}
 832	return NULL;
 833}
 834
 835/**
 836 * __alloc_percpu - allocate dynamic percpu area
 837 * @size: size of area to allocate in bytes
 838 * @align: alignment of area (max PAGE_SIZE)
 839 *
 840 * Allocate zero-filled percpu area of @size bytes aligned at @align.
 841 * Might sleep.  Might trigger writeouts.
 842 *
 843 * CONTEXT:
 844 * Does GFP_KERNEL allocation.
 845 *
 846 * RETURNS:
 847 * Percpu pointer to the allocated area on success, NULL on failure.
 848 */
 849void __percpu *__alloc_percpu(size_t size, size_t align)
 850{
 851	return pcpu_alloc(size, align, false);
 852}
 853EXPORT_SYMBOL_GPL(__alloc_percpu);
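/*
 * Illustrative sketch (not part of this file): typical dynamic percpu
 * usage.  alloc_percpu() is the type-safe wrapper around __alloc_percpu()
 * defined in linux/percpu.h; my_stats and the helpers are hypothetical.
 */
#if 0	/* example only, never compiled */
struct my_stats {
	u64	packets;
	u64	bytes;
};

static struct my_stats __percpu *my_stats_pcpu;

static int my_stats_init(void)
{
	my_stats_pcpu = alloc_percpu(struct my_stats);
	return my_stats_pcpu ? 0 : -ENOMEM;
}

static void my_stats_count(unsigned int len)
{
	struct my_stats *s = get_cpu_ptr(my_stats_pcpu); /* pins this cpu */

	s->packets++;
	s->bytes += len;
	put_cpu_ptr(my_stats_pcpu);
}

static void my_stats_exit(void)
{
	free_percpu(my_stats_pcpu);
}
#endif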
 854
 855/**
 856 * __alloc_reserved_percpu - allocate reserved percpu area
 857 * @size: size of area to allocate in bytes
 858 * @align: alignment of area (max PAGE_SIZE)
 859 *
 860 * Allocate zero-filled percpu area of @size bytes aligned at @align
 861 * from reserved percpu area if arch has set it up; otherwise,
 862 * allocation is served from the same dynamic area.  Might sleep.
 863 * Might trigger writeouts.
 864 *
 865 * CONTEXT:
 866 * Does GFP_KERNEL allocation.
 867 *
 868 * RETURNS:
 869 * Percpu pointer to the allocated area on success, NULL on failure.
 870 */
 871void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 872{
 873	return pcpu_alloc(size, align, true);
 874}
 875
 876/**
 877 * pcpu_reclaim - reclaim fully free chunks, workqueue function
 878 * @work: unused
 879 *
 880 * Reclaim all fully free chunks except for the first one.
 881 *
 882 * CONTEXT:
 883 * workqueue context.
 884 */
 885static void pcpu_reclaim(struct work_struct *work)
 886{
 887	LIST_HEAD(todo);
 888	struct list_head *head = &pcpu_slot[pcpu_nr_slots - 1];
 889	struct pcpu_chunk *chunk, *next;
 890
 891	mutex_lock(&pcpu_alloc_mutex);
 892	spin_lock_irq(&pcpu_lock);
 893
 894	list_for_each_entry_safe(chunk, next, head, list) {
 895		WARN_ON(chunk->immutable);
 896
 897		/* spare the first one */
 898		if (chunk == list_first_entry(head, struct pcpu_chunk, list))
 899			continue;
 900
 901		list_move(&chunk->list, &todo);
 902	}
 903
 904	spin_unlock_irq(&pcpu_lock);
 905
 906	list_for_each_entry_safe(chunk, next, &todo, list) {
 907		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
 908		pcpu_destroy_chunk(chunk);
 909	}
 910
 911	mutex_unlock(&pcpu_alloc_mutex);
 912}
 913
 914/**
 915 * free_percpu - free percpu area
 916 * @ptr: pointer to area to free
 917 *
 918 * Free percpu area @ptr.
 919 *
 920 * CONTEXT:
 921 * Can be called from atomic context.
 922 */
 923void free_percpu(void __percpu *ptr)
 924{
 925	void *addr;
 926	struct pcpu_chunk *chunk;
 927	unsigned long flags;
 928	int off;
 929
 930	if (!ptr)
 931		return;
 932
 933	kmemleak_free_percpu(ptr);
 934
 935	addr = __pcpu_ptr_to_addr(ptr);
 936
 937	spin_lock_irqsave(&pcpu_lock, flags);
 938
 939	chunk = pcpu_chunk_addr_search(addr);
 940	off = addr - chunk->base_addr;
 941
 942	pcpu_free_area(chunk, off);
 943
  944	/* if there is more than one fully free chunk, wake up the grim reaper */
 945	if (chunk->free_size == pcpu_unit_size) {
 946		struct pcpu_chunk *pos;
 947
 948		list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
 949			if (pos != chunk) {
 950				schedule_work(&pcpu_reclaim_work);
 951				break;
 952			}
 953	}
 954
 955	spin_unlock_irqrestore(&pcpu_lock, flags);
 956}
 957EXPORT_SYMBOL_GPL(free_percpu);
 958
 959/**
 960 * is_kernel_percpu_address - test whether address is from static percpu area
 961 * @addr: address to test
 962 *
 963 * Test whether @addr belongs to in-kernel static percpu area.  Module
 964 * static percpu areas are not considered.  For those, use
 965 * is_module_percpu_address().
 966 *
 967 * RETURNS:
 968 * %true if @addr is from in-kernel static percpu area, %false otherwise.
 969 */
 970bool is_kernel_percpu_address(unsigned long addr)
 971{
 972#ifdef CONFIG_SMP
 973	const size_t static_size = __per_cpu_end - __per_cpu_start;
 974	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
 975	unsigned int cpu;
 976
 977	for_each_possible_cpu(cpu) {
 978		void *start = per_cpu_ptr(base, cpu);
 979
 980		if ((void *)addr >= start && (void *)addr < start + static_size)
 981			return true;
  982	}
 983#endif
 984	/* on UP, can't distinguish from other static vars, always false */
 985	return false;
 986}
 987
 988/**
 989 * per_cpu_ptr_to_phys - convert translated percpu address to physical address
 990 * @addr: the address to be converted to physical address
 991 *
  992 * Given @addr which is a dereferenceable address obtained via one of
 993 * percpu access macros, this function translates it into its physical
 994 * address.  The caller is responsible for ensuring @addr stays valid
 995 * until this function finishes.
 996 *
 997 * percpu allocator has special setup for the first chunk, which currently
 998 * supports either embedding in linear address space or vmalloc mapping,
 999 * and, from the second one, the backing allocator (currently either vm or
1000 * km) provides translation.
1001 *
 1002 * The address could be translated simply without checking whether it falls
 1003 * into the first chunk, but the current code better reflects how the percpu
 1004 * allocator actually works and the verification can catch bugs both in the
 1005 * percpu allocator itself and in per_cpu_ptr_to_phys() callers.  So we keep
 1006 * the current code.
1007 *
1008 * RETURNS:
1009 * The physical address for @addr.
1010 */
1011phys_addr_t per_cpu_ptr_to_phys(void *addr)
1012{
1013	void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
1014	bool in_first_chunk = false;
1015	unsigned long first_low, first_high;
1016	unsigned int cpu;
1017
1018	/*
1019	 * The following test on unit_low/high isn't strictly
1020	 * necessary but will speed up lookups of addresses which
1021	 * aren't in the first chunk.
1022	 */
1023	first_low = pcpu_chunk_addr(pcpu_first_chunk, pcpu_low_unit_cpu, 0);
1024	first_high = pcpu_chunk_addr(pcpu_first_chunk, pcpu_high_unit_cpu,
1025				     pcpu_unit_pages);
1026	if ((unsigned long)addr >= first_low &&
1027	    (unsigned long)addr < first_high) {
1028		for_each_possible_cpu(cpu) {
1029			void *start = per_cpu_ptr(base, cpu);
1030
1031			if (addr >= start && addr < start + pcpu_unit_size) {
1032				in_first_chunk = true;
1033				break;
1034			}
1035		}
1036	}
1037
1038	if (in_first_chunk) {
1039		if (!is_vmalloc_addr(addr))
1040			return __pa(addr);
1041		else
1042			return page_to_phys(vmalloc_to_page(addr)) +
1043			       offset_in_page(addr);
1044	} else
1045		return page_to_phys(pcpu_addr_to_page(addr)) +
1046		       offset_in_page(addr);
1047}
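/*
 * Illustrative sketch: per_cpu_ptr_to_phys() is used when hardware or
 * firmware needs the physical address of one cpu's copy of a percpu
 * object, e.g. registering a per-cpu buffer with a device.  The helper
 * below is hypothetical.
 */
#if 0	/* example only, never compiled */
static phys_addr_t my_cpu_buf_phys(void __percpu *buf, unsigned int cpu)
{
	return per_cpu_ptr_to_phys(per_cpu_ptr(buf, cpu));
}
#endif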
1048
1049/**
1050 * pcpu_alloc_alloc_info - allocate percpu allocation info
1051 * @nr_groups: the number of groups
1052 * @nr_units: the number of units
1053 *
1054 * Allocate ai which is large enough for @nr_groups groups containing
1055 * @nr_units units.  The returned ai's groups[0].cpu_map points to the
1056 * cpu_map array which is long enough for @nr_units and filled with
1057 * NR_CPUS.  It's the caller's responsibility to initialize cpu_map
1058 * pointer of other groups.
1059 *
1060 * RETURNS:
1061 * Pointer to the allocated pcpu_alloc_info on success, NULL on
1062 * failure.
1063 */
1064struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
1065						      int nr_units)
1066{
1067	struct pcpu_alloc_info *ai;
1068	size_t base_size, ai_size;
1069	void *ptr;
1070	int unit;
1071
1072	base_size = ALIGN(sizeof(*ai) + nr_groups * sizeof(ai->groups[0]),
1073			  __alignof__(ai->groups[0].cpu_map[0]));
1074	ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
1075
1076	ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
1077	if (!ptr)
1078		return NULL;
1079	ai = ptr;
1080	ptr += base_size;
1081
1082	ai->groups[0].cpu_map = ptr;
1083
1084	for (unit = 0; unit < nr_units; unit++)
1085		ai->groups[0].cpu_map[unit] = NR_CPUS;
1086
1087	ai->nr_groups = nr_groups;
1088	ai->__ai_size = PFN_ALIGN(ai_size);
1089
1090	return ai;
1091}
1092
1093/**
1094 * pcpu_free_alloc_info - free percpu allocation info
1095 * @ai: pcpu_alloc_info to free
1096 *
1097 * Free @ai which was allocated by pcpu_alloc_alloc_info().
1098 */
1099void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
1100{
1101	memblock_free_early(__pa(ai), ai->__ai_size);
1102}
1103
1104/**
1105 * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
1106 * @lvl: loglevel
1107 * @ai: allocation info to dump
1108 *
1109 * Print out information about @ai using loglevel @lvl.
1110 */
1111static void pcpu_dump_alloc_info(const char *lvl,
1112				 const struct pcpu_alloc_info *ai)
1113{
1114	int group_width = 1, cpu_width = 1, width;
1115	char empty_str[] = "--------";
1116	int alloc = 0, alloc_end = 0;
1117	int group, v;
1118	int upa, apl;	/* units per alloc, allocs per line */
1119
1120	v = ai->nr_groups;
1121	while (v /= 10)
1122		group_width++;
1123
1124	v = num_possible_cpus();
1125	while (v /= 10)
1126		cpu_width++;
1127	empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
1128
1129	upa = ai->alloc_size / ai->unit_size;
1130	width = upa * (cpu_width + 1) + group_width + 3;
1131	apl = rounddown_pow_of_two(max(60 / width, 1));
1132
1133	printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
1134	       lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
1135	       ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
1136
1137	for (group = 0; group < ai->nr_groups; group++) {
1138		const struct pcpu_group_info *gi = &ai->groups[group];
1139		int unit = 0, unit_end = 0;
1140
1141		BUG_ON(gi->nr_units % upa);
1142		for (alloc_end += gi->nr_units / upa;
1143		     alloc < alloc_end; alloc++) {
1144			if (!(alloc % apl)) {
1145				printk(KERN_CONT "\n");
1146				printk("%spcpu-alloc: ", lvl);
1147			}
1148			printk(KERN_CONT "[%0*d] ", group_width, group);
1149
1150			for (unit_end += upa; unit < unit_end; unit++)
1151				if (gi->cpu_map[unit] != NR_CPUS)
1152					printk(KERN_CONT "%0*d ", cpu_width,
1153					       gi->cpu_map[unit]);
1154				else
1155					printk(KERN_CONT "%s ", empty_str);
1156		}
1157	}
1158	printk(KERN_CONT "\n");
1159}
1160
1161/**
1162 * pcpu_setup_first_chunk - initialize the first percpu chunk
 1163 * @ai: pcpu_alloc_info describing how the percpu area is shaped
1164 * @base_addr: mapped address
1165 *
1166 * Initialize the first percpu chunk which contains the kernel static
 1167 * percpu area.  This function is to be called from the arch percpu area
1168 * setup path.
1169 *
1170 * @ai contains all information necessary to initialize the first
1171 * chunk and prime the dynamic percpu allocator.
1172 *
1173 * @ai->static_size is the size of static percpu area.
1174 *
1175 * @ai->reserved_size, if non-zero, specifies the amount of bytes to
1176 * reserve after the static area in the first chunk.  This reserves
1177 * the first chunk such that it's available only through reserved
1178 * percpu allocation.  This is primarily used to serve module percpu
1179 * static areas on architectures where the addressing model has
1180 * limited offset range for symbol relocations to guarantee module
1181 * percpu symbols fall inside the relocatable range.
1182 *
1183 * @ai->dyn_size determines the number of bytes available for dynamic
1184 * allocation in the first chunk.  The area between @ai->static_size +
1185 * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
1186 *
1187 * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
1188 * and equal to or larger than @ai->static_size + @ai->reserved_size +
1189 * @ai->dyn_size.
1190 *
1191 * @ai->atom_size is the allocation atom size and used as alignment
1192 * for vm areas.
1193 *
1194 * @ai->alloc_size is the allocation size and always multiple of
1195 * @ai->atom_size.  This is larger than @ai->atom_size if
1196 * @ai->unit_size is larger than @ai->atom_size.
1197 *
1198 * @ai->nr_groups and @ai->groups describe virtual memory layout of
1199 * percpu areas.  Units which should be colocated are put into the
1200 * same group.  Dynamic VM areas will be allocated according to these
1201 * groupings.  If @ai->nr_groups is zero, a single group containing
1202 * all units is assumed.
1203 *
1204 * The caller should have mapped the first chunk at @base_addr and
1205 * copied static data to each unit.
1206 *
1207 * If the first chunk ends up with both reserved and dynamic areas, it
1208 * is served by two chunks - one to serve the core static and reserved
1209 * areas and the other for the dynamic area.  They share the same vm
 1210 * and page map but use separate area allocation maps to stay away
1211 * from each other.  The latter chunk is circulated in the chunk slots
1212 * and available for dynamic allocation like any other chunks.
1213 *
1214 * RETURNS:
1215 * 0 on success, -errno on failure.
1216 */
1217int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1218				  void *base_addr)
1219{
1220	static char cpus_buf[4096] __initdata;
1221	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1222	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
1223	size_t dyn_size = ai->dyn_size;
1224	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
1225	struct pcpu_chunk *schunk, *dchunk = NULL;
1226	unsigned long *group_offsets;
1227	size_t *group_sizes;
1228	unsigned long *unit_off;
1229	unsigned int cpu;
1230	int *unit_map;
1231	int group, unit, i;
1232
1233	cpumask_scnprintf(cpus_buf, sizeof(cpus_buf), cpu_possible_mask);
1234
1235#define PCPU_SETUP_BUG_ON(cond)	do {					\
1236	if (unlikely(cond)) {						\
1237		pr_emerg("PERCPU: failed to initialize, %s", #cond);	\
1238		pr_emerg("PERCPU: cpu_possible_mask=%s\n", cpus_buf);	\
1239		pcpu_dump_alloc_info(KERN_EMERG, ai);			\
1240		BUG();							\
1241	}								\
1242} while (0)
1243
1244	/* sanity checks */
1245	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
1246#ifdef CONFIG_SMP
1247	PCPU_SETUP_BUG_ON(!ai->static_size);
1248	PCPU_SETUP_BUG_ON((unsigned long)__per_cpu_start & ~PAGE_MASK);
1249#endif
1250	PCPU_SETUP_BUG_ON(!base_addr);
1251	PCPU_SETUP_BUG_ON((unsigned long)base_addr & ~PAGE_MASK);
1252	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
1253	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
1254	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
1255	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
1256	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
1257
1258	/* process group information and build config tables accordingly */
1259	group_offsets = memblock_virt_alloc(ai->nr_groups *
1260					     sizeof(group_offsets[0]), 0);
1261	group_sizes = memblock_virt_alloc(ai->nr_groups *
1262					   sizeof(group_sizes[0]), 0);
1263	unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
1264	unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
1265
1266	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
1267		unit_map[cpu] = UINT_MAX;
1268
1269	pcpu_low_unit_cpu = NR_CPUS;
1270	pcpu_high_unit_cpu = NR_CPUS;
1271
1272	for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
1273		const struct pcpu_group_info *gi = &ai->groups[group];
1274
1275		group_offsets[group] = gi->base_offset;
1276		group_sizes[group] = gi->nr_units * ai->unit_size;
1277
1278		for (i = 0; i < gi->nr_units; i++) {
1279			cpu = gi->cpu_map[i];
1280			if (cpu == NR_CPUS)
1281				continue;
1282
1283			PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids);
1284			PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1285			PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1286
1287			unit_map[cpu] = unit + i;
1288			unit_off[cpu] = gi->base_offset + i * ai->unit_size;
1289
1290			/* determine low/high unit_cpu */
1291			if (pcpu_low_unit_cpu == NR_CPUS ||
1292			    unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
1293				pcpu_low_unit_cpu = cpu;
1294			if (pcpu_high_unit_cpu == NR_CPUS ||
1295			    unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
1296				pcpu_high_unit_cpu = cpu;
1297		}
1298	}
1299	pcpu_nr_units = unit;
1300
1301	for_each_possible_cpu(cpu)
1302		PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
1303
1304	/* we're done parsing the input, undefine BUG macro and dump config */
1305#undef PCPU_SETUP_BUG_ON
1306	pcpu_dump_alloc_info(KERN_DEBUG, ai);
1307
1308	pcpu_nr_groups = ai->nr_groups;
1309	pcpu_group_offsets = group_offsets;
1310	pcpu_group_sizes = group_sizes;
1311	pcpu_unit_map = unit_map;
1312	pcpu_unit_offsets = unit_off;
1313
1314	/* determine basic parameters */
1315	pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
1316	pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
1317	pcpu_atom_size = ai->atom_size;
1318	pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
1319		BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
1320
1321	/*
1322	 * Allocate chunk slots.  The additional last slot is for
1323	 * empty chunks.
1324	 */
1325	pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
1326	pcpu_slot = memblock_virt_alloc(
1327			pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
1328	for (i = 0; i < pcpu_nr_slots; i++)
1329		INIT_LIST_HEAD(&pcpu_slot[i]);
1330
1331	/*
1332	 * Initialize static chunk.  If reserved_size is zero, the
1333	 * static chunk covers static area + dynamic allocation area
1334	 * in the first chunk.  If reserved_size is not zero, it
1335	 * covers static area + reserved area (mostly used for module
1336	 * static percpu allocation).
1337	 */
1338	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1339	INIT_LIST_HEAD(&schunk->list);
1340	schunk->base_addr = base_addr;
1341	schunk->map = smap;
1342	schunk->map_alloc = ARRAY_SIZE(smap);
1343	schunk->immutable = true;
1344	bitmap_fill(schunk->populated, pcpu_unit_pages);
1345
1346	if (ai->reserved_size) {
1347		schunk->free_size = ai->reserved_size;
1348		pcpu_reserved_chunk = schunk;
1349		pcpu_reserved_chunk_limit = ai->static_size + ai->reserved_size;
1350	} else {
1351		schunk->free_size = dyn_size;
1352		dyn_size = 0;			/* dynamic area covered */
1353	}
1354	schunk->contig_hint = schunk->free_size;
1355
1356	schunk->map[0] = 1;
1357	schunk->map[1] = ai->static_size;
1358	schunk->map_used = 1;
1359	if (schunk->free_size)
1360		schunk->map[++schunk->map_used] = 1 | (ai->static_size + schunk->free_size);
1361	else
1362		schunk->map[1] |= 1;
1363
1364	/* init dynamic chunk if necessary */
1365	if (dyn_size) {
1366		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
1367		INIT_LIST_HEAD(&dchunk->list);
1368		dchunk->base_addr = base_addr;
1369		dchunk->map = dmap;
1370		dchunk->map_alloc = ARRAY_SIZE(dmap);
1371		dchunk->immutable = true;
1372		bitmap_fill(dchunk->populated, pcpu_unit_pages);
1373
1374		dchunk->contig_hint = dchunk->free_size = dyn_size;
1375		dchunk->map[0] = 1;
1376		dchunk->map[1] = pcpu_reserved_chunk_limit;
1377		dchunk->map[2] = (pcpu_reserved_chunk_limit + dchunk->free_size) | 1;
1378		dchunk->map_used = 2;
1379	}
1380
1381	/* link the first chunk in */
1382	pcpu_first_chunk = dchunk ?: schunk;
1383	pcpu_chunk_relocate(pcpu_first_chunk, -1);
1384
1385	/* we're done */
1386	pcpu_base_addr = base_addr;
1387	return 0;
1388}
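/*
 * Illustrative note: with static_size 20K, reserved_size 8K and dyn_size
 * 20K, schunk above manages [0, 28K) of the first chunk (the 20K of
 * already-copied static data plus the 8K module reserve) and becomes
 * pcpu_reserved_chunk, while dchunk manages the 20K dynamic area starting
 * at 28K and is the chunk that actually gets linked into the slots.
 */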
1389
1390#ifdef CONFIG_SMP
1391
1392const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
1393	[PCPU_FC_AUTO]	= "auto",
1394	[PCPU_FC_EMBED]	= "embed",
1395	[PCPU_FC_PAGE]	= "page",
1396};
1397
1398enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
1399
1400static int __init percpu_alloc_setup(char *str)
1401{
1402	if (!str)
1403		return -EINVAL;
1404
1405	if (0)
1406		/* nada */;
1407#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
1408	else if (!strcmp(str, "embed"))
1409		pcpu_chosen_fc = PCPU_FC_EMBED;
1410#endif
1411#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1412	else if (!strcmp(str, "page"))
1413		pcpu_chosen_fc = PCPU_FC_PAGE;
1414#endif
1415	else
1416		pr_warning("PERCPU: unknown allocator %s specified\n", str);
1417
1418	return 0;
1419}
1420early_param("percpu_alloc", percpu_alloc_setup);
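/*
 * Illustrative note: on a kernel built with both first chunk helpers,
 * booting with "percpu_alloc=page" forces the page based first chunk
 * instead of the embed/auto default; an unrecognized value only triggers
 * the warning above and leaves PCPU_FC_AUTO in place.
 */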
1421
1422/*
1423 * pcpu_embed_first_chunk() is used by the generic percpu setup.
1424 * Build it if needed by the arch config or the generic setup is going
1425 * to be used.
1426 */
1427#if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
1428	!defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
1429#define BUILD_EMBED_FIRST_CHUNK
1430#endif
1431
1432/* build pcpu_page_first_chunk() iff needed by the arch config */
1433#if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
1434#define BUILD_PAGE_FIRST_CHUNK
1435#endif
1436
1437/* pcpu_build_alloc_info() is used by both embed and page first chunk */
1438#if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
1439/**
1440 * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
1441 * @reserved_size: the size of reserved percpu area in bytes
1442 * @dyn_size: minimum free size for dynamic allocation in bytes
1443 * @atom_size: allocation atom size
1444 * @cpu_distance_fn: callback to determine distance between cpus, optional
1445 *
1446 * This function determines grouping of units, their mappings to cpus
1447 * and other parameters considering needed percpu size, allocation
1448 * atom size and distances between CPUs.
1449 *
 1450 * Groups are always multiples of atom size and CPUs which are of
 1451 * LOCAL_DISTANCE both ways are grouped together and share space for
 1452 * units in the same group.  The returned configuration is guaranteed
 1453 * to have CPUs on different nodes in different groups and >=75% usage
 1454 * of the allocated virtual address space.
1455 *
1456 * RETURNS:
1457 * On success, pointer to the new allocation_info is returned.  On
1458 * failure, ERR_PTR value is returned.
1459 */
1460static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
1461				size_t reserved_size, size_t dyn_size,
1462				size_t atom_size,
1463				pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
1464{
1465	static int group_map[NR_CPUS] __initdata;
1466	static int group_cnt[NR_CPUS] __initdata;
1467	const size_t static_size = __per_cpu_end - __per_cpu_start;
1468	int nr_groups = 1, nr_units = 0;
1469	size_t size_sum, min_unit_size, alloc_size;
1470	int upa, max_upa, uninitialized_var(best_upa);	/* units_per_alloc */
1471	int last_allocs, group, unit;
1472	unsigned int cpu, tcpu;
1473	struct pcpu_alloc_info *ai;
1474	unsigned int *cpu_map;
1475
1476	/* this function may be called multiple times */
1477	memset(group_map, 0, sizeof(group_map));
1478	memset(group_cnt, 0, sizeof(group_cnt));
1479
1480	/* calculate size_sum and ensure dyn_size is enough for early alloc */
1481	size_sum = PFN_ALIGN(static_size + reserved_size +
1482			    max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
1483	dyn_size = size_sum - static_size - reserved_size;
1484
1485	/*
1486	 * Determine min_unit_size, alloc_size and max_upa such that
1487	 * alloc_size is a multiple of atom_size and is the smallest size
1488	 * which can accommodate 4k aligned segments that are equal to
1489	 * or larger than min_unit_size.
1490	 */
1491	min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
1492
1493	alloc_size = roundup(min_unit_size, atom_size);
1494	upa = alloc_size / min_unit_size;
1495	while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1496		upa--;
1497	max_upa = upa;
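
	/*
	 * Worked example (illustrative numbers): with 4k pages, a 2MB
	 * atom_size and size_sum = 60k, min_unit_size = 60k and
	 * alloc_size = roundup(60k, 2MB) = 2MB.  upa starts at
	 * 2MB / 60k = 34 and is decremented until it divides alloc_size
	 * into whole, page-aligned units: 34 and 33 fail, 32 yields 64k
	 * units, so max_upa = 32.
	 */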
1498
1499	/* group cpus according to their proximity */
1500	for_each_possible_cpu(cpu) {
1501		group = 0;
1502	next_group:
1503		for_each_possible_cpu(tcpu) {
1504			if (cpu == tcpu)
1505				break;
1506			if (group_map[tcpu] == group && cpu_distance_fn &&
1507			    (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
1508			     cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
1509				group++;
1510				nr_groups = max(nr_groups, group + 1);
1511				goto next_group;
1512			}
1513		}
1514		group_map[cpu] = group;
1515		group_cnt[group]++;
1516	}
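
	/*
	 * For example (illustrative), on a two-node machine with cpus 0-3
	 * on node 0, cpus 4-7 on node 1 and a cpu_distance_fn returning
	 * LOCAL_DISTANCE within a node and a larger value across nodes,
	 * this loop produces group_map = { 0, 0, 0, 0, 1, 1, 1, 1 },
	 * group_cnt = { 4, 4 } and nr_groups = 2.
	 */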
1517
1518	/*
1519	 * Expand unit size until address space usage goes over 75%
1520	 * and then as much as possible without using more address
1521	 * space.
1522	 */
1523	last_allocs = INT_MAX;
1524	for (upa = max_upa; upa; upa--) {
1525		int allocs = 0, wasted = 0;
1526
1527		if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK))
1528			continue;
1529
1530		for (group = 0; group < nr_groups; group++) {
1531			int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
1532			allocs += this_allocs;
1533			wasted += this_allocs * upa - group_cnt[group];
1534		}
1535
1536		/*
1537		 * Don't accept if wastage exceeds 1/3 of the number of
1538		 * possible CPUs.  The greater-than comparison ensures
1539		 * upa==1 always passes the following check.
1540		 */
1541		if (wasted > num_possible_cpus() / 3)
1542			continue;
1543
1544		/* and then don't consume more memory */
1545		if (allocs > last_allocs)
1546			break;
1547		last_allocs = allocs;
1548		best_upa = upa;
1549	}
1550	upa = best_upa;
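
	/*
	 * Continuing the illustrative numbers above (alloc_size = 2MB,
	 * max_upa = 32, only power-of-two upa values pass the alignment
	 * check): with group_cnt = { 4, 4 } and 8 possible cpus, the
	 * wastage limit is 8 / 3 = 2 units.  upa = 32, 16 and 8 leave too
	 * many units unused, upa = 4 wastes none and needs one 2MB
	 * allocation per group, and upa = 2 would double the number of
	 * allocations, so best_upa = 4 and each unit ends up 512k.
	 */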
1551
1552	/* allocate and fill alloc_info */
1553	for (group = 0; group < nr_groups; group++)
1554		nr_units += roundup(group_cnt[group], upa);
1555
1556	ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
1557	if (!ai)
1558		return ERR_PTR(-ENOMEM);
1559	cpu_map = ai->groups[0].cpu_map;
1560
1561	for (group = 0; group < nr_groups; group++) {
1562		ai->groups[group].cpu_map = cpu_map;
1563		cpu_map += roundup(group_cnt[group], upa);
1564	}
1565
1566	ai->static_size = static_size;
1567	ai->reserved_size = reserved_size;
1568	ai->dyn_size = dyn_size;
1569	ai->unit_size = alloc_size / upa;
1570	ai->atom_size = atom_size;
1571	ai->alloc_size = alloc_size;
1572
1573	for (group = 0, unit = 0; group_cnt[group]; group++) {
1574		struct pcpu_group_info *gi = &ai->groups[group];
1575
1576		/*
1577		 * Initialize base_offset as if all groups are located
1578		 * back-to-back.  The caller should update this to
1579		 * reflect actual allocation.
1580		 */
1581		gi->base_offset = unit * ai->unit_size;
1582
1583		for_each_possible_cpu(cpu)
1584			if (group_map[cpu] == group)
1585				gi->cpu_map[gi->nr_units++] = cpu;
1586		gi->nr_units = roundup(gi->nr_units, upa);
1587		unit += gi->nr_units;
1588	}
1589	BUG_ON(unit != nr_units);
1590
1591	return ai;
1592}
1593#endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
1594
1595#if defined(BUILD_EMBED_FIRST_CHUNK)
1596/**
1597 * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
1598 * @reserved_size: the size of reserved percpu area in bytes
1599 * @dyn_size: minimum free size for dynamic allocation in bytes
1600 * @atom_size: allocation atom size
1601 * @cpu_distance_fn: callback to determine distance between cpus, optional
1602 * @alloc_fn: function to allocate percpu page
1603 * @free_fn: function to free percpu page
1604 *
1605 * This is a helper to ease setting up an embedded first percpu chunk
1606 * and can be called where pcpu_setup_first_chunk() is expected.
1607 *
1608 * If this function is used to set up the first chunk, it is allocated
1609 * by calling @alloc_fn and used as-is without being mapped into the
1610 * vmalloc area.  Allocations are always whole multiples of @atom_size
1611 * aligned to @atom_size.
1612 *
1613 * This enables the first chunk to piggy back on the linear physical
1614 * mapping which often uses larger page size.  Please note that this
1615 * can result in very sparse cpu->unit mapping on NUMA machines thus
1616 * requiring large vmalloc address space.  Don't use this allocator if
1617 * vmalloc space is not orders of magnitude larger than distances
1618 * between node memory addresses (ie. 32bit NUMA machines).
1619 *
1620 * @dyn_size specifies the minimum dynamic area size.
1621 *
1622 * If the needed size is smaller than the minimum or specified unit
1623 * size, the leftover is returned using @free_fn.
1624 *
1625 * RETURNS:
1626 * 0 on success, -errno on failure.
1627 */
1628int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
1629				  size_t atom_size,
1630				  pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
1631				  pcpu_fc_alloc_fn_t alloc_fn,
1632				  pcpu_fc_free_fn_t free_fn)
1633{
1634	void *base = (void *)ULONG_MAX;
1635	void **areas = NULL;
1636	struct pcpu_alloc_info *ai;
1637	size_t size_sum, areas_size, max_distance;
1638	int group, i, rc;
1639
1640	ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
1641				   cpu_distance_fn);
1642	if (IS_ERR(ai))
1643		return PTR_ERR(ai);
1644
1645	size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
1646	areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
1647
1648	areas = memblock_virt_alloc_nopanic(areas_size, 0);
1649	if (!areas) {
1650		rc = -ENOMEM;
1651		goto out_free;
1652	}
1653
1654	/* allocate, copy and determine base address */
1655	for (group = 0; group < ai->nr_groups; group++) {
1656		struct pcpu_group_info *gi = &ai->groups[group];
1657		unsigned int cpu = NR_CPUS;
1658		void *ptr;
1659
1660		for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
1661			cpu = gi->cpu_map[i];
1662		BUG_ON(cpu == NR_CPUS);
1663
1664		/* allocate space for the whole group */
1665		ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
1666		if (!ptr) {
1667			rc = -ENOMEM;
1668			goto out_free_areas;
1669		}
1670		/* kmemleak tracks the percpu allocations separately */
1671		kmemleak_free(ptr);
1672		areas[group] = ptr;
1673
1674		base = min(ptr, base);
1675	}
1676
1677	/*
1678	 * Copy data and free unused parts.  This should happen after all
1679	 * allocations are complete; otherwise, we may end up with
1680	 * overlapping groups.
1681	 */
1682	for (group = 0; group < ai->nr_groups; group++) {
1683		struct pcpu_group_info *gi = &ai->groups[group];
1684		void *ptr = areas[group];
1685
1686		for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
1687			if (gi->cpu_map[i] == NR_CPUS) {
1688				/* unused unit, free whole */
1689				free_fn(ptr, ai->unit_size);
1690				continue;
1691			}
1692			/* copy and return the unused part */
1693			memcpy(ptr, __per_cpu_load, ai->static_size);
1694			free_fn(ptr + size_sum, ai->unit_size - size_sum);
1695		}
1696	}
1697
1698	/* base address is now known, determine group base offsets */
1699	max_distance = 0;
1700	for (group = 0; group < ai->nr_groups; group++) {
1701		ai->groups[group].base_offset = areas[group] - base;
1702		max_distance = max_t(size_t, max_distance,
1703				     ai->groups[group].base_offset);
1704	}
1705	max_distance += ai->unit_size;
1706
1707	/* warn if the maximum distance exceeds 75% of the vmalloc space */
1708	if (max_distance > VMALLOC_TOTAL * 3 / 4) {
1709		pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc "
1710			   "space 0x%lx\n", max_distance,
1711			   VMALLOC_TOTAL);
1712#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
1713		/* and fail if we have fallback */
1714		rc = -EINVAL;
1715		goto out_free;
1716#endif
1717	}
1718
1719	pr_info("PERCPU: Embedded %zu pages/cpu @%p s%zu r%zu d%zu u%zu\n",
1720		PFN_DOWN(size_sum), base, ai->static_size, ai->reserved_size,
1721		ai->dyn_size, ai->unit_size);
1722
1723	rc = pcpu_setup_first_chunk(ai, base);
1724	goto out_free;
1725
1726out_free_areas:
1727	for (group = 0; group < ai->nr_groups; group++)
1728		if (areas[group])
1729			free_fn(areas[group],
1730				ai->groups[group].nr_units * ai->unit_size);
1731out_free:
1732	pcpu_free_alloc_info(ai);
1733	if (areas)
1734		memblock_free_early(__pa(areas), areas_size);
1735	return rc;
1736}
1737#endif /* BUILD_EMBED_FIRST_CHUNK */
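
/*
 * Illustrative sketch (not part of this file): a NUMA-aware arch that
 * provides its own setup_per_cpu_areas() might wire up the embedding
 * helper roughly as below.  early_cpu_to_node(), arch_pcpu_alloc_on_node()
 * and arch_pcpu_free() are hypothetical arch helpers.
 *
 *	static int __init arch_cpu_distance(unsigned int from, unsigned int to)
 *	{
 *		return early_cpu_to_node(from) == early_cpu_to_node(to) ?
 *			LOCAL_DISTANCE : REMOTE_DISTANCE;
 *	}
 *
 *	static void * __init arch_pcpu_fc_alloc(unsigned int cpu, size_t size,
 *						size_t align)
 *	{
 *		return arch_pcpu_alloc_on_node(early_cpu_to_node(cpu),
 *					       size, align);
 *	}
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    arch_cpu_distance, arch_pcpu_fc_alloc,
 *				    arch_pcpu_free);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */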
1738
1739#ifdef BUILD_PAGE_FIRST_CHUNK
1740/**
1741 * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
1742 * @reserved_size: the size of reserved percpu area in bytes
1743 * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
1744 * @free_fn: function to free percpu page, always called with PAGE_SIZE
1745 * @populate_pte_fn: function to populate pte
1746 *
1747 * This is a helper to ease setting up a page-remapped first percpu
1748 * chunk and can be called where pcpu_setup_first_chunk() is expected.
1749 *
1750 * This is the basic allocator.  The static percpu area is allocated
1751 * page-by-page into the vmalloc area.
1752 *
1753 * RETURNS:
1754 * 0 on success, -errno on failure.
1755 */
1756int __init pcpu_page_first_chunk(size_t reserved_size,
1757				 pcpu_fc_alloc_fn_t alloc_fn,
1758				 pcpu_fc_free_fn_t free_fn,
1759				 pcpu_fc_populate_pte_fn_t populate_pte_fn)
1760{
1761	static struct vm_struct vm;
1762	struct pcpu_alloc_info *ai;
1763	char psize_str[16];
1764	int unit_pages;
1765	size_t pages_size;
1766	struct page **pages;
1767	int unit, i, j, rc;
1768
1769	snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
1770
1771	ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
1772	if (IS_ERR(ai))
1773		return PTR_ERR(ai);
1774	BUG_ON(ai->nr_groups != 1);
1775	BUG_ON(ai->groups[0].nr_units != num_possible_cpus());
1776
1777	unit_pages = ai->unit_size >> PAGE_SHIFT;
1778
1779	/* unaligned allocations can't be freed, round up to page size */
1780	pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
1781			       sizeof(pages[0]));
1782	pages = memblock_virt_alloc(pages_size, 0);
1783
1784	/* allocate pages */
1785	j = 0;
1786	for (unit = 0; unit < num_possible_cpus(); unit++)
1787		for (i = 0; i < unit_pages; i++) {
1788			unsigned int cpu = ai->groups[0].cpu_map[unit];
1789			void *ptr;
1790
1791			ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
1792			if (!ptr) {
1793				pr_warning("PERCPU: failed to allocate %s page "
1794					   "for cpu%u\n", psize_str, cpu);
1795				goto enomem;
1796			}
1797			/* kmemleak tracks the percpu allocations separately */
1798			kmemleak_free(ptr);
1799			pages[j++] = virt_to_page(ptr);
1800		}
1801
1802	/* allocate vm area, map the pages and copy static data */
1803	vm.flags = VM_ALLOC;
1804	vm.size = num_possible_cpus() * ai->unit_size;
1805	vm_area_register_early(&vm, PAGE_SIZE);
1806
1807	for (unit = 0; unit < num_possible_cpus(); unit++) {
1808		unsigned long unit_addr =
1809			(unsigned long)vm.addr + unit * ai->unit_size;
1810
1811		for (i = 0; i < unit_pages; i++)
1812			populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
1813
1814		/* pte already populated, the following shouldn't fail */
1815		rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
1816				      unit_pages);
1817		if (rc < 0)
1818			panic("failed to map percpu area, err=%d\n", rc);
1819
1820		/*
1821		 * FIXME: Archs with virtual cache should flush local
1822		 * cache for the linear mapping here - something
1823		 * equivalent to flush_cache_vmap() on the local cpu.
1824		 * flush_cache_vmap() can't be used as most supporting
1825		 * data structures are not set up yet.
1826		 */
1827
1828		/* copy static data */
1829		memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
1830	}
1831
1832	/* we're ready, commit */
1833	pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
1834		unit_pages, psize_str, vm.addr, ai->static_size,
1835		ai->reserved_size, ai->dyn_size);
1836
1837	rc = pcpu_setup_first_chunk(ai, vm.addr);
1838	goto out_free_ar;
1839
1840enomem:
1841	while (--j >= 0)
1842		free_fn(page_address(pages[j]), PAGE_SIZE);
1843	rc = -ENOMEM;
1844out_free_ar:
1845	memblock_free_early(__pa(pages), pages_size);
1846	pcpu_free_alloc_info(ai);
1847	return rc;
1848}
1849#endif /* BUILD_PAGE_FIRST_CHUNK */
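
/*
 * Illustrative sketch (not part of this file): an arch relying on the
 * page allocator passes page-granular callbacks instead, e.g. (with
 * arch_alloc_page_for_cpu(), arch_free_page() and arch_populate_pte()
 * standing in as hypothetical helpers):
 *
 *	rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
 *				   arch_alloc_page_for_cpu, arch_free_page,
 *				   arch_populate_pte);
 *	if (rc < 0)
 *		panic("cannot initialize percpu area (err=%d)", rc);
 */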
1850
1851#ifndef	CONFIG_HAVE_SETUP_PER_CPU_AREA
1852/*
1853 * Generic SMP percpu area setup.
1854 *
1855 * The embedding helper is used because its behavior closely resembles
1856 * the original non-dynamic generic percpu area setup.  This is
1857 * important because many archs have addressing restrictions and might
1858 * fail if the percpu area is located far away from the previous
1859 * location.  As an added bonus, in non-NUMA cases, embedding is
1860 * generally a good idea TLB-wise because percpu area can piggy back
1861 * on the physical linear memory mapping which uses large page
1862 * mappings on applicable archs.
1863 */
1864unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
1865EXPORT_SYMBOL(__per_cpu_offset);
1866
1867static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
1868				       size_t align)
1869{
1870	return  memblock_virt_alloc_from_nopanic(
1871			size, align, __pa(MAX_DMA_ADDRESS));
1872}
1873
1874static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
1875{
1876	memblock_free_early(__pa(ptr), size);
1877}
1878
1879void __init setup_per_cpu_areas(void)
1880{
1881	unsigned long delta;
1882	unsigned int cpu;
1883	int rc;
1884
1885	/*
1886	 * Always reserve area for module percpu variables.  That's
1887	 * what the legacy allocator did.
1888	 */
1889	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
1890				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
1891				    pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
1892	if (rc < 0)
1893		panic("Failed to initialize percpu areas.");
1894
1895	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
1896	for_each_possible_cpu(cpu)
1897		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
1898}
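
/*
 * With the offsets above in place, per_cpu(var, cpu) on a static percpu
 * variable resolves (roughly) to
 *
 *	*(typeof(var) *)((unsigned long)&var + __per_cpu_offset[cpu])
 *
 * i.e. the variable's link-time address shifted into that cpu's unit in
 * the first chunk.
 */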
1899#endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
1900
1901#else	/* CONFIG_SMP */
1902
1903/*
1904 * UP percpu area setup.
1905 *
1906 * UP always uses the km-based percpu allocator with identity mapping.
1907 * Static percpu variables are indistinguishable from the usual static
1908 * variables and don't require any special preparation.
1909 */
1910void __init setup_per_cpu_areas(void)
1911{
1912	const size_t unit_size =
1913		roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
1914					 PERCPU_DYNAMIC_RESERVE));
1915	struct pcpu_alloc_info *ai;
1916	void *fc;
1917
1918	ai = pcpu_alloc_alloc_info(1, 1);
1919	fc = memblock_virt_alloc_from_nopanic(unit_size,
1920					      PAGE_SIZE,
1921					      __pa(MAX_DMA_ADDRESS));
1922	if (!ai || !fc)
1923		panic("Failed to allocate memory for percpu areas.");
1924	/* kmemleak tracks the percpu allocations separately */
1925	kmemleak_free(fc);
1926
1927	ai->dyn_size = unit_size;
1928	ai->unit_size = unit_size;
1929	ai->atom_size = unit_size;
1930	ai->alloc_size = unit_size;
1931	ai->groups[0].nr_units = 1;
1932	ai->groups[0].cpu_map[0] = 0;
1933
1934	if (pcpu_setup_first_chunk(ai, fc) < 0)
1935		panic("Failed to initialize percpu areas.");
1936}
1937
1938#endif	/* CONFIG_SMP */
1939
1940/*
1941 * First and reserved chunks are initialized with temporary allocation
1942 * maps in initdata so that they can be used before slab is online.
1943 * This function is called after slab is brought up and replaces those
1944 * with properly allocated maps.
1945 */
1946void __init percpu_init_late(void)
1947{
1948	struct pcpu_chunk *target_chunks[] =
1949		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
1950	struct pcpu_chunk *chunk;
1951	unsigned long flags;
1952	int i;
1953
1954	for (i = 0; (chunk = target_chunks[i]); i++) {
1955		int *map;
1956		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
1957
1958		BUILD_BUG_ON(size > PAGE_SIZE);
1959
1960		map = pcpu_mem_zalloc(size);
1961		BUG_ON(!map);
1962
1963		spin_lock_irqsave(&pcpu_lock, flags);
1964		memcpy(map, chunk->map, size);
1965		chunk->map = map;
1966		spin_unlock_irqrestore(&pcpu_lock, flags);
1967	}
1968}