   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/vmalloc.c
   4 *
   5 *  Copyright (C) 1993  Linus Torvalds
   6 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   7 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   8 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   9 *  Numa awareness, Christoph Lameter, SGI, June 2005
  10 */
  11
  12#include <linux/vmalloc.h>
  13#include <linux/mm.h>
  14#include <linux/module.h>
  15#include <linux/highmem.h>
  16#include <linux/sched/signal.h>
  17#include <linux/slab.h>
  18#include <linux/spinlock.h>
  19#include <linux/interrupt.h>
  20#include <linux/proc_fs.h>
  21#include <linux/seq_file.h>
  22#include <linux/set_memory.h>
  23#include <linux/debugobjects.h>
  24#include <linux/kallsyms.h>
  25#include <linux/list.h>
  26#include <linux/notifier.h>
  27#include <linux/rbtree.h>
  28#include <linux/radix-tree.h>
  29#include <linux/rcupdate.h>
  30#include <linux/pfn.h>
  31#include <linux/kmemleak.h>
  32#include <linux/atomic.h>
  33#include <linux/compiler.h>
  34#include <linux/llist.h>
  35#include <linux/bitops.h>
  36#include <linux/rbtree_augmented.h>
  37
  38#include <linux/uaccess.h>
  39#include <asm/tlbflush.h>
  40#include <asm/shmparam.h>
  41
  42#include "internal.h"
  43
  44struct vfree_deferred {
  45	struct llist_head list;
  46	struct work_struct wq;
  47};
  48static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
  49
  50static void __vunmap(const void *, int);
  51
  52static void free_work(struct work_struct *w)
  53{
  54	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
  55	struct llist_node *t, *llnode;
  56
  57	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
  58		__vunmap((void *)llnode, 1);
  59}
  60
  61/*** Page table manipulation functions ***/
  62
  63static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
  64{
  65	pte_t *pte;
  66
  67	pte = pte_offset_kernel(pmd, addr);
  68	do {
  69		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
  70		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
  71	} while (pte++, addr += PAGE_SIZE, addr != end);
  72}
  73
  74static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
  75{
  76	pmd_t *pmd;
  77	unsigned long next;
  78
  79	pmd = pmd_offset(pud, addr);
  80	do {
  81		next = pmd_addr_end(addr, end);
  82		if (pmd_clear_huge(pmd))
  83			continue;
  84		if (pmd_none_or_clear_bad(pmd))
  85			continue;
  86		vunmap_pte_range(pmd, addr, next);
  87	} while (pmd++, addr = next, addr != end);
  88}
  89
  90static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
  91{
  92	pud_t *pud;
  93	unsigned long next;
  94
  95	pud = pud_offset(p4d, addr);
  96	do {
  97		next = pud_addr_end(addr, end);
  98		if (pud_clear_huge(pud))
  99			continue;
 100		if (pud_none_or_clear_bad(pud))
 101			continue;
 102		vunmap_pmd_range(pud, addr, next);
 103	} while (pud++, addr = next, addr != end);
 104}
 105
 106static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
 107{
 108	p4d_t *p4d;
 109	unsigned long next;
 110
 111	p4d = p4d_offset(pgd, addr);
 112	do {
 113		next = p4d_addr_end(addr, end);
 114		if (p4d_clear_huge(p4d))
 115			continue;
 116		if (p4d_none_or_clear_bad(p4d))
 117			continue;
 118		vunmap_pud_range(p4d, addr, next);
 119	} while (p4d++, addr = next, addr != end);
 120}
 121
 122static void vunmap_page_range(unsigned long addr, unsigned long end)
 123{
 124	pgd_t *pgd;
 125	unsigned long next;
 126
 127	BUG_ON(addr >= end);
 128	pgd = pgd_offset_k(addr);
 129	do {
 130		next = pgd_addr_end(addr, end);
 131		if (pgd_none_or_clear_bad(pgd))
 132			continue;
 133		vunmap_p4d_range(pgd, addr, next);
 134	} while (pgd++, addr = next, addr != end);
 135}
 136
 137static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 138		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 139{
 140	pte_t *pte;
 141
 142	/*
 143	 * nr is a running index into the array which helps higher level
 144	 * callers keep track of where we're up to.
 145	 */
 146
 147	pte = pte_alloc_kernel(pmd, addr);
 148	if (!pte)
 149		return -ENOMEM;
 150	do {
 151		struct page *page = pages[*nr];
 152
 153		if (WARN_ON(!pte_none(*pte)))
 154			return -EBUSY;
 155		if (WARN_ON(!page))
 156			return -ENOMEM;
 157		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 158		(*nr)++;
 159	} while (pte++, addr += PAGE_SIZE, addr != end);
 160	return 0;
 161}
 162
 163static int vmap_pmd_range(pud_t *pud, unsigned long addr,
 164		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 165{
 166	pmd_t *pmd;
 167	unsigned long next;
 168
 169	pmd = pmd_alloc(&init_mm, pud, addr);
 170	if (!pmd)
 171		return -ENOMEM;
 172	do {
 173		next = pmd_addr_end(addr, end);
 174		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
 175			return -ENOMEM;
 176	} while (pmd++, addr = next, addr != end);
 177	return 0;
 178}
 179
 180static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
 181		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 182{
 183	pud_t *pud;
 184	unsigned long next;
 185
 186	pud = pud_alloc(&init_mm, p4d, addr);
 187	if (!pud)
 188		return -ENOMEM;
 189	do {
 190		next = pud_addr_end(addr, end);
 191		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
 192			return -ENOMEM;
 193	} while (pud++, addr = next, addr != end);
 194	return 0;
 195}
 196
 197static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
 198		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 199{
 200	p4d_t *p4d;
 201	unsigned long next;
 202
 203	p4d = p4d_alloc(&init_mm, pgd, addr);
 204	if (!p4d)
 205		return -ENOMEM;
 206	do {
 207		next = p4d_addr_end(addr, end);
 208		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
 209			return -ENOMEM;
 210	} while (p4d++, addr = next, addr != end);
 211	return 0;
 212}
 213
 214/*
 215 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 216 * will have pfns corresponding to the "pages" array.
 217 *
 218 * I.e. the pte at addr+N*PAGE_SIZE shall point to the pfn corresponding to pages[N]
 219 */
 220static int vmap_page_range_noflush(unsigned long start, unsigned long end,
 221				   pgprot_t prot, struct page **pages)
 222{
 223	pgd_t *pgd;
 224	unsigned long next;
 225	unsigned long addr = start;
 226	int err = 0;
 227	int nr = 0;
 228
 229	BUG_ON(addr >= end);
 230	pgd = pgd_offset_k(addr);
 231	do {
 232		next = pgd_addr_end(addr, end);
 233		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
 234		if (err)
 235			return err;
 236	} while (pgd++, addr = next, addr != end);
 237
 238	return nr;
 239}
 240
 241static int vmap_page_range(unsigned long start, unsigned long end,
 242			   pgprot_t prot, struct page **pages)
 243{
 244	int ret;
 245
 246	ret = vmap_page_range_noflush(start, end, prot, pages);
 247	flush_cache_vmap(start, end);
 248	return ret;
 249}
 250
 251int is_vmalloc_or_module_addr(const void *x)
 252{
 253	/*
 254	 * ARM, x86-64 and sparc64 put modules in a special place,
 255	 * and fall back on vmalloc() if that fails. Others
 256	 * just put it in the vmalloc space.
 257	 */
 258#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
 259	unsigned long addr = (unsigned long)x;
 260	if (addr >= MODULES_VADDR && addr < MODULES_END)
 261		return 1;
 262#endif
 263	return is_vmalloc_addr(x);
 264}
 265
 266/*
 267 * Walk a vmap address to the struct page it maps.
 268 */
 269struct page *vmalloc_to_page(const void *vmalloc_addr)
 270{
 271	unsigned long addr = (unsigned long) vmalloc_addr;
 272	struct page *page = NULL;
 273	pgd_t *pgd = pgd_offset_k(addr);
 274	p4d_t *p4d;
 275	pud_t *pud;
 276	pmd_t *pmd;
 277	pte_t *ptep, pte;
 278
 279	/*
 280	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
 281	 * architectures that do not vmalloc module space
 282	 */
 283	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 284
 285	if (pgd_none(*pgd))
 286		return NULL;
 287	p4d = p4d_offset(pgd, addr);
 288	if (p4d_none(*p4d))
 289		return NULL;
 290	pud = pud_offset(p4d, addr);
 291
 292	/*
 293	 * Don't dereference bad PUD or PMD (below) entries. This will also
 294	 * identify huge mappings, which we may encounter on architectures
 295	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
 296	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
 297	 * not [unambiguously] associated with a struct page, so there is
 298	 * no correct value to return for them.
 299	 */
 300	WARN_ON_ONCE(pud_bad(*pud));
 301	if (pud_none(*pud) || pud_bad(*pud))
 302		return NULL;
 303	pmd = pmd_offset(pud, addr);
 304	WARN_ON_ONCE(pmd_bad(*pmd));
 305	if (pmd_none(*pmd) || pmd_bad(*pmd))
 306		return NULL;
 307
 308	ptep = pte_offset_map(pmd, addr);
 309	pte = *ptep;
 310	if (pte_present(pte))
 311		page = pte_page(pte);
 312	pte_unmap(ptep);
 313	return page;
 314}
 315EXPORT_SYMBOL(vmalloc_to_page);
 316
 317/*
 318 * Map a vmalloc()-space virtual address to the physical page frame number.
 319 */
 320unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 321{
 322	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 323}
 324EXPORT_SYMBOL(vmalloc_to_pfn);
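
/*
 * Usage sketch (illustrative only, not part of the kernel source):
 * walking a vmalloc'ed buffer page by page with vmalloc_to_page(),
 * e.g. to hand its backing pages to code that needs struct page
 * pointers. "buf" and "size" are hypothetical names.
 *
 *	void *buf = vmalloc(size);
 *	unsigned long off;
 *
 *	if (buf) {
 *		for (off = 0; off < size; off += PAGE_SIZE) {
 *			struct page *page = vmalloc_to_page(buf + off);
 *
 *			pr_debug("pfn at offset %lu: %lu\n",
 *				 off, page_to_pfn(page));
 *		}
 *		vfree(buf);
 *	}
 */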
 325
 326
 327/*** Global kva allocator ***/
 328
 329#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 330#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 331
 332
 333static DEFINE_SPINLOCK(vmap_area_lock);
 334/* Export for kexec only */
 335LIST_HEAD(vmap_area_list);
 336static LLIST_HEAD(vmap_purge_list);
 337static struct rb_root vmap_area_root = RB_ROOT;
 338static bool vmap_initialized __read_mostly;
 339
 340/*
 341 * This kmem_cache is used for vmap_area objects. Instead of
 342 * allocating from slab we reuse an object from this cache to
 343 * make things faster, especially in the "no edge" splitting of
 344 * a free block.
 345 */
 346static struct kmem_cache *vmap_area_cachep;
 347
 348/*
 349 * This linked list is used together with free_vmap_area_root.
 350 * It gives O(1) access to prev/next to perform fast coalescing.
 351 */
 352static LIST_HEAD(free_vmap_area_list);
 353
 354/*
 355 * This augmented red-black tree represents the free vmap space.
 356 * All vmap_area objects in this tree are sorted by va->va_start
 357 * address. It is used for allocation and merging when a vmap
 358 * object is released.
 359 *
 360 * Each vmap_area node contains a maximum available free block
 361 * of its sub-tree, right or left. Therefore it is possible to
 362 * find the lowest match of a free area.
 363 */
 364static struct rb_root free_vmap_area_root = RB_ROOT;
 365
 366/*
 367 * Preload a CPU with one object for the "no edge" split case. The
 368 * aim is to get rid of allocations from the atomic context, and
 369 * thus to use more permissive allocation masks.
 370 */
 371static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
 372
 373static __always_inline unsigned long
 374va_size(struct vmap_area *va)
 375{
 376	return (va->va_end - va->va_start);
 377}
 378
 379static __always_inline unsigned long
 380get_subtree_max_size(struct rb_node *node)
 381{
 382	struct vmap_area *va;
 383
 384	va = rb_entry_safe(node, struct vmap_area, rb_node);
 385	return va ? va->subtree_max_size : 0;
 386}
 387
 388/*
 389 * Gets called when removing the node and rotating.
 390 */
 391static __always_inline unsigned long
 392compute_subtree_max_size(struct vmap_area *va)
 393{
 394	return max3(va_size(va),
 395		get_subtree_max_size(va->rb_node.rb_left),
 396		get_subtree_max_size(va->rb_node.rb_right));
 397}
 398
 399RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
 400	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
 401
 402static void purge_vmap_area_lazy(void);
 403static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
 404static unsigned long lazy_max_pages(void);
 405
 406static atomic_long_t nr_vmalloc_pages;
 407
 408unsigned long vmalloc_nr_pages(void)
 409{
 410	return atomic_long_read(&nr_vmalloc_pages);
 411}
 412
 413static struct vmap_area *__find_vmap_area(unsigned long addr)
 414{
 415	struct rb_node *n = vmap_area_root.rb_node;
 416
 417	while (n) {
 418		struct vmap_area *va;
 419
 420		va = rb_entry(n, struct vmap_area, rb_node);
 421		if (addr < va->va_start)
 422			n = n->rb_left;
 423		else if (addr >= va->va_end)
 424			n = n->rb_right;
 425		else
 426			return va;
 427	}
 428
 429	return NULL;
 430}
 431
 432/*
 433 * This function returns the addresses of the parent node
 434 * and its left or right link for further processing.
 435 */
 436static __always_inline struct rb_node **
 437find_va_links(struct vmap_area *va,
 438	struct rb_root *root, struct rb_node *from,
 439	struct rb_node **parent)
 440{
 441	struct vmap_area *tmp_va;
 442	struct rb_node **link;
 443
 444	if (root) {
 445		link = &root->rb_node;
 446		if (unlikely(!*link)) {
 447			*parent = NULL;
 448			return link;
 449		}
 450	} else {
 451		link = &from;
 452	}
 453
 454	/*
 455	 * Go to the bottom of the tree. When we hit the last point
 456	 * we end up with the parent rb_node and the correct direction,
 457	 * named "link" here, to which the new va->rb_node will be attached.
 458	 */
 459	do {
 460		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
 461
 462		/*
 463		 * During the traversal we also do some sanity checks.
 464		 * Trigger the BUG() if the new area overlaps an existing
 465		 * one, either partially (left/right) or fully.
 466		 */
 467		if (va->va_start < tmp_va->va_end &&
 468				va->va_end <= tmp_va->va_start)
 469			link = &(*link)->rb_left;
 470		else if (va->va_end > tmp_va->va_start &&
 471				va->va_start >= tmp_va->va_end)
 472			link = &(*link)->rb_right;
 473		else
 474			BUG();
 475	} while (*link);
 476
 477	*parent = &tmp_va->rb_node;
 478	return link;
 479}
 480
 481static __always_inline struct list_head *
 482get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
 483{
 484	struct list_head *list;
 485
 486	if (unlikely(!parent))
 487		/*
 488		 * The red-black tree where we try to find VA neighbors
 489		 * before merging or inserting is empty, which means
 490		 * there is no free vmap space. Normally it does not
 491		 * happen but we handle this case anyway.
 492		 */
 493		return NULL;
 494
 495	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
 496	return (&parent->rb_right == link ? list->next : list);
 497}
 498
 499static __always_inline void
 500link_va(struct vmap_area *va, struct rb_root *root,
 501	struct rb_node *parent, struct rb_node **link, struct list_head *head)
 502{
 503	/*
 504	 * VA is still not in the list, but we can
 505	 * identify its future previous list_head node.
 506	 */
 507	if (likely(parent)) {
 508		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
 509		if (&parent->rb_right != link)
 510			head = head->prev;
 511	}
 512
 513	/* Insert to the rb-tree */
 514	rb_link_node(&va->rb_node, parent, link);
 515	if (root == &free_vmap_area_root) {
 516		/*
 517		 * Some explanation here. Just perform a simple insertion
 518		 * into the tree. We do not set va->subtree_max_size to
 519		 * its current size before calling rb_insert_augmented().
 520		 * This is because we populate the tree from the bottom
 521		 * up to the parent levels when the node _is_ in the tree.
 522		 *
 523		 * Therefore we set subtree_max_size to zero after insertion,
 524		 * to let __augment_tree_propagate_from() put everything in
 525		 * the correct order later on.
 526		 */
 527		rb_insert_augmented(&va->rb_node,
 528			root, &free_vmap_area_rb_augment_cb);
 529		va->subtree_max_size = 0;
 530	} else {
 531		rb_insert_color(&va->rb_node, root);
 532	}
 533
 534	/* Address-sort this list */
 535	list_add(&va->list, head);
 536}
 537
 538static __always_inline void
 539unlink_va(struct vmap_area *va, struct rb_root *root)
 540{
 541	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
 542		return;
 543
 544	if (root == &free_vmap_area_root)
 545		rb_erase_augmented(&va->rb_node,
 546			root, &free_vmap_area_rb_augment_cb);
 547	else
 548		rb_erase(&va->rb_node, root);
 549
 550	list_del(&va->list);
 551	RB_CLEAR_NODE(&va->rb_node);
 552}
 553
 554#if DEBUG_AUGMENT_PROPAGATE_CHECK
 555static void
 556augment_tree_propagate_check(struct rb_node *n)
 557{
 558	struct vmap_area *va;
 559	struct rb_node *node;
 560	unsigned long size;
 561	bool found = false;
 562
 563	if (n == NULL)
 564		return;
 565
 566	va = rb_entry(n, struct vmap_area, rb_node);
 567	size = va->subtree_max_size;
 568	node = n;
 569
 570	while (node) {
 571		va = rb_entry(node, struct vmap_area, rb_node);
 572
 573		if (get_subtree_max_size(node->rb_left) == size) {
 574			node = node->rb_left;
 575		} else {
 576			if (va_size(va) == size) {
 577				found = true;
 578				break;
 579			}
 580
 581			node = node->rb_right;
 582		}
 583	}
 584
 585	if (!found) {
 586		va = rb_entry(n, struct vmap_area, rb_node);
 587		pr_emerg("tree is corrupted: %lu, %lu\n",
 588			va_size(va), va->subtree_max_size);
 589	}
 590
 591	augment_tree_propagate_check(n->rb_left);
 592	augment_tree_propagate_check(n->rb_right);
 593}
 594#endif
 595
 596/*
 597 * This function populates subtree_max_size from the bottom to the
 598 * upper levels, starting from the VA point. The propagation must be
 599 * done when the VA size is modified by changing its va_start/va_end,
 600 * or when a new VA is inserted into the tree.
 601 *
 602 * It means that __augment_tree_propagate_from() must be called:
 603 * - After VA has been inserted to the tree(free path);
 604 * - After VA has been shrunk(allocation path);
 605 * - After VA has been increased(merging path).
 606 *
 607 * Please note that, it does not mean that upper parent nodes
 608 * and their subtree_max_size are recalculated all the time up
 609 * to the root node.
 610 *
 611 *       4--8
 612 *        /\
 613 *       /  \
 614 *      /    \
 615 *    2--2  8--8
 616 *
 617 * For example, if we modify node 4, shrinking it to 2, then
 618 * no modification is required. If we shrink node 2 to 1, only
 619 * its subtree_max_size is updated, and set to 1. If we shrink
 620 * node 8 to 6, then its subtree_max_size is set to 6 and the
 621 * parent node becomes 4--6.
 622 */
 623static __always_inline void
 624augment_tree_propagate_from(struct vmap_area *va)
 625{
 626	struct rb_node *node = &va->rb_node;
 627	unsigned long new_va_sub_max_size;
 628
 629	while (node) {
 630		va = rb_entry(node, struct vmap_area, rb_node);
 631		new_va_sub_max_size = compute_subtree_max_size(va);
 632
 633		/*
 634		 * If the newly calculated maximum available size of the
 635		 * subtree is equal to the current one, then it means that
 636		 * the tree is propagated correctly. So we have to stop at
 637		 * this point to save cycles.
 638		 */
 639		if (va->subtree_max_size == new_va_sub_max_size)
 640			break;
 641
 642		va->subtree_max_size = new_va_sub_max_size;
 643		node = rb_parent(&va->rb_node);
 644	}
 645
 646#if DEBUG_AUGMENT_PROPAGATE_CHECK
 647	augment_tree_propagate_check(free_vmap_area_root.rb_node);
 648#endif
 649}
 650
 651static void
 652insert_vmap_area(struct vmap_area *va,
 653	struct rb_root *root, struct list_head *head)
 654{
 655	struct rb_node **link;
 656	struct rb_node *parent;
 657
 658	link = find_va_links(va, root, NULL, &parent);
 659	link_va(va, root, parent, link, head);
 660}
 661
 662static void
 663insert_vmap_area_augment(struct vmap_area *va,
 664	struct rb_node *from, struct rb_root *root,
 665	struct list_head *head)
 666{
 667	struct rb_node **link;
 668	struct rb_node *parent;
 669
 670	if (from)
 671		link = find_va_links(va, NULL, from, &parent);
 672	else
 673		link = find_va_links(va, root, NULL, &parent);
 674
 675	link_va(va, root, parent, link, head);
 676	augment_tree_propagate_from(va);
 677}
 678
 679/*
 680 * Merge a de-allocated chunk of VA memory with the previous
 681 * and next free blocks. If no coalescing is done, a new
 682 * free area is inserted. If the VA has been merged, it is
 683 * freed.
 684 */
 685static __always_inline void
 686merge_or_add_vmap_area(struct vmap_area *va,
 687	struct rb_root *root, struct list_head *head)
 688{
 689	struct vmap_area *sibling;
 690	struct list_head *next;
 691	struct rb_node **link;
 692	struct rb_node *parent;
 693	bool merged = false;
 694
 695	/*
 696	 * Find a place in the tree where VA potentially will be
 697	 * inserted, unless it is merged with its sibling/siblings.
 698	 */
 699	link = find_va_links(va, root, NULL, &parent);
 700
 701	/*
 702	 * Get next node of VA to check if merging can be done.
 703	 */
 704	next = get_va_next_sibling(parent, link);
 705	if (unlikely(next == NULL))
 706		goto insert;
 707
 708	/*
 709	 * start            end
 710	 * |                |
 711	 * |<------VA------>|<-----Next----->|
 712	 *                  |                |
 713	 *                  start            end
 714	 */
 715	if (next != head) {
 716		sibling = list_entry(next, struct vmap_area, list);
 717		if (sibling->va_start == va->va_end) {
 718			sibling->va_start = va->va_start;
 719
 720			/* Check and update the tree if needed. */
 721			augment_tree_propagate_from(sibling);
 722
 723			/* Free vmap_area object. */
 724			kmem_cache_free(vmap_area_cachep, va);
 725
 726			/* Point to the new merged area. */
 727			va = sibling;
 728			merged = true;
 729		}
 730	}
 731
 732	/*
 733	 * start            end
 734	 * |                |
 735	 * |<-----Prev----->|<------VA------>|
 736	 *                  |                |
 737	 *                  start            end
 738	 */
 739	if (next->prev != head) {
 740		sibling = list_entry(next->prev, struct vmap_area, list);
 741		if (sibling->va_end == va->va_start) {
 742			sibling->va_end = va->va_end;
 743
 744			/* Check and update the tree if needed. */
 745			augment_tree_propagate_from(sibling);
 746
 747			if (merged)
 748				unlink_va(va, root);
 749
 750			/* Free vmap_area object. */
 751			kmem_cache_free(vmap_area_cachep, va);
 752			return;
 753		}
 754	}
 755
 756insert:
 757	if (!merged) {
 758		link_va(va, root, parent, link, head);
 759		augment_tree_propagate_from(va);
 760	}
 761}
 762
 763static __always_inline bool
 764is_within_this_va(struct vmap_area *va, unsigned long size,
 765	unsigned long align, unsigned long vstart)
 766{
 767	unsigned long nva_start_addr;
 768
 769	if (va->va_start > vstart)
 770		nva_start_addr = ALIGN(va->va_start, align);
 771	else
 772		nva_start_addr = ALIGN(vstart, align);
 773
 774	/* Can be overflowed due to big size or alignment. */
 775	if (nva_start_addr + size < nva_start_addr ||
 776			nva_start_addr < vstart)
 777		return false;
 778
 779	return (nva_start_addr + size <= va->va_end);
 780}
 781
 782/*
 783 * Find the first free block (lowest start address) in the tree
 784 * that can satisfy the request given by the passed
 785 * parameters.
 786 */
 787static __always_inline struct vmap_area *
 788find_vmap_lowest_match(unsigned long size,
 789	unsigned long align, unsigned long vstart)
 790{
 791	struct vmap_area *va;
 792	struct rb_node *node;
 793	unsigned long length;
 794
 795	/* Start from the root. */
 796	node = free_vmap_area_root.rb_node;
 797
 798	/* Adjust the search size for alignment overhead. */
 799	length = size + align - 1;
 800
 801	while (node) {
 802		va = rb_entry(node, struct vmap_area, rb_node);
 803
 804		if (get_subtree_max_size(node->rb_left) >= length &&
 805				vstart < va->va_start) {
 806			node = node->rb_left;
 807		} else {
 808			if (is_within_this_va(va, size, align, vstart))
 809				return va;
 810
 811			/*
 812			 * It does not make sense to go deeper into the right
 813			 * sub-tree if it does not have a free block that is
 814			 * equal to or bigger than the requested search length.
 815			 */
 816			if (get_subtree_max_size(node->rb_right) >= length) {
 817				node = node->rb_right;
 818				continue;
 819			}
 820
 821			/*
 822			 * OK. We roll back and find the first right sub-tree
 823			 * that can satisfy the search criteria. This can happen
 824			 * only once due to the "vstart" restriction.
 825			 */
 826			while ((node = rb_parent(node))) {
 827				va = rb_entry(node, struct vmap_area, rb_node);
 828				if (is_within_this_va(va, size, align, vstart))
 829					return va;
 830
 831				if (get_subtree_max_size(node->rb_right) >= length &&
 832						vstart <= va->va_start) {
 833					node = node->rb_right;
 834					break;
 835				}
 836			}
 837		}
 838	}
 839
 840	return NULL;
 841}
 842
 843#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
 844#include <linux/random.h>
 845
 846static struct vmap_area *
 847find_vmap_lowest_linear_match(unsigned long size,
 848	unsigned long align, unsigned long vstart)
 849{
 850	struct vmap_area *va;
 851
 852	list_for_each_entry(va, &free_vmap_area_list, list) {
 853		if (!is_within_this_va(va, size, align, vstart))
 854			continue;
 855
 856		return va;
 857	}
 858
 859	return NULL;
 860}
 861
 862static void
 863find_vmap_lowest_match_check(unsigned long size)
 864{
 865	struct vmap_area *va_1, *va_2;
 866	unsigned long vstart;
 867	unsigned int rnd;
 868
 869	get_random_bytes(&rnd, sizeof(rnd));
 870	vstart = VMALLOC_START + rnd;
 871
 872	va_1 = find_vmap_lowest_match(size, 1, vstart);
 873	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
 874
 875	if (va_1 != va_2)
 876		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
 877			va_1, va_2, vstart);
 878}
 879#endif
 880
 881enum fit_type {
 882	NOTHING_FIT = 0,
 883	FL_FIT_TYPE = 1,	/* full fit */
 884	LE_FIT_TYPE = 2,	/* left edge fit */
 885	RE_FIT_TYPE = 3,	/* right edge fit */
 886	NE_FIT_TYPE = 4		/* no edge fit */
 887};
 888
 889static __always_inline enum fit_type
 890classify_va_fit_type(struct vmap_area *va,
 891	unsigned long nva_start_addr, unsigned long size)
 892{
 893	enum fit_type type;
 894
 895	/* Check if it is within VA. */
 896	if (nva_start_addr < va->va_start ||
 897			nva_start_addr + size > va->va_end)
 898		return NOTHING_FIT;
 899
 900	/* Now classify. */
 901	if (va->va_start == nva_start_addr) {
 902		if (va->va_end == nva_start_addr + size)
 903			type = FL_FIT_TYPE;
 904		else
 905			type = LE_FIT_TYPE;
 906	} else if (va->va_end == nva_start_addr + size) {
 907		type = RE_FIT_TYPE;
 908	} else {
 909		type = NE_FIT_TYPE;
 910	}
 911
 912	return type;
 913}
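
/*
 * A concrete illustration of the classification above, with
 * hypothetical addresses: for a free VA spanning [0x1000, 0x9000),
 * a request placed at nva_start_addr == 0x1000 with size 0x2000 is
 * LE_FIT_TYPE (the leftover [0x3000, 0x9000) stays on the right);
 * at 0x7000 with size 0x2000 it is RE_FIT_TYPE; at 0x3000 with size
 * 0x2000 it is NE_FIT_TYPE (leftovers on both sides); and at 0x1000
 * with size 0x8000 it is FL_FIT_TYPE (the whole VA is consumed).
 */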
 914
 915static __always_inline int
 916adjust_va_to_fit_type(struct vmap_area *va,
 917	unsigned long nva_start_addr, unsigned long size,
 918	enum fit_type type)
 919{
 920	struct vmap_area *lva = NULL;
 921
 922	if (type == FL_FIT_TYPE) {
 923		/*
 924		 * No need to split VA, it fully fits.
 925		 *
 926		 * |               |
 927		 * V      NVA      V
 928		 * |---------------|
 929		 */
 930		unlink_va(va, &free_vmap_area_root);
 931		kmem_cache_free(vmap_area_cachep, va);
 932	} else if (type == LE_FIT_TYPE) {
 933		/*
 934		 * Split left edge of fit VA.
 935		 *
 936		 * |       |
 937		 * V  NVA  V   R
 938		 * |-------|-------|
 939		 */
 940		va->va_start += size;
 941	} else if (type == RE_FIT_TYPE) {
 942		/*
 943		 * Split right edge of fit VA.
 944		 *
 945		 *         |       |
 946		 *     L   V  NVA  V
 947		 * |-------|-------|
 948		 */
 949		va->va_end = nva_start_addr;
 950	} else if (type == NE_FIT_TYPE) {
 951		/*
 952		 * Split no edge of fit VA.
 953		 *
 954		 *     |       |
 955		 *   L V  NVA  V R
 956		 * |---|-------|---|
 957		 */
 958		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
 959		if (unlikely(!lva)) {
 960			/*
 961			 * For the percpu allocator we do not do any pre-allocation
 962			 * and leave it as it is. The reason is that it most likely
 963			 * never ends up with NE_FIT_TYPE splitting. In the case of
 964			 * percpu allocations, offsets and sizes are aligned to a
 965			 * fixed align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE
 966			 * are its main fitting cases.
 967			 *
 968			 * There are a few exceptions though; for example, the
 969			 * first allocation (early boot) when we have "one"
 970			 * big free space that has to be split.
 971			 */
 972			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
 973			if (!lva)
 974				return -1;
 975		}
 976
 977		/*
 978		 * Build the remainder.
 979		 */
 980		lva->va_start = va->va_start;
 981		lva->va_end = nva_start_addr;
 982
 983		/*
 984		 * Shrink this VA to remaining size.
 985		 */
 986		va->va_start = nva_start_addr + size;
 987	} else {
 988		return -1;
 989	}
 990
 991	if (type != FL_FIT_TYPE) {
 992		augment_tree_propagate_from(va);
 993
 994		if (lva)	/* type == NE_FIT_TYPE */
 995			insert_vmap_area_augment(lva, &va->rb_node,
 996				&free_vmap_area_root, &free_vmap_area_list);
 997	}
 998
 999	return 0;
1000}
1001
1002/*
1003 * Returns the start address of the newly allocated area on success.
1004 * Otherwise "vend" is returned, which indicates failure.
1005 */
1006static __always_inline unsigned long
1007__alloc_vmap_area(unsigned long size, unsigned long align,
1008	unsigned long vstart, unsigned long vend)
1009{
1010	unsigned long nva_start_addr;
1011	struct vmap_area *va;
1012	enum fit_type type;
1013	int ret;
1014
1015	va = find_vmap_lowest_match(size, align, vstart);
1016	if (unlikely(!va))
1017		return vend;
1018
1019	if (va->va_start > vstart)
1020		nva_start_addr = ALIGN(va->va_start, align);
1021	else
1022		nva_start_addr = ALIGN(vstart, align);
1023
1024	/* Check the "vend" restriction. */
1025	if (nva_start_addr + size > vend)
1026		return vend;
1027
1028	/* Classify what we have found. */
1029	type = classify_va_fit_type(va, nva_start_addr, size);
1030	if (WARN_ON_ONCE(type == NOTHING_FIT))
1031		return vend;
1032
1033	/* Update the free vmap_area. */
1034	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1035	if (ret)
1036		return vend;
1037
1038#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1039	find_vmap_lowest_match_check(size);
1040#endif
1041
1042	return nva_start_addr;
1043}
1044
1045/*
1046 * Allocate a region of KVA of the specified size and alignment,
1047 * between vstart and vend.
1048 */
1049static struct vmap_area *alloc_vmap_area(unsigned long size,
1050				unsigned long align,
1051				unsigned long vstart, unsigned long vend,
1052				int node, gfp_t gfp_mask)
1053{
1054	struct vmap_area *va, *pva;
1055	unsigned long addr;
1056	int purged = 0;
1057
1058	BUG_ON(!size);
1059	BUG_ON(offset_in_page(size));
1060	BUG_ON(!is_power_of_2(align));
1061
1062	if (unlikely(!vmap_initialized))
1063		return ERR_PTR(-EBUSY);
1064
1065	might_sleep();
1066
1067	va = kmem_cache_alloc_node(vmap_area_cachep,
1068			gfp_mask & GFP_RECLAIM_MASK, node);
1069	if (unlikely(!va))
1070		return ERR_PTR(-ENOMEM);
1071
1072	/*
1073	 * Only scan the relevant parts containing pointers to other objects
1074	 * to avoid false negatives.
1075	 */
1076	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
1077
1078retry:
1079	/*
1080	 * Preload this CPU with one extra vmap_area object to ensure
1081	 * that we have it available when fit type of free area is
1082	 * NE_FIT_TYPE.
1083	 *
1084	 * The preload is done in a non-atomic context, thus it allows us
1085	 * to use more permissive allocation masks, which are more stable
1086	 * under low-memory conditions and high memory pressure.
1087	 *
1088	 * Even if it fails we do not really care. Just proceed as it is;
1089	 * the "overflow" path will refill the cache we allocate from.
1090	 */
1091	preempt_disable();
1092	if (!__this_cpu_read(ne_fit_preload_node)) {
1093		preempt_enable();
1094		pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
1095		preempt_disable();
1096
1097		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
1098			if (pva)
1099				kmem_cache_free(vmap_area_cachep, pva);
1100		}
1101	}
1102
1103	spin_lock(&vmap_area_lock);
1104	preempt_enable();
1105
1106	/*
1107	 * If an allocation fails, the "vend" address is
1108	 * returned. Therefore trigger the overflow path.
1109	 */
1110	addr = __alloc_vmap_area(size, align, vstart, vend);
1111	if (unlikely(addr == vend))
1112		goto overflow;
1113
1114	va->va_start = addr;
1115	va->va_end = addr + size;
1116	va->vm = NULL;
1117	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1118
1119	spin_unlock(&vmap_area_lock);
1120
1121	BUG_ON(!IS_ALIGNED(va->va_start, align));
1122	BUG_ON(va->va_start < vstart);
1123	BUG_ON(va->va_end > vend);
1124
1125	return va;
1126
1127overflow:
1128	spin_unlock(&vmap_area_lock);
1129	if (!purged) {
1130		purge_vmap_area_lazy();
1131		purged = 1;
1132		goto retry;
1133	}
1134
1135	if (gfpflags_allow_blocking(gfp_mask)) {
1136		unsigned long freed = 0;
1137		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1138		if (freed > 0) {
1139			purged = 0;
1140			goto retry;
1141		}
1142	}
1143
1144	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1145		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1146			size);
1147
1148	kmem_cache_free(vmap_area_cachep, va);
1149	return ERR_PTR(-EBUSY);
1150}
1151
1152int register_vmap_purge_notifier(struct notifier_block *nb)
1153{
1154	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1155}
1156EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1157
1158int unregister_vmap_purge_notifier(struct notifier_block *nb)
1159{
1160	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1161}
1162EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
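
/*
 * Registration sketch (illustrative only; my_vmap_purge_cb() and
 * my_shrink_vmap_caches() are hypothetical). The notifier chain is
 * invoked from the allocation overflow path above; a callback reports
 * reclaimed space by bumping the passed counter, which makes the
 * allocator retry:
 *
 *	static int my_vmap_purge_cb(struct notifier_block *nb,
 *				    unsigned long action, void *data)
 *	{
 *		unsigned long *freed = data;
 *
 *		*freed += my_shrink_vmap_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vmap_purge_nb = {
 *		.notifier_call = my_vmap_purge_cb,
 *	};
 *
 *	register_vmap_purge_notifier(&my_vmap_purge_nb);
 */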
1163
1164static void __free_vmap_area(struct vmap_area *va)
1165{
1166	/*
1167	 * Remove from the busy tree/list.
1168	 */
1169	unlink_va(va, &vmap_area_root);
1170
1171	/*
1172	 * Merge VA with its neighbors, otherwise just add it.
1173	 */
1174	merge_or_add_vmap_area(va,
1175		&free_vmap_area_root, &free_vmap_area_list);
1176}
1177
1178/*
1179 * Free a region of KVA allocated by alloc_vmap_area
1180 */
1181static void free_vmap_area(struct vmap_area *va)
1182{
1183	spin_lock(&vmap_area_lock);
1184	__free_vmap_area(va);
1185	spin_unlock(&vmap_area_lock);
1186}
1187
1188/*
1189 * Clear the pagetable entries of a given vmap_area
1190 */
1191static void unmap_vmap_area(struct vmap_area *va)
1192{
1193	vunmap_page_range(va->va_start, va->va_end);
1194}
1195
1196/*
1197 * lazy_max_pages is the maximum amount of virtual address space we gather up
1198 * before attempting to purge with a TLB flush.
1199 *
1200 * There is a tradeoff here: a larger number will cover more kernel page tables
1201 * and take slightly longer to purge, but it will linearly reduce the number of
1202 * global TLB flushes that must be performed. It would seem natural to scale
1203 * this number up linearly with the number of CPUs (because vmapping activity
1204 * could also scale linearly with the number of CPUs), however it is likely
1205 * that in practice, workloads might be constrained in other ways that mean
1206 * vmap activity will not scale linearly with CPUs. Also, I want to be
1207 * conservative and not introduce a big latency on huge systems, so go with
1208 * a less aggressive log scale. It will still be an improvement over the old
1209 * code, and it will be simple to change the scale factor if we find that it
1210 * becomes a problem on bigger systems.
1211 */
1212static unsigned long lazy_max_pages(void)
1213{
1214	unsigned int log;
1215
1216	log = fls(num_online_cpus());
1217
1218	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1219}
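
/*
 * Worked example of the log scale above, assuming 4KiB pages: with
 * 16 CPUs online, fls(16) == 5, so lazy_max_pages() returns
 * 5 * (32MB / 4KB) = 40960 pages; up to 160MB worth of lazily-freed
 * mappings may accumulate before a purge and TLB flush is triggered.
 */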
1220
1221static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1222
1223/*
1224 * Serialize vmap purging.  There is no actual critical section protected
1225 * by this lock, but we want to avoid concurrent calls for performance
1226 * reasons and to make the pcpu_get_vm_areas more deterministic.
1227 */
1228static DEFINE_MUTEX(vmap_purge_lock);
1229
1230/* for per-CPU blocks */
1231static void purge_fragmented_blocks_allcpus(void);
1232
1233/*
1234 * called before a call to iounmap() if the caller wants vm_area_struct's
1235 * immediately freed.
1236 */
1237void set_iounmap_nonlazy(void)
1238{
1239	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1240}
1241
1242/*
1243 * Purges all lazily-freed vmap areas.
1244 */
1245static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1246{
1247	unsigned long resched_threshold;
1248	struct llist_node *valist;
1249	struct vmap_area *va;
1250	struct vmap_area *n_va;
1251
1252	lockdep_assert_held(&vmap_purge_lock);
1253
1254	valist = llist_del_all(&vmap_purge_list);
1255	if (unlikely(valist == NULL))
1256		return false;
1257
1258	/*
1259	 * First make sure the mappings are removed from all page-tables
1260	 * before they are freed.
1261	 */
1262	vmalloc_sync_all();
1263
1264	/*
1265	 * TODO: calculate the flush range without looping.
1266	 * The list can contain up to lazy_max_pages() elements.
1267	 */
1268	llist_for_each_entry(va, valist, purge_list) {
1269		if (va->va_start < start)
1270			start = va->va_start;
1271		if (va->va_end > end)
1272			end = va->va_end;
1273	}
1274
1275	flush_tlb_kernel_range(start, end);
1276	resched_threshold = lazy_max_pages() << 1;
1277
1278	spin_lock(&vmap_area_lock);
1279	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
1280		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1281
1282		/*
1283		 * Finally insert or merge lazily-freed area. It is
1284		 * detached and there is no need to "unlink" it from
1285		 * anything.
1286		 */
1287		merge_or_add_vmap_area(va,
1288			&free_vmap_area_root, &free_vmap_area_list);
1289
1290		atomic_long_sub(nr, &vmap_lazy_nr);
1291
1292		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1293			cond_resched_lock(&vmap_area_lock);
1294	}
1295	spin_unlock(&vmap_area_lock);
1296	return true;
1297}
1298
1299/*
1300 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1301 * is already purging.
1302 */
1303static void try_purge_vmap_area_lazy(void)
1304{
1305	if (mutex_trylock(&vmap_purge_lock)) {
1306		__purge_vmap_area_lazy(ULONG_MAX, 0);
1307		mutex_unlock(&vmap_purge_lock);
1308	}
1309}
1310
1311/*
1312 * Kick off a purge of the outstanding lazy areas.
1313 */
1314static void purge_vmap_area_lazy(void)
1315{
1316	mutex_lock(&vmap_purge_lock);
1317	purge_fragmented_blocks_allcpus();
1318	__purge_vmap_area_lazy(ULONG_MAX, 0);
1319	mutex_unlock(&vmap_purge_lock);
1320}
1321
1322/*
1323 * Free a vmap area, caller ensuring that the area has been unmapped
1324 * and flush_cache_vunmap has been called for the correct range
1325 * previously.
1326 */
1327static void free_vmap_area_noflush(struct vmap_area *va)
1328{
1329	unsigned long nr_lazy;
1330
1331	spin_lock(&vmap_area_lock);
1332	unlink_va(va, &vmap_area_root);
1333	spin_unlock(&vmap_area_lock);
1334
1335	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1336				PAGE_SHIFT, &vmap_lazy_nr);
1337
1338	/* After this point, we may free va at any time */
1339	llist_add(&va->purge_list, &vmap_purge_list);
1340
1341	if (unlikely(nr_lazy > lazy_max_pages()))
1342		try_purge_vmap_area_lazy();
1343}
1344
1345/*
1346 * Free and unmap a vmap area
1347 */
1348static void free_unmap_vmap_area(struct vmap_area *va)
1349{
1350	flush_cache_vunmap(va->va_start, va->va_end);
1351	unmap_vmap_area(va);
1352	if (debug_pagealloc_enabled())
1353		flush_tlb_kernel_range(va->va_start, va->va_end);
1354
1355	free_vmap_area_noflush(va);
1356}
1357
1358static struct vmap_area *find_vmap_area(unsigned long addr)
1359{
1360	struct vmap_area *va;
1361
1362	spin_lock(&vmap_area_lock);
1363	va = __find_vmap_area(addr);
1364	spin_unlock(&vmap_area_lock);
1365
1366	return va;
1367}
1368
1369/*** Per cpu kva allocator ***/
1370
1371/*
1372 * vmap space is limited especially on 32 bit architectures. Ensure there is
1373 * room for at least 16 percpu vmap blocks per CPU.
1374 */
1375/*
1376 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1377 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1378 * instead (we just need a rough idea)
1379 */
1380#if BITS_PER_LONG == 32
1381#define VMALLOC_SPACE		(128UL*1024*1024)
1382#else
1383#define VMALLOC_SPACE		(128UL*1024*1024*1024)
1384#endif
1385
1386#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1387#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1388#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1389#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1390#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1391#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1392#define VMAP_BBMAP_BITS		\
1393		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1394		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1395			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1396
1397#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
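
/*
 * Worked example of the sizing above, assuming a 64-bit kernel with
 * 4KiB pages and NR_CPUS == 64: VMALLOC_PAGES is guessed as
 * 128GiB / 4KiB = 32M pages, so VMALLOC_PAGES / 64 / 16 == 32768,
 * which VMAP_MIN() clamps to VMAP_BBMAP_BITS_MAX. VMAP_BBMAP_BITS
 * therefore ends up as 1024 and VMAP_BLOCK_SIZE as 4MiB.
 */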
1398
1399struct vmap_block_queue {
1400	spinlock_t lock;
1401	struct list_head free;
1402};
1403
1404struct vmap_block {
1405	spinlock_t lock;
1406	struct vmap_area *va;
1407	unsigned long free, dirty;
1408	unsigned long dirty_min, dirty_max; /*< dirty range */
1409	struct list_head free_list;
1410	struct rcu_head rcu_head;
1411	struct list_head purge;
1412};
1413
1414/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1415static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1416
1417/*
1418 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
1419 * in the free path. Could get rid of this if we change the API to return a
1420 * "cookie" from alloc, to be passed to free. But no big deal yet.
1421 */
1422static DEFINE_SPINLOCK(vmap_block_tree_lock);
1423static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
1424
1425/*
1426 * We should probably have a fallback mechanism to allocate virtual memory
1427 * out of partially filled vmap blocks. However vmap block sizing should be
1428 * fairly reasonable according to the vmalloc size, so it shouldn't be a
1429 * big problem.
1430 */
1431
1432static unsigned long addr_to_vb_idx(unsigned long addr)
1433{
1434	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1435	addr /= VMAP_BLOCK_SIZE;
1436	return addr;
1437}
1438
1439static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1440{
1441	unsigned long addr;
1442
1443	addr = va_start + (pages_off << PAGE_SHIFT);
1444	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1445	return (void *)addr;
1446}
1447
1448/**
1449 * new_vmap_block - allocates new vmap_block and occupies 2^order pages in this
1450 *                  block. Of course the number of pages can't exceed VMAP_BBMAP_BITS
1451 * @order:    how many 2^order pages should be occupied in newly allocated block
1452 * @gfp_mask: flags for the page level allocator
1453 *
1454 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1455 */
1456static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1457{
1458	struct vmap_block_queue *vbq;
1459	struct vmap_block *vb;
1460	struct vmap_area *va;
1461	unsigned long vb_idx;
1462	int node, err;
1463	void *vaddr;
1464
1465	node = numa_node_id();
1466
1467	vb = kmalloc_node(sizeof(struct vmap_block),
1468			gfp_mask & GFP_RECLAIM_MASK, node);
1469	if (unlikely(!vb))
1470		return ERR_PTR(-ENOMEM);
1471
1472	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1473					VMALLOC_START, VMALLOC_END,
1474					node, gfp_mask);
1475	if (IS_ERR(va)) {
1476		kfree(vb);
1477		return ERR_CAST(va);
1478	}
1479
1480	err = radix_tree_preload(gfp_mask);
1481	if (unlikely(err)) {
1482		kfree(vb);
1483		free_vmap_area(va);
1484		return ERR_PTR(err);
1485	}
1486
1487	vaddr = vmap_block_vaddr(va->va_start, 0);
1488	spin_lock_init(&vb->lock);
1489	vb->va = va;
1490	/* At least something should be left free */
1491	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1492	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1493	vb->dirty = 0;
1494	vb->dirty_min = VMAP_BBMAP_BITS;
1495	vb->dirty_max = 0;
1496	INIT_LIST_HEAD(&vb->free_list);
1497
1498	vb_idx = addr_to_vb_idx(va->va_start);
1499	spin_lock(&vmap_block_tree_lock);
1500	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
1501	spin_unlock(&vmap_block_tree_lock);
1502	BUG_ON(err);
1503	radix_tree_preload_end();
1504
1505	vbq = &get_cpu_var(vmap_block_queue);
1506	spin_lock(&vbq->lock);
1507	list_add_tail_rcu(&vb->free_list, &vbq->free);
1508	spin_unlock(&vbq->lock);
1509	put_cpu_var(vmap_block_queue);
1510
1511	return vaddr;
1512}
1513
1514static void free_vmap_block(struct vmap_block *vb)
1515{
1516	struct vmap_block *tmp;
1517	unsigned long vb_idx;
1518
1519	vb_idx = addr_to_vb_idx(vb->va->va_start);
1520	spin_lock(&vmap_block_tree_lock);
1521	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
1522	spin_unlock(&vmap_block_tree_lock);
1523	BUG_ON(tmp != vb);
1524
1525	free_vmap_area_noflush(vb->va);
1526	kfree_rcu(vb, rcu_head);
1527}
1528
1529static void purge_fragmented_blocks(int cpu)
1530{
1531	LIST_HEAD(purge);
1532	struct vmap_block *vb;
1533	struct vmap_block *n_vb;
1534	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1535
1536	rcu_read_lock();
1537	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1538
1539		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1540			continue;
1541
1542		spin_lock(&vb->lock);
1543		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1544			vb->free = 0; /* prevent further allocs after releasing lock */
1545			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1546			vb->dirty_min = 0;
1547			vb->dirty_max = VMAP_BBMAP_BITS;
1548			spin_lock(&vbq->lock);
1549			list_del_rcu(&vb->free_list);
1550			spin_unlock(&vbq->lock);
1551			spin_unlock(&vb->lock);
1552			list_add_tail(&vb->purge, &purge);
1553		} else
1554			spin_unlock(&vb->lock);
1555	}
1556	rcu_read_unlock();
1557
1558	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1559		list_del(&vb->purge);
1560		free_vmap_block(vb);
1561	}
1562}
1563
1564static void purge_fragmented_blocks_allcpus(void)
1565{
1566	int cpu;
1567
1568	for_each_possible_cpu(cpu)
1569		purge_fragmented_blocks(cpu);
1570}
1571
1572static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1573{
1574	struct vmap_block_queue *vbq;
1575	struct vmap_block *vb;
1576	void *vaddr = NULL;
1577	unsigned int order;
1578
1579	BUG_ON(offset_in_page(size));
1580	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1581	if (WARN_ON(size == 0)) {
1582		/*
1583		 * Allocating 0 bytes isn't what the caller wants, since
1584		 * get_order(0) returns a funny result. Just warn and terminate
1585		 * early.
1586		 */
1587		return NULL;
1588	}
1589	order = get_order(size);
1590
1591	rcu_read_lock();
1592	vbq = &get_cpu_var(vmap_block_queue);
1593	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1594		unsigned long pages_off;
1595
1596		spin_lock(&vb->lock);
1597		if (vb->free < (1UL << order)) {
1598			spin_unlock(&vb->lock);
1599			continue;
1600		}
1601
1602		pages_off = VMAP_BBMAP_BITS - vb->free;
1603		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1604		vb->free -= 1UL << order;
1605		if (vb->free == 0) {
1606			spin_lock(&vbq->lock);
1607			list_del_rcu(&vb->free_list);
1608			spin_unlock(&vbq->lock);
1609		}
1610
1611		spin_unlock(&vb->lock);
1612		break;
1613	}
1614
1615	put_cpu_var(vmap_block_queue);
1616	rcu_read_unlock();
1617
1618	/* Allocate new block if nothing was found */
1619	if (!vaddr)
1620		vaddr = new_vmap_block(order, gfp_mask);
1621
1622	return vaddr;
1623}
1624
1625static void vb_free(const void *addr, unsigned long size)
1626{
1627	unsigned long offset;
1628	unsigned long vb_idx;
1629	unsigned int order;
1630	struct vmap_block *vb;
1631
1632	BUG_ON(offset_in_page(size));
1633	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1634
1635	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1636
1637	order = get_order(size);
1638
1639	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
1640	offset >>= PAGE_SHIFT;
1641
1642	vb_idx = addr_to_vb_idx((unsigned long)addr);
1643	rcu_read_lock();
1644	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1645	rcu_read_unlock();
1646	BUG_ON(!vb);
1647
1648	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1649
1650	if (debug_pagealloc_enabled())
1651		flush_tlb_kernel_range((unsigned long)addr,
1652					(unsigned long)addr + size);
1653
1654	spin_lock(&vb->lock);
1655
1656	/* Expand dirty range */
1657	vb->dirty_min = min(vb->dirty_min, offset);
1658	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1659
1660	vb->dirty += 1UL << order;
1661	if (vb->dirty == VMAP_BBMAP_BITS) {
1662		BUG_ON(vb->free);
1663		spin_unlock(&vb->lock);
1664		free_vmap_block(vb);
1665	} else
1666		spin_unlock(&vb->lock);
1667}
1668
1669static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
1670{
1671	int cpu;
1672
1673	if (unlikely(!vmap_initialized))
1674		return;
1675
1676	might_sleep();
1677
1678	for_each_possible_cpu(cpu) {
1679		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1680		struct vmap_block *vb;
1681
1682		rcu_read_lock();
1683		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1684			spin_lock(&vb->lock);
1685			if (vb->dirty) {
1686				unsigned long va_start = vb->va->va_start;
1687				unsigned long s, e;
1688
1689				s = va_start + (vb->dirty_min << PAGE_SHIFT);
1690				e = va_start + (vb->dirty_max << PAGE_SHIFT);
1691
1692				start = min(s, start);
1693				end   = max(e, end);
1694
1695				flush = 1;
1696			}
1697			spin_unlock(&vb->lock);
1698		}
1699		rcu_read_unlock();
1700	}
1701
1702	mutex_lock(&vmap_purge_lock);
1703	purge_fragmented_blocks_allcpus();
1704	if (!__purge_vmap_area_lazy(start, end) && flush)
1705		flush_tlb_kernel_range(start, end);
1706	mutex_unlock(&vmap_purge_lock);
1707}
1708
1709/**
1710 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1711 *
1712 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1713 * to amortize TLB flushing overheads. What this means is that any page you
1714 * have now may, in a former life, have been mapped into a kernel virtual
1715 * address by the vmap layer and so there might be some CPUs with TLB entries
1716 * still referencing that page (additional to the regular 1:1 kernel mapping).
1717 *
1718 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1719 * be sure that none of the pages we have control over will have any aliases
1720 * from the vmap layer.
1721 */
1722void vm_unmap_aliases(void)
1723{
1724	unsigned long start = ULONG_MAX, end = 0;
1725	int flush = 0;
1726
1727	_vm_unmap_aliases(start, end, flush);
1728}
1729EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1730
1731/**
1732 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1733 * @mem: the pointer returned by vm_map_ram
1734 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1735 */
1736void vm_unmap_ram(const void *mem, unsigned int count)
1737{
1738	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1739	unsigned long addr = (unsigned long)mem;
1740	struct vmap_area *va;
1741
1742	might_sleep();
1743	BUG_ON(!addr);
1744	BUG_ON(addr < VMALLOC_START);
1745	BUG_ON(addr > VMALLOC_END);
1746	BUG_ON(!PAGE_ALIGNED(addr));
1747
1748	if (likely(count <= VMAP_MAX_ALLOC)) {
1749		debug_check_no_locks_freed(mem, size);
1750		vb_free(mem, size);
1751		return;
1752	}
1753
1754	va = find_vmap_area(addr);
1755	BUG_ON(!va);
1756	debug_check_no_locks_freed((void *)va->va_start,
1757				    (va->va_end - va->va_start));
1758	free_unmap_vmap_area(va);
1759}
1760EXPORT_SYMBOL(vm_unmap_ram);
1761
1762/**
1763 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1764 * @pages: an array of pointers to the pages to be mapped
1765 * @count: number of pages
1766 * @node: prefer to allocate data structures on this node
1767 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1768 *
1769 * If you use this function for less than VMAP_MAX_ALLOC pages, it could be
1770 * faster than vmap so it's good.  But if you mix long-life and short-life
1771 * objects with vm_map_ram(), it could consume lots of address space through
1772 * fragmentation (especially on a 32-bit machine).  You could see failures in
1773 * the end.  Please use this function for short-lived objects.
1774 *
1775 * Returns: a pointer to the address that has been mapped, or %NULL on failure
1776 */
1777void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1778{
1779	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1780	unsigned long addr;
1781	void *mem;
1782
1783	if (likely(count <= VMAP_MAX_ALLOC)) {
1784		mem = vb_alloc(size, GFP_KERNEL);
1785		if (IS_ERR(mem))
1786			return NULL;
1787		addr = (unsigned long)mem;
1788	} else {
1789		struct vmap_area *va;
1790		va = alloc_vmap_area(size, PAGE_SIZE,
1791				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1792		if (IS_ERR(va))
1793			return NULL;
1794
1795		addr = va->va_start;
1796		mem = (void *)addr;
1797	}
1798	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1799		vm_unmap_ram(mem, count);
1800		return NULL;
1801	}
1802	return mem;
1803}
1804EXPORT_SYMBOL(vm_map_ram);
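
/*
 * Usage sketch (illustrative only; "pages" and "nr" are hypothetical,
 * with the pages already allocated). The unmap must use the same page
 * count as the map:
 *
 *	void *mem = vm_map_ram(pages, nr, NUMA_NO_NODE, PAGE_KERNEL);
 *
 *	if (mem) {
 *		memset(mem, 0, (unsigned long)nr * PAGE_SIZE);
 *		vm_unmap_ram(mem, nr);
 *	}
 */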
1805
1806static struct vm_struct *vmlist __initdata;
1807
1808/**
1809 * vm_area_add_early - add vmap area early during boot
1810 * @vm: vm_struct to add
1811 *
1812 * This function is used to add fixed kernel vm area to vmlist before
1813 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
1814 * should contain proper values and the other fields should be zero.
1815 *
1816 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1817 */
1818void __init vm_area_add_early(struct vm_struct *vm)
1819{
1820	struct vm_struct *tmp, **p;
1821
1822	BUG_ON(vmap_initialized);
1823	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1824		if (tmp->addr >= vm->addr) {
1825			BUG_ON(tmp->addr < vm->addr + vm->size);
1826			break;
1827		} else
1828			BUG_ON(tmp->addr + tmp->size > vm->addr);
1829	}
1830	vm->next = *p;
1831	*p = vm;
1832}
1833
1834/**
1835 * vm_area_register_early - register vmap area early during boot
1836 * @vm: vm_struct to register
1837 * @align: requested alignment
1838 *
1839 * This function is used to register kernel vm area before
1840 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
1841 * proper values on entry and other fields should be zero.  On return,
1842 * vm->addr contains the allocated address.
1843 *
1844 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1845 */
1846void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1847{
1848	static size_t vm_init_off __initdata;
1849	unsigned long addr;
1850
1851	addr = ALIGN(VMALLOC_START + vm_init_off, align);
1852	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1853
1854	vm->addr = (void *)addr;
1855
1856	vm_area_add_early(vm);
1857}
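
/*
 * Boot-time sketch (illustrative only; "early_vm" is hypothetical,
 * mirroring how the percpu allocator uses this API). Only size and
 * flags are filled in; the address is chosen by
 * vm_area_register_early() and read back from the vm_struct:
 *
 *	static struct vm_struct early_vm;
 *
 *	early_vm.flags = VM_ALLOC;
 *	early_vm.size = SZ_2M;
 *	vm_area_register_early(&early_vm, PAGE_SIZE);
 *	pr_info("early vm area at %p\n", early_vm.addr);
 */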
1858
1859static void vmap_init_free_space(void)
1860{
1861	unsigned long vmap_start = 1;
1862	const unsigned long vmap_end = ULONG_MAX;
1863	struct vmap_area *busy, *free;
1864
1865	/*
1866	 *     B     F     B     B     B     F
1867	 * -|-----|.....|-----|-----|-----|.....|-
1868	 *  |           The KVA space           |
1869	 *  |<--------------------------------->|
1870	 */
1871	list_for_each_entry(busy, &vmap_area_list, list) {
1872		if (busy->va_start - vmap_start > 0) {
1873			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1874			if (!WARN_ON_ONCE(!free)) {
1875				free->va_start = vmap_start;
1876				free->va_end = busy->va_start;
1877
1878				insert_vmap_area_augment(free, NULL,
1879					&free_vmap_area_root,
1880						&free_vmap_area_list);
1881			}
1882		}
1883
1884		vmap_start = busy->va_end;
1885	}
1886
1887	if (vmap_end - vmap_start > 0) {
1888		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1889		if (!WARN_ON_ONCE(!free)) {
1890			free->va_start = vmap_start;
1891			free->va_end = vmap_end;
1892
1893			insert_vmap_area_augment(free, NULL,
1894				&free_vmap_area_root,
1895					&free_vmap_area_list);
1896		}
1897	}
1898}
1899
1900void __init vmalloc_init(void)
1901{
1902	struct vmap_area *va;
1903	struct vm_struct *tmp;
1904	int i;
1905
1906	/*
1907	 * Create the cache for vmap_area objects.
1908	 */
1909	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1910
1911	for_each_possible_cpu(i) {
1912		struct vmap_block_queue *vbq;
1913		struct vfree_deferred *p;
1914
1915		vbq = &per_cpu(vmap_block_queue, i);
1916		spin_lock_init(&vbq->lock);
1917		INIT_LIST_HEAD(&vbq->free);
1918		p = &per_cpu(vfree_deferred, i);
1919		init_llist_head(&p->list);
1920		INIT_WORK(&p->wq, free_work);
1921	}
1922
1923	/* Import existing vmlist entries. */
1924	for (tmp = vmlist; tmp; tmp = tmp->next) {
1925		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1926		if (WARN_ON_ONCE(!va))
1927			continue;
1928
1929		va->va_start = (unsigned long)tmp->addr;
1930		va->va_end = va->va_start + tmp->size;
1931		va->vm = tmp;
1932		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1933	}
1934
1935	/*
1936	 * Now we can initialize the free vmap space.
1937	 */
1938	vmap_init_free_space();
1939	vmap_initialized = true;
1940}
1941
1942/**
1943 * map_kernel_range_noflush - map kernel VM area with the specified pages
1944 * @addr: start of the VM area to map
1945 * @size: size of the VM area to map
1946 * @prot: page protection flags to use
1947 * @pages: pages to map
1948 *
1949 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
1950 * specify should have been allocated using get_vm_area() and its
1951 * friends.
1952 *
1953 * NOTE:
1954 * This function does NOT do any cache flushing.  The caller is
1955 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1956 * before calling this function.
1957 *
1958 * RETURNS:
1959 * The number of pages mapped on success, -errno on failure.
1960 */
1961int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1962			     pgprot_t prot, struct page **pages)
1963{
1964	return vmap_page_range_noflush(addr, addr + size, prot, pages);
1965}
1966
1967/**
1968 * unmap_kernel_range_noflush - unmap kernel VM area
1969 * @addr: start of the VM area to unmap
1970 * @size: size of the VM area to unmap
1971 *
1972 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
1973 * specify should have been allocated using get_vm_area() and its
1974 * friends.
1975 *
1976 * NOTE:
1977 * This function does NOT do any cache flushing.  The caller is
1978 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1979 * before calling this function and flush_tlb_kernel_range() after.
1980 */
1981void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1982{
1983	vunmap_page_range(addr, addr + size);
1984}
1985EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
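
/*
 * Editor's sketch (hypothetical, not from the original file): the flush
 * contract spelled out above.  This mirrors what unmap_kernel_range()
 * just below does in a single call.
 */
static void example_unmap_with_flushes(unsigned long addr, unsigned long size)
{
	unsigned long end = addr + size;

	flush_cache_vunmap(addr, end);		/* flush the vcache first */
	unmap_kernel_range_noflush(addr, size);	/* tear down the page tables */
	flush_tlb_kernel_range(addr, end);	/* then flush the TLB */
}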
1986
1987/**
1988 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1989 * @addr: start of the VM area to unmap
1990 * @size: size of the VM area to unmap
1991 *
1992 * Similar to unmap_kernel_range_noflush() but flushes the vcache before
1993 * the unmapping and the TLB after.
1994 */
1995void unmap_kernel_range(unsigned long addr, unsigned long size)
1996{
1997	unsigned long end = addr + size;
1998
1999	flush_cache_vunmap(addr, end);
2000	vunmap_page_range(addr, end);
2001	flush_tlb_kernel_range(addr, end);
2002}
2003EXPORT_SYMBOL_GPL(unmap_kernel_range);
2004
2005int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
2006{
2007	unsigned long addr = (unsigned long)area->addr;
2008	unsigned long end = addr + get_vm_area_size(area);
2009	int err;
2010
2011	err = vmap_page_range(addr, end, prot, pages);
2012
2013	return err > 0 ? 0 : err;
2014}
2015EXPORT_SYMBOL_GPL(map_vm_area);
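
/*
 * Editor's sketch (hypothetical): pairing get_vm_area() with map_vm_area(),
 * essentially what vmap() further below does.  "pages" is assumed to hold
 * at least size >> PAGE_SHIFT page pointers.
 */
static void *example_map_pages(struct page **pages, unsigned long size)
{
	struct vm_struct *area = get_vm_area(size, VM_MAP);

	if (!area)
		return NULL;

	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		vunmap(area->addr);	/* tears the area down again */
		return NULL;
	}
	return area->addr;
}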
2016
2017static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2018			      unsigned long flags, const void *caller)
2019{
2020	spin_lock(&vmap_area_lock);
2021	vm->flags = flags;
2022	vm->addr = (void *)va->va_start;
2023	vm->size = va->va_end - va->va_start;
2024	vm->caller = caller;
2025	va->vm = vm;
2026	spin_unlock(&vmap_area_lock);
2027}
2028
2029static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2030{
2031	/*
2032	 * Before removing VM_UNINITIALIZED,
2033	 * we should make sure that vm has proper values.
2034	 * Pair with smp_rmb() in show_numa_info().
2035	 */
2036	smp_wmb();
2037	vm->flags &= ~VM_UNINITIALIZED;
2038}
2039
2040static struct vm_struct *__get_vm_area_node(unsigned long size,
2041		unsigned long align, unsigned long flags, unsigned long start,
2042		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2043{
2044	struct vmap_area *va;
2045	struct vm_struct *area;
2046
2047	BUG_ON(in_interrupt());
2048	size = PAGE_ALIGN(size);
2049	if (unlikely(!size))
2050		return NULL;
2051
2052	if (flags & VM_IOREMAP)
2053		align = 1ul << clamp_t(int, get_count_order_long(size),
2054				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2055
2056	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2057	if (unlikely(!area))
2058		return NULL;
2059
2060	if (!(flags & VM_NO_GUARD))
2061		size += PAGE_SIZE;
2062
2063	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2064	if (IS_ERR(va)) {
2065		kfree(area);
2066		return NULL;
2067	}
2068
2069	setup_vmalloc_vm(area, va, flags, caller);
2070
2071	return area;
2072}
2073
2074struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
2075				unsigned long start, unsigned long end)
2076{
2077	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2078				  GFP_KERNEL, __builtin_return_address(0));
2079}
2080EXPORT_SYMBOL_GPL(__get_vm_area);
2081
2082struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2083				       unsigned long start, unsigned long end,
2084				       const void *caller)
2085{
2086	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2087				  GFP_KERNEL, caller);
2088}
2089
2090/**
2091 * get_vm_area - reserve a contiguous kernel virtual area
2092 * @size:	 size of the area
2093 * @flags:	 %VM_IOREMAP for I/O mappings or %VM_ALLOC
2094 *
2095 * Search for an area of @size in the kernel virtual mapping area,
2096 * and reserve it for our purposes.  Returns the area descriptor
2097 * on success or %NULL on failure.
2098 *
2099 * Return: the area descriptor on success or %NULL on failure.
2100 */
2101struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2102{
2103	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2104				  NUMA_NO_NODE, GFP_KERNEL,
2105				  __builtin_return_address(0));
2106}
2107
2108struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2109				const void *caller)
2110{
2111	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2112				  NUMA_NO_NODE, GFP_KERNEL, caller);
2113}
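
/*
 * Editor's sketch (hypothetical): reserving kernel virtual address space
 * without backing pages, as an ioremap-style caller would, then releasing
 * it with free_vm_area() (defined further below).
 */
static void example_reserve_and_release(void)
{
	struct vm_struct *area = get_vm_area(2 * PAGE_SIZE, VM_IOREMAP);

	if (!area)
		return;

	/* [area->addr, area->addr + area->size) is reserved but unmapped */
	free_vm_area(area);
}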
2114
2115/**
2116 * find_vm_area - find a continuous kernel virtual area
2117 * @addr:	  base address
2118 *
2119 * Search for the kernel VM area starting at @addr, and return it.
2120 * It is up to the caller to do all required locking to keep the returned
2121 * pointer valid.
2122 *
2123 * Return: pointer to the found area or %NULL on failure
2124 */
2125struct vm_struct *find_vm_area(const void *addr)
2126{
2127	struct vmap_area *va;
2128
2129	va = find_vmap_area((unsigned long)addr);
2130	if (!va)
2131		return NULL;
2132
2133	return va->vm;
2134}
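
/*
 * Editor's sketch (hypothetical): using find_vm_area() to ask how a
 * mapping was created.  The caller must ensure the area cannot be
 * freed concurrently while the flags are examined.
 */
static bool example_is_ioremap_addr(const void *addr)
{
	struct vm_struct *area = find_vm_area(addr);

	return area && (area->flags & VM_IOREMAP);
}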
2135
2136/**
2137 * remove_vm_area - find and remove a continuous kernel virtual area
2138 * @addr:	    base address
2139 *
2140 * Search for the kernel VM area starting at @addr, and remove it.
2141 * This function returns the found VM area, but using it is NOT safe
2142 * on SMP machines, except for its size or flags.
2143 *
2144 * Return: pointer to the found area or %NULL on failure
2145 */
2146struct vm_struct *remove_vm_area(const void *addr)
2147{
2148	struct vmap_area *va;
2149
2150	might_sleep();
2151
2152	spin_lock(&vmap_area_lock);
2153	va = __find_vmap_area((unsigned long)addr);
2154	if (va && va->vm) {
2155		struct vm_struct *vm = va->vm;
2156
2157		va->vm = NULL;
2158		spin_unlock(&vmap_area_lock);
2159
2160		kasan_free_shadow(vm);
2161		free_unmap_vmap_area(va);
2162
2163		return vm;
2164	}
2165
2166	spin_unlock(&vmap_area_lock);
2167	return NULL;
2168}
2169
2170static inline void set_area_direct_map(const struct vm_struct *area,
2171				       int (*set_direct_map)(struct page *page))
2172{
2173	int i;
2174
2175	for (i = 0; i < area->nr_pages; i++)
2176		if (page_address(area->pages[i]))
2177			set_direct_map(area->pages[i]);
2178}
2179
2180/* Handle removing and resetting vm mappings related to the vm_struct. */
2181static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2182{
2183	unsigned long start = ULONG_MAX, end = 0;
2184	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2185	int flush_dmap = 0;
2186	int i;
2187
2188	remove_vm_area(area->addr);
2189
2190	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2191	if (!flush_reset)
2192		return;
2193
2194	/*
2195	 * If not deallocating pages, just do the flush of the VM area and
2196	 * return.
2197	 */
2198	if (!deallocate_pages) {
2199		vm_unmap_aliases();
2200		return;
2201	}
2202
2203	/*
2204	 * If execution gets here, flush the vm mapping and reset the direct
2205	 * map. Find the start and end range of the direct mappings to make sure
2206	 * the vm_unmap_aliases() flush includes the direct map.
2207	 */
2208	for (i = 0; i < area->nr_pages; i++) {
2209		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2210		if (addr) {
2211			start = min(addr, start);
2212			end = max(addr + PAGE_SIZE, end);
2213			flush_dmap = 1;
2214		}
2215	}
2216
2217	/*
2218	 * Set direct map to something invalid so that it won't be cached if
2219	 * there are any accesses after the TLB flush, then flush the TLB and
2220	 * reset the direct map permissions to the default.
2221	 */
2222	set_area_direct_map(area, set_direct_map_invalid_noflush);
2223	_vm_unmap_aliases(start, end, flush_dmap);
2224	set_area_direct_map(area, set_direct_map_default_noflush);
2225}
2226
2227static void __vunmap(const void *addr, int deallocate_pages)
2228{
2229	struct vm_struct *area;
2230
2231	if (!addr)
2232		return;
2233
2234	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2235			addr))
2236		return;
2237
2238	area = find_vm_area(addr);
2239	if (unlikely(!area)) {
2240		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2241				addr);
2242		return;
2243	}
2244
2245	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2246	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2247
2248	vm_remove_mappings(area, deallocate_pages);
2249
2250	if (deallocate_pages) {
2251		int i;
2252
2253		for (i = 0; i < area->nr_pages; i++) {
2254			struct page *page = area->pages[i];
2255
2256			BUG_ON(!page);
2257			__free_pages(page, 0);
2258		}
2259		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2260
2261		kvfree(area->pages);
2262	}
2263
2264	kfree(area);
2265	return;
2266}
2267
2268static inline void __vfree_deferred(const void *addr)
2269{
2270	/*
2271	 * Use raw_cpu_ptr() because this can be called from preemptible
2272	 * context. Preemption is absolutely fine here, because the llist_add()
2273	 * implementation is lockless, so it works even if we are adding to
2274	 * another cpu's list.  schedule_work() should be fine with this too.
2275	 */
2276	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2277
2278	if (llist_add((struct llist_node *)addr, &p->list))
2279		schedule_work(&p->wq);
2280}
2281
2282/**
2283 * vfree_atomic - release memory allocated by vmalloc()
2284 * @addr:	  memory base address
2285 *
2286 * This one is just like vfree() but can be called in any atomic context
2287 * except NMIs.
2288 */
2289void vfree_atomic(const void *addr)
2290{
2291	BUG_ON(in_nmi());
2292
2293	kmemleak_free(addr);
2294
2295	if (!addr)
2296		return;
2297	__vfree_deferred(addr);
2298}
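
/*
 * Editor's sketch (hypothetical driver code, not from this file): freeing
 * from a context that may be atomic, e.g. a timer callback, where plain
 * vfree() could sleep.  "example_buf" is assumed to have been allocated
 * with vmalloc() earlier.
 */
static void *example_buf;

static void example_timer_fn(struct timer_list *unused)
{
	vfree_atomic(example_buf);	/* real work is deferred to a workqueue */
	example_buf = NULL;
}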
2299
2300static void __vfree(const void *addr)
2301{
2302	if (unlikely(in_interrupt()))
2303		__vfree_deferred(addr);
2304	else
2305		__vunmap(addr, 1);
2306}
2307
2308/**
2309 * vfree - release memory allocated by vmalloc()
2310 * @addr:  memory base address
2311 *
2312 * Free the virtually contiguous memory area starting at @addr, as
2313 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2314 * NULL, no operation is performed.
2315 *
2316 * Must not be called in NMI context (strictly speaking, only if we don't
2317 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2318 * conventions for vfree() arch-dependent would be a really bad idea)
2319 *
2320 * May sleep if called *not* from interrupt context.
2321 *
2322 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
2323 */
2324void vfree(const void *addr)
2325{
2326	BUG_ON(in_nmi());
2327
2328	kmemleak_free(addr);
2329
2330	might_sleep_if(!in_interrupt());
2331
2332	if (!addr)
2333		return;
2334
2335	__vfree(addr);
2336}
2337EXPORT_SYMBOL(vfree);
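
/*
 * Editor's sketch (hypothetical): the common vmalloc()/vfree() round trip.
 * vfree(NULL) is a no-op, so error paths need no NULL check before it.
 */
static int example_vmalloc_roundtrip(unsigned long nbytes)
{
	void *buf = vmalloc(nbytes);

	if (!buf)
		return -ENOMEM;

	memset(buf, 0xff, nbytes);	/* the whole buffer is usable here */
	vfree(buf);			/* may sleep, per the rules above */
	return 0;
}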
2338
2339/**
2340 * vunmap - release virtual mapping obtained by vmap()
2341 * @addr:   memory base address
2342 *
2343 * Free the virtually contiguous memory area starting at @addr,
2344 * which was created from the page array passed to vmap().
2345 *
2346 * Must not be called in interrupt context.
2347 */
2348void vunmap(const void *addr)
2349{
2350	BUG_ON(in_interrupt());
2351	might_sleep();
2352	if (addr)
2353		__vunmap(addr, 0);
2354}
2355EXPORT_SYMBOL(vunmap);
2356
2357/**
2358 * vmap - map an array of pages into virtually contiguous space
2359 * @pages: array of page pointers
2360 * @count: number of pages to map
2361 * @flags: vm_area->flags
2362 * @prot: page protection for the mapping
2363 *
2364 * Maps @count pages from @pages into contiguous kernel virtual
2365 * space.
2366 *
2367 * Return: the address of the area or %NULL on failure
2368 */
2369void *vmap(struct page **pages, unsigned int count,
2370	   unsigned long flags, pgprot_t prot)
2371{
2372	struct vm_struct *area;
2373	unsigned long size;		/* In bytes */
2374
2375	might_sleep();
2376
2377	if (count > totalram_pages())
2378		return NULL;
2379
2380	size = (unsigned long)count << PAGE_SHIFT;
2381	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2382	if (!area)
2383		return NULL;
2384
2385	if (map_vm_area(area, prot, pages)) {
2386		vunmap(area->addr);
2387		return NULL;
2388	}
2389
2390	return area->addr;
2391}
2392EXPORT_SYMBOL(vmap);
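
/*
 * Editor's sketch (hypothetical): making two separately allocated pages
 * virtually contiguous with vmap(), then tearing the mapping down again.
 * vunmap() removes the mapping but does not free the pages themselves.
 */
static int example_vmap_roundtrip(void)
{
	struct page *pages[2];
	void *addr = NULL;
	int i;

	for (i = 0; i < 2; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto out;
	}

	addr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
	if (addr) {
		/* pages[0] and pages[1] are now contiguous at "addr" */
		vunmap(addr);
	}
out:
	while (i--)
		__free_page(pages[i]);
	return addr ? 0 : -ENOMEM;
}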
2393
2394static void *__vmalloc_node(unsigned long size, unsigned long align,
2395			    gfp_t gfp_mask, pgprot_t prot,
2396			    int node, const void *caller);
2397static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2398				 pgprot_t prot, int node)
2399{
2400	struct page **pages;
2401	unsigned int nr_pages, array_size, i;
2402	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2403	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2404	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2405					0 :
2406					__GFP_HIGHMEM;
2407
2408	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2409	array_size = (nr_pages * sizeof(struct page *));
2410
2411	/* Please note that the recursion is strictly bounded. */
2412	if (array_size > PAGE_SIZE) {
2413		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
2414				PAGE_KERNEL, node, area->caller);
2415	} else {
2416		pages = kmalloc_node(array_size, nested_gfp, node);
2417	}
2418
2419	if (!pages) {
2420		remove_vm_area(area->addr);
2421		kfree(area);
2422		return NULL;
2423	}
2424
2425	area->pages = pages;
2426	area->nr_pages = nr_pages;
2427
2428	for (i = 0; i < area->nr_pages; i++) {
2429		struct page *page;
2430
2431		if (node == NUMA_NO_NODE)
2432			page = alloc_page(alloc_mask|highmem_mask);
2433		else
2434			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
2435
2436		if (unlikely(!page)) {
2437			/* Successfully allocated i pages, free them in __vunmap() */
2438			area->nr_pages = i;
2439			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2440			goto fail;
2441		}
2442		area->pages[i] = page;
2443		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
2444			cond_resched();
2445	}
2446	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2447
2448	if (map_vm_area(area, prot, pages))
2449		goto fail;
2450	return area->addr;
2451
2452fail:
2453	warn_alloc(gfp_mask, NULL,
2454			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
2455			  (area->nr_pages*PAGE_SIZE), area->size);
2456	__vfree(area->addr);
2457	return NULL;
2458}
2459
2460/**
2461 * __vmalloc_node_range - allocate virtually contiguous memory
2462 * @size:		  allocation size
2463 * @align:		  desired alignment
2464 * @start:		  vm area range start
2465 * @end:		  vm area range end
2466 * @gfp_mask:		  flags for the page level allocator
2467 * @prot:		  protection mask for the allocated pages
2468 * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2469 * @node:		  node to use for allocation or NUMA_NO_NODE
2470 * @caller:		  caller's return address
2471 *
2472 * Allocate enough pages to cover @size from the page level
2473 * allocator with @gfp_mask flags.  Map them into contiguous
2474 * kernel virtual space, using a pagetable protection of @prot.
2475 *
2476 * Return: the address of the area or %NULL on failure
2477 */
2478void *__vmalloc_node_range(unsigned long size, unsigned long align,
2479			unsigned long start, unsigned long end, gfp_t gfp_mask,
2480			pgprot_t prot, unsigned long vm_flags, int node,
2481			const void *caller)
2482{
2483	struct vm_struct *area;
2484	void *addr;
2485	unsigned long real_size = size;
2486
2487	size = PAGE_ALIGN(size);
2488	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
2489		goto fail;
2490
2491	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
2492				vm_flags, start, end, node, gfp_mask, caller);
2493	if (!area)
2494		goto fail;
2495
2496	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2497	if (!addr)
2498		return NULL;
2499
2500	/*
2501	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
2502	 * flag. It means that vm_struct is not fully initialized.
2503	 * Now, it is fully initialized, so remove this flag here.
2504	 */
2505	clear_vm_uninitialized_flag(area);
2506
2507	kmemleak_vmalloc(area, size, gfp_mask);
2508
2509	return addr;
2510
2511fail:
2512	warn_alloc(gfp_mask, NULL,
2513			  "vmalloc: allocation failure: %lu bytes", real_size);
2514	return NULL;
2515}
2516
2517/*
2518 * This is only for performance analysis of vmalloc and for stress
2519 * testing. It is required by the vmalloc test module; do not use it
2520 * for anything else.
2521 */
2522#ifdef CONFIG_TEST_VMALLOC_MODULE
2523EXPORT_SYMBOL_GPL(__vmalloc_node_range);
2524#endif
2525
2526/**
2527 * __vmalloc_node - allocate virtually contiguous memory
2528 * @size:	    allocation size
2529 * @align:	    desired alignment
2530 * @gfp_mask:	    flags for the page level allocator
2531 * @prot:	    protection mask for the allocated pages
2532 * @node:	    node to use for allocation or NUMA_NO_NODE
2533 * @caller:	    caller's return address
2534 *
2535 * Allocate enough pages to cover @size from the page level
2536 * allocator with @gfp_mask flags.  Map them into contiguous
2537 * kernel virtual space, using a pagetable protection of @prot.
2538 *
2539 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2540 * and __GFP_NOFAIL are not supported
2541 *
2542 * Any use of gfp flags outside of GFP_KERNEL should be discussed
2543 * with the mm people first.
2544 *
2545 * Return: pointer to the allocated memory or %NULL on error
2546 */
2547static void *__vmalloc_node(unsigned long size, unsigned long align,
2548			    gfp_t gfp_mask, pgprot_t prot,
2549			    int node, const void *caller)
2550{
2551	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2552				gfp_mask, prot, 0, node, caller);
2553}
2554
2555void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
2556{
2557	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
2558				__builtin_return_address(0));
2559}
2560EXPORT_SYMBOL(__vmalloc);
2561
2562static inline void *__vmalloc_node_flags(unsigned long size,
2563					int node, gfp_t flags)
2564{
2565	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
2566					node, __builtin_return_address(0));
2567}
2568
2569
2570void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
2571				  void *caller)
2572{
2573	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
2574}
2575
2576/**
2577 * vmalloc - allocate virtually contiguous memory
2578 * @size:    allocation size
2579 *
2580 * Allocate enough pages to cover @size from the page level
2581 * allocator and map them into contiguous kernel virtual space.
2582 *
2583 * For tight control over page level allocator and protection flags
2584 * use __vmalloc() instead.
2585 *
2586 * Return: pointer to the allocated memory or %NULL on error
2587 */
2588void *vmalloc(unsigned long size)
2589{
2590	return __vmalloc_node_flags(size, NUMA_NO_NODE,
2591				    GFP_KERNEL);
2592}
2593EXPORT_SYMBOL(vmalloc);
2594
2595/**
2596 * vzalloc - allocate virtually contiguous memory with zero fill
2597 * @size:    allocation size
2598 *
2599 * Allocate enough pages to cover @size from the page level
2600 * allocator and map them into contiguous kernel virtual space.
2601 * The memory allocated is set to zero.
2602 *
2603 * For tight control over page level allocator and protection flags
2604 * use __vmalloc() instead.
2605 *
2606 * Return: pointer to the allocated memory or %NULL on error
2607 */
2608void *vzalloc(unsigned long size)
2609{
2610	return __vmalloc_node_flags(size, NUMA_NO_NODE,
2611				GFP_KERNEL | __GFP_ZERO);
2612}
2613EXPORT_SYMBOL(vzalloc);
2614
2615/**
2616 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2617 * @size: allocation size
2618 *
2619 * The resulting memory area is zeroed so it can be mapped to userspace
2620 * without leaking data.
2621 *
2622 * Return: pointer to the allocated memory or %NULL on error
2623 */
2624void *vmalloc_user(unsigned long size)
2625{
2626	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2627				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2628				    VM_USERMAP, NUMA_NO_NODE,
2629				    __builtin_return_address(0));
2630}
2631EXPORT_SYMBOL(vmalloc_user);
2632
2633/**
2634 * vmalloc_node - allocate memory on a specific node
2635 * @size:	  allocation size
2636 * @node:	  numa node
2637 *
2638 * Allocate enough pages to cover @size from the page level
2639 * allocator and map them into contiguous kernel virtual space.
2640 *
2641 * For tight control over page level allocator and protection flags
2642 * use __vmalloc() instead.
2643 *
2644 * Return: pointer to the allocated memory or %NULL on error
2645 */
2646void *vmalloc_node(unsigned long size, int node)
2647{
2648	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
2649					node, __builtin_return_address(0));
2650}
2651EXPORT_SYMBOL(vmalloc_node);
2652
2653/**
2654 * vzalloc_node - allocate memory on a specific node with zero fill
2655 * @size:	allocation size
2656 * @node:	numa node
2657 *
2658 * Allocate enough pages to cover @size from the page level
2659 * allocator and map them into contiguous kernel virtual space.
2660 * The memory allocated is set to zero.
2661 *
2662 * For tight control over page level allocator and protection flags
2663 * use __vmalloc_node() instead.
2664 *
2665 * Return: pointer to the allocated memory or %NULL on error
2666 */
2667void *vzalloc_node(unsigned long size, int node)
2668{
2669	return __vmalloc_node_flags(size, node,
2670			 GFP_KERNEL | __GFP_ZERO);
2671}
2672EXPORT_SYMBOL(vzalloc_node);
2673
2674/**
2675 * vmalloc_exec - allocate virtually contiguous, executable memory
2676 * @size:	  allocation size
2677 *
2678 * Kernel-internal function to allocate enough pages to cover @size
2679 * from the page level allocator and map them into contiguous and
2680 * executable kernel virtual space.
2681 *
2682 * For tight control over page level allocator and protection flags
2683 * use __vmalloc() instead.
2684 *
2685 * Return: pointer to the allocated memory or %NULL on error
2686 */
2687void *vmalloc_exec(unsigned long size)
2688{
2689	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2690			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2691			NUMA_NO_NODE, __builtin_return_address(0));
2692}
2693
2694#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2695#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2696#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2697#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2698#else
2699/*
2700 * 64-bit systems should always have either DMA or DMA32 zones. For others,
2701 * GFP_DMA32 should do the right thing and use the normal zone.
2702 */
2703#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2704#endif
2705
2706/**
2707 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2708 * @size:	allocation size
2709 *
2710 * Allocate enough 32bit PA addressable pages to cover @size from the
2711 * page level allocator and map them into contiguous kernel virtual space.
2712 *
2713 * Return: pointer to the allocated memory or %NULL on error
2714 */
2715void *vmalloc_32(unsigned long size)
2716{
2717	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
2718			      NUMA_NO_NODE, __builtin_return_address(0));
2719}
2720EXPORT_SYMBOL(vmalloc_32);
2721
2722/**
2723 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2724 * @size:	     allocation size
2725 *
2726 * The resulting memory area is 32bit addressable and zeroed so it can be
2727 * mapped to userspace without leaking data.
2728 *
2729 * Return: pointer to the allocated memory or %NULL on error
2730 */
2731void *vmalloc_32_user(unsigned long size)
2732{
2733	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2734				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2735				    VM_USERMAP, NUMA_NO_NODE,
2736				    __builtin_return_address(0));
2737}
2738EXPORT_SYMBOL(vmalloc_32_user);
2739
2740/*
2741 * Small helper routine: copy contents from addr to buf.
2742 * If a page is not present, fill with zeroes.
2743 */
2744
2745static int aligned_vread(char *buf, char *addr, unsigned long count)
2746{
2747	struct page *p;
2748	int copied = 0;
2749
2750	while (count) {
2751		unsigned long offset, length;
2752
2753		offset = offset_in_page(addr);
2754		length = PAGE_SIZE - offset;
2755		if (length > count)
2756			length = count;
2757		p = vmalloc_to_page(addr);
2758		/*
2759		 * To access this _mapped_ area safely we would need a
2760		 * lock, but taking one here would add vmalloc()/vfree()
2761		 * overhead for this rarely used _debug_ interface.
2762		 * Instead we use kmap() and accept a small overhead in
2763		 * this access function.
2764		 */
2765		if (p) {
2766			/*
2767			 * we can expect USER0 is not used (see vread/vwrite's
2768			 * function description)
2769			 */
2770			void *map = kmap_atomic(p);
2771			memcpy(buf, map + offset, length);
2772			kunmap_atomic(map);
2773		} else
2774			memset(buf, 0, length);
2775
2776		addr += length;
2777		buf += length;
2778		copied += length;
2779		count -= length;
2780	}
2781	return copied;
2782}
2783
2784static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2785{
2786	struct page *p;
2787	int copied = 0;
2788
2789	while (count) {
2790		unsigned long offset, length;
2791
2792		offset = offset_in_page(addr);
2793		length = PAGE_SIZE - offset;
2794		if (length > count)
2795			length = count;
2796		p = vmalloc_to_page(addr);
2797		/*
2798		 * To access this _mapped_ area safely we would need a
2799		 * lock, but taking one here would add vmalloc()/vfree()
2800		 * overhead for this rarely used _debug_ interface.
2801		 * Instead we use kmap() and accept a small overhead in
2802		 * this access function.
2803		 */
2804		if (p) {
2805			/*
2806			 * we can expect USER0 is not used (see vread/vwrite's
2807			 * function description)
2808			 */
2809			void *map = kmap_atomic(p);
2810			memcpy(map + offset, buf, length);
2811			kunmap_atomic(map);
2812		}
2813		addr += length;
2814		buf += length;
2815		copied += length;
2816		count -= length;
2817	}
2818	return copied;
2819}
2820
2821/**
2822 * vread() - read vmalloc area in a safe way.
2823 * @buf:     buffer for reading data
2824 * @addr:    vm address.
2825 * @count:   number of bytes to be read.
2826 *
2827 * This function checks that addr is a valid vmalloc'ed area, and
2828 * copies data from that area to the given buffer. If the given memory
2829 * range of [addr...addr+count) includes some valid address, data is
2830 * copied to the proper area of @buf. Memory holes are zero-filled.
2831 * An IOREMAP area is treated as a memory hole and no copy is done.
2832 *
2833 * If [addr...addr+count) does not include any intersection with a live
2834 * vm_struct area, this returns 0. @buf should be a kernel buffer.
2835 *
2836 * Note: In usual ops, vread() is never necessary because the caller
2837 * should know the vmalloc() area is valid and can use memcpy().
2838 * This is for routines which have to access the vmalloc area without
2839 * any prior information, such as /dev/kmem.
2840 *
2841 * Return: number of bytes for which addr and buf should be increased
2842 * (same number as @count) or %0 if [addr...addr+count) doesn't
2843 * include any intersection with valid vmalloc area
2844 */
2845long vread(char *buf, char *addr, unsigned long count)
2846{
2847	struct vmap_area *va;
2848	struct vm_struct *vm;
2849	char *vaddr, *buf_start = buf;
2850	unsigned long buflen = count;
2851	unsigned long n;
2852
2853	/* Don't allow overflow */
2854	if ((unsigned long) addr + count < count)
2855		count = -(unsigned long) addr;
2856
2857	spin_lock(&vmap_area_lock);
2858	list_for_each_entry(va, &vmap_area_list, list) {
2859		if (!count)
2860			break;
2861
2862		if (!va->vm)
2863			continue;
2864
2865		vm = va->vm;
2866		vaddr = (char *) vm->addr;
2867		if (addr >= vaddr + get_vm_area_size(vm))
2868			continue;
2869		while (addr < vaddr) {
2870			if (count == 0)
2871				goto finished;
2872			*buf = '\0';
2873			buf++;
2874			addr++;
2875			count--;
2876		}
2877		n = vaddr + get_vm_area_size(vm) - addr;
2878		if (n > count)
2879			n = count;
2880		if (!(vm->flags & VM_IOREMAP))
2881			aligned_vread(buf, addr, n);
2882		else /* IOREMAP area is treated as memory hole */
2883			memset(buf, 0, n);
2884		buf += n;
2885		addr += n;
2886		count -= n;
2887	}
2888finished:
2889	spin_unlock(&vmap_area_lock);
2890
2891	if (buf == buf_start)
2892		return 0;
2893	/* zero-fill memory holes */
2894	if (buf != buf_start + buflen)
2895		memset(buf, 0, buflen - (buf - buf_start));
2896
2897	return buflen;
2898}
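
/*
 * Editor's sketch (hypothetical): a /dev/kmem-style caller of vread().
 * Holes inside [vaddr, vaddr + len) come back zero-filled; a return of
 * 0 means the range did not intersect any live vmalloc area at all.
 */
static long example_read_vmalloc(char *kbuf, char *vaddr, unsigned long len)
{
	long copied = vread(kbuf, vaddr, len);

	return copied ? copied : -ENXIO;
}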
2899
2900/**
2901 * vwrite() - write vmalloc area in a safe way.
2902 * @buf:      buffer for source data
2903 * @addr:     vm address.
2904 * @count:    number of bytes to be written.
2905 *
2906 * This function checks that addr is a valid vmalloc'ed area, and
2907 * copies data from a buffer to the given addr. If the specified range
2908 * of [addr...addr+count) includes some valid address, data is copied
2909 * from the proper area of @buf. Memory holes are skipped over.
2910 * An IOREMAP area is treated as a memory hole and no copy is done.
2911 *
2912 * If [addr...addr+count) does not include any intersection with a live
2913 * vm_struct area, this returns 0. @buf should be a kernel buffer.
2914 *
2915 * Note: In usual ops, vwrite() is never necessary because the caller
2916 * should know the vmalloc() area is valid and can use memcpy().
2917 * This is for routines which have to access the vmalloc area without
2918 * any prior information, such as /dev/kmem.
2919 *
2920 * Return: number of bytes for which addr and buf should be
2921 * increased (same number as @count) or %0 if [addr...addr+count)
2922 * doesn't include any intersection with valid vmalloc area
2923 */
2924long vwrite(char *buf, char *addr, unsigned long count)
2925{
2926	struct vmap_area *va;
2927	struct vm_struct *vm;
2928	char *vaddr;
2929	unsigned long n, buflen;
2930	int copied = 0;
2931
2932	/* Don't allow overflow */
2933	if ((unsigned long) addr + count < count)
2934		count = -(unsigned long) addr;
2935	buflen = count;
2936
2937	spin_lock(&vmap_area_lock);
2938	list_for_each_entry(va, &vmap_area_list, list) {
2939		if (!count)
2940			break;
2941
2942		if (!va->vm)
2943			continue;
2944
2945		vm = va->vm;
2946		vaddr = (char *) vm->addr;
2947		if (addr >= vaddr + get_vm_area_size(vm))
2948			continue;
2949		while (addr < vaddr) {
2950			if (count == 0)
2951				goto finished;
2952			buf++;
2953			addr++;
2954			count--;
2955		}
2956		n = vaddr + get_vm_area_size(vm) - addr;
2957		if (n > count)
2958			n = count;
2959		if (!(vm->flags & VM_IOREMAP)) {
2960			aligned_vwrite(buf, addr, n);
2961			copied++;
2962		}
2963		buf += n;
2964		addr += n;
2965		count -= n;
2966	}
2967finished:
2968	spin_unlock(&vmap_area_lock);
2969	if (!copied)
2970		return 0;
2971	return buflen;
2972}
2973
2974/**
2975 * remap_vmalloc_range_partial - map vmalloc pages to userspace
2976 * @vma:		vma to cover
2977 * @uaddr:		target user address to start at
2978 * @kaddr:		virtual address of vmalloc kernel memory
2979 * @size:		size of map area
2980 *
2981 * Returns:	0 for success, -Exxx on failure
2982 *
2983 * This function checks that @kaddr is a valid vmalloc'ed area,
2984 * and that it is big enough to cover the range starting at
2985 * @uaddr in @vma. Will return failure if these criteria aren't
2986 * met.
2987 *
2988 * Similar to remap_pfn_range() (see mm/memory.c)
2989 */
2990int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2991				void *kaddr, unsigned long size)
2992{
2993	struct vm_struct *area;
2994
2995	size = PAGE_ALIGN(size);
2996
2997	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2998		return -EINVAL;
2999
3000	area = find_vm_area(kaddr);
3001	if (!area)
3002		return -EINVAL;
3003
3004	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3005		return -EINVAL;
3006
3007	if (kaddr + size > area->addr + get_vm_area_size(area))
3008		return -EINVAL;
3009
3010	do {
3011		struct page *page = vmalloc_to_page(kaddr);
3012		int ret;
3013
3014		ret = vm_insert_page(vma, uaddr, page);
3015		if (ret)
3016			return ret;
3017
3018		uaddr += PAGE_SIZE;
3019		kaddr += PAGE_SIZE;
3020		size -= PAGE_SIZE;
3021	} while (size > 0);
3022
3023	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3024
3025	return 0;
3026}
3027EXPORT_SYMBOL(remap_vmalloc_range_partial);
3028
3029/**
3030 * remap_vmalloc_range - map vmalloc pages to userspace
3031 * @vma:		vma to cover (map full range of vma)
3032 * @addr:		vmalloc memory
3033 * @pgoff:		number of pages into addr before first page to map
3034 *
3035 * Returns:	0 for success, -Exxx on failure
3036 *
3037 * This function checks that addr is a valid vmalloc'ed area, and
3038 * that it is big enough to cover the vma. Will return failure if
3039 * these criteria aren't met.
3040 *
3041 * Similar to remap_pfn_range() (see mm/memory.c)
3042 */
3043int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3044						unsigned long pgoff)
3045{
3046	return remap_vmalloc_range_partial(vma, vma->vm_start,
3047					   addr + (pgoff << PAGE_SHIFT),
3048					   vma->vm_end - vma->vm_start);
3049}
3050EXPORT_SYMBOL(remap_vmalloc_range);
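
/*
 * Editor's sketch (hypothetical driver code): an mmap handler exporting a
 * buffer to userspace.  "example_ubuf" stands in for a buffer the driver
 * allocated earlier with vmalloc_user(), which sets the VM_USERMAP flag
 * that remap_vmalloc_range_partial() insists on above.
 */
static void *example_ubuf;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_ubuf, vma->vm_pgoff);
}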
3051
3052/*
3053 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
3054 * have one.
3055 *
3056 * The purpose of this function is to make sure the vmalloc area
3057 * mappings are identical in all page-tables in the system.
3058 */
3059void __weak vmalloc_sync_all(void)
3060{
3061}
3062
3063
3064static int f(pte_t *pte, unsigned long addr, void *data)
3065{
3066	pte_t ***p = data;
3067
3068	if (p) {
3069		*(*p) = pte;
3070		(*p)++;
3071	}
3072	return 0;
3073}
3074
3075/**
3076 * alloc_vm_area - allocate a range of kernel address space
3077 * @size:	   size of the area
3078 * @ptes:	   returns the PTEs for the address space
3079 *
3080 * Returns:	NULL on failure, vm_struct on success
3081 *
3082 * This function reserves a range of kernel address space, and
3083 * allocates pagetables to map that range.  No actual mappings
3084 * are created.
3085 *
3086 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
3087 * allocated for the VM area are returned.
3088 */
3089struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
3090{
3091	struct vm_struct *area;
3092
3093	area = get_vm_area_caller(size, VM_IOREMAP,
3094				__builtin_return_address(0));
3095	if (area == NULL)
3096		return NULL;
3097
3098	/*
3099	 * This ensures that page tables are constructed for this region
3100	 * of kernel virtual address space and mapped into init_mm.
3101	 */
3102	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3103				size, f, ptes ? &ptes : NULL)) {
3104		free_vm_area(area);
3105		return NULL;
3106	}
3107
3108	return area;
3109}
3110EXPORT_SYMBOL_GPL(alloc_vm_area);
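
/*
 * Editor's sketch (hypothetical), in the style of a paravirtualized guest:
 * reserve VA space with the page tables already constructed and collect
 * the PTE pointers so the mappings can be installed later by other means.
 * "ptes" is assumed to hold size >> PAGE_SHIFT entries.
 */
static struct vm_struct *example_reserve_with_ptes(size_t size, pte_t **ptes)
{
	struct vm_struct *area = alloc_vm_area(size, ptes);

	if (!area)
		return NULL;

	/* ptes[] now points at the init_mm PTEs that cover the area */
	return area;
}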
3111
3112void free_vm_area(struct vm_struct *area)
3113{
3114	struct vm_struct *ret;
3115	ret = remove_vm_area(area->addr);
3116	BUG_ON(ret != area);
3117	kfree(area);
3118}
3119EXPORT_SYMBOL_GPL(free_vm_area);
3120
3121#ifdef CONFIG_SMP
3122static struct vmap_area *node_to_va(struct rb_node *n)
3123{
3124	return rb_entry_safe(n, struct vmap_area, rb_node);
3125}
3126
3127/**
3128 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3129 * @addr: target address
3130 *
3131 * Returns: the vmap_area that contains @addr, if one is found.
3132 *   Otherwise the highest vmap_area below @addr is returned,
3133 *   i.e. one with va->va_start < addr && va->va_end < addr,
3134 *   or NULL if there are no areas before @addr.
3135 */
3136static struct vmap_area *
3137pvm_find_va_enclose_addr(unsigned long addr)
3138{
3139	struct vmap_area *va, *tmp;
3140	struct rb_node *n;
3141
3142	n = free_vmap_area_root.rb_node;
3143	va = NULL;
3144
3145	while (n) {
3146		tmp = rb_entry(n, struct vmap_area, rb_node);
3147		if (tmp->va_start <= addr) {
3148			va = tmp;
3149			if (tmp->va_end >= addr)
3150				break;
3151
3152			n = n->rb_right;
3153		} else {
3154			n = n->rb_left;
3155		}
3156	}
3157
3158	return va;
3159}
3160
3161/**
3162 * pvm_determine_end_from_reverse - find the highest aligned address
3163 * of free block below VMALLOC_END
3164 * @va:
3165 *   in - the VA we start the search from (reverse order);
3166 *   out - the VA with the highest aligned end address.
3167 *
3168 * Returns: determined end address within vmap_area
3169 */
3170static unsigned long
3171pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3172{
3173	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3174	unsigned long addr;
3175
3176	if (likely(*va)) {
3177		list_for_each_entry_from_reverse((*va),
3178				&free_vmap_area_list, list) {
3179			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3180			if ((*va)->va_start < addr)
3181				return addr;
3182		}
3183	}
3184
3185	return 0;
3186}
3187
3188/**
3189 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3190 * @offsets: array containing offset of each area
3191 * @sizes: array containing size of each area
3192 * @nr_vms: the number of areas to allocate
3193 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3194 *
3195 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3196 *	    vm_structs on success, %NULL on failure
3197 *
3198 * Percpu allocator wants to use congruent vm areas so that it can
3199 * maintain the offsets among percpu areas.  This function allocates
3200 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3201 * be scattered pretty far, distance between two areas easily going up
3202 * to gigabytes.  To avoid interacting with regular vmallocs, these
3203 * areas are allocated from top.
3204 *
3205 * Despite its complicated look, this allocator is rather simple. It
3206 * does everything top-down and scans free blocks from the end looking
3207 * for a matching base. While scanning, if any of the areas do not fit,
3208 * the base address is pulled down to fit that area. Scanning is repeated
3209 * until all the areas fit and then all necessary data structures are
3210 * inserted and the result is returned.
3211 */
3212struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3213				     const size_t *sizes, int nr_vms,
3214				     size_t align)
3215{
3216	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3217	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3218	struct vmap_area **vas, *va;
3219	struct vm_struct **vms;
3220	int area, area2, last_area, term_area;
3221	unsigned long base, start, size, end, last_end;
3222	bool purged = false;
3223	enum fit_type type;
3224
3225	/* verify parameters and allocate data structures */
3226	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3227	for (last_area = 0, area = 0; area < nr_vms; area++) {
3228		start = offsets[area];
3229		end = start + sizes[area];
3230
3231		/* is everything aligned properly? */
3232		BUG_ON(!IS_ALIGNED(offsets[area], align));
3233		BUG_ON(!IS_ALIGNED(sizes[area], align));
3234
3235		/* detect the area with the highest address */
3236		if (start > offsets[last_area])
3237			last_area = area;
3238
3239		for (area2 = area + 1; area2 < nr_vms; area2++) {
3240			unsigned long start2 = offsets[area2];
3241			unsigned long end2 = start2 + sizes[area2];
3242
3243			BUG_ON(start2 < end && start < end2);
3244		}
3245	}
3246	last_end = offsets[last_area] + sizes[last_area];
3247
3248	if (vmalloc_end - vmalloc_start < last_end) {
3249		WARN_ON(true);
3250		return NULL;
3251	}
3252
3253	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3254	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3255	if (!vas || !vms)
3256		goto err_free2;
3257
3258	for (area = 0; area < nr_vms; area++) {
3259		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3260		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3261		if (!vas[area] || !vms[area])
3262			goto err_free;
3263	}
3264retry:
3265	spin_lock(&vmap_area_lock);
3266
3267	/* start scanning - we scan from the top, begin with the last area */
3268	area = term_area = last_area;
3269	start = offsets[area];
3270	end = start + sizes[area];
3271
3272	va = pvm_find_va_enclose_addr(vmalloc_end);
3273	base = pvm_determine_end_from_reverse(&va, align) - end;
3274
3275	while (true) {
3276		/*
3277		 * base might have underflowed, add last_end before
3278		 * comparing.
3279		 */
3280		if (base + last_end < vmalloc_start + last_end)
3281			goto overflow;
3282
3283		/*
3284		 * Fitting base has not been found.
3285		 */
3286		if (va == NULL)
3287			goto overflow;
3288
3289		/*
3290		 * If the required width exceeds the current VA block, move
3291		 * base downwards and then recheck.
3292		 */
3293		if (base + end > va->va_end) {
3294			base = pvm_determine_end_from_reverse(&va, align) - end;
3295			term_area = area;
3296			continue;
3297		}
3298
3299		/*
3300		 * If this VA does not fit, move base downwards and recheck.
3301		 */
3302		if (base + start < va->va_start) {
3303			va = node_to_va(rb_prev(&va->rb_node));
3304			base = pvm_determine_end_from_reverse(&va, align) - end;
3305			term_area = area;
3306			continue;
3307		}
3308
3309		/*
3310		 * This area fits, move on to the previous one.  If
3311		 * the previous one is the terminal one, we're done.
3312		 */
3313		area = (area + nr_vms - 1) % nr_vms;
3314		if (area == term_area)
3315			break;
3316
3317		start = offsets[area];
3318		end = start + sizes[area];
3319		va = pvm_find_va_enclose_addr(base + end);
3320	}
3321
3322	/* we've found a fitting base, insert all va's */
3323	for (area = 0; area < nr_vms; area++) {
3324		int ret;
3325
3326		start = base + offsets[area];
3327		size = sizes[area];
3328
3329		va = pvm_find_va_enclose_addr(start);
3330		if (WARN_ON_ONCE(va == NULL))
3331			/* It is a BUG(), but trigger recovery instead. */
3332			goto recovery;
3333
3334		type = classify_va_fit_type(va, start, size);
3335		if (WARN_ON_ONCE(type == NOTHING_FIT))
3336			/* It is a BUG(), but trigger recovery instead. */
3337			goto recovery;
3338
3339		ret = adjust_va_to_fit_type(va, start, size, type);
3340		if (unlikely(ret))
3341			goto recovery;
3342
3343		/* Allocated area. */
3344		va = vas[area];
3345		va->va_start = start;
3346		va->va_end = start + size;
3347
3348		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
3349	}
3350
3351	spin_unlock(&vmap_area_lock);
3352
3353	/* insert all vm's */
3354	for (area = 0; area < nr_vms; area++)
3355		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
3356				 pcpu_get_vm_areas);
3357
3358	kfree(vas);
3359	return vms;
3360
3361recovery:
3362	/* Remove previously inserted areas. */
3363	while (area--) {
3364		__free_vmap_area(vas[area]);
3365		vas[area] = NULL;
3366	}
3367
3368overflow:
3369	spin_unlock(&vmap_area_lock);
3370	if (!purged) {
3371		purge_vmap_area_lazy();
3372		purged = true;
3373
3374		/* Before "retry", check if we recover. */
3375		for (area = 0; area < nr_vms; area++) {
3376			if (vas[area])
3377				continue;
3378
3379			vas[area] = kmem_cache_zalloc(
3380				vmap_area_cachep, GFP_KERNEL);
3381			if (!vas[area])
3382				goto err_free;
3383		}
3384
3385		goto retry;
3386	}
3387
3388err_free:
3389	for (area = 0; area < nr_vms; area++) {
3390		if (vas[area])
3391			kmem_cache_free(vmap_area_cachep, vas[area]);
3392
3393		kfree(vms[area]);
3394	}
3395err_free2:
3396	kfree(vas);
3397	kfree(vms);
3398	return NULL;
3399}
3400
3401/**
3402 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3403 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3404 * @nr_vms: the number of allocated areas
3405 *
3406 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3407 */
3408void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3409{
3410	int i;
3411
3412	for (i = 0; i < nr_vms; i++)
3413		free_vm_area(vms[i]);
3414	kfree(vms);
3415}
3416#endif	/* CONFIG_SMP */
3417
3418#ifdef CONFIG_PROC_FS
3419static void *s_start(struct seq_file *m, loff_t *pos)
3420	__acquires(&vmap_area_lock)
3421{
3422	spin_lock(&vmap_area_lock);
3423	return seq_list_start(&vmap_area_list, *pos);
3424}
3425
3426static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3427{
3428	return seq_list_next(p, &vmap_area_list, pos);
3429}
3430
3431static void s_stop(struct seq_file *m, void *p)
3432	__releases(&vmap_area_lock)
3433{
3434	spin_unlock(&vmap_area_lock);
3435}
3436
3437static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3438{
3439	if (IS_ENABLED(CONFIG_NUMA)) {
3440		unsigned int nr, *counters = m->private;
3441
3442		if (!counters)
3443			return;
3444
3445		if (v->flags & VM_UNINITIALIZED)
3446			return;
3447		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3448		smp_rmb();
3449
3450		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3451
3452		for (nr = 0; nr < v->nr_pages; nr++)
3453			counters[page_to_nid(v->pages[nr])]++;
3454
3455		for_each_node_state(nr, N_HIGH_MEMORY)
3456			if (counters[nr])
3457				seq_printf(m, " N%u=%u", nr, counters[nr]);
3458	}
3459}
3460
3461static void show_purge_info(struct seq_file *m)
3462{
3463	struct llist_node *head;
3464	struct vmap_area *va;
3465
3466	head = READ_ONCE(vmap_purge_list.first);
3467	if (head == NULL)
3468		return;
3469
3470	llist_for_each_entry(va, head, purge_list) {
3471		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3472			(void *)va->va_start, (void *)va->va_end,
3473			va->va_end - va->va_start);
3474	}
3475}
3476
3477static int s_show(struct seq_file *m, void *p)
3478{
3479	struct vmap_area *va;
3480	struct vm_struct *v;
3481
3482	va = list_entry(p, struct vmap_area, list);
3483
3484	/*
3485	 * s_show can race with remove_vm_area(): !va->vm means the vmap
3486	 * area is being torn down, or it is a vm_map_ram allocation.
3487	 */
3488	if (!va->vm) {
3489		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3490			(void *)va->va_start, (void *)va->va_end,
3491			va->va_end - va->va_start);
3492
3493		return 0;
3494	}
3495
3496	v = va->vm;
3497
3498	seq_printf(m, "0x%pK-0x%pK %7ld",
3499		v->addr, v->addr + v->size, v->size);
3500
3501	if (v->caller)
3502		seq_printf(m, " %pS", v->caller);
3503
3504	if (v->nr_pages)
3505		seq_printf(m, " pages=%d", v->nr_pages);
3506
3507	if (v->phys_addr)
3508		seq_printf(m, " phys=%pa", &v->phys_addr);
3509
3510	if (v->flags & VM_IOREMAP)
3511		seq_puts(m, " ioremap");
3512
3513	if (v->flags & VM_ALLOC)
3514		seq_puts(m, " vmalloc");
3515
3516	if (v->flags & VM_MAP)
3517		seq_puts(m, " vmap");
3518
3519	if (v->flags & VM_USERMAP)
3520		seq_puts(m, " user");
3521
3522	if (v->flags & VM_DMA_COHERENT)
3523		seq_puts(m, " dma-coherent");
3524
3525	if (is_vmalloc_addr(v->pages))
3526		seq_puts(m, " vpages");
3527
3528	show_numa_info(m, v);
3529	seq_putc(m, '\n');
3530
3531	/*
3532	 * As a final step, dump "unpurged" areas. Note
3533	 * that the entire "/proc/vmallocinfo" output will
3534	 * not be address-sorted, because the purge list is not
3535	 * sorted.
3536	 */
3537	if (list_is_last(&va->list, &vmap_area_list))
3538		show_purge_info(m);
3539
3540	return 0;
3541}
3542
3543static const struct seq_operations vmalloc_op = {
3544	.start = s_start,
3545	.next = s_next,
3546	.stop = s_stop,
3547	.show = s_show,
3548};
3549
3550static int __init proc_vmalloc_init(void)
3551{
3552	if (IS_ENABLED(CONFIG_NUMA))
3553		proc_create_seq_private("vmallocinfo", 0400, NULL,
3554				&vmalloc_op,
3555				nr_node_ids * sizeof(unsigned int), NULL);
3556	else
3557		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3558	return 0;
3559}
3560module_init(proc_vmalloc_init);
3561
3562#endif
v3.15
 
   1/*
   2 *  linux/mm/vmalloc.c
   3 *
   4 *  Copyright (C) 1993  Linus Torvalds
   5 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   6 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   7 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   8 *  Numa awareness, Christoph Lameter, SGI, June 2005
   9 */
  10
  11#include <linux/vmalloc.h>
  12#include <linux/mm.h>
  13#include <linux/module.h>
  14#include <linux/highmem.h>
  15#include <linux/sched.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/interrupt.h>
  19#include <linux/proc_fs.h>
  20#include <linux/seq_file.h>
 
  21#include <linux/debugobjects.h>
  22#include <linux/kallsyms.h>
  23#include <linux/list.h>
 
  24#include <linux/rbtree.h>
  25#include <linux/radix-tree.h>
  26#include <linux/rcupdate.h>
  27#include <linux/pfn.h>
  28#include <linux/kmemleak.h>
  29#include <linux/atomic.h>
  30#include <linux/compiler.h>
  31#include <linux/llist.h>
 
 
  32
  33#include <asm/uaccess.h>
  34#include <asm/tlbflush.h>
  35#include <asm/shmparam.h>
  36
 
 
  37struct vfree_deferred {
  38	struct llist_head list;
  39	struct work_struct wq;
  40};
  41static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
  42
  43static void __vunmap(const void *, int);
  44
  45static void free_work(struct work_struct *w)
  46{
  47	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
  48	struct llist_node *llnode = llist_del_all(&p->list);
  49	while (llnode) {
  50		void *p = llnode;
  51		llnode = llist_next(llnode);
  52		__vunmap(p, 1);
  53	}
  54}
  55
  56/*** Page table manipulation functions ***/
  57
  58static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
  59{
  60	pte_t *pte;
  61
  62	pte = pte_offset_kernel(pmd, addr);
  63	do {
  64		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
  65		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
  66	} while (pte++, addr += PAGE_SIZE, addr != end);
  67}
  68
  69static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
  70{
  71	pmd_t *pmd;
  72	unsigned long next;
  73
  74	pmd = pmd_offset(pud, addr);
  75	do {
  76		next = pmd_addr_end(addr, end);
 
 
  77		if (pmd_none_or_clear_bad(pmd))
  78			continue;
  79		vunmap_pte_range(pmd, addr, next);
  80	} while (pmd++, addr = next, addr != end);
  81}
  82
  83static void vunmap_pud_range(pgd_t *pgd, unsigned long addr, unsigned long end)
  84{
  85	pud_t *pud;
  86	unsigned long next;
  87
  88	pud = pud_offset(pgd, addr);
  89	do {
  90		next = pud_addr_end(addr, end);
 
 
  91		if (pud_none_or_clear_bad(pud))
  92			continue;
  93		vunmap_pmd_range(pud, addr, next);
  94	} while (pud++, addr = next, addr != end);
  95}
  96
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  97static void vunmap_page_range(unsigned long addr, unsigned long end)
  98{
  99	pgd_t *pgd;
 100	unsigned long next;
 101
 102	BUG_ON(addr >= end);
 103	pgd = pgd_offset_k(addr);
 104	do {
 105		next = pgd_addr_end(addr, end);
 106		if (pgd_none_or_clear_bad(pgd))
 107			continue;
 108		vunmap_pud_range(pgd, addr, next);
 109	} while (pgd++, addr = next, addr != end);
 110}
 111
 112static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 113		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 114{
 115	pte_t *pte;
 116
 117	/*
 118	 * nr is a running index into the array which helps higher level
 119	 * callers keep track of where we're up to.
 120	 */
 121
 122	pte = pte_alloc_kernel(pmd, addr);
 123	if (!pte)
 124		return -ENOMEM;
 125	do {
 126		struct page *page = pages[*nr];
 127
 128		if (WARN_ON(!pte_none(*pte)))
 129			return -EBUSY;
 130		if (WARN_ON(!page))
 131			return -ENOMEM;
 132		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 133		(*nr)++;
 134	} while (pte++, addr += PAGE_SIZE, addr != end);
 135	return 0;
 136}
 137
 138static int vmap_pmd_range(pud_t *pud, unsigned long addr,
 139		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 140{
 141	pmd_t *pmd;
 142	unsigned long next;
 143
 144	pmd = pmd_alloc(&init_mm, pud, addr);
 145	if (!pmd)
 146		return -ENOMEM;
 147	do {
 148		next = pmd_addr_end(addr, end);
 149		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
 150			return -ENOMEM;
 151	} while (pmd++, addr = next, addr != end);
 152	return 0;
 153}
 154
 155static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
 156		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 157{
 158	pud_t *pud;
 159	unsigned long next;
 160
 161	pud = pud_alloc(&init_mm, pgd, addr);
 162	if (!pud)
 163		return -ENOMEM;
 164	do {
 165		next = pud_addr_end(addr, end);
 166		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
 167			return -ENOMEM;
 168	} while (pud++, addr = next, addr != end);
 169	return 0;
 170}
 171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 172/*
 173 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 174 * will have pfns corresponding to the "pages" array.
 175 *
 176 * Ie. pte at addr+N*PAGE_SIZE shall point to pfn corresponding to pages[N]
 177 */
 178static int vmap_page_range_noflush(unsigned long start, unsigned long end,
 179				   pgprot_t prot, struct page **pages)
 180{
 181	pgd_t *pgd;
 182	unsigned long next;
 183	unsigned long addr = start;
 184	int err = 0;
 185	int nr = 0;
 186
 187	BUG_ON(addr >= end);
 188	pgd = pgd_offset_k(addr);
 189	do {
 190		next = pgd_addr_end(addr, end);
 191		err = vmap_pud_range(pgd, addr, next, prot, pages, &nr);
 192		if (err)
 193			return err;
 194	} while (pgd++, addr = next, addr != end);
 195
 196	return nr;
 197}
 198
 199static int vmap_page_range(unsigned long start, unsigned long end,
 200			   pgprot_t prot, struct page **pages)
 201{
 202	int ret;
 203
 204	ret = vmap_page_range_noflush(start, end, prot, pages);
 205	flush_cache_vmap(start, end);
 206	return ret;
 207}
 208
 209int is_vmalloc_or_module_addr(const void *x)
 210{
 211	/*
 212	 * ARM, x86-64 and sparc64 put modules in a special place,
 213	 * and fall back on vmalloc() if that fails. Others
 214	 * just put it in the vmalloc space.
 215	 */
 216#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
 217	unsigned long addr = (unsigned long)x;
 218	if (addr >= MODULES_VADDR && addr < MODULES_END)
 219		return 1;
 220#endif
 221	return is_vmalloc_addr(x);
 222}
 223
 224/*
 225 * Walk a vmap address to the struct page it maps.
 226 */
 227struct page *vmalloc_to_page(const void *vmalloc_addr)
 228{
 229	unsigned long addr = (unsigned long) vmalloc_addr;
 230	struct page *page = NULL;
 231	pgd_t *pgd = pgd_offset_k(addr);
 
 
 
 
 232
 233	/*
 234	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
 235	 * architectures that do not vmalloc module space
 236	 */
 237	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 238
 239	if (!pgd_none(*pgd)) {
 240		pud_t *pud = pud_offset(pgd, addr);
 241		if (!pud_none(*pud)) {
 242			pmd_t *pmd = pmd_offset(pud, addr);
 243			if (!pmd_none(*pmd)) {
 244				pte_t *ptep, pte;
 245
 246				ptep = pte_offset_map(pmd, addr);
 247				pte = *ptep;
 248				if (pte_present(pte))
 249					page = pte_page(pte);
 250				pte_unmap(ptep);
 251			}
 252		}
 253	}
 254	return page;
 255}
 256EXPORT_SYMBOL(vmalloc_to_page);
 257
 258/*
 259 * Map a vmalloc()-space virtual address to the physical page frame number.
 260 */
 261unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 262{
 263	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 264}
 265EXPORT_SYMBOL(vmalloc_to_pfn);
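/*
 * Editor's usage sketch (not part of this file): vmalloc memory is only
 * virtually contiguous, so code that needs a struct page or a PFN must
 * translate each page individually with the helpers above. The function
 * name below is hypothetical.
 */
static void sketch_walk_vmalloc_pages(void *buf, unsigned long size)
{
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE) {
		struct page *page = vmalloc_to_page(buf + off);

		pr_info("vaddr %p -> pfn %lu\n", buf + off, page_to_pfn(page));
	}
}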
 266
 267
 268/*** Global kva allocator ***/
 269
 270#define VM_LAZY_FREE	0x01
 271#define VM_LAZY_FREEING	0x02
 272#define VM_VM_AREA	0x04
 273
 274static DEFINE_SPINLOCK(vmap_area_lock);
 275/* Export for kexec only */
 276LIST_HEAD(vmap_area_list);
 277static struct rb_root vmap_area_root = RB_ROOT;
 278
 279/* The vmap cache globals are protected by vmap_area_lock */
 280static struct rb_node *free_vmap_cache;
 281static unsigned long cached_hole_size;
 282static unsigned long cached_vstart;
 283static unsigned long cached_align;
 284
 285static unsigned long vmap_area_pcpu_hole;
 286
 287static struct vmap_area *__find_vmap_area(unsigned long addr)
 288{
 289	struct rb_node *n = vmap_area_root.rb_node;
 290
 291	while (n) {
 292		struct vmap_area *va;
 293
 294		va = rb_entry(n, struct vmap_area, rb_node);
 295		if (addr < va->va_start)
 296			n = n->rb_left;
 297		else if (addr >= va->va_end)
 298			n = n->rb_right;
 299		else
 300			return va;
 301	}
 302
 303	return NULL;
 304}
 305
 306static void __insert_vmap_area(struct vmap_area *va)
 307{
 308	struct rb_node **p = &vmap_area_root.rb_node;
 309	struct rb_node *parent = NULL;
 310	struct rb_node *tmp;
 311
 312	while (*p) {
 313		struct vmap_area *tmp_va;
 314
 315		parent = *p;
 316		tmp_va = rb_entry(parent, struct vmap_area, rb_node);
 317		if (va->va_start < tmp_va->va_end)
 318			p = &(*p)->rb_left;
 319		else if (va->va_end > tmp_va->va_start)
 320			p = &(*p)->rb_right;
 321		else
 322			BUG();
 323	}
 324
 325	rb_link_node(&va->rb_node, parent, p);
 326	rb_insert_color(&va->rb_node, &vmap_area_root);
 327
 328	/* address-sort this list */
 329	tmp = rb_prev(&va->rb_node);
 330	if (tmp) {
 331		struct vmap_area *prev;
 332		prev = rb_entry(tmp, struct vmap_area, rb_node);
 333		list_add_rcu(&va->list, &prev->list);
 334	} else
 335		list_add_rcu(&va->list, &vmap_area_list);
 336}
 337
 338static void purge_vmap_area_lazy(void);
 339
 340/*
 341 * Allocate a region of KVA of the specified size and alignment,
 342 * within the range [vstart, vend).
 343 */
 344static struct vmap_area *alloc_vmap_area(unsigned long size,
 345				unsigned long align,
 346				unsigned long vstart, unsigned long vend,
 347				int node, gfp_t gfp_mask)
 348{
 349	struct vmap_area *va;
 350	struct rb_node *n;
 351	unsigned long addr;
 352	int purged = 0;
 353	struct vmap_area *first;
 354
 355	BUG_ON(!size);
 356	BUG_ON(size & ~PAGE_MASK);
 357	BUG_ON(!is_power_of_2(align));
 358
 359	va = kmalloc_node(sizeof(struct vmap_area),
 360			gfp_mask & GFP_RECLAIM_MASK, node);
 361	if (unlikely(!va))
 362		return ERR_PTR(-ENOMEM);
 363
 364	/*
 365	 * Only scan the relevant parts containing pointers to other objects
 366	 * to avoid false negatives.
 367	 */
 368	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
 369
 370retry:
 371	spin_lock(&vmap_area_lock);
 372	/*
 373	 * Invalidate cache if we have more permissive parameters.
 374	 * cached_hole_size notes the largest hole noticed _below_
 375	 * the vmap_area cached in free_vmap_cache: if size fits
 376	 * into that hole, we want to scan from vstart to reuse
 377	 * the hole instead of allocating above free_vmap_cache.
 378	 * Note that __free_vmap_area may update free_vmap_cache
 379	 * without updating cached_hole_size or cached_align.
 380	 */
 381	if (!free_vmap_cache ||
 382			size < cached_hole_size ||
 383			vstart < cached_vstart ||
 384			align < cached_align) {
 385nocache:
 386		cached_hole_size = 0;
 387		free_vmap_cache = NULL;
 388	}
 389	/* record if we encounter less permissive parameters */
 390	cached_vstart = vstart;
 391	cached_align = align;
 392
 393	/* find starting point for our search */
 394	if (free_vmap_cache) {
 395		first = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
 396		addr = ALIGN(first->va_end, align);
 397		if (addr < vstart)
 398			goto nocache;
 399		if (addr + size < addr)
 400			goto overflow;
 401
 402	} else {
 403		addr = ALIGN(vstart, align);
 404		if (addr + size < addr)
 405			goto overflow;
 406
 407		n = vmap_area_root.rb_node;
 408		first = NULL;
 409
 410		while (n) {
 411			struct vmap_area *tmp;
 412			tmp = rb_entry(n, struct vmap_area, rb_node);
 413			if (tmp->va_end >= addr) {
 414				first = tmp;
 415				if (tmp->va_start <= addr)
 416					break;
 417				n = n->rb_left;
 418			} else
 419				n = n->rb_right;
 420		}
 421
 422		if (!first)
 423			goto found;
 424	}
 425
 426	/* from the starting point, walk areas until a suitable hole is found */
 427	while (addr + size > first->va_start && addr + size <= vend) {
 428		if (addr + cached_hole_size < first->va_start)
 429			cached_hole_size = first->va_start - addr;
 430		addr = ALIGN(first->va_end, align);
 431		if (addr + size < addr)
 432			goto overflow;
 433
 434		if (list_is_last(&first->list, &vmap_area_list))
 435			goto found;
 436
 437		first = list_entry(first->list.next,
 438				struct vmap_area, list);
 439	}
 440
 441found:
 442	if (addr + size > vend)
 443		goto overflow;
 444
 445	va->va_start = addr;
 446	va->va_end = addr + size;
 447	va->flags = 0;
 448	__insert_vmap_area(va);
 449	free_vmap_cache = &va->rb_node;
 450	spin_unlock(&vmap_area_lock);
 451
 452	BUG_ON(va->va_start & (align-1));
 453	BUG_ON(va->va_start < vstart);
 454	BUG_ON(va->va_end > vend);
 455
 456	return va;
 457
 458overflow:
 459	spin_unlock(&vmap_area_lock);
 460	if (!purged) {
 461		purge_vmap_area_lazy();
 462		purged = 1;
 463		goto retry;
 464	}
 465	if (printk_ratelimit())
 466		printk(KERN_WARNING
 467			"vmap allocation for size %lu failed: "
 468			"use vmalloc=<size> to increase size.\n", size);
 469	kfree(va);
 470	return ERR_PTR(-EBUSY);
 471}
 472
 473static void __free_vmap_area(struct vmap_area *va)
 474{
 475	BUG_ON(RB_EMPTY_NODE(&va->rb_node));
 476
 477	if (free_vmap_cache) {
 478		if (va->va_end < cached_vstart) {
 479			free_vmap_cache = NULL;
 480		} else {
 481			struct vmap_area *cache;
 482			cache = rb_entry(free_vmap_cache, struct vmap_area, rb_node);
 483			if (va->va_start <= cache->va_start) {
 484				free_vmap_cache = rb_prev(&va->rb_node);
 485				/*
 486				 * We don't try to update cached_hole_size or
 487				 * cached_align, but it won't go very wrong.
 488				 */
 489			}
 490		}
 491	}
 492	rb_erase(&va->rb_node, &vmap_area_root);
 493	RB_CLEAR_NODE(&va->rb_node);
 494	list_del_rcu(&va->list);
 495
 496	/*
 497	 * Track the highest possible candidate for pcpu area
 498	 * allocation.  Areas outside of vmalloc area can be returned
 499	 * here too, consider only end addresses which fall inside
 500	 * vmalloc area proper.
 501	 */
 502	if (va->va_end > VMALLOC_START && va->va_end <= VMALLOC_END)
 503		vmap_area_pcpu_hole = max(vmap_area_pcpu_hole, va->va_end);
 504
 505	kfree_rcu(va, rcu_head);
 506}
 507
 508/*
 509 * Free a region of KVA allocated by alloc_vmap_area
 510 */
 511static void free_vmap_area(struct vmap_area *va)
 512{
 513	spin_lock(&vmap_area_lock);
 514	__free_vmap_area(va);
 515	spin_unlock(&vmap_area_lock);
 516}
 517
 518/*
 519 * Clear the pagetable entries of a given vmap_area
 520 */
 521static void unmap_vmap_area(struct vmap_area *va)
 522{
 523	vunmap_page_range(va->va_start, va->va_end);
 524}
 525
 526static void vmap_debug_free_range(unsigned long start, unsigned long end)
 527{
 528	/*
 529	 * Unmap page tables and force a TLB flush immediately if
 530	 * CONFIG_DEBUG_PAGEALLOC is set. This catches use-after-free
 531	 * bugs in the same way as for the linear kernel virtual address
 532	 * space, after a page has been freed.
 533	 *
 534	 * All the lazy freeing logic is still retained, in order to
 535	 * minimise intrusiveness of this debugging feature.
 536	 *
 537	 * This is going to be *slow* (linear kernel virtual address
 538	 * debugging doesn't do a broadcast TLB flush so it is a lot
 539	 * faster).
 540	 */
 541#ifdef CONFIG_DEBUG_PAGEALLOC
 542	vunmap_page_range(start, end);
 543	flush_tlb_kernel_range(start, end);
 544#endif
 545}
 546
 547/*
 548 * lazy_max_pages is the maximum amount of virtual address space we gather up
 549 * before attempting to purge with a TLB flush.
 550 *
 551 * There is a tradeoff here: a larger number will cover more kernel page tables
 552 * and take slightly longer to purge, but it will linearly reduce the number of
 553 * global TLB flushes that must be performed. It would seem natural to scale
 554 * this number up linearly with the number of CPUs (because vmapping activity
 555 * could also scale linearly with the number of CPUs), however it is likely
 556 * that in practice, workloads might be constrained in other ways that mean
 557 * vmap activity will not scale linearly with CPUs. Also, I want to be
 558 * conservative and not introduce a big latency on huge systems, so go with
 559 * a less aggressive log scale. It will still be an improvement over the old
 560 * code, and it will be simple to change the scale factor if we find that it
 561 * becomes a problem on bigger systems.
 562 */
 563static unsigned long lazy_max_pages(void)
 564{
 565	unsigned int log;
 566
 567	log = fls(num_online_cpus());
 568
 569	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
 570}
 571
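/*
 * Editor's worked example: with 4KB pages and 16 online CPUs,
 * fls(16) == 5, so lazy_max_pages() == 5 * (32MB / 4KB) == 40960
 * pages, i.e. up to 160MB of lazily-freed kva may accumulate before
 * try_purge_vmap_area_lazy() is forced to run.
 */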
 572static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
 573
 574/* for per-CPU blocks */
 575static void purge_fragmented_blocks_allcpus(void);
 576
 577/*
 578 * called before a call to iounmap() if the caller wants vm_area_struct's
 579 * immediately freed.
 580 */
 581void set_iounmap_nonlazy(void)
 582{
 583	atomic_set(&vmap_lazy_nr, lazy_max_pages()+1);
 584}
 585
 586/*
 587 * Purges all lazily-freed vmap areas.
 588 *
 589 * If sync is 0 then don't purge if there is already a purge in progress.
 590 * If force_flush is 1, then flush kernel TLBs between *start and *end even
 591 * if we found no lazy vmap areas to unmap (callers can use this to optimise
 592 * their own TLB flushing).
 593 * Returns with *start = min(*start, lowest purged address)
 594 *              *end = max(*end, highest purged address)
 595 */
 596static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
 597					int sync, int force_flush)
 598{
 599	static DEFINE_SPINLOCK(purge_lock);
 600	LIST_HEAD(valist);
 601	struct vmap_area *va;
 602	struct vmap_area *n_va;
 603	int nr = 0;
 604
 605	/*
 606	 * If sync is 0 but force_flush is 1, we'll go sync anyway but callers
 607	 * should not expect such behaviour. This just simplifies locking for
 608	 * the case that isn't actually used at the moment anyway.
 609	 */
 610	if (!sync && !force_flush) {
 611		if (!spin_trylock(&purge_lock))
 612			return;
 613	} else
 614		spin_lock(&purge_lock);
 615
 616	if (sync)
 617		purge_fragmented_blocks_allcpus();
 618
 619	rcu_read_lock();
 620	list_for_each_entry_rcu(va, &vmap_area_list, list) {
 621		if (va->flags & VM_LAZY_FREE) {
 622			if (va->va_start < *start)
 623				*start = va->va_start;
 624			if (va->va_end > *end)
 625				*end = va->va_end;
 626			nr += (va->va_end - va->va_start) >> PAGE_SHIFT;
 627			list_add_tail(&va->purge_list, &valist);
 628			va->flags |= VM_LAZY_FREEING;
 629			va->flags &= ~VM_LAZY_FREE;
 630		}
 631	}
 632	rcu_read_unlock();
 633
 634	if (nr)
 635		atomic_sub(nr, &vmap_lazy_nr);
 636
 637	if (nr || force_flush)
 638		flush_tlb_kernel_range(*start, *end);
 639
 640	if (nr) {
 641		spin_lock(&vmap_area_lock);
 642		list_for_each_entry_safe(va, n_va, &valist, purge_list)
 643			__free_vmap_area(va);
 644		spin_unlock(&vmap_area_lock);
 645	}
 646	spin_unlock(&purge_lock);
 647}
 648
 649/*
 650 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
 651 * is already purging.
 652 */
 653static void try_purge_vmap_area_lazy(void)
 654{
 655	unsigned long start = ULONG_MAX, end = 0;
 656
 657	__purge_vmap_area_lazy(&start, &end, 0, 0);
 658}
 659
 660/*
 661 * Kick off a purge of the outstanding lazy areas.
 662 */
 663static void purge_vmap_area_lazy(void)
 664{
 665	unsigned long start = ULONG_MAX, end = 0;
 666
 667	__purge_vmap_area_lazy(&start, &end, 1, 0);
 668}
 669
 670/*
 671 * Free a vmap area, caller ensuring that the area has been unmapped
 672 * and flush_cache_vunmap had been called for the correct range
 673 * previously.
 674 */
 675static void free_vmap_area_noflush(struct vmap_area *va)
 676{
 677	va->flags |= VM_LAZY_FREE;
 678	atomic_add((va->va_end - va->va_start) >> PAGE_SHIFT, &vmap_lazy_nr);
 679	if (unlikely(atomic_read(&vmap_lazy_nr) > lazy_max_pages()))
 680		try_purge_vmap_area_lazy();
 681}
 682
 683/*
 684 * Free and unmap a vmap area, caller ensuring flush_cache_vunmap had been
 685 * called for the correct range previously.
 686 */
 687static void free_unmap_vmap_area_noflush(struct vmap_area *va)
 688{
 689	unmap_vmap_area(va);
 690	free_vmap_area_noflush(va);
 691}
 692
 693/*
 694 * Free and unmap a vmap area
 695 */
 696static void free_unmap_vmap_area(struct vmap_area *va)
 697{
 698	flush_cache_vunmap(va->va_start, va->va_end);
 699	free_unmap_vmap_area_noflush(va);
 700}
 701
 702static struct vmap_area *find_vmap_area(unsigned long addr)
 703{
 704	struct vmap_area *va;
 705
 706	spin_lock(&vmap_area_lock);
 707	va = __find_vmap_area(addr);
 708	spin_unlock(&vmap_area_lock);
 709
 710	return va;
 711}
 712
 713static void free_unmap_vmap_area_addr(unsigned long addr)
 714{
 715	struct vmap_area *va;
 716
 717	va = find_vmap_area(addr);
 718	BUG_ON(!va);
 719	free_unmap_vmap_area(va);
 720}
 721
 722
 723/*** Per cpu kva allocator ***/
 724
 725/*
 726 * vmap space is limited especially on 32 bit architectures. Ensure there is
 727 * room for at least 16 percpu vmap blocks per CPU.
 728 */
 729/*
 730 * If we had constant VMALLOC_START and VMALLOC_END, we could
 731 * #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
 732 * instead (we just need a rough idea).
 733 */
 734#if BITS_PER_LONG == 32
 735#define VMALLOC_SPACE		(128UL*1024*1024)
 736#else
 737#define VMALLOC_SPACE		(128UL*1024*1024*1024)
 738#endif
 739
 740#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
 741#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
 742#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
 743#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
 744#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
 745#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
 746#define VMAP_BBMAP_BITS		\
 747		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
 748		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
 749			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
 750
 751#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
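/*
 * Editor's worked example: on 64-bit with 4KB pages and NR_CPUS == 64,
 * VMALLOC_PAGES is 128GB / 4KB == 32M pages, so VMALLOC_PAGES / 64 / 16
 * == 32768 bits; VMAP_MIN() then clamps this to VMAP_BBMAP_BITS_MAX ==
 * 1024, giving VMAP_BLOCK_SIZE == 1024 * 4KB == 4MB per vmap block.
 */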
 752
 753static bool vmap_initialized __read_mostly = false;
 754
 755struct vmap_block_queue {
 756	spinlock_t lock;
 757	struct list_head free;
 758};
 759
 760struct vmap_block {
 761	spinlock_t lock;
 762	struct vmap_area *va;
 763	unsigned long free, dirty;
 764	DECLARE_BITMAP(dirty_map, VMAP_BBMAP_BITS);
 765	struct list_head free_list;
 766	struct rcu_head rcu_head;
 767	struct list_head purge;
 768};
 769
 770/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
 771static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
 772
 773/*
 774 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
 775 * in the free path. Could get rid of this if we change the API to return a
 776 * "cookie" from alloc, to be passed to free. But no big deal yet.
 777 */
 778static DEFINE_SPINLOCK(vmap_block_tree_lock);
 779static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
 780
 781/*
 782 * We should probably have a fallback mechanism to allocate virtual memory
 783 * out of partially filled vmap blocks. However vmap block sizing should be
 784 * fairly reasonable according to the vmalloc size, so it shouldn't be a
 785 * big problem.
 786 */
 787
 788static unsigned long addr_to_vb_idx(unsigned long addr)
 789{
 790	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
 791	addr /= VMAP_BLOCK_SIZE;
 792	return addr;
 793}
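/*
 * Editor's note: the subtraction above aligns the index space to the
 * first VMAP_BLOCK_SIZE boundary at or below VMALLOC_START, so every
 * address inside one block yields the same index. vb_free() relies on
 * this to look up the owning vmap_block from any interior address.
 */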
 794
 795static struct vmap_block *new_vmap_block(gfp_t gfp_mask)
 796{
 797	struct vmap_block_queue *vbq;
 798	struct vmap_block *vb;
 799	struct vmap_area *va;
 800	unsigned long vb_idx;
 801	int node, err;
 802
 803	node = numa_node_id();
 804
 805	vb = kmalloc_node(sizeof(struct vmap_block),
 806			gfp_mask & GFP_RECLAIM_MASK, node);
 807	if (unlikely(!vb))
 808		return ERR_PTR(-ENOMEM);
 809
 810	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
 811					VMALLOC_START, VMALLOC_END,
 812					node, gfp_mask);
 813	if (IS_ERR(va)) {
 814		kfree(vb);
 815		return ERR_CAST(va);
 816	}
 817
 818	err = radix_tree_preload(gfp_mask);
 819	if (unlikely(err)) {
 820		kfree(vb);
 821		free_vmap_area(va);
 822		return ERR_PTR(err);
 823	}
 824
 825	spin_lock_init(&vb->lock);
 826	vb->va = va;
 827	vb->free = VMAP_BBMAP_BITS;
 828	vb->dirty = 0;
 829	bitmap_zero(vb->dirty_map, VMAP_BBMAP_BITS);
 830	INIT_LIST_HEAD(&vb->free_list);
 831
 832	vb_idx = addr_to_vb_idx(va->va_start);
 833	spin_lock(&vmap_block_tree_lock);
 834	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
 835	spin_unlock(&vmap_block_tree_lock);
 836	BUG_ON(err);
 837	radix_tree_preload_end();
 838
 839	vbq = &get_cpu_var(vmap_block_queue);
 840	spin_lock(&vbq->lock);
 841	list_add_rcu(&vb->free_list, &vbq->free);
 842	spin_unlock(&vbq->lock);
 843	put_cpu_var(vmap_block_queue);
 844
 845	return vb;
 846}
 847
 848static void free_vmap_block(struct vmap_block *vb)
 849{
 850	struct vmap_block *tmp;
 851	unsigned long vb_idx;
 852
 853	vb_idx = addr_to_vb_idx(vb->va->va_start);
 854	spin_lock(&vmap_block_tree_lock);
 855	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
 856	spin_unlock(&vmap_block_tree_lock);
 857	BUG_ON(tmp != vb);
 858
 859	free_vmap_area_noflush(vb->va);
 860	kfree_rcu(vb, rcu_head);
 861}
 862
 863static void purge_fragmented_blocks(int cpu)
 864{
 865	LIST_HEAD(purge);
 866	struct vmap_block *vb;
 867	struct vmap_block *n_vb;
 868	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
 869
 870	rcu_read_lock();
 871	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 872
 873		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
 874			continue;
 875
 876		spin_lock(&vb->lock);
 877		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
 878			vb->free = 0; /* prevent further allocs after releasing lock */
 879			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
 880			bitmap_fill(vb->dirty_map, VMAP_BBMAP_BITS);
 881			spin_lock(&vbq->lock);
 882			list_del_rcu(&vb->free_list);
 883			spin_unlock(&vbq->lock);
 884			spin_unlock(&vb->lock);
 885			list_add_tail(&vb->purge, &purge);
 886		} else
 887			spin_unlock(&vb->lock);
 888	}
 889	rcu_read_unlock();
 890
 891	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
 892		list_del(&vb->purge);
 893		free_vmap_block(vb);
 894	}
 895}
 896
 897static void purge_fragmented_blocks_allcpus(void)
 898{
 899	int cpu;
 900
 901	for_each_possible_cpu(cpu)
 902		purge_fragmented_blocks(cpu);
 903}
 904
 905static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
 906{
 907	struct vmap_block_queue *vbq;
 908	struct vmap_block *vb;
 909	unsigned long addr = 0;
 910	unsigned int order;
 911
 912	BUG_ON(size & ~PAGE_MASK);
 913	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
 914	if (WARN_ON(size == 0)) {
 915		/*
 916		 * Allocating 0 bytes isn't what the caller wants, since
 917		 * get_order(0) returns a funny result. Just warn and terminate
 918		 * early.
 919		 */
 920		return NULL;
 921	}
 922	order = get_order(size);
 923
 924again:
 925	rcu_read_lock();
 926	vbq = &get_cpu_var(vmap_block_queue);
 927	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
 928		int i;
 929
 930		spin_lock(&vb->lock);
 931		if (vb->free < 1UL << order)
 932			goto next;
 933
 934		i = VMAP_BBMAP_BITS - vb->free;
 935		addr = vb->va->va_start + (i << PAGE_SHIFT);
 936		BUG_ON(addr_to_vb_idx(addr) !=
 937				addr_to_vb_idx(vb->va->va_start));
 938		vb->free -= 1UL << order;
 939		if (vb->free == 0) {
 940			spin_lock(&vbq->lock);
 941			list_del_rcu(&vb->free_list);
 942			spin_unlock(&vbq->lock);
 943		}
 944		spin_unlock(&vb->lock);
 945		break;
 946next:
 947		spin_unlock(&vb->lock);
 948	}
 949
 950	put_cpu_var(vmap_block_queue);
 951	rcu_read_unlock();
 952
 953	if (!addr) {
 954		vb = new_vmap_block(gfp_mask);
 955		if (IS_ERR(vb))
 956			return vb;
 957		goto again;
 958	}
 959
 960	return (void *)addr;
 961}
 962
 963static void vb_free(const void *addr, unsigned long size)
 964{
 965	unsigned long offset;
 966	unsigned long vb_idx;
 967	unsigned int order;
 968	struct vmap_block *vb;
 969
 970	BUG_ON(size & ~PAGE_MASK);
 971	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
 972
 973	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
 974
 975	order = get_order(size);
 976
 977	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
 978
 979	vb_idx = addr_to_vb_idx((unsigned long)addr);
 980	rcu_read_lock();
 981	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
 982	rcu_read_unlock();
 983	BUG_ON(!vb);
 984
 985	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 986
 987	spin_lock(&vb->lock);
 988	BUG_ON(bitmap_allocate_region(vb->dirty_map, offset >> PAGE_SHIFT, order));
 989
 990	vb->dirty += 1UL << order;
 991	if (vb->dirty == VMAP_BBMAP_BITS) {
 992		BUG_ON(vb->free);
 993		spin_unlock(&vb->lock);
 994		free_vmap_block(vb);
 995	} else
 996		spin_unlock(&vb->lock);
 997}
 998
 999/**
1000 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1001 *
1002 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1003 * to amortize TLB flushing overheads. What this means is that any page you
1004 * have now may, in a former life, have been mapped into kernel virtual
1005 * address space by the vmap layer, so there might be some CPUs with TLB
1006 * entries still referencing that page (in addition to the regular 1:1 kernel mapping).
1007 *
1008 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1009 * be sure that none of the pages we have control over will have any aliases
1010 * from the vmap layer.
1011 */
1012void vm_unmap_aliases(void)
1013{
1014	unsigned long start = ULONG_MAX, end = 0;
1015	int cpu;
1016	int flush = 0;
1017
1018	if (unlikely(!vmap_initialized))
1019		return;
1020
1021	for_each_possible_cpu(cpu) {
1022		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1023		struct vmap_block *vb;
1024
1025		rcu_read_lock();
1026		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1027			int i, j;
1028
1029			spin_lock(&vb->lock);
1030			i = find_first_bit(vb->dirty_map, VMAP_BBMAP_BITS);
1031			if (i < VMAP_BBMAP_BITS) {
1032				unsigned long s, e;
1033
1034				j = find_last_bit(vb->dirty_map,
1035							VMAP_BBMAP_BITS);
1036				j = j + 1; /* need exclusive index */
1037
1038				s = vb->va->va_start + (i << PAGE_SHIFT);
1039				e = vb->va->va_start + (j << PAGE_SHIFT);
1040				flush = 1;
1041
1042				if (s < start)
1043					start = s;
1044				if (e > end)
1045					end = e;
1046			}
1047			spin_unlock(&vb->lock);
1048		}
1049		rcu_read_unlock();
1050	}
1051
1052	__purge_vmap_area_lazy(&start, &end, 1, flush);
1053}
1054EXPORT_SYMBOL_GPL(vm_unmap_aliases);
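/*
 * Editor's usage sketch (hypothetical caller): code that changes page
 * attributes typically flushes lazy vmap aliases first, so no stale
 * TLB entry with the old attributes can survive:
 *
 *	vm_unmap_aliases();
 *	... then update the mapping attributes for the pages ...
 */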
1055
1056/**
1057 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1058 * @mem: the pointer returned by vm_map_ram
1059 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1060 */
1061void vm_unmap_ram(const void *mem, unsigned int count)
1062{
1063	unsigned long size = count << PAGE_SHIFT;
1064	unsigned long addr = (unsigned long)mem;
1065
1066	BUG_ON(!addr);
1067	BUG_ON(addr < VMALLOC_START);
1068	BUG_ON(addr > VMALLOC_END);
1069	BUG_ON(addr & (PAGE_SIZE-1));
1070
1071	debug_check_no_locks_freed(mem, size);
1072	vmap_debug_free_range(addr, addr+size);
1073
1074	if (likely(count <= VMAP_MAX_ALLOC))
1075		vb_free(mem, size);
1076	else
1077		free_unmap_vmap_area_addr(addr);
1078}
1079EXPORT_SYMBOL(vm_unmap_ram);
1080
1081/**
1082 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1083 * @pages: an array of pointers to the pages to be mapped
1084 * @count: number of pages
1085 * @node: prefer to allocate data structures on this node
1086 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1087 *
1088 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it could be
1089 * faster than vmap() so it's a good choice there.  But if you mix long-lived
1090 * and short-lived objects with vm_map_ram(), it could consume lots of address
1091 * space through fragmentation (especially on a 32bit machine).  You could see
1092 * failures in the end.  Please use this function for short-lived objects.
1093 *
1094 * Returns: a pointer to the address that has been mapped, or %NULL on failure
1095 */
1096void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1097{
1098	unsigned long size = count << PAGE_SHIFT;
1099	unsigned long addr;
1100	void *mem;
1101
1102	if (likely(count <= VMAP_MAX_ALLOC)) {
1103		mem = vb_alloc(size, GFP_KERNEL);
1104		if (IS_ERR(mem))
1105			return NULL;
1106		addr = (unsigned long)mem;
1107	} else {
1108		struct vmap_area *va;
1109		va = alloc_vmap_area(size, PAGE_SIZE,
1110				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1111		if (IS_ERR(va))
1112			return NULL;
1113
1114		addr = va->va_start;
1115		mem = (void *)addr;
1116	}
1117	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1118		vm_unmap_ram(mem, count);
1119		return NULL;
1120	}
1121	return mem;
1122}
1123EXPORT_SYMBOL(vm_map_ram);
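/*
 * Editor's usage sketch (not part of this file): a short-lived linear
 * window over an existing page array. The count passed to
 * vm_unmap_ram() must match the one passed to vm_map_ram(), and len
 * must not exceed count * PAGE_SIZE. "sketch_" names are hypothetical.
 */
static int sketch_copy_via_window(struct page **pages, unsigned int count,
				  const void *src, size_t len)
{
	void *mem = vm_map_ram(pages, count, NUMA_NO_NODE, PAGE_KERNEL);

	if (!mem)
		return -ENOMEM;
	memcpy(mem, src, len);		/* use the window... */
	vm_unmap_ram(mem, count);	/* ...and tear it down promptly */
	return 0;
}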
1124
1125static struct vm_struct *vmlist __initdata;
1126/**
1127 * vm_area_add_early - add vmap area early during boot
1128 * @vm: vm_struct to add
1129 *
1130 * This function is used to add fixed kernel vm area to vmlist before
1131 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
1132 * should contain proper values and the other fields should be zero.
1133 *
1134 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1135 */
1136void __init vm_area_add_early(struct vm_struct *vm)
1137{
1138	struct vm_struct *tmp, **p;
1139
1140	BUG_ON(vmap_initialized);
1141	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1142		if (tmp->addr >= vm->addr) {
1143			BUG_ON(tmp->addr < vm->addr + vm->size);
1144			break;
1145		} else
1146			BUG_ON(tmp->addr + tmp->size > vm->addr);
1147	}
1148	vm->next = *p;
1149	*p = vm;
1150}
1151
1152/**
1153 * vm_area_register_early - register vmap area early during boot
1154 * @vm: vm_struct to register
1155 * @align: requested alignment
1156 *
1157 * This function is used to register kernel vm area before
1158 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
1159 * proper values on entry and other fields should be zero.  On return,
1160 * vm->addr contains the allocated address.
1161 *
1162 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1163 */
1164void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1165{
1166	static size_t vm_init_off __initdata;
1167	unsigned long addr;
1168
1169	addr = ALIGN(VMALLOC_START + vm_init_off, align);
1170	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1171
1172	vm->addr = (void *)addr;
1173
1174	vm_area_add_early(vm);
1175}
1176
1177void __init vmalloc_init(void)
1178{
1179	struct vmap_area *va;
1180	struct vm_struct *tmp;
1181	int i;
1182
1183	for_each_possible_cpu(i) {
1184		struct vmap_block_queue *vbq;
1185		struct vfree_deferred *p;
1186
1187		vbq = &per_cpu(vmap_block_queue, i);
1188		spin_lock_init(&vbq->lock);
1189		INIT_LIST_HEAD(&vbq->free);
1190		p = &per_cpu(vfree_deferred, i);
1191		init_llist_head(&p->list);
1192		INIT_WORK(&p->wq, free_work);
1193	}
1194
1195	/* Import existing vmlist entries. */
1196	for (tmp = vmlist; tmp; tmp = tmp->next) {
1197		va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1198		va->flags = VM_VM_AREA;
1199		va->va_start = (unsigned long)tmp->addr;
1200		va->va_end = va->va_start + tmp->size;
1201		va->vm = tmp;
1202		__insert_vmap_area(va);
1203	}
1204
1205	vmap_area_pcpu_hole = VMALLOC_END;
1206
1207	vmap_initialized = true;
1208}
1209
1210/**
1211 * map_kernel_range_noflush - map kernel VM area with the specified pages
1212 * @addr: start of the VM area to map
1213 * @size: size of the VM area to map
1214 * @prot: page protection flags to use
1215 * @pages: pages to map
1216 *
1217 * Map PFN_UP(@size) pages at @addr.  The VM area @addr and @size
1218 * specify should have been allocated using get_vm_area() and its
1219 * friends.
1220 *
1221 * NOTE:
1222 * This function does NOT do any cache flushing.  The caller is
1223 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1224 * before calling this function.
1225 *
1226 * RETURNS:
1227 * The number of pages mapped on success, -errno on failure.
1228 */
1229int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1230			     pgprot_t prot, struct page **pages)
1231{
1232	return vmap_page_range_noflush(addr, addr + size, prot, pages);
1233}
1234
1235/**
1236 * unmap_kernel_range_noflush - unmap kernel VM area
1237 * @addr: start of the VM area to unmap
1238 * @size: size of the VM area to unmap
1239 *
1240 * Unmap PFN_UP(@size) pages at @addr.  The VM area @addr and @size
1241 * specify should have been allocated using get_vm_area() and its
1242 * friends.
1243 *
1244 * NOTE:
1245 * This function does NOT do any cache flushing.  The caller is
1246 *	responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1247 * before calling this function and flush_tlb_kernel_range() after.
1248 */
1249void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1250{
1251	vunmap_page_range(addr, addr + size);
1252}
1253EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
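/*
 * Editor's sketch: the noflush variants leave all cache and TLB
 * maintenance to the caller, as the comments above require:
 *
 *	flush_cache_vmap(addr, addr + size);
 *	nr = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
 *	...
 *	flush_cache_vunmap(addr, addr + size);
 *	unmap_kernel_range_noflush(addr, size);
 *	flush_tlb_kernel_range(addr, addr + size);
 */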
1254
1255/**
1256 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1257 * @addr: start of the VM area to unmap
1258 * @size: size of the VM area to unmap
1259 *
1260 *	Similar to unmap_kernel_range_noflush() but flushes the cache before
1261 *	the unmapping and the TLB after it.
1262 */
1263void unmap_kernel_range(unsigned long addr, unsigned long size)
1264{
1265	unsigned long end = addr + size;
1266
1267	flush_cache_vunmap(addr, end);
1268	vunmap_page_range(addr, end);
1269	flush_tlb_kernel_range(addr, end);
1270}
1271
1272int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page ***pages)
1273{
1274	unsigned long addr = (unsigned long)area->addr;
1275	unsigned long end = addr + get_vm_area_size(area);
1276	int err;
1277
1278	err = vmap_page_range(addr, end, prot, *pages);
1279	if (err > 0) {
1280		*pages += err;
1281		err = 0;
1282	}
1283
1284	return err;
1285}
1286EXPORT_SYMBOL_GPL(map_vm_area);
1287
1288static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
1289			      unsigned long flags, const void *caller)
1290{
1291	spin_lock(&vmap_area_lock);
1292	vm->flags = flags;
1293	vm->addr = (void *)va->va_start;
1294	vm->size = va->va_end - va->va_start;
1295	vm->caller = caller;
1296	va->vm = vm;
1297	va->flags |= VM_VM_AREA;
1298	spin_unlock(&vmap_area_lock);
1299}
1300
1301static void clear_vm_uninitialized_flag(struct vm_struct *vm)
1302{
1303	/*
1304	 * Before removing VM_UNINITIALIZED,
1305	 * we should make sure that vm has proper values.
1306	 * Pair with smp_rmb() in show_numa_info().
1307	 */
1308	smp_wmb();
1309	vm->flags &= ~VM_UNINITIALIZED;
1310}
1311
1312static struct vm_struct *__get_vm_area_node(unsigned long size,
1313		unsigned long align, unsigned long flags, unsigned long start,
1314		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
1315{
1316	struct vmap_area *va;
1317	struct vm_struct *area;
1318
1319	BUG_ON(in_interrupt());
1320	if (flags & VM_IOREMAP)
1321		align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
1322
1323	size = PAGE_ALIGN(size);
1324	if (unlikely(!size))
1325		return NULL;
1326
1327	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
1328	if (unlikely(!area))
1329		return NULL;
1330
1331	/*
1332	 * We always allocate a guard page.
1333	 */
1334	size += PAGE_SIZE;
1335
1336	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
1337	if (IS_ERR(va)) {
1338		kfree(area);
1339		return NULL;
1340	}
1341
1342	setup_vmalloc_vm(area, va, flags, caller);
1343
1344	return area;
1345}
1346
1347struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
1348				unsigned long start, unsigned long end)
1349{
1350	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1351				  GFP_KERNEL, __builtin_return_address(0));
1352}
1353EXPORT_SYMBOL_GPL(__get_vm_area);
1354
1355struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
1356				       unsigned long start, unsigned long end,
1357				       const void *caller)
1358{
1359	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
1360				  GFP_KERNEL, caller);
1361}
1362
1363/**
1364 *	get_vm_area  -  reserve a contiguous kernel virtual area
1365 *	@size:		size of the area
1366 *	@flags:		%VM_IOREMAP for I/O mappings or VM_ALLOC
1367 *
1368 *	Search an area of @size in the kernel virtual mapping area,
1369 *	and reserved it for out purposes.  Returns the area descriptor
1370 *	and reserve it for our purposes.  Returns the area descriptor
1371 */
1372struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
1373{
1374	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1375				  NUMA_NO_NODE, GFP_KERNEL,
1376				  __builtin_return_address(0));
1377}
1378
1379struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
1380				const void *caller)
1381{
1382	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
1383				  NUMA_NO_NODE, GFP_KERNEL, caller);
1384}
1385
1386/**
1387 *	find_vm_area  -  find a contiguous kernel virtual area
1388 *	@addr:		base address
1389 *
1390 *	Search for the kernel VM area starting at @addr, and return it.
1391 *	It is up to the caller to do all required locking to keep the returned
1392 *	pointer valid.
1393 */
1394struct vm_struct *find_vm_area(const void *addr)
1395{
1396	struct vmap_area *va;
1397
1398	va = find_vmap_area((unsigned long)addr);
1399	if (va && va->flags & VM_VM_AREA)
1400		return va->vm;
1401
1402	return NULL;
1403}
1404
1405/**
1406 *	remove_vm_area  -  find and remove a contiguous kernel virtual area
1407 *	@addr:		base address
1408 *
1409 *	Search for the kernel VM area starting at @addr, and remove it.
1410 *	This function returns the found VM area, but using it is NOT safe
1411 *	on SMP machines, except for its size or flags.
1412 */
1413struct vm_struct *remove_vm_area(const void *addr)
1414{
1415	struct vmap_area *va;
1416
1417	va = find_vmap_area((unsigned long)addr);
1418	if (va && va->flags & VM_VM_AREA) {
1419		struct vm_struct *vm = va->vm;
1420
1421		spin_lock(&vmap_area_lock);
1422		va->vm = NULL;
1423		va->flags &= ~VM_VM_AREA;
1424		spin_unlock(&vmap_area_lock);
1425
1426		vmap_debug_free_range(va->va_start, va->va_end);
1427		free_unmap_vmap_area(va);
1428		vm->size -= PAGE_SIZE;
1429
1430		return vm;
1431	}
1432	return NULL;
1433}
1434
1435static void __vunmap(const void *addr, int deallocate_pages)
1436{
1437	struct vm_struct *area;
1438
1439	if (!addr)
1440		return;
1441
1442	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
1443			addr))
1444		return;
1445
1446	area = remove_vm_area(addr);
1447	if (unlikely(!area)) {
1448		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
1449				addr);
1450		return;
1451	}
1452
1453	debug_check_no_locks_freed(addr, area->size);
1454	debug_check_no_obj_freed(addr, area->size);
1455
1456	if (deallocate_pages) {
1457		int i;
1458
1459		for (i = 0; i < area->nr_pages; i++) {
1460			struct page *page = area->pages[i];
1461
1462			BUG_ON(!page);
1463			__free_page(page);
1464		}
1465
1466		if (area->flags & VM_VPAGES)
1467			vfree(area->pages);
1468		else
1469			kfree(area->pages);
1470	}
1471
1472	kfree(area);
1473	return;
1474}
1475 
1476/**
1477 *	vfree  -  release memory allocated by vmalloc()
1478 *	@addr:		memory base address
1479 *
1480 *	Free the virtually contiguous memory area starting at @addr, as
1481 *	obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
1482 *	NULL, no operation is performed.
1483 *
1484 *	Must not be called in NMI context (strictly speaking, only if we don't
1485 *	have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
1486 *	conventions for vfree() arch-dependent would be a really bad idea)
1487 *
1488 *	NOTE: assumes that the object at *addr has a size >= sizeof(llist_node)
1489 */
1490void vfree(const void *addr)
1491{
1492	BUG_ON(in_nmi());
1493
1494	kmemleak_free(addr);
1495
1496	if (!addr)
1497		return;
1498	if (unlikely(in_interrupt())) {
1499		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
1500		if (llist_add((struct llist_node *)addr, &p->list))
1501			schedule_work(&p->wq);
1502	} else
1503		__vunmap(addr, 1);
1504}
1505EXPORT_SYMBOL(vfree);
1506
1507/**
1508 *	vunmap  -  release virtual mapping obtained by vmap()
1509 *	@addr:		memory base address
1510 *
1511 *	Free the virtually contiguous memory area starting at @addr,
1512 *	which was created from the page array passed to vmap().
1513 *
1514 *	Must not be called in interrupt context.
1515 */
1516void vunmap(const void *addr)
1517{
1518	BUG_ON(in_interrupt());
1519	might_sleep();
1520	if (addr)
1521		__vunmap(addr, 0);
1522}
1523EXPORT_SYMBOL(vunmap);
1524
1525/**
1526 *	vmap  -  map an array of pages into virtually contiguous space
1527 *	@pages:		array of page pointers
1528 *	@count:		number of pages to map
1529 *	@flags:		vm_area->flags
1530 *	@prot:		page protection for the mapping
1531 *
1532 *	Maps @count pages from @pages into contiguous kernel virtual
1533 *	space.
1534 */
1535void *vmap(struct page **pages, unsigned int count,
1536		unsigned long flags, pgprot_t prot)
1537{
1538	struct vm_struct *area;
1539
1540	might_sleep();
1541
1542	if (count > totalram_pages)
1543		return NULL;
1544
1545	area = get_vm_area_caller((count << PAGE_SHIFT), flags,
1546					__builtin_return_address(0));
1547	if (!area)
1548		return NULL;
1549
1550	if (map_vm_area(area, prot, &pages)) {
1551		vunmap(area->addr);
1552		return NULL;
1553	}
1554
1555	return area->addr;
1556}
1557EXPORT_SYMBOL(vmap);
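/*
 * Editor's usage sketch (hypothetical function): stitch individually
 * allocated pages into one virtually contiguous kernel mapping.
 */
static void *sketch_vmap_pages(struct page **pages, unsigned int count)
{
	/* VM_MAP marks the area as a regular vmap()ed region */
	void *virt = vmap(pages, count, VM_MAP, PAGE_KERNEL);

	if (!virt)
		return NULL;
	/* later: vunmap(virt); the pages themselves stay allocated */
	return virt;
}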
1558
1559static void *__vmalloc_node(unsigned long size, unsigned long align,
1560			    gfp_t gfp_mask, pgprot_t prot,
1561			    int node, const void *caller);
1562static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
1563				 pgprot_t prot, int node)
1564{
1565	const int order = 0;
1566	struct page **pages;
1567	unsigned int nr_pages, array_size, i;
1568	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1569
1570	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
1571	array_size = (nr_pages * sizeof(struct page *));
1572
1573	area->nr_pages = nr_pages;
1574	/* Please note that the recursion is strictly bounded. */
1575	if (array_size > PAGE_SIZE) {
1576		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
1577				PAGE_KERNEL, node, area->caller);
1578		area->flags |= VM_VPAGES;
1579	} else {
1580		pages = kmalloc_node(array_size, nested_gfp, node);
1581	}
1582	area->pages = pages;
1583	if (!area->pages) {
1584		remove_vm_area(area->addr);
1585		kfree(area);
1586		return NULL;
1587	}
1588
1589	for (i = 0; i < area->nr_pages; i++) {
1590		struct page *page;
1591		gfp_t tmp_mask = gfp_mask | __GFP_NOWARN;
1592
1593		if (node == NUMA_NO_NODE)
1594			page = alloc_page(tmp_mask);
1595		else
1596			page = alloc_pages_node(node, tmp_mask, order);
1597
1598		if (unlikely(!page)) {
1599			/* Successfully allocated i pages, free them in __vunmap() */
1600			area->nr_pages = i;
1601			goto fail;
1602		}
1603		area->pages[i] = page;
1604	}
1605
1606	if (map_vm_area(area, prot, &pages))
1607		goto fail;
1608	return area->addr;
1609
1610fail:
1611	warn_alloc_failed(gfp_mask, order,
1612			  "vmalloc: allocation failure, allocated %ld of %ld bytes\n",
1613			  (area->nr_pages*PAGE_SIZE), area->size);
1614	vfree(area->addr);
1615	return NULL;
1616}
1617
1618/**
1619 *	__vmalloc_node_range  -  allocate virtually contiguous memory
1620 *	@size:		allocation size
1621 *	@align:		desired alignment
1622 *	@start:		vm area range start
1623 *	@end:		vm area range end
1624 *	@gfp_mask:	flags for the page level allocator
1625 *	@prot:		protection mask for the allocated pages
1626 *	@node:		node to use for allocation or NUMA_NO_NODE
1627 *	@caller:	caller's return address
1628 *
1629 *	Allocate enough pages to cover @size from the page level
1630 *	allocator with @gfp_mask flags.  Map them into contiguous
1631 *	kernel virtual space, using a pagetable protection of @prot.
1632 */
1633void *__vmalloc_node_range(unsigned long size, unsigned long align,
1634			unsigned long start, unsigned long end, gfp_t gfp_mask,
1635			pgprot_t prot, int node, const void *caller)
1636{
1637	struct vm_struct *area;
1638	void *addr;
1639	unsigned long real_size = size;
1640
1641	size = PAGE_ALIGN(size);
1642	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
1643		goto fail;
1644
1645	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
1646				  start, end, node, gfp_mask, caller);
1647	if (!area)
1648		goto fail;
1649
1650	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
1651	if (!addr)
1652		return NULL;
1653
1654	/*
1655	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
1656	 * flag. It means that vm_struct is not fully initialized.
1657	 * Now, it is fully initialized, so remove this flag here.
1658	 */
1659	clear_vm_uninitialized_flag(area);
1660
1661	/*
1662	 * A ref_count = 2 is needed because vm_struct allocated in
1663	 * __get_vm_area_node() contains a reference to the virtual address of
1664	 * the vmalloc'ed block.
1665	 */
1666	kmemleak_alloc(addr, real_size, 2, gfp_mask);
1667
1668	return addr;
1669
1670fail:
1671	warn_alloc_failed(gfp_mask, 0,
1672			  "vmalloc: allocation failure: %lu bytes\n",
1673			  real_size);
1674	return NULL;
1675}
1676
1677/**
1678 *	__vmalloc_node  -  allocate virtually contiguous memory
1679 *	@size:		allocation size
1680 *	@align:		desired alignment
1681 *	@gfp_mask:	flags for the page level allocator
1682 *	@prot:		protection mask for the allocated pages
1683 *	@node:		node to use for allocation or NUMA_NO_NODE
1684 *	@caller:	caller's return address
1685 *
1686 *	Allocate enough pages to cover @size from the page level
1687 *	allocator with @gfp_mask flags.  Map them into contiguous
1688 *	kernel virtual space, using a pagetable protection of @prot.
1689 */
1690static void *__vmalloc_node(unsigned long size, unsigned long align,
1691			    gfp_t gfp_mask, pgprot_t prot,
1692			    int node, const void *caller)
1693{
1694	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
1695				gfp_mask, prot, node, caller);
1696}
1697
1698void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
1699{
1700	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
1701				__builtin_return_address(0));
1702}
1703EXPORT_SYMBOL(__vmalloc);
1704
1705static inline void *__vmalloc_node_flags(unsigned long size,
1706					int node, gfp_t flags)
1707{
1708	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
1709					node, __builtin_return_address(0));
1710}
1711
1712/**
1713 *	vmalloc  -  allocate virtually contiguous memory
1714 *	@size:		allocation size
1715 *	Allocate enough pages to cover @size from the page level
1716 *	allocator and map them into contiguous kernel virtual space.
1717 *
1718 *	For tight control over page level allocator and protection flags
1719 *	use __vmalloc() instead.
1720 */
1721void *vmalloc(unsigned long size)
1722{
1723	return __vmalloc_node_flags(size, NUMA_NO_NODE,
1724				    GFP_KERNEL | __GFP_HIGHMEM);
1725}
1726EXPORT_SYMBOL(vmalloc);
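/*
 * Editor's usage sketch: vmalloc() suits large allocations that only
 * need virtual contiguity; pair every success with vfree().
 * "sketch_" names are hypothetical.
 */
static u32 *sketch_alloc_table(unsigned long entries)
{
	u32 *table = vmalloc(entries * sizeof(*table));

	if (!table)
		return NULL;	/* caller falls back or fails with -ENOMEM */
	return table;		/* release later with vfree(table) */
}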
1727
1728/**
1729 *	vzalloc - allocate virtually contiguous memory with zero fill
1730 *	@size:	allocation size
1731 *	Allocate enough pages to cover @size from the page level
1732 *	allocator and map them into contiguous kernel virtual space.
1733 *	The memory allocated is set to zero.
1734 *
1735 *	For tight control over page level allocator and protection flags
1736 *	use __vmalloc() instead.
1737 */
1738void *vzalloc(unsigned long size)
1739{
1740	return __vmalloc_node_flags(size, NUMA_NO_NODE,
1741				GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1742}
1743EXPORT_SYMBOL(vzalloc);
1744
1745/**
1746 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
1747 * @size: allocation size
1748 *
1749 * The resulting memory area is zeroed so it can be mapped to userspace
1750 * without leaking data.
1751 */
1752void *vmalloc_user(unsigned long size)
1753{
1754	struct vm_struct *area;
1755	void *ret;
1756
1757	ret = __vmalloc_node(size, SHMLBA,
1758			     GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
1759			     PAGE_KERNEL, NUMA_NO_NODE,
1760			     __builtin_return_address(0));
1761	if (ret) {
1762		area = find_vm_area(ret);
1763		area->flags |= VM_USERMAP;
1764	}
1765	return ret;
1766}
1767EXPORT_SYMBOL(vmalloc_user);
1768
1769/**
1770 *	vmalloc_node  -  allocate memory on a specific node
1771 *	@size:		allocation size
1772 *	@node:		numa node
1773 *
1774 *	Allocate enough pages to cover @size from the page level
1775 *	allocator and map them into contiguous kernel virtual space.
1776 *
1777 *	For tight control over page level allocator and protection flags
1778 *	use __vmalloc() instead.
1779 */
1780void *vmalloc_node(unsigned long size, int node)
1781{
1782	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
1783					node, __builtin_return_address(0));
1784}
1785EXPORT_SYMBOL(vmalloc_node);
1786
1787/**
1788 * vzalloc_node - allocate memory on a specific node with zero fill
1789 * @size:	allocation size
1790 * @node:	numa node
1791 *
1792 * Allocate enough pages to cover @size from the page level
1793 * allocator and map them into contiguous kernel virtual space.
1794 * The memory allocated is set to zero.
1795 *
1796 * For tight control over page level allocator and protection flags
1797 * use __vmalloc_node() instead.
1798 */
1799void *vzalloc_node(unsigned long size, int node)
1800{
1801	return __vmalloc_node_flags(size, node,
1802			 GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO);
1803}
1804EXPORT_SYMBOL(vzalloc_node);
1805
1806#ifndef PAGE_KERNEL_EXEC
1807# define PAGE_KERNEL_EXEC PAGE_KERNEL
1808#endif
1809
1810/**
1811 *	vmalloc_exec  -  allocate virtually contiguous, executable memory
1812 *	@size:		allocation size
1813 *
1814 *	Kernel-internal function to allocate enough pages to cover @size
1815 *	from the page level allocator and map them into contiguous and
1816 *	executable kernel virtual space.
1817 *
1818 *	For tight control over page level allocator and protection flags
1819 *	use __vmalloc() instead.
1820 */
1821
1822void *vmalloc_exec(unsigned long size)
1823{
1824	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
1825			      NUMA_NO_NODE, __builtin_return_address(0));
1826}
1827
1828#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
1829#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1830#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1831#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1832#else
1833#define GFP_VMALLOC32 GFP_KERNEL
1834#endif
1835
1836/**
1837 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
1838 *	@size:		allocation size
1839 *
1840 *	Allocate enough 32bit PA addressable pages to cover @size from the
1841 *	page level allocator and map them into contiguous kernel virtual space.
1842 */
1843void *vmalloc_32(unsigned long size)
1844{
1845	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
1846			      NUMA_NO_NODE, __builtin_return_address(0));
1847}
1848EXPORT_SYMBOL(vmalloc_32);
1849
1850/**
1851 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
1852 *	@size:		allocation size
1853 *
1854 * The resulting memory area is 32bit addressable and zeroed so it can be
1855 * mapped to userspace without leaking data.
1856 */
1857void *vmalloc_32_user(unsigned long size)
1858{
1859	struct vm_struct *area;
1860	void *ret;
1861
1862	ret = __vmalloc_node(size, 1, GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
1863			     NUMA_NO_NODE, __builtin_return_address(0));
1864	if (ret) {
1865		area = find_vm_area(ret);
1866		area->flags |= VM_USERMAP;
1867	}
1868	return ret;
1869}
1870EXPORT_SYMBOL(vmalloc_32_user);
1871
1872/*
1873 * Small helper routine: copy contents from addr to buf.
1874 * If a page is not present, fill with zeroes.
1875 */
1876
1877static int aligned_vread(char *buf, char *addr, unsigned long count)
1878{
1879	struct page *p;
1880	int copied = 0;
1881
1882	while (count) {
1883		unsigned long offset, length;
1884
1885		offset = (unsigned long)addr & ~PAGE_MASK;
1886		length = PAGE_SIZE - offset;
1887		if (length > count)
1888			length = count;
1889		p = vmalloc_to_page(addr);
1890		/*
1891		 * To do safe access to this _mapped_ area, we need a
1892		 * lock. But adding a lock here means that we need to add
1893		 * overhead of vmalloc()/vfree() calls for this _debug_
1894		 * interface, rarely used. Instead of that, we'll use
1895		 * kmap() and get small overhead in this access function.
1896		 */
1897		if (p) {
1898			/*
1899			 * we can expect USER0 is not used (see vread/vwrite's
1900			 * function description)
1901			 */
1902			void *map = kmap_atomic(p);
1903			memcpy(buf, map + offset, length);
1904			kunmap_atomic(map);
1905		} else
1906			memset(buf, 0, length);
1907
1908		addr += length;
1909		buf += length;
1910		copied += length;
1911		count -= length;
1912	}
1913	return copied;
1914}
1915
1916static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1917{
1918	struct page *p;
1919	int copied = 0;
1920
1921	while (count) {
1922		unsigned long offset, length;
1923
1924		offset = (unsigned long)addr & ~PAGE_MASK;
1925		length = PAGE_SIZE - offset;
1926		if (length > count)
1927			length = count;
1928		p = vmalloc_to_page(addr);
1929		/*
1930		 * To do safe access to this _mapped_ area, we need a
1931		 * lock. But adding a lock here means that we need to add
1932		 * overhead of vmalloc()/vfree() calls for this _debug_
1933		 * interface, rarely used. Instead of that, we'll use
1934		 * kmap() and get small overhead in this access function.
1935		 */
1936		if (p) {
1937			/*
1938			 * we can expect USER0 is not used (see vread/vwrite's
1939			 * function description)
1940			 */
1941			void *map = kmap_atomic(p);
1942			memcpy(map + offset, buf, length);
1943			kunmap_atomic(map);
1944		}
1945		addr += length;
1946		buf += length;
1947		copied += length;
1948		count -= length;
1949	}
1950	return copied;
1951}
1952
1953/**
1954 *	vread() -  read vmalloc area in a safe way.
1955 *	@buf:		buffer for reading data
1956 *	@addr:		vm address.
1957 *	@count:		number of bytes to be read.
1958 *
1959 *	Returns the number of bytes by which addr and buf should be increased
1960 *	(the same number as @count). Returns 0 if [addr...addr+count) does not
1961 *	intersect any live vmalloc area.
1962 *
1963 *	This function checks that addr is a valid vmalloc'ed area, and
1964 *	copies data from that area to a given buffer. If the given memory range
1965 *	of [addr...addr+count) includes some valid address, data is copied to
1966 *	the proper area of @buf. If there are memory holes, they'll be zero-filled.
1967 *	IOREMAP area is treated as memory hole and no copy is done.
1968 *
1969 *	If [addr...addr+count) does not intersect any live vm_struct
1970 *	area, returns 0. @buf should be a kernel buffer.
1971 *
1972 *	Note: In usual ops, vread() is never necessary because the caller
1973 *	should know vmalloc() area is valid and can use memcpy().
1974 *	This is for routines which have to access vmalloc area without
1975 *	any information, such as /dev/kmem.
1976 *
1977 */
1978
1979long vread(char *buf, char *addr, unsigned long count)
1980{
1981	struct vmap_area *va;
1982	struct vm_struct *vm;
1983	char *vaddr, *buf_start = buf;
1984	unsigned long buflen = count;
1985	unsigned long n;
1986
1987	/* Don't allow overflow */
1988	if ((unsigned long) addr + count < count)
1989		count = -(unsigned long) addr;
1990
1991	spin_lock(&vmap_area_lock);
1992	list_for_each_entry(va, &vmap_area_list, list) {
1993		if (!count)
1994			break;
1995
1996		if (!(va->flags & VM_VM_AREA))
1997			continue;
1998
1999		vm = va->vm;
2000		vaddr = (char *) vm->addr;
2001		if (addr >= vaddr + get_vm_area_size(vm))
2002			continue;
2003		while (addr < vaddr) {
2004			if (count == 0)
2005				goto finished;
2006			*buf = '\0';
2007			buf++;
2008			addr++;
2009			count--;
2010		}
2011		n = vaddr + get_vm_area_size(vm) - addr;
2012		if (n > count)
2013			n = count;
2014		if (!(vm->flags & VM_IOREMAP))
2015			aligned_vread(buf, addr, n);
2016		else /* IOREMAP area is treated as memory hole */
2017			memset(buf, 0, n);
2018		buf += n;
2019		addr += n;
2020		count -= n;
2021	}
2022finished:
2023	spin_unlock(&vmap_area_lock);
2024
2025	if (buf == buf_start)
2026		return 0;
2027	/* zero-fill memory holes */
2028	if (buf != buf_start + buflen)
2029		memset(buf, 0, buflen - (buf - buf_start));
2030
2031	return buflen;
2032}
2033
2034/**
2035 *	vwrite() -  write vmalloc area in a safe way.
2036 *	@buf:		buffer for source data
2037 *	@addr:		vm address.
2038 *	@count:		number of bytes to be read.
2039 *
2040 *	Returns the number of bytes by which addr and buf should be increased
2041 *	(the same number as @count).
2042 *	If [addr...addr+count) does not intersect any valid
2043 *	vmalloc area, returns 0.
2044 *
2045 *	This function checks that addr is a valid vmalloc'ed area, and
2046 *	copies data from a buffer to the given addr. If the specified range of
2047 *	[addr...addr+count) includes some valid address, data is copied from
2048 *	the proper area of @buf. If there are memory holes, nothing is copied to them.
2049 *	IOREMAP area is treated as memory hole and no copy is done.
2050 *
2051 *	If [addr...addr+count) does not intersect any live vm_struct
2052 *	area, returns 0. @buf should be a kernel buffer.
2053 *
2054 *	Note: In usual ops, vwrite() is never necessary because the caller
2055 *	should know vmalloc() area is valid and can use memcpy().
2056 *	This is for routines which have to access vmalloc area without
2057 *	any information, such as /dev/kmem.
2058 */
2059
2060long vwrite(char *buf, char *addr, unsigned long count)
2061{
2062	struct vmap_area *va;
2063	struct vm_struct *vm;
2064	char *vaddr;
2065	unsigned long n, buflen;
2066	int copied = 0;
2067
2068	/* Don't allow overflow */
2069	if ((unsigned long) addr + count < count)
2070		count = -(unsigned long) addr;
2071	buflen = count;
2072
2073	spin_lock(&vmap_area_lock);
2074	list_for_each_entry(va, &vmap_area_list, list) {
2075		if (!count)
2076			break;
2077
2078		if (!(va->flags & VM_VM_AREA))
2079			continue;
2080
2081		vm = va->vm;
2082		vaddr = (char *) vm->addr;
2083		if (addr >= vaddr + get_vm_area_size(vm))
2084			continue;
2085		while (addr < vaddr) {
2086			if (count == 0)
2087				goto finished;
2088			buf++;
2089			addr++;
2090			count--;
2091		}
2092		n = vaddr + get_vm_area_size(vm) - addr;
2093		if (n > count)
2094			n = count;
2095		if (!(vm->flags & VM_IOREMAP)) {
2096			aligned_vwrite(buf, addr, n);
2097			copied++;
2098		}
2099		buf += n;
2100		addr += n;
2101		count -= n;
2102	}
2103finished:
2104	spin_unlock(&vmap_area_lock);
2105	if (!copied)
2106		return 0;
2107	return buflen;
2108}
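
/*
 * Illustrative sketch, not part of the original source: the matching
 * /dev/kmem-style write path could bounce the user data through a kernel
 * buffer first (ubuf, kaddr and len are hypothetical names):
 *
 *	kbuf = memdup_user(ubuf, len);
 *	if (IS_ERR(kbuf))
 *		return PTR_ERR(kbuf);
 *	written = vwrite(kbuf, kaddr, len);
 *	kfree(kbuf);
 *
 * vwrite() silently skips holes and IOREMAP areas, so @written is either
 * 0 or the full length.
 */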
2109
2110/**
2111 *	remap_vmalloc_range_partial  -  map vmalloc pages to userspace
2112 *	@vma:		vma to cover
2113 *	@uaddr:		target user address to start at
2114 *	@kaddr:		virtual address of vmalloc kernel memory
2115 *	@size:		size of map area
2116 *
2117 *	Returns:	0 for success, -Exxx on failure
2118 *
2119 *	This function checks that @kaddr is a valid vmalloc'ed area,
2120 *	and that it is big enough to cover the range starting at
2121 *	@uaddr in @vma. Will return failure if these criteria are not
2122 *	met.
2123 *
2124 *	Similar to remap_pfn_range() (see mm/memory.c)
2125 */
2126int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2127				void *kaddr, unsigned long size)
2128{
2129	struct vm_struct *area;
2130
2131	size = PAGE_ALIGN(size);
2132
2133	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2134		return -EINVAL;
2135
2136	area = find_vm_area(kaddr);
2137	if (!area)
2138		return -EINVAL;
2139
2140	if (!(area->flags & VM_USERMAP))
2141		return -EINVAL;
2142
2143	if (kaddr + size > area->addr + area->size)
2144		return -EINVAL;
2145
2146	do {
2147		struct page *page = vmalloc_to_page(kaddr);
2148		int ret;
2149
2150		ret = vm_insert_page(vma, uaddr, page);
2151		if (ret)
2152			return ret;
2153
2154		uaddr += PAGE_SIZE;
2155		kaddr += PAGE_SIZE;
2156		size -= PAGE_SIZE;
2157	} while (size > 0);
2158
2159	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
2160
2161	return 0;
2162}
2163EXPORT_SYMBOL(remap_vmalloc_range_partial);
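
/*
 * Illustrative sketch, not part of the original source: a crash-dump style
 * mmap() handler can place a vmalloc'ed object at an arbitrary offset
 * inside the vma ("done" and "kaddr" below are hypothetical):
 *
 *	err = remap_vmalloc_range_partial(vma, vma->vm_start + done,
 *					  kaddr, size);
 *	if (err)
 *		return err;
 *	done += size;
 *
 * Unlike remap_vmalloc_range(), the mapping does not have to start at
 * vma->vm_start or cover the whole vma.
 */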
2164
2165/**
2166 *	remap_vmalloc_range  -  map vmalloc pages to userspace
2167 *	@vma:		vma to cover (map full range of vma)
2168 *	@addr:		vmalloc memory
2169 *	@pgoff:		number of pages into addr before first page to map
2170 *
2171 *	Returns:	0 for success, -Exxx on failure
2172 *
2173 *	This function checks that addr is a valid vmalloc'ed area, and
2174 *	that it is big enough to cover the vma. Will return failure if
2175 *	these criteria are not met.
2176 *
2177 *	Similar to remap_pfn_range() (see mm/memory.c)
2178 */
2179int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
2180						unsigned long pgoff)
2181{
2182	return remap_vmalloc_range_partial(vma, vma->vm_start,
2183					   addr + (pgoff << PAGE_SHIFT),
2184					   vma->vm_end - vma->vm_start);
2185}
2186EXPORT_SYMBOL(remap_vmalloc_range);
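
/*
 * Illustrative sketch, not part of the original source: a typical driver
 * allocates its buffer with vmalloc_user(), which sets VM_USERMAP, and
 * then maps the whole vma in its mmap() handler ("drv_buf" and "drv_mmap"
 * are hypothetical names):
 *
 *	drv_buf = vmalloc_user(BUF_SIZE);
 *	...
 *	static int drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, drv_buf, vma->vm_pgoff);
 *	}
 */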
2187
2188/*
2189 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
2190 * have one.
2191 */
2192void __weak vmalloc_sync_all(void)
2193{
2194}
2195
2196
2197static int f(pte_t *pte, pgtable_t table, unsigned long addr, void *data)
2198{
2199	pte_t ***p = data;
2200
2201	if (p) {
2202		*(*p) = pte;
2203		(*p)++;
2204	}
2205	return 0;
2206}
2207
2208/**
2209 *	alloc_vm_area - allocate a range of kernel address space
2210 *	@size:		size of the area
2211 *	@ptes:		returns the PTEs for the address space
2212 *
2213 *	Returns:	NULL on failure, vm_struct on success
2214 *
2215 *	This function reserves a range of kernel address space, and
2216 *	allocates pagetables to map that range.  No actual mappings
2217 *	are created.
2218 *
2219 *	If @ptes is non-NULL, pointers to the PTEs (in init_mm)
2220 *	allocated for the VM area are returned.
2221 */
2222struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
2223{
2224	struct vm_struct *area;
2225
2226	area = get_vm_area_caller(size, VM_IOREMAP,
2227				__builtin_return_address(0));
2228	if (area == NULL)
2229		return NULL;
2230
2231	/*
2232	 * This ensures that page tables are constructed for this region
2233	 * of kernel virtual address space and mapped into init_mm.
2234	 */
2235	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2236				size, f, ptes ? &ptes : NULL)) {
2237		free_vm_area(area);
2238		return NULL;
2239	}
2240
2241	return area;
2242}
2243EXPORT_SYMBOL_GPL(alloc_vm_area);
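
/*
 * Illustrative sketch, not part of the original source: a caller such as
 * the Xen grant-table code reserves address space and later fills the
 * returned PTEs itself (NR_GRANT_PAGES is a hypothetical constant):
 *
 *	pte_t *ptes[NR_GRANT_PAGES];
 *	struct vm_struct *area;
 *
 *	area = alloc_vm_area(NR_GRANT_PAGES * PAGE_SIZE, ptes);
 *	if (!area)
 *		return -ENOMEM;
 *
 * Afterwards ptes[i] points at the init_mm PTE that maps
 * area->addr + i * PAGE_SIZE, and the region is released again with
 * free_vm_area(area).
 */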
2244
2245void free_vm_area(struct vm_struct *area)
2246{
2247	struct vm_struct *ret;
2248	ret = remove_vm_area(area->addr);
2249	BUG_ON(ret != area);
2250	kfree(area);
2251}
2252EXPORT_SYMBOL_GPL(free_vm_area);
2253
2254#ifdef CONFIG_SMP
2255static struct vmap_area *node_to_va(struct rb_node *n)
2256{
2257	return n ? rb_entry(n, struct vmap_area, rb_node) : NULL;
2258}
2259
2260/**
2261 * pvm_find_next_prev - find the next and prev vmap_area surrounding @end
2262 * @end: target address
2263 * @pnext: out arg for the next vmap_area
2264 * @pprev: out arg for the previous vmap_area
2265 *
2266 * Returns: %true if either or both of next and prev are found,
2267 *	    %false if no vmap_area exists
2268 *
2269 * Find the vmap_areas whose end addresses enclose @end, i.e. if not
2270 * NULL, (*pnext)->va_end > @end and (*pprev)->va_end <= @end.
2271 */
2272static bool pvm_find_next_prev(unsigned long end,
2273			       struct vmap_area **pnext,
2274			       struct vmap_area **pprev)
2275{
2276	struct rb_node *n = vmap_area_root.rb_node;
2277	struct vmap_area *va = NULL;
2278
2279	while (n) {
2280		va = rb_entry(n, struct vmap_area, rb_node);
2281		if (end < va->va_end)
2282			n = n->rb_left;
2283		else if (end > va->va_end)
2284			n = n->rb_right;
2285		else
2286			break;
2287	}
2288
2289	if (!va)
2290		return false;
2291
2292	if (va->va_end > end) {
2293		*pnext = va;
2294		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2295	} else {
2296		*pprev = va;
2297		*pnext = node_to_va(rb_next(&(*pprev)->rb_node));
2298	}
2299	return true;
2300}
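
/*
 * Worked example with illustrative values: given two areas
 * A = [0x1000, 0x2000) and B = [0x3000, 0x4000), a lookup of
 * end = 0x2800 sets *pnext = B (va_end 0x4000 > 0x2800) and
 * *pprev = A (va_end 0x2000 <= 0x2800).
 */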
2301
2302/**
2303 * pvm_determine_end - find the highest aligned address between two vmap_areas
2304 * @pnext: in/out arg for the next vmap_area
2305 * @pprev: in/out arg for the previous vmap_area
2306 * @align: alignment
2307 *
2308 * Returns: determined end address
2309 *
2310 * Find the highest aligned address between *@pnext and *@pprev below
2311 * VMALLOC_END.  *@pnext and *@pprev are adjusted so that the aligned
2312 * down address is between the end addresses of the two vmap_areas.
2313 *
2314 * Please note that the address returned by this function may fall
2315 * inside *@pnext vmap_area.  The caller is responsible for checking
2316 * that.
2317 */
2318static unsigned long pvm_determine_end(struct vmap_area **pnext,
2319				       struct vmap_area **pprev,
2320				       unsigned long align)
2321{
2322	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2323	unsigned long addr;
2324
2325	if (*pnext)
2326		addr = min((*pnext)->va_start & ~(align - 1), vmalloc_end);
2327	else
2328		addr = vmalloc_end;
2329
2330	while (*pprev && (*pprev)->va_end > addr) {
2331		*pnext = *pprev;
2332		*pprev = node_to_va(rb_prev(&(*pnext)->rb_node));
2333	}
2334
2335	return addr;
2336}
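
/*
 * Worked example with illustrative values: with align = 0x1000 and
 * (*pnext)->va_start = 0x5010, addr is rounded down to 0x5000.  If
 * (*pprev)->va_end = 0x5800 > 0x5000, the pair is shifted down until
 * *pprev ends at or below 0x5000, and 0x5000 is still returned; it may
 * then lie inside the new *pnext, which is exactly the case the caller
 * has to recheck.
 */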
2337
2338/**
2339 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
2340 * @offsets: array containing offset of each area
2341 * @sizes: array containing size of each area
2342 * @nr_vms: the number of areas to allocate
2343 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
2344 *
2345 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
2346 *	    vm_structs on success, %NULL on failure
2347 *
2348 * Percpu allocator wants to use congruent vm areas so that it can
2349 * maintain the offsets among percpu areas.  This function allocates
2350 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
2351 * be scattered pretty far apart, with the distance between two areas
2352 * easily reaching gigabytes.  To avoid interacting with regular
2353 * vmallocs, these areas are allocated from the top.
2354 *
2355 * Despite its complicated look, this allocator is rather simple.  It
2356 * does everything top-down and scans areas from the end looking for a
2357 * matching slot.  While scanning, if any of the areas overlaps with an
2358 * existing vmap_area, the base address is pulled down to fit the
2359 * area.  Scanning is repeated until all the areas fit and then all
2360 * necessary data structures are inserted and the result is returned.
2361 */
2362struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2363				     const size_t *sizes, int nr_vms,
2364				     size_t align)
2365{
2366	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
2367	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
2368	struct vmap_area **vas, *prev, *next;
2369	struct vm_struct **vms;
2370	int area, area2, last_area, term_area;
2371	unsigned long base, start, end, last_end;
2372	bool purged = false;
2373
2374	/* verify parameters and allocate data structures */
2375	BUG_ON(align & ~PAGE_MASK || !is_power_of_2(align));
2376	for (last_area = 0, area = 0; area < nr_vms; area++) {
2377		start = offsets[area];
2378		end = start + sizes[area];
2379
2380		/* is everything aligned properly? */
2381		BUG_ON(!IS_ALIGNED(offsets[area], align));
2382		BUG_ON(!IS_ALIGNED(sizes[area], align));
2383
2384		/* detect the area with the highest address */
2385		if (start > offsets[last_area])
2386			last_area = area;
2387
2388		for (area2 = 0; area2 < nr_vms; area2++) {
2389			unsigned long start2 = offsets[area2];
2390			unsigned long end2 = start2 + sizes[area2];
2391
2392			if (area2 == area)
2393				continue;
2394
2395			BUG_ON(start2 >= start && start2 < end);
2396			BUG_ON(end2 <= end && end2 > start);
2397		}
2398	}
2399	last_end = offsets[last_area] + sizes[last_area];
2400
2401	if (vmalloc_end - vmalloc_start < last_end) {
2402		WARN_ON(true);
2403		return NULL;
2404	}
2405
2406	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2407	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2408	if (!vas || !vms)
2409		goto err_free2;
2410
2411	for (area = 0; area < nr_vms; area++) {
2412		vas[area] = kzalloc(sizeof(struct vmap_area), GFP_KERNEL);
2413		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
2414		if (!vas[area] || !vms[area])
2415			goto err_free;
2416	}
2417retry:
2418	spin_lock(&vmap_area_lock);
2419
2420	/* start scanning - we scan from the top, begin with the last area */
2421	area = term_area = last_area;
2422	start = offsets[area];
2423	end = start + sizes[area];
2424
2425	if (!pvm_find_next_prev(vmap_area_pcpu_hole, &next, &prev)) {
2426		base = vmalloc_end - last_end;
2427		goto found;
2428	}
2429	base = pvm_determine_end(&next, &prev, align) - end;
2430
2431	while (true) {
2432		BUG_ON(next && next->va_end <= base + end);
2433		BUG_ON(prev && prev->va_end > base + end);
2434
2435		/*
2436		 * base might have underflowed, add last_end before
2437		 * comparing.
2438		 */
2439		if (base + last_end < vmalloc_start + last_end) {
2440			spin_unlock(&vmap_area_lock);
2441			if (!purged) {
2442				purge_vmap_area_lazy();
2443				purged = true;
2444				goto retry;
2445			}
2446			goto err_free;
2447		}
2448
2449		/*
2450		 * If next overlaps, move base downwards so that it's
2451		 * right below next and then recheck.
2452		 */
2453		if (next && next->va_start < base + end) {
2454			base = pvm_determine_end(&next, &prev, align) - end;
2455			term_area = area;
2456			continue;
2457		}
2458
2459		/*
2460		 * If prev overlaps, shift down next and prev and move
2461		 * base so that it's right below new next and then
2462		 * recheck.
2463		 */
2464		if (prev && prev->va_end > base + start)  {
2465			next = prev;
2466			prev = node_to_va(rb_prev(&next->rb_node));
2467			base = pvm_determine_end(&next, &prev, align) - end;
2468			term_area = area;
2469			continue;
2470		}
2471
2472		/*
2473		 * This area fits, move on to the previous one.  If
2474		 * the previous one is the terminal one, we're done.
2475		 */
2476		area = (area + nr_vms - 1) % nr_vms;
2477		if (area == term_area)
2478			break;
2479		start = offsets[area];
2480		end = start + sizes[area];
2481		pvm_find_next_prev(base + end, &next, &prev);
2482	}
2483found:
2484	/* we've found a fitting base, insert all va's */
2485	for (area = 0; area < nr_vms; area++) {
2486		struct vmap_area *va = vas[area];
2487
2488		va->va_start = base + offsets[area];
2489		va->va_end = va->va_start + sizes[area];
2490		__insert_vmap_area(va);
2491	}
2492
2493	vmap_area_pcpu_hole = base + offsets[last_area];
2494
2495	spin_unlock(&vmap_area_lock);
2496
2497	/* insert all vm's */
2498	for (area = 0; area < nr_vms; area++)
2499		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
2500				 pcpu_get_vm_areas);
2501
2502	kfree(vas);
2503	return vms;
2504
2505err_free:
2506	for (area = 0; area < nr_vms; area++) {
2507		kfree(vas[area]);
2508		kfree(vms[area]);
2509	}
2510err_free2:
2511	kfree(vas);
2512	kfree(vms);
2513	return NULL;
2514}
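
/*
 * Illustrative sketch, not part of the original source: the percpu
 * allocator requests congruent areas along these lines (the numbers are
 * invented; all offsets and sizes must be multiples of @align):
 *
 *	const unsigned long offsets[] = { 0, 1UL << 20 };
 *	const size_t sizes[] = { 32 << 10, 32 << 10 };
 *	struct vm_struct **vms;
 *
 *	vms = pcpu_get_vm_areas(offsets, sizes, 2, PAGE_SIZE);
 *
 * On success both areas share one base address, so
 * vms[1]->addr - vms[0]->addr is exactly 1MB; the pair is released with
 * pcpu_free_vm_areas(vms, 2).
 */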
2515
2516/**
2517 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
2518 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
2519 * @nr_vms: the number of allocated areas
2520 *
2521 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
2522 */
2523void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
2524{
2525	int i;
2526
2527	for (i = 0; i < nr_vms; i++)
2528		free_vm_area(vms[i]);
2529	kfree(vms);
2530}
2531#endif	/* CONFIG_SMP */
2532
2533#ifdef CONFIG_PROC_FS
2534static void *s_start(struct seq_file *m, loff_t *pos)
2535	__acquires(&vmap_area_lock)
2536{
2537	loff_t n = *pos;
2538	struct vmap_area *va;
2539
2540	spin_lock(&vmap_area_lock);
2541	va = list_entry((&vmap_area_list)->next, typeof(*va), list);
2542	while (n > 0 && &va->list != &vmap_area_list) {
2543		n--;
2544		va = list_entry(va->list.next, typeof(*va), list);
2545	}
2546	if (!n && &va->list != &vmap_area_list)
2547		return va;
2548
2549	return NULL;
2550
2551}
2552
2553static void *s_next(struct seq_file *m, void *p, loff_t *pos)
2554{
2555	struct vmap_area *va = p, *next;
2556
2557	++*pos;
2558	next = list_entry(va->list.next, typeof(*va), list);
2559	if (&next->list != &vmap_area_list)
2560		return next;
2561
2562	return NULL;
2563}
2564
2565static void s_stop(struct seq_file *m, void *p)
2566	__releases(&vmap_area_lock)
2567{
2568	spin_unlock(&vmap_area_lock);
2569}
2570
2571static void show_numa_info(struct seq_file *m, struct vm_struct *v)
2572{
2573	if (IS_ENABLED(CONFIG_NUMA)) {
2574		unsigned int nr, *counters = m->private;
2575
2576		if (!counters)
2577			return;
2578
2579		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
2580		smp_rmb();
2581		if (v->flags & VM_UNINITIALIZED)
2582			return;
2583
2584		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
2585
2586		for (nr = 0; nr < v->nr_pages; nr++)
2587			counters[page_to_nid(v->pages[nr])]++;
2588
2589		for_each_node_state(nr, N_HIGH_MEMORY)
2590			if (counters[nr])
2591				seq_printf(m, " N%u=%u", nr, counters[nr]);
2592	}
2593}
2594
2595static int s_show(struct seq_file *m, void *p)
2596{
2597	struct vmap_area *va = p;
2598	struct vm_struct *v;
2599
2600	/*
2601	 * s_show can race with remove_vm_area(); !VM_VM_AREA means the vmap
2602	 * area is being torn down, or it belongs to a vm_map_ram allocation.
2603	 */
2604	if (!(va->flags & VM_VM_AREA))
2605		return 0;
2606
2607	v = va->vm;
2608
2609	seq_printf(m, "0x%pK-0x%pK %7ld",
2610		v->addr, v->addr + v->size, v->size);
2611
2612	if (v->caller)
2613		seq_printf(m, " %pS", v->caller);
2614
2615	if (v->nr_pages)
2616		seq_printf(m, " pages=%d", v->nr_pages);
2617
2618	if (v->phys_addr)
2619		seq_printf(m, " phys=%llx", (unsigned long long)v->phys_addr);
2620
2621	if (v->flags & VM_IOREMAP)
2622		seq_printf(m, " ioremap");
2623
2624	if (v->flags & VM_ALLOC)
2625		seq_printf(m, " vmalloc");
2626
2627	if (v->flags & VM_MAP)
2628		seq_printf(m, " vmap");
2629
2630	if (v->flags & VM_USERMAP)
2631		seq_printf(m, " user");
2632
2633	if (v->flags & VM_VPAGES)
2634		seq_printf(m, " vpages");
2635
2636	show_numa_info(m, v);
2637	seq_putc(m, '\n');
2638	return 0;
2639}
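
/*
 * Illustrative sketch, not part of the original source: with the format
 * strings above, one /proc/vmallocinfo line looks roughly like
 *
 *	0xffffc90000000000-0xffffc90000005000   20480 example_caller+0x24/0x90 pages=4 vmalloc N0=4
 *
 * where the addresses, caller and counts are invented for layout only
 * (a 0x5000-byte area includes the guard page, hence pages=4).
 */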
2640
2641static const struct seq_operations vmalloc_op = {
2642	.start = s_start,
2643	.next = s_next,
2644	.stop = s_stop,
2645	.show = s_show,
2646};
2647
2648static int vmalloc_open(struct inode *inode, struct file *file)
2649{
2650	unsigned int *ptr = NULL;
2651	int ret;
2652
2653	if (IS_ENABLED(CONFIG_NUMA)) {
2654		ptr = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
2655		if (ptr == NULL)
2656			return -ENOMEM;
2657	}
2658	ret = seq_open(file, &vmalloc_op);
2659	if (!ret) {
2660		struct seq_file *m = file->private_data;
2661		m->private = ptr;
2662	} else
2663		kfree(ptr);
2664	return ret;
2665}
2666
2667static const struct file_operations proc_vmalloc_operations = {
2668	.open		= vmalloc_open,
2669	.read		= seq_read,
2670	.llseek		= seq_lseek,
2671	.release	= seq_release_private,
2672};
2673
2674static int __init proc_vmalloc_init(void)
2675{
2676	proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations);
2677	return 0;
2678}
2679module_init(proc_vmalloc_init);
2680
2681void get_vmalloc_info(struct vmalloc_info *vmi)
2682{
2683	struct vmap_area *va;
2684	unsigned long free_area_size;
2685	unsigned long prev_end;
2686
2687	vmi->used = 0;
2688	vmi->largest_chunk = 0;
2689
2690	prev_end = VMALLOC_START;
2691
2692	spin_lock(&vmap_area_lock);
2693
2694	if (list_empty(&vmap_area_list)) {
2695		vmi->largest_chunk = VMALLOC_TOTAL;
2696		goto out;
2697	}
2698
2699	list_for_each_entry(va, &vmap_area_list, list) {
2700		unsigned long addr = va->va_start;
2701
2702		/*
2703		 * Some architectures keep another range for modules in vmalloc space.
2704		 */
2705		if (addr < VMALLOC_START)
2706			continue;
2707		if (addr >= VMALLOC_END)
2708			break;
2709
2710		if (va->flags & (VM_LAZY_FREE | VM_LAZY_FREEING))
2711			continue;
2712
2713		vmi->used += (va->va_end - va->va_start);
2714
2715		free_area_size = addr - prev_end;
2716		if (vmi->largest_chunk < free_area_size)
2717			vmi->largest_chunk = free_area_size;
2718
2719		prev_end = va->va_end;
2720	}
2721
2722	if (VMALLOC_END - prev_end > vmi->largest_chunk)
2723		vmi->largest_chunk = VMALLOC_END - prev_end;
2724
2725out:
2726	spin_unlock(&vmap_area_lock);
2727}
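
/*
 * Worked example with illustrative values: with VMALLOC_START = 0x1000,
 * VMALLOC_END = 0x9000 and live areas [0x2000, 0x3000) and
 * [0x6000, 0x7000), the loop yields used = 0x2000 and free gaps of
 * 0x1000, 0x3000 and 0x2000, so largest_chunk ends up 0x3000.
 */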
2728#endif
2729