   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  linux/mm/vmalloc.c
   4 *
   5 *  Copyright (C) 1993  Linus Torvalds
   6 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   7 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   8 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   9 *  Numa awareness, Christoph Lameter, SGI, June 2005
  10 */
  11
  12#include <linux/vmalloc.h>
  13#include <linux/mm.h>
  14#include <linux/module.h>
  15#include <linux/highmem.h>
  16#include <linux/sched/signal.h>
  17#include <linux/slab.h>
  18#include <linux/spinlock.h>
  19#include <linux/interrupt.h>
  20#include <linux/proc_fs.h>
  21#include <linux/seq_file.h>
  22#include <linux/set_memory.h>
  23#include <linux/debugobjects.h>
  24#include <linux/kallsyms.h>
  25#include <linux/list.h>
  26#include <linux/notifier.h>
  27#include <linux/rbtree.h>
  28#include <linux/radix-tree.h>
  29#include <linux/rcupdate.h>
  30#include <linux/pfn.h>
  31#include <linux/kmemleak.h>
  32#include <linux/atomic.h>
  33#include <linux/compiler.h>
  34#include <linux/llist.h>
  35#include <linux/bitops.h>
  36#include <linux/rbtree_augmented.h>
  37
  38#include <linux/uaccess.h>
  39#include <asm/tlbflush.h>
  40#include <asm/shmparam.h>
  41
  42#include "internal.h"
  43
  44struct vfree_deferred {
  45	struct llist_head list;
  46	struct work_struct wq;
  47};
  48static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
  49
  50static void __vunmap(const void *, int);
  51
  52static void free_work(struct work_struct *w)
  53{
  54	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
  55	struct llist_node *t, *llnode;
  56
  57	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
  58		__vunmap((void *)llnode, 1);
  59}
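
/*
 * A user-space sketch of the same deferred-free idiom (hypothetical
 * names; POSIX threads and C11 atomics stand in for llist/workqueues).
 * vfree() may be called from contexts that cannot sleep, so pointers
 * are pushed onto a lock-free list with a single CAS and the
 * potentially-sleeping free runs later in a worker.  Note the kernel
 * goes further and reuses the to-be-freed memory itself as the node.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct deferred {
	struct deferred *next;
	void *ptr;
};

static _Atomic(struct deferred *) deferred_list;

/* Safe from any context: one lock-free CAS loop, never blocks. */
static void free_deferred(void *ptr)
{
	struct deferred *d = malloc(sizeof(*d));

	d->ptr = ptr;
	d->next = atomic_load(&deferred_list);
	while (!atomic_compare_exchange_weak(&deferred_list, &d->next, d))
		;
}

/* Worker: detach the whole list at once (llist_del_all analogue). */
static void *free_worker(void *arg)
{
	struct deferred *d = atomic_exchange(&deferred_list, NULL);

	(void)arg;
	while (d) {
		struct deferred *next = d->next;

		free(d->ptr);	/* the "sleepable" work happens here */
		free(d);
		d = next;
	}
	return NULL;
}

int main(void)
{
	pthread_t worker;

	free_deferred(malloc(32));
	free_deferred(malloc(64));
	pthread_create(&worker, NULL, free_worker, NULL);
	pthread_join(worker, NULL);
	return 0;
}
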
  60
  61/*** Page table manipulation functions ***/
  62
  63static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
  64{
  65	pte_t *pte;
  66
  67	pte = pte_offset_kernel(pmd, addr);
  68	do {
  69		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
  70		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
  71	} while (pte++, addr += PAGE_SIZE, addr != end);
  72}
  73
  74static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end)
  75{
  76	pmd_t *pmd;
  77	unsigned long next;
  78
  79	pmd = pmd_offset(pud, addr);
  80	do {
  81		next = pmd_addr_end(addr, end);
  82		if (pmd_clear_huge(pmd))
  83			continue;
  84		if (pmd_none_or_clear_bad(pmd))
  85			continue;
  86		vunmap_pte_range(pmd, addr, next);
  87	} while (pmd++, addr = next, addr != end);
  88}
  89
  90static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end)
  91{
  92	pud_t *pud;
  93	unsigned long next;
  94
  95	pud = pud_offset(p4d, addr);
  96	do {
  97		next = pud_addr_end(addr, end);
  98		if (pud_clear_huge(pud))
  99			continue;
 100		if (pud_none_or_clear_bad(pud))
 101			continue;
 102		vunmap_pmd_range(pud, addr, next);
 103	} while (pud++, addr = next, addr != end);
 104}
 105
 106static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end)
 107{
 108	p4d_t *p4d;
 109	unsigned long next;
 110
 111	p4d = p4d_offset(pgd, addr);
 112	do {
 113		next = p4d_addr_end(addr, end);
 114		if (p4d_clear_huge(p4d))
 115			continue;
 116		if (p4d_none_or_clear_bad(p4d))
 117			continue;
 118		vunmap_pud_range(p4d, addr, next);
 119	} while (p4d++, addr = next, addr != end);
 120}
 121
 122static void vunmap_page_range(unsigned long addr, unsigned long end)
 123{
 124	pgd_t *pgd;
 125	unsigned long next;
 126
 127	BUG_ON(addr >= end);
 128	pgd = pgd_offset_k(addr);
 129	do {
 130		next = pgd_addr_end(addr, end);
 131		if (pgd_none_or_clear_bad(pgd))
 132			continue;
 133		vunmap_p4d_range(pgd, addr, next);
 134	} while (pgd++, addr = next, addr != end);
 135}
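
/*
 * A sketch of the range-walk idiom used by the vunmap_*_range()
 * helpers above (and the vmap_*_range() helpers below), under a
 * made-up size (hypothetical LEVEL_SIZE; the real code uses
 * p*d_addr_end() per page-table level).  Each level splits
 * [addr, end) at its entry boundary, so the next level down only
 * ever sees a range that fits within one entry.
 */
#include <stdio.h>

#define LEVEL_SIZE	(1UL << 21)	/* pretend one entry covers 2 MiB */

/* Analogue of pmd_addr_end(): end of the current entry, capped at 'end'. */
static unsigned long level_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + LEVEL_SIZE) & ~(LEVEL_SIZE - 1);

	return boundary < end ? boundary : end;
}

static void walk_range(unsigned long addr, unsigned long end)
{
	unsigned long next;

	do {
		next = level_addr_end(addr, end);
		/* a lower-level walker would handle [addr, next) here */
		printf("entry sees [%#lx, %#lx)\n", addr, next);
	} while (addr = next, addr != end);
}

int main(void)
{
	walk_range(0x1ff000, 0x601000);	/* spans several 2 MiB entries */
	return 0;
}
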
 136
 137static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
 138		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 139{
 140	pte_t *pte;
 141
 142	/*
 143	 * nr is a running index into the array which helps higher level
 144	 * callers keep track of where we're up to.
 145	 */
 146
 147	pte = pte_alloc_kernel(pmd, addr);
 148	if (!pte)
 149		return -ENOMEM;
 150	do {
 151		struct page *page = pages[*nr];
 152
 153		if (WARN_ON(!pte_none(*pte)))
 154			return -EBUSY;
 155		if (WARN_ON(!page))
 156			return -ENOMEM;
 157		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 158		(*nr)++;
 159	} while (pte++, addr += PAGE_SIZE, addr != end);
 160	return 0;
 161}
 162
 163static int vmap_pmd_range(pud_t *pud, unsigned long addr,
 164		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 165{
 166	pmd_t *pmd;
 167	unsigned long next;
 168
 169	pmd = pmd_alloc(&init_mm, pud, addr);
 170	if (!pmd)
 171		return -ENOMEM;
 172	do {
 173		next = pmd_addr_end(addr, end);
 174		if (vmap_pte_range(pmd, addr, next, prot, pages, nr))
 175			return -ENOMEM;
 176	} while (pmd++, addr = next, addr != end);
 177	return 0;
 178}
 179
 180static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
 181		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 182{
 183	pud_t *pud;
 184	unsigned long next;
 185
 186	pud = pud_alloc(&init_mm, p4d, addr);
 187	if (!pud)
 188		return -ENOMEM;
 189	do {
 190		next = pud_addr_end(addr, end);
 191		if (vmap_pmd_range(pud, addr, next, prot, pages, nr))
 192			return -ENOMEM;
 193	} while (pud++, addr = next, addr != end);
 194	return 0;
 195}
 196
 197static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
 198		unsigned long end, pgprot_t prot, struct page **pages, int *nr)
 199{
 200	p4d_t *p4d;
 201	unsigned long next;
 202
 203	p4d = p4d_alloc(&init_mm, pgd, addr);
 204	if (!p4d)
 205		return -ENOMEM;
 206	do {
 207		next = p4d_addr_end(addr, end);
 208		if (vmap_pud_range(p4d, addr, next, prot, pages, nr))
 209			return -ENOMEM;
 210	} while (p4d++, addr = next, addr != end);
 211	return 0;
 212}
 213
 214/*
 215 * Set up page tables in kva (addr, end). The ptes shall have prot "prot", and
 216 * will have pfns corresponding to the "pages" array.
 217 *
 218 * I.e. the pte at addr+N*PAGE_SIZE shall point to the pfn of pages[N]
 219 */
 220static int vmap_page_range_noflush(unsigned long start, unsigned long end,
 221				   pgprot_t prot, struct page **pages)
 222{
 223	pgd_t *pgd;
 224	unsigned long next;
 225	unsigned long addr = start;
 226	int err = 0;
 227	int nr = 0;
 228
 229	BUG_ON(addr >= end);
 230	pgd = pgd_offset_k(addr);
 231	do {
 232		next = pgd_addr_end(addr, end);
 233		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr);
 234		if (err)
 235			return err;
 236	} while (pgd++, addr = next, addr != end);
 237
 238	return nr;
 239}
 240
 241static int vmap_page_range(unsigned long start, unsigned long end,
 242			   pgprot_t prot, struct page **pages)
 243{
 244	int ret;
 245
 246	ret = vmap_page_range_noflush(start, end, prot, pages);
 247	flush_cache_vmap(start, end);
 248	return ret;
 249}
 250
 251int is_vmalloc_or_module_addr(const void *x)
 252{
 253	/*
 254	 * ARM, x86-64 and sparc64 put modules in a special place
 255	 * and fall back on vmalloc() if that fails. Other
 256	 * architectures just put modules in the vmalloc space.
 257	 */
 258#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
 259	unsigned long addr = (unsigned long)x;
 260	if (addr >= MODULES_VADDR && addr < MODULES_END)
 261		return 1;
 262#endif
 263	return is_vmalloc_addr(x);
 264}
 265
 266/*
 267 * Walk a vmap address to the struct page it maps.
 268 */
 269struct page *vmalloc_to_page(const void *vmalloc_addr)
 270{
 271	unsigned long addr = (unsigned long) vmalloc_addr;
 272	struct page *page = NULL;
 273	pgd_t *pgd = pgd_offset_k(addr);
 274	p4d_t *p4d;
 275	pud_t *pud;
 276	pmd_t *pmd;
 277	pte_t *ptep, pte;
 278
 279	/*
 280	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
 281	 * architectures that do not vmalloc module space
 282	 */
 283	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 284
 285	if (pgd_none(*pgd))
 286		return NULL;
 287	p4d = p4d_offset(pgd, addr);
 288	if (p4d_none(*p4d))
 289		return NULL;
 290	pud = pud_offset(p4d, addr);
 291
 292	/*
 293	 * Don't dereference bad PUD or PMD (below) entries. This will also
 294	 * identify huge mappings, which we may encounter on architectures
 295	 * that define CONFIG_HAVE_ARCH_HUGE_VMAP=y. Such regions will be
 296	 * identified as vmalloc addresses by is_vmalloc_addr(), but are
 297	 * not [unambiguously] associated with a struct page, so there is
 298	 * no correct value to return for them.
 299	 */
 300	WARN_ON_ONCE(pud_bad(*pud));
 301	if (pud_none(*pud) || pud_bad(*pud))
 302		return NULL;
 303	pmd = pmd_offset(pud, addr);
 304	WARN_ON_ONCE(pmd_bad(*pmd));
 305	if (pmd_none(*pmd) || pmd_bad(*pmd))
 306		return NULL;
 307
 308	ptep = pte_offset_map(pmd, addr);
 309	pte = *ptep;
 310	if (pte_present(pte))
 311		page = pte_page(pte);
 312	pte_unmap(ptep);
 313	return page;
 314}
 315EXPORT_SYMBOL(vmalloc_to_page);
 316
 317/*
 318 * Map a vmalloc()-space virtual address to the physical page frame number.
 319 */
 320unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 321{
 322	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 323}
 324EXPORT_SYMBOL(vmalloc_to_pfn);
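
/*
 * vmalloc_to_pfn() walks the kernel page tables.  User space can do the
 * analogous virtual-to-PFN lookup for its own mappings -- a sketch,
 * assuming Linux's documented /proc/self/pagemap format (one u64 per
 * page: bit 63 = present, bits 0-54 = PFN).  Since Linux 4.0 the PFN
 * field reads as 0 without CAP_SYS_ADMIN.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static uint64_t virt_to_pfn(const void *addr)
{
	uint64_t entry = 0;
	long psize = sysconf(_SC_PAGESIZE);
	off_t off = (off_t)((uintptr_t)addr / psize) * sizeof(entry);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 0;
	n = pread(fd, &entry, sizeof(entry), off);
	close(fd);

	if (n != sizeof(entry) || !(entry & (1ULL << 63)))
		return 0;			/* unreadable or not present */
	return entry & ((1ULL << 55) - 1);	/* bits 0-54 hold the PFN */
}

int main(void)
{
	char *p = malloc(4096);

	*p = 1;					/* fault the page in first */
	printf("vaddr %p -> pfn %#llx\n", (void *)p,
	       (unsigned long long)virt_to_pfn(p));
	free(p);
	return 0;
}
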
 325
 326
 327/*** Global kva allocator ***/
 328
 329#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 330#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 331
 332
 333static DEFINE_SPINLOCK(vmap_area_lock);
 334/* Export for kexec only */
 335LIST_HEAD(vmap_area_list);
 336static LLIST_HEAD(vmap_purge_list);
 337static struct rb_root vmap_area_root = RB_ROOT;
 338static bool vmap_initialized __read_mostly;
 339
 340/*
 341 * This kmem_cache is used for vmap_area objects. Instead of
 342 * allocating from slab we reuse an object from this cache to
 343 * make things faster, especially in the "no edge" splitting
 344 * of a free block.
 345 */
 346static struct kmem_cache *vmap_area_cachep;
 347
 348/*
 349 * This linked list is used in pair with free_vmap_area_root.
 350 * It gives O(1) access to prev/next to perform fast coalescing.
 351 */
 352static LIST_HEAD(free_vmap_area_list);
 353
 354/*
 355 * This augmented red-black tree represents the free vmap space.
 356 * All vmap_area objects in this tree are sorted by va->va_start
 357 * address. It is used for allocation and merging when a vmap
 358 * object is released.
 359 *
 360 * Each vmap_area node stores the maximum available free block
 361 * size of its sub-tree, left or right. Therefore it is possible
 362 * to find the lowest match of a free area.
 363 */
 364static struct rb_root free_vmap_area_root = RB_ROOT;
 365
 366/*
 367 * Preload a CPU with one object for the "no edge" split case. The
 368 * aim is to get rid of allocations from atomic context and thus
 369 * to use more permissive allocation masks.
 370 */
 371static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
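
/*
 * A sketch of the preload pattern, using a C11 thread-local slot in
 * place of the per-CPU variable (hypothetical names; user-space
 * analogue only).  The spare object is refilled in a context that may
 * block, so the consumer running under a lock rarely has to fall back
 * to a best-effort allocation (the GFP_NOWAIT case further below).
 */
#include <stdio.h>
#include <stdlib.h>

struct obj { int payload; };

static _Thread_local struct obj *preload;	/* one spare per thread */

/* May block: refill the spare slot with a permissive allocation. */
static void preload_this_thread(void)
{
	if (!preload)
		preload = malloc(sizeof(*preload));
}

/* Runs "under a lock": consume the spare, fall back only if empty. */
static struct obj *get_object_atomic(void)
{
	struct obj *o = preload;

	preload = NULL;
	if (!o)
		o = malloc(sizeof(*o));	/* best-effort fallback */
	return o;
}

int main(void)
{
	struct obj *o;

	preload_this_thread();
	o = get_object_atomic();
	printf("got object %p from the preload slot\n", (void *)o);
	free(o);
	return 0;
}
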
 372
 373static __always_inline unsigned long
 374va_size(struct vmap_area *va)
 375{
 376	return (va->va_end - va->va_start);
 377}
 378
 379static __always_inline unsigned long
 380get_subtree_max_size(struct rb_node *node)
 381{
 382	struct vmap_area *va;
 383
 384	va = rb_entry_safe(node, struct vmap_area, rb_node);
 385	return va ? va->subtree_max_size : 0;
 386}
 387
 388/*
 389 * Gets called when a node is removed and the tree is rotated.
 390 */
 391static __always_inline unsigned long
 392compute_subtree_max_size(struct vmap_area *va)
 393{
 394	return max3(va_size(va),
 395		get_subtree_max_size(va->rb_node.rb_left),
 396		get_subtree_max_size(va->rb_node.rb_right));
 397}
 398
 399RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
 400	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
 401
 402static void purge_vmap_area_lazy(void);
 403static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
 404static unsigned long lazy_max_pages(void);
 405
 406static atomic_long_t nr_vmalloc_pages;
 407
 408unsigned long vmalloc_nr_pages(void)
 409{
 410	return atomic_long_read(&nr_vmalloc_pages);
 411}
 412
 413static struct vmap_area *__find_vmap_area(unsigned long addr)
 414{
 415	struct rb_node *n = vmap_area_root.rb_node;
 416
 417	while (n) {
 418		struct vmap_area *va;
 419
 420		va = rb_entry(n, struct vmap_area, rb_node);
 421		if (addr < va->va_start)
 422			n = n->rb_left;
 423		else if (addr >= va->va_end)
 424			n = n->rb_right;
 425		else
 426			return va;
 427	}
 428
 429	return NULL;
 430}
 431
 432/*
 433 * This function returns the address of the parent node's left
 434 * or right link, and the parent node itself, for further processing.
 435 */
 436static __always_inline struct rb_node **
 437find_va_links(struct vmap_area *va,
 438	struct rb_root *root, struct rb_node *from,
 439	struct rb_node **parent)
 440{
 441	struct vmap_area *tmp_va;
 442	struct rb_node **link;
 443
 444	if (root) {
 445		link = &root->rb_node;
 446		if (unlikely(!*link)) {
 447			*parent = NULL;
 448			return link;
 449		}
 450	} else {
 451		link = &from;
 452	}
 453
 454	/*
 455	 * Go to the bottom of the tree. When we hit the last point
 456	 * we end up with the parent rb_node and the correct direction,
 457	 * named "link" here, where the new va->rb_node will be attached.
 458	 */
 459	do {
 460		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
 461
 462		/*
 463		 * During the traversal we also do some sanity checks.
 464		 * Trigger a BUG() on partial (left/right) or full
 465		 * overlaps.
 466		 */
 467		if (va->va_start < tmp_va->va_end &&
 468				va->va_end <= tmp_va->va_start)
 469			link = &(*link)->rb_left;
 470		else if (va->va_end > tmp_va->va_start &&
 471				va->va_start >= tmp_va->va_end)
 472			link = &(*link)->rb_right;
 473		else
 474			BUG();
 475	} while (*link);
 476
 477	*parent = &tmp_va->rb_node;
 478	return link;
 479}
 480
 481static __always_inline struct list_head *
 482get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
 483{
 484	struct list_head *list;
 485
 486	if (unlikely(!parent))
 487		/*
 488		 * The red-black tree where we try to find VA neighbors
 489		 * before merging or inserting is empty, i.e. there is
 490		 * no free vmap space. Normally this does not happen,
 491		 * but we handle the case anyway.
 492		 */
 493		return NULL;
 494
 495	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
 496	return (&parent->rb_right == link ? list->next : list);
 497}
 498
 499static __always_inline void
 500link_va(struct vmap_area *va, struct rb_root *root,
 501	struct rb_node *parent, struct rb_node **link, struct list_head *head)
 502{
 503	/*
 504	 * VA is not yet in the list, but we can
 505	 * identify its future previous list_head node.
 506	 */
 507	if (likely(parent)) {
 508		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
 509		if (&parent->rb_right != link)
 510			head = head->prev;
 511	}
 512
 513	/* Insert to the rb-tree */
 514	rb_link_node(&va->rb_node, parent, link);
 515	if (root == &free_vmap_area_root) {
 516		/*
 517		 * Some explanation here. Just perform a simple insertion
 518		 * into the tree. We do not set va->subtree_max_size to
 519		 * its current size before calling rb_insert_augmented().
 520		 * This is because we populate the tree from the bottom up
 521		 * to the parent levels once the node _is_ in the tree.
 522		 *
 523		 * Therefore we set subtree_max_size to zero after insertion,
 524		 * to let __augment_tree_propagate_from() put everything in
 525		 * the correct order later on.
 526		 */
 527		rb_insert_augmented(&va->rb_node,
 528			root, &free_vmap_area_rb_augment_cb);
 529		va->subtree_max_size = 0;
 530	} else {
 531		rb_insert_color(&va->rb_node, root);
 532	}
 533
 534	/* Address-sort this list */
 535	list_add(&va->list, head);
 536}
 537
 538static __always_inline void
 539unlink_va(struct vmap_area *va, struct rb_root *root)
 540{
 541	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
 542		return;
 543
 544	if (root == &free_vmap_area_root)
 545		rb_erase_augmented(&va->rb_node,
 546			root, &free_vmap_area_rb_augment_cb);
 547	else
 548		rb_erase(&va->rb_node, root);
 549
 550	list_del(&va->list);
 551	RB_CLEAR_NODE(&va->rb_node);
 552}
 553
 554#if DEBUG_AUGMENT_PROPAGATE_CHECK
 555static void
 556augment_tree_propagate_check(struct rb_node *n)
 557{
 558	struct vmap_area *va;
 559	struct rb_node *node;
 560	unsigned long size;
 561	bool found = false;
 562
 563	if (n == NULL)
 564		return;
 565
 566	va = rb_entry(n, struct vmap_area, rb_node);
 567	size = va->subtree_max_size;
 568	node = n;
 569
 570	while (node) {
 571		va = rb_entry(node, struct vmap_area, rb_node);
 572
 573		if (get_subtree_max_size(node->rb_left) == size) {
 574			node = node->rb_left;
 575		} else {
 576			if (va_size(va) == size) {
 577				found = true;
 578				break;
 579			}
 580
 581			node = node->rb_right;
 582		}
 583	}
 584
 585	if (!found) {
 586		va = rb_entry(n, struct vmap_area, rb_node);
 587		pr_emerg("tree is corrupted: %lu, %lu\n",
 588			va_size(va), va->subtree_max_size);
 589	}
 590
 591	augment_tree_propagate_check(n->rb_left);
 592	augment_tree_propagate_check(n->rb_right);
 593}
 594#endif
 595
 596/*
 597 * This function populates subtree_max_size from the bottom to the
 598 * upper levels, starting from the VA point. The propagation must
 599 * be done when a VA's size is modified by changing its va_start/
 600 * va_end, or when a new VA is inserted into the tree.
 601 *
 602 * It means that __augment_tree_propagate_from() must be called:
 603 * - After VA has been inserted to the tree(free path);
 604 * - After VA has been shrunk(allocation path);
 605 * - After VA has been increased(merging path).
 606 *
 607 * Please note that this does not mean that upper parent nodes
 608 * and their subtree_max_size are recalculated all the way up
 609 * to the root node.
 610 *
 611 *       4--8
 612 *        /\
 613 *       /  \
 614 *      /    \
 615 *    2--2  8--8
 616 *
 617 * For example, if we shrink node 4 to 2, then no modification
 618 * is required at all. If we shrink node 2 to 1, only its own
 619 * subtree_max_size is updated and set to 1. If we shrink node 8
 620 * to 6, then its subtree_max_size is set to 6 and the parent
 621 * node becomes 4--6.
 622 */
 623static __always_inline void
 624augment_tree_propagate_from(struct vmap_area *va)
 625{
 626	struct rb_node *node = &va->rb_node;
 627	unsigned long new_va_sub_max_size;
 628
 629	while (node) {
 630		va = rb_entry(node, struct vmap_area, rb_node);
 631		new_va_sub_max_size = compute_subtree_max_size(va);
 632
 633		/*
 634		 * If the newly calculated maximum available size of the
 635		 * subtree is equal to the current one, then it means that
 636		 * the tree is propagated correctly. So we have to stop at
 637		 * this point to save cycles.
 638		 */
 639		if (va->subtree_max_size == new_va_sub_max_size)
 640			break;
 641
 642		va->subtree_max_size = new_va_sub_max_size;
 643		node = rb_parent(&va->rb_node);
 644	}
 645
 646#if DEBUG_AUGMENT_PROPAGATE_CHECK
 647	augment_tree_propagate_check(free_vmap_area_root.rb_node);
 648#endif
 649}
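
/*
 * A toy sketch of this bottom-up propagation with the early stop,
 * using plain parent pointers instead of rb_node (hypothetical names,
 * user-space only).  It reproduces the "shrink node 8 to 6" case from
 * the comment above: the root's subtree_max goes 8 -> 6.
 */
#include <stdio.h>

struct tnode {
	unsigned long size;		/* va_size() analogue */
	unsigned long subtree_max;	/* subtree_max_size analogue */
	struct tnode *left, *right, *parent;
};

static unsigned long tnode_max(struct tnode *n)
{
	return n ? n->subtree_max : 0;
}

static unsigned long max3(unsigned long a, unsigned long b, unsigned long c)
{
	unsigned long m = a > b ? a : b;

	return m > c ? m : c;
}

/* Walk up from n; stop as soon as a recomputed value is unchanged. */
static void propagate_from(struct tnode *n)
{
	while (n) {
		unsigned long new_max = max3(n->size, tnode_max(n->left),
					     tnode_max(n->right));

		if (n->subtree_max == new_max)
			break;		/* ancestors are already correct */
		n->subtree_max = new_max;
		n = n->parent;
	}
}

int main(void)
{
	/* The tree from the comment above: root 4 with children 2 and 8. */
	struct tnode l = { 2, 2 }, r = { 8, 8 }, root = { 4, 8, &l, &r };

	l.parent = r.parent = &root;
	r.size = 6;			/* shrink node 8 to 6 */
	propagate_from(&r);
	printf("root subtree_max = %lu\n", root.subtree_max);	/* 6 */
	return 0;
}
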
 650
 651static void
 652insert_vmap_area(struct vmap_area *va,
 653	struct rb_root *root, struct list_head *head)
 654{
 655	struct rb_node **link;
 656	struct rb_node *parent;
 657
 658	link = find_va_links(va, root, NULL, &parent);
 659	link_va(va, root, parent, link, head);
 660}
 661
 662static void
 663insert_vmap_area_augment(struct vmap_area *va,
 664	struct rb_node *from, struct rb_root *root,
 665	struct list_head *head)
 666{
 667	struct rb_node **link;
 668	struct rb_node *parent;
 669
 670	if (from)
 671		link = find_va_links(va, NULL, from, &parent);
 672	else
 673		link = find_va_links(va, root, NULL, &parent);
 674
 675	link_va(va, root, parent, link, head);
 676	augment_tree_propagate_from(va);
 677}
 678
 679/*
 680 * Merge a de-allocated chunk of VA memory with the previous
 681 * and next free blocks. If no coalescing is done, a new
 682 * free area is inserted. If the VA has been merged, the
 683 * vmap_area object is freed.
 684 */
 685static __always_inline void
 686merge_or_add_vmap_area(struct vmap_area *va,
 687	struct rb_root *root, struct list_head *head)
 688{
 689	struct vmap_area *sibling;
 690	struct list_head *next;
 691	struct rb_node **link;
 692	struct rb_node *parent;
 693	bool merged = false;
 694
 695	/*
 696	 * Find a place in the tree where VA potentially will be
 697	 * inserted, unless it is merged with its sibling/siblings.
 698	 */
 699	link = find_va_links(va, root, NULL, &parent);
 700
 701	/*
 702	 * Get next node of VA to check if merging can be done.
 703	 */
 704	next = get_va_next_sibling(parent, link);
 705	if (unlikely(next == NULL))
 706		goto insert;
 707
 708	/*
 709	 * start            end
 710	 * |                |
 711	 * |<------VA------>|<-----Next----->|
 712	 *                  |                |
 713	 *                  start            end
 714	 */
 715	if (next != head) {
 716		sibling = list_entry(next, struct vmap_area, list);
 717		if (sibling->va_start == va->va_end) {
 718			sibling->va_start = va->va_start;
 719
 720			/* Check and update the tree if needed. */
 721			augment_tree_propagate_from(sibling);
 722
 723			/* Free vmap_area object. */
 724			kmem_cache_free(vmap_area_cachep, va);
 725
 726			/* Point to the new merged area. */
 727			va = sibling;
 728			merged = true;
 729		}
 730	}
 731
 732	/*
 733	 * start            end
 734	 * |                |
 735	 * |<-----Prev----->|<------VA------>|
 736	 *                  |                |
 737	 *                  start            end
 738	 */
 739	if (next->prev != head) {
 740		sibling = list_entry(next->prev, struct vmap_area, list);
 741		if (sibling->va_end == va->va_start) {
 742			sibling->va_end = va->va_end;
 743
 744			/* Check and update the tree if needed. */
 745			augment_tree_propagate_from(sibling);
 746
 747			if (merged)
 748				unlink_va(va, root);
 749
 750			/* Free vmap_area object. */
 751			kmem_cache_free(vmap_area_cachep, va);
 752			return;
 753		}
 754	}
 755
 756insert:
 757	if (!merged) {
 758		link_va(va, root, parent, link, head);
 759		augment_tree_propagate_from(va);
 760	}
 761}
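
/*
 * A compact sketch of the coalescing rules above (hypothetical names;
 * an address-sorted array stands in for the rb-tree plus linked list).
 * A freed [start, end) merges into the next block, the previous block,
 * both (bridging them), or is inserted as a new free range.
 */
#include <stdio.h>

struct range { unsigned long start, end; };

/* Returns the new number of ranges in the sorted array 'r'. */
static int merge_or_add(struct range *r, int n,
			unsigned long start, unsigned long end)
{
	int i = 0, merged = 0;

	while (i < n && r[i].end <= start)	/* find the insert slot */
		i++;

	if (i < n && r[i].start == end) {	/* merge with next */
		r[i].start = start;
		merged = 1;
	}
	if (i > 0 && r[i - 1].end == start) {	/* merge with previous */
		if (merged) {			/* bridges prev and next */
			r[i - 1].end = r[i].end;
			for (int j = i; j < n - 1; j++)
				r[j] = r[j + 1];
			return n - 1;
		}
		r[i - 1].end = end;
		return n;
	}
	if (merged)
		return n;
	for (int j = n; j > i; j--)		/* plain insertion */
		r[j] = r[j - 1];
	r[i].start = start;
	r[i].end = end;
	return n + 1;
}

int main(void)
{
	struct range r[8] = { { 0x1000, 0x2000 }, { 0x3000, 0x4000 } };
	int n = merge_or_add(r, 2, 0x2000, 0x3000);	/* bridges both */

	for (int i = 0; i < n; i++)
		printf("[%#lx, %#lx)\n", r[i].start, r[i].end);
	return 0;
}
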
 762
 763static __always_inline bool
 764is_within_this_va(struct vmap_area *va, unsigned long size,
 765	unsigned long align, unsigned long vstart)
 766{
 767	unsigned long nva_start_addr;
 768
 769	if (va->va_start > vstart)
 770		nva_start_addr = ALIGN(va->va_start, align);
 771	else
 772		nva_start_addr = ALIGN(vstart, align);
 773
 774	/* Can be overflowed due to big size or alignment. */
 775	if (nva_start_addr + size < nva_start_addr ||
 776			nva_start_addr < vstart)
 777		return false;
 778
 779	return (nva_start_addr + size <= va->va_end);
 780}
 781
 782/*
 783 * Find the first free block (lowest start address) in the tree
 784 * that can satisfy the request described by the passed
 785 * parameters.
 786 */
 787static __always_inline struct vmap_area *
 788find_vmap_lowest_match(unsigned long size,
 789	unsigned long align, unsigned long vstart)
 790{
 791	struct vmap_area *va;
 792	struct rb_node *node;
 793	unsigned long length;
 794
 795	/* Start from the root. */
 796	node = free_vmap_area_root.rb_node;
 797
 798	/* Adjust the search size for alignment overhead. */
 799	length = size + align - 1;
 800
 801	while (node) {
 802		va = rb_entry(node, struct vmap_area, rb_node);
 803
 804		if (get_subtree_max_size(node->rb_left) >= length &&
 805				vstart < va->va_start) {
 806			node = node->rb_left;
 807		} else {
 808			if (is_within_this_va(va, size, align, vstart))
 809				return va;
 810
 811			/*
 812			 * It makes no sense to descend into the right
 813			 * sub-tree if it does not have a free block that is
 814			 * equal to or bigger than the requested search length.
 815			 */
 816			if (get_subtree_max_size(node->rb_right) >= length) {
 817				node = node->rb_right;
 818				continue;
 819			}
 820
 821			/*
 822			 * OK. We roll back and find the first right sub-tree
 823			 * that satisfies the search criteria. This can happen
 824			 * only once due to the "vstart" restriction.
 825			 */
 826			while ((node = rb_parent(node))) {
 827				va = rb_entry(node, struct vmap_area, rb_node);
 828				if (is_within_this_va(va, size, align, vstart))
 829					return va;
 830
 831				if (get_subtree_max_size(node->rb_right) >= length &&
 832						vstart <= va->va_start) {
 833					node = node->rb_right;
 834					break;
 835				}
 836			}
 837		}
 838	}
 839
 840	return NULL;
 841}
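
/*
 * A sketch of the pruned lowest-match search on a hand-built toy tree
 * (hypothetical names; the vstart/alignment handling of the real
 * function is omitted).  The subtree_max augmentation lets whole
 * subtrees be skipped: descend left while a fit exists there, since
 * all left-hand blocks have lower start addresses.
 */
#include <stdio.h>

struct fnode {
	unsigned long start, end;	/* free block [start, end) */
	unsigned long subtree_max;	/* biggest block in this subtree */
	struct fnode *left, *right;
};

static unsigned long fnode_max(struct fnode *n)
{
	return n ? n->subtree_max : 0;
}

static struct fnode *lowest_match(struct fnode *n, unsigned long size)
{
	while (n) {
		if (fnode_max(n->left) >= size)
			n = n->left;	/* a lower-address fit exists */
		else if (n->end - n->start >= size)
			return n;	/* this node is the lowest fit */
		else if (fnode_max(n->right) >= size)
			n = n->right;	/* only the right side can fit */
		else
			return NULL;
	}
	return NULL;
}

int main(void)
{
	/* Address-sorted: 0x1000(+0x100), 0x3000(+0x800), 0x7000(+0x400) */
	struct fnode a = { 0x1000, 0x1100, 0x100 };
	struct fnode c = { 0x7000, 0x7400, 0x400 };
	struct fnode b = { 0x3000, 0x3800, 0x800, &a, &c };
	struct fnode *hit = lowest_match(&b, 0x200);

	if (hit)
		printf("lowest fit starts at %#lx\n", hit->start); /* 0x3000 */
	return 0;
}
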
 842
 843#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
 844#include <linux/random.h>
 845
 846static struct vmap_area *
 847find_vmap_lowest_linear_match(unsigned long size,
 848	unsigned long align, unsigned long vstart)
 849{
 850	struct vmap_area *va;
 851
 852	list_for_each_entry(va, &free_vmap_area_list, list) {
 853		if (!is_within_this_va(va, size, align, vstart))
 854			continue;
 855
 856		return va;
 857	}
 858
 859	return NULL;
 860}
 861
 862static void
 863find_vmap_lowest_match_check(unsigned long size)
 864{
 865	struct vmap_area *va_1, *va_2;
 866	unsigned long vstart;
 867	unsigned int rnd;
 868
 869	get_random_bytes(&rnd, sizeof(rnd));
 870	vstart = VMALLOC_START + rnd;
 871
 872	va_1 = find_vmap_lowest_match(size, 1, vstart);
 873	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
 874
 875	if (va_1 != va_2)
 876		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
 877			va_1, va_2, vstart);
 878}
 879#endif
 880
 881enum fit_type {
 882	NOTHING_FIT = 0,
 883	FL_FIT_TYPE = 1,	/* full fit */
 884	LE_FIT_TYPE = 2,	/* left edge fit */
 885	RE_FIT_TYPE = 3,	/* right edge fit */
 886	NE_FIT_TYPE = 4		/* no edge fit */
 887};
 888
 889static __always_inline enum fit_type
 890classify_va_fit_type(struct vmap_area *va,
 891	unsigned long nva_start_addr, unsigned long size)
 892{
 893	enum fit_type type;
 894
 895	/* Check if it is within VA. */
 896	if (nva_start_addr < va->va_start ||
 897			nva_start_addr + size > va->va_end)
 898		return NOTHING_FIT;
 899
 900	/* Now classify. */
 901	if (va->va_start == nva_start_addr) {
 902		if (va->va_end == nva_start_addr + size)
 903			type = FL_FIT_TYPE;
 904		else
 905			type = LE_FIT_TYPE;
 906	} else if (va->va_end == nva_start_addr + size) {
 907		type = RE_FIT_TYPE;
 908	} else {
 909		type = NE_FIT_TYPE;
 910	}
 911
 912	return type;
 913}
 914
 915static __always_inline int
 916adjust_va_to_fit_type(struct vmap_area *va,
 917	unsigned long nva_start_addr, unsigned long size,
 918	enum fit_type type)
 919{
 920	struct vmap_area *lva = NULL;
 921
 922	if (type == FL_FIT_TYPE) {
 923		/*
 924		 * No need to split VA, it fully fits.
 925		 *
 926		 * |               |
 927		 * V      NVA      V
 928		 * |---------------|
 929		 */
 930		unlink_va(va, &free_vmap_area_root);
 931		kmem_cache_free(vmap_area_cachep, va);
 932	} else if (type == LE_FIT_TYPE) {
 933		/*
 934		 * Split left edge of fit VA.
 935		 *
 936		 * |       |
 937		 * V  NVA  V   R
 938		 * |-------|-------|
 939		 */
 940		va->va_start += size;
 941	} else if (type == RE_FIT_TYPE) {
 942		/*
 943		 * Split right edge of fit VA.
 944		 *
 945		 *         |       |
 946		 *     L   V  NVA  V
 947		 * |-------|-------|
 948		 */
 949		va->va_end = nva_start_addr;
 950	} else if (type == NE_FIT_TYPE) {
 951		/*
 952		 * Split no edge of fit VA.
 953		 *
 954		 *     |       |
 955		 *   L V  NVA  V R
 956		 * |---|-------|---|
 957		 */
 958		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
 959		if (unlikely(!lva)) {
 960			/*
 961			 * For the percpu allocator we do not do any pre-allocation
 962			 * and leave it as it is. The reason is that it most likely
 963			 * never ends up with NE_FIT_TYPE splitting. For percpu
 964			 * allocations, offsets and sizes are aligned to a fixed
 965			 * align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE are its
 966			 * main fitting cases.
 967			 *
 968			 * There are a few exceptions though; one example is the
 969			 * first allocation (early boot-up), when we have "one"
 970			 * big free space that has to be split.
 971			 */
 972			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
 973			if (!lva)
 974				return -1;
 975		}
 976
 977		/*
 978		 * Build the remainder.
 979		 */
 980		lva->va_start = va->va_start;
 981		lva->va_end = nva_start_addr;
 982
 983		/*
 984		 * Shrink this VA to remaining size.
 985		 */
 986		va->va_start = nva_start_addr + size;
 987	} else {
 988		return -1;
 989	}
 990
 991	if (type != FL_FIT_TYPE) {
 992		augment_tree_propagate_from(va);
 993
 994		if (lva)	/* type == NE_FIT_TYPE */
 995			insert_vmap_area_augment(lva, &va->rb_node,
 996				&free_vmap_area_root, &free_vmap_area_list);
 997	}
 998
 999	return 0;
1000}
1001
1002/*
1003 * Returns the start address of the newly allocated area on success.
1004 * Otherwise "vend" is returned to indicate failure.
1005 */
1006static __always_inline unsigned long
1007__alloc_vmap_area(unsigned long size, unsigned long align,
1008	unsigned long vstart, unsigned long vend)
1009{
1010	unsigned long nva_start_addr;
1011	struct vmap_area *va;
1012	enum fit_type type;
1013	int ret;
1014
1015	va = find_vmap_lowest_match(size, align, vstart);
1016	if (unlikely(!va))
1017		return vend;
1018
1019	if (va->va_start > vstart)
1020		nva_start_addr = ALIGN(va->va_start, align);
1021	else
1022		nva_start_addr = ALIGN(vstart, align);
1023
1024	/* Check the "vend" restriction. */
1025	if (nva_start_addr + size > vend)
1026		return vend;
1027
1028	/* Classify what we have found. */
1029	type = classify_va_fit_type(va, nva_start_addr, size);
1030	if (WARN_ON_ONCE(type == NOTHING_FIT))
1031		return vend;
1032
1033	/* Update the free vmap_area. */
1034	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1035	if (ret)
1036		return vend;
1037
1038#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1039	find_vmap_lowest_match_check(size);
1040#endif
1041
1042	return nva_start_addr;
1043}
1044
1045/*
1046 * Allocate a region of KVA of the specified size and alignment, within the
1047 * vstart and vend.
1048 */
1049static struct vmap_area *alloc_vmap_area(unsigned long size,
1050				unsigned long align,
1051				unsigned long vstart, unsigned long vend,
1052				int node, gfp_t gfp_mask)
1053{
1054	struct vmap_area *va, *pva;
1055	unsigned long addr;
1056	int purged = 0;
1057
1058	BUG_ON(!size);
1059	BUG_ON(offset_in_page(size));
1060	BUG_ON(!is_power_of_2(align));
1061
1062	if (unlikely(!vmap_initialized))
1063		return ERR_PTR(-EBUSY);
1064
1065	might_sleep();
1066
1067	va = kmem_cache_alloc_node(vmap_area_cachep,
1068			gfp_mask & GFP_RECLAIM_MASK, node);
1069	if (unlikely(!va))
1070		return ERR_PTR(-ENOMEM);
1071
1072	/*
1073	 * Only scan the relevant parts containing pointers to other objects
1074	 * to avoid false negatives.
1075	 */
1076	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask & GFP_RECLAIM_MASK);
1077
1078retry:
1079	/*
1080	 * Preload this CPU with one extra vmap_area object to ensure
1081	 * that we have it available when the fit type of the free area
1082	 * is NE_FIT_TYPE.
1083	 *
1084	 * The preload is done in non-atomic context, which allows us
1085	 * to use more permissive allocation masks and thus be more stable
1086	 * under low-memory conditions and high memory pressure.
1087	 *
1088	 * Even if it fails we do not really care. Just proceed as is;
1089	 * the "overflow" path will refill the cache we allocate from.
1090	 */
1091	preempt_disable();
1092	if (!__this_cpu_read(ne_fit_preload_node)) {
1093		preempt_enable();
1094		pva = kmem_cache_alloc_node(vmap_area_cachep, GFP_KERNEL, node);
1095		preempt_disable();
1096
1097		if (__this_cpu_cmpxchg(ne_fit_preload_node, NULL, pva)) {
1098			if (pva)
1099				kmem_cache_free(vmap_area_cachep, pva);
1100		}
1101	}
1102
1103	spin_lock(&vmap_area_lock);
1104	preempt_enable();
1105
1106	/*
1107	 * If an allocation fails, the "vend" address is
1108	 * returned. Therefore trigger the overflow path.
1109	 */
1110	addr = __alloc_vmap_area(size, align, vstart, vend);
1111	if (unlikely(addr == vend))
1112		goto overflow;
1113
1114	va->va_start = addr;
1115	va->va_end = addr + size;
1116	va->vm = NULL;
1117	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1118
1119	spin_unlock(&vmap_area_lock);
1120
1121	BUG_ON(!IS_ALIGNED(va->va_start, align));
1122	BUG_ON(va->va_start < vstart);
1123	BUG_ON(va->va_end > vend);
1124
1125	return va;
1126
1127overflow:
1128	spin_unlock(&vmap_area_lock);
1129	if (!purged) {
1130		purge_vmap_area_lazy();
1131		purged = 1;
1132		goto retry;
1133	}
1134
1135	if (gfpflags_allow_blocking(gfp_mask)) {
1136		unsigned long freed = 0;
1137		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1138		if (freed > 0) {
1139			purged = 0;
1140			goto retry;
1141		}
1142	}
1143
1144	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1145		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1146			size);
1147
1148	kmem_cache_free(vmap_area_cachep, va);
1149	return ERR_PTR(-EBUSY);
1150}
1151
1152int register_vmap_purge_notifier(struct notifier_block *nb)
1153{
1154	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1155}
1156EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1157
1158int unregister_vmap_purge_notifier(struct notifier_block *nb)
1159{
1160	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1161}
1162EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
1163
1164static void __free_vmap_area(struct vmap_area *va)
1165{
1166	/*
1167	 * Remove from the busy tree/list.
1168	 */
1169	unlink_va(va, &vmap_area_root);
1170
1171	/*
1172	 * Merge VA with its neighbors, otherwise just add it.
1173	 */
1174	merge_or_add_vmap_area(va,
1175		&free_vmap_area_root, &free_vmap_area_list);
1176}
1177
1178/*
1179 * Free a region of KVA allocated by alloc_vmap_area
1180 */
1181static void free_vmap_area(struct vmap_area *va)
1182{
1183	spin_lock(&vmap_area_lock);
1184	__free_vmap_area(va);
1185	spin_unlock(&vmap_area_lock);
1186}
1187
1188/*
1189 * Clear the pagetable entries of a given vmap_area
1190 */
1191static void unmap_vmap_area(struct vmap_area *va)
1192{
1193	vunmap_page_range(va->va_start, va->va_end);
1194}
1195
1196/*
1197 * lazy_max_pages is the maximum amount of virtual address space we gather up
1198 * before attempting to purge with a TLB flush.
1199 *
1200 * There is a tradeoff here: a larger number will cover more kernel page tables
1201 * and take slightly longer to purge, but it will linearly reduce the number of
1202 * global TLB flushes that must be performed. It would seem natural to scale
1203 * this number up linearly with the number of CPUs (because vmapping activity
1204 * could also scale linearly with the number of CPUs), however it is likely
1205 * that in practice, workloads might be constrained in other ways that mean
1206 * vmap activity will not scale linearly with CPUs. Also, I want to be
1207 * conservative and not introduce a big latency on huge systems, so go with
1208 * a less aggressive log scale. It will still be an improvement over the old
1209 * code, and it will be simple to change the scale factor if we find that it
1210 * becomes a problem on bigger systems.
1211 */
1212static unsigned long lazy_max_pages(void)
1213{
1214	unsigned int log;
1215
1216	log = fls(num_online_cpus());
1217
1218	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1219}
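
/*
 * A worked example of the log scale above, assuming 4 KiB pages: each
 * doubling of online CPUs adds one fls() step, i.e. another 32 MB
 * (8192 pages) to the lazy-purge threshold, rather than scaling the
 * threshold linearly with the CPU count.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL

static unsigned int fls32(unsigned int x)	/* fls() analogue */
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

static unsigned long lazy_max_pages(unsigned int ncpus)
{
	return fls32(ncpus) * (32UL * 1024 * 1024 / PAGE_SIZE);
}

int main(void)
{
	unsigned int cpus[] = { 1, 2, 8, 64, 1024 };

	for (unsigned int i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		printf("%4u CPUs -> %6lu pages (%lu MB)\n", cpus[i],
		       lazy_max_pages(cpus[i]),
		       lazy_max_pages(cpus[i]) * PAGE_SIZE >> 20);
	return 0;
}
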
1220
1221static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1222
1223/*
1224 * Serialize vmap purging.  There is no actual critical section protected
1225 * by this lock, but we want to avoid concurrent calls for performance
1226 * reasons and to make pcpu_get_vm_areas() more deterministic.
1227 */
1228static DEFINE_MUTEX(vmap_purge_lock);
1229
1230/* for per-CPU blocks */
1231static void purge_fragmented_blocks_allcpus(void);
1232
1233/*
1234 * called before a call to iounmap() if the caller wants vm_area_struct's
1235 * immediately freed.
1236 */
1237void set_iounmap_nonlazy(void)
1238{
1239	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1240}
1241
1242/*
1243 * Purges all lazily-freed vmap areas.
1244 */
1245static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1246{
1247	unsigned long resched_threshold;
1248	struct llist_node *valist;
1249	struct vmap_area *va;
1250	struct vmap_area *n_va;
1251
1252	lockdep_assert_held(&vmap_purge_lock);
1253
1254	valist = llist_del_all(&vmap_purge_list);
1255	if (unlikely(valist == NULL))
1256		return false;
1257
1258	/*
1259	 * First make sure the mappings are removed from all page-tables
1260	 * before they are freed.
1261	 */
1262	vmalloc_sync_all();
1263
1264	/*
1265	 * TODO: calculate the flush range without looping.
1266	 * The list can contain up to lazy_max_pages() elements.
1267	 */
1268	llist_for_each_entry(va, valist, purge_list) {
1269		if (va->va_start < start)
1270			start = va->va_start;
1271		if (va->va_end > end)
1272			end = va->va_end;
1273	}
1274
1275	flush_tlb_kernel_range(start, end);
1276	resched_threshold = lazy_max_pages() << 1;
1277
1278	spin_lock(&vmap_area_lock);
1279	llist_for_each_entry_safe(va, n_va, valist, purge_list) {
1280		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1281
1282		/*
1283		 * Finally insert or merge lazily-freed area. It is
1284		 * detached and there is no need to "unlink" it from
1285		 * anything.
1286		 */
1287		merge_or_add_vmap_area(va,
1288			&free_vmap_area_root, &free_vmap_area_list);
1289
1290		atomic_long_sub(nr, &vmap_lazy_nr);
1291
1292		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1293			cond_resched_lock(&vmap_area_lock);
1294	}
1295	spin_unlock(&vmap_area_lock);
1296	return true;
1297}
1298
1299/*
1300 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1301 * is already purging.
1302 */
1303static void try_purge_vmap_area_lazy(void)
1304{
1305	if (mutex_trylock(&vmap_purge_lock)) {
1306		__purge_vmap_area_lazy(ULONG_MAX, 0);
1307		mutex_unlock(&vmap_purge_lock);
1308	}
1309}
1310
1311/*
1312 * Kick off a purge of the outstanding lazy areas.
1313 */
1314static void purge_vmap_area_lazy(void)
1315{
1316	mutex_lock(&vmap_purge_lock);
1317	purge_fragmented_blocks_allcpus();
1318	__purge_vmap_area_lazy(ULONG_MAX, 0);
1319	mutex_unlock(&vmap_purge_lock);
1320}
1321
1322/*
1323 * Free a vmap area; the caller must ensure that the area has been
1324 * unmapped and that flush_cache_vunmap() has been called for the
1325 * correct range previously.
1326 */
1327static void free_vmap_area_noflush(struct vmap_area *va)
1328{
1329	unsigned long nr_lazy;
1330
1331	spin_lock(&vmap_area_lock);
1332	unlink_va(va, &vmap_area_root);
1333	spin_unlock(&vmap_area_lock);
1334
1335	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1336				PAGE_SHIFT, &vmap_lazy_nr);
1337
1338	/* After this point, we may free va at any time */
1339	llist_add(&va->purge_list, &vmap_purge_list);
1340
1341	if (unlikely(nr_lazy > lazy_max_pages()))
1342		try_purge_vmap_area_lazy();
1343}
1344
1345/*
1346 * Free and unmap a vmap area
1347 */
1348static void free_unmap_vmap_area(struct vmap_area *va)
1349{
1350	flush_cache_vunmap(va->va_start, va->va_end);
1351	unmap_vmap_area(va);
1352	if (debug_pagealloc_enabled())
1353		flush_tlb_kernel_range(va->va_start, va->va_end);
1354
1355	free_vmap_area_noflush(va);
1356}
1357
1358static struct vmap_area *find_vmap_area(unsigned long addr)
1359{
1360	struct vmap_area *va;
1361
1362	spin_lock(&vmap_area_lock);
1363	va = __find_vmap_area(addr);
1364	spin_unlock(&vmap_area_lock);
1365
1366	return va;
1367}
1368
1369/*** Per cpu kva allocator ***/
1370
1371/*
1372 * vmap space is limited especially on 32 bit architectures. Ensure there is
1373 * room for at least 16 percpu vmap blocks per CPU.
1374 */
1375/*
1376 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1377 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1378 * instead (we just need a rough idea)
1379 */
1380#if BITS_PER_LONG == 32
1381#define VMALLOC_SPACE		(128UL*1024*1024)
1382#else
1383#define VMALLOC_SPACE		(128UL*1024*1024*1024)
1384#endif
1385
1386#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1387#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1388#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1389#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1390#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1391#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1392#define VMAP_BBMAP_BITS		\
1393		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1394		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1395			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1396
1397#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
1398
1399struct vmap_block_queue {
1400	spinlock_t lock;
1401	struct list_head free;
1402};
1403
1404struct vmap_block {
1405	spinlock_t lock;
1406	struct vmap_area *va;
1407	unsigned long free, dirty;
1408	unsigned long dirty_min, dirty_max; /*< dirty range */
1409	struct list_head free_list;
1410	struct rcu_head rcu_head;
1411	struct list_head purge;
1412};
1413
1414/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1415static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1416
1417/*
1418 * Radix tree of vmap blocks, indexed by address, to quickly find a vmap block
1419 * in the free path. Could get rid of this if we change the API to return a
1420 * "cookie" from alloc, to be passed to free. But no big deal yet.
1421 */
1422static DEFINE_SPINLOCK(vmap_block_tree_lock);
1423static RADIX_TREE(vmap_block_tree, GFP_ATOMIC);
1424
1425/*
1426 * We should probably have a fallback mechanism to allocate virtual memory
1427 * out of partially filled vmap blocks. However vmap block sizing should be
1428 * fairly reasonable according to the vmalloc size, so it shouldn't be a
1429 * big problem.
1430 */
1431
1432static unsigned long addr_to_vb_idx(unsigned long addr)
1433{
1434	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1435	addr /= VMAP_BLOCK_SIZE;
1436	return addr;
1437}
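
/*
 * A worked sizing example, assuming 4 KiB pages, the 64-bit
 * VMALLOC_SPACE guess above and NR_CPUS=64 (the x86-64 VMALLOC_START
 * below is also just an illustrative assumption).  The per-CPU share
 * 32M/64/16 = 32768 bits is clamped to VMAP_BBMAP_BITS_MAX = 1024,
 * giving 4 MiB vmap blocks; addr_to_vb_idx() then simply divides the
 * block-aligned offset by that block size.
 */
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define VMALLOC_SPACE	(128UL * 1024 * 1024 * 1024)
#define VMALLOC_PAGES	(VMALLOC_SPACE / PAGE_SIZE)
#define NR_CPUS		64UL

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

static unsigned long max_ul(unsigned long a, unsigned long b)
{
	return a > b ? a : b;
}

int main(void)
{
	/* VMAP_BBMAP_BITS: clamp the per-CPU share into [128, 1024]. */
	unsigned long bits = min_ul(1024UL,
			max_ul(128UL, VMALLOC_PAGES / NR_CPUS / 16));
	unsigned long block_size = bits * PAGE_SIZE;
	unsigned long vmalloc_start = 0xffffc90000000000UL;
	unsigned long base = vmalloc_start & ~(block_size - 1);
	unsigned long a1 = vmalloc_start + 5 * PAGE_SIZE;
	unsigned long a2 = vmalloc_start + block_size + PAGE_SIZE;

	printf("VMAP_BBMAP_BITS = %lu, block size = %lu KiB\n",
	       bits, block_size >> 10);
	printf("idx(a1) = %lu, idx(a2) = %lu\n",	/* 0 and 1 */
	       (a1 - base) / block_size, (a2 - base) / block_size);
	return 0;
}
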
1438
1439static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1440{
1441	unsigned long addr;
1442
1443	addr = va_start + (pages_off << PAGE_SHIFT);
1444	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1445	return (void *)addr;
1446}
1447
1448/**
1449 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in
1450 *                  it. The number of pages cannot exceed VMAP_BBMAP_BITS.
1451 * @order:    how many 2^order pages should be occupied in newly allocated block
1452 * @gfp_mask: flags for the page level allocator
1453 *
1454 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1455 */
1456static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1457{
1458	struct vmap_block_queue *vbq;
1459	struct vmap_block *vb;
1460	struct vmap_area *va;
1461	unsigned long vb_idx;
1462	int node, err;
1463	void *vaddr;
1464
1465	node = numa_node_id();
1466
1467	vb = kmalloc_node(sizeof(struct vmap_block),
1468			gfp_mask & GFP_RECLAIM_MASK, node);
1469	if (unlikely(!vb))
1470		return ERR_PTR(-ENOMEM);
1471
1472	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1473					VMALLOC_START, VMALLOC_END,
1474					node, gfp_mask);
1475	if (IS_ERR(va)) {
1476		kfree(vb);
1477		return ERR_CAST(va);
1478	}
1479
1480	err = radix_tree_preload(gfp_mask);
1481	if (unlikely(err)) {
1482		kfree(vb);
1483		free_vmap_area(va);
1484		return ERR_PTR(err);
1485	}
1486
1487	vaddr = vmap_block_vaddr(va->va_start, 0);
1488	spin_lock_init(&vb->lock);
1489	vb->va = va;
1490	/* At least something should be left free */
1491	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1492	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1493	vb->dirty = 0;
1494	vb->dirty_min = VMAP_BBMAP_BITS;
1495	vb->dirty_max = 0;
1496	INIT_LIST_HEAD(&vb->free_list);
1497
1498	vb_idx = addr_to_vb_idx(va->va_start);
1499	spin_lock(&vmap_block_tree_lock);
1500	err = radix_tree_insert(&vmap_block_tree, vb_idx, vb);
1501	spin_unlock(&vmap_block_tree_lock);
1502	BUG_ON(err);
1503	radix_tree_preload_end();
1504
1505	vbq = &get_cpu_var(vmap_block_queue);
1506	spin_lock(&vbq->lock);
1507	list_add_tail_rcu(&vb->free_list, &vbq->free);
1508	spin_unlock(&vbq->lock);
1509	put_cpu_var(vmap_block_queue);
1510
1511	return vaddr;
1512}
1513
1514static void free_vmap_block(struct vmap_block *vb)
1515{
1516	struct vmap_block *tmp;
1517	unsigned long vb_idx;
1518
1519	vb_idx = addr_to_vb_idx(vb->va->va_start);
1520	spin_lock(&vmap_block_tree_lock);
1521	tmp = radix_tree_delete(&vmap_block_tree, vb_idx);
1522	spin_unlock(&vmap_block_tree_lock);
1523	BUG_ON(tmp != vb);
1524
1525	free_vmap_area_noflush(vb->va);
1526	kfree_rcu(vb, rcu_head);
1527}
1528
1529static void purge_fragmented_blocks(int cpu)
1530{
1531	LIST_HEAD(purge);
1532	struct vmap_block *vb;
1533	struct vmap_block *n_vb;
1534	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1535
1536	rcu_read_lock();
1537	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1538
1539		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1540			continue;
1541
1542		spin_lock(&vb->lock);
1543		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1544			vb->free = 0; /* prevent further allocs after releasing lock */
1545			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1546			vb->dirty_min = 0;
1547			vb->dirty_max = VMAP_BBMAP_BITS;
1548			spin_lock(&vbq->lock);
1549			list_del_rcu(&vb->free_list);
1550			spin_unlock(&vbq->lock);
1551			spin_unlock(&vb->lock);
1552			list_add_tail(&vb->purge, &purge);
1553		} else
1554			spin_unlock(&vb->lock);
1555	}
1556	rcu_read_unlock();
1557
1558	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1559		list_del(&vb->purge);
1560		free_vmap_block(vb);
1561	}
1562}
1563
1564static void purge_fragmented_blocks_allcpus(void)
1565{
1566	int cpu;
1567
1568	for_each_possible_cpu(cpu)
1569		purge_fragmented_blocks(cpu);
1570}
1571
1572static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1573{
1574	struct vmap_block_queue *vbq;
1575	struct vmap_block *vb;
1576	void *vaddr = NULL;
1577	unsigned int order;
1578
1579	BUG_ON(offset_in_page(size));
1580	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1581	if (WARN_ON(size == 0)) {
1582		/*
1583		 * Allocating 0 bytes isn't what the caller wants, since
1584		 * get_order(0) returns a nonsensical result. Just warn and
1585		 * bail out early.
1586		 */
1587		return NULL;
1588	}
1589	order = get_order(size);
1590
1591	rcu_read_lock();
1592	vbq = &get_cpu_var(vmap_block_queue);
1593	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1594		unsigned long pages_off;
1595
1596		spin_lock(&vb->lock);
1597		if (vb->free < (1UL << order)) {
1598			spin_unlock(&vb->lock);
1599			continue;
1600		}
1601
1602		pages_off = VMAP_BBMAP_BITS - vb->free;
1603		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1604		vb->free -= 1UL << order;
1605		if (vb->free == 0) {
1606			spin_lock(&vbq->lock);
1607			list_del_rcu(&vb->free_list);
1608			spin_unlock(&vbq->lock);
1609		}
1610
1611		spin_unlock(&vb->lock);
1612		break;
1613	}
1614
1615	put_cpu_var(vmap_block_queue);
1616	rcu_read_unlock();
1617
1618	/* Allocate new block if nothing was found */
1619	if (!vaddr)
1620		vaddr = new_vmap_block(order, gfp_mask);
1621
1622	return vaddr;
1623}
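
/*
 * A sketch of the append-only ("bump") allocation policy above
 * (hypothetical names).  vb_alloc() derives the offset from the free
 * count, so space freed inside a block is never reused; vb_free() only
 * accounts it as dirty, and once everything handed out has been freed
 * the whole block is released (analogue of the free/purge conditions).
 */
#include <stdio.h>

#define BLOCK_BITS	1024UL		/* pages per block */

struct block {
	unsigned long free;		/* pages never handed out yet */
	unsigned long dirty;		/* pages handed out, then freed */
};

/* Allocate 2^order pages; returns the page offset or -1. */
static long block_alloc(struct block *b, unsigned int order)
{
	unsigned long npages = 1UL << order;
	unsigned long pages_off;

	if (b->free < npages)
		return -1;		/* caller moves on to another block */
	pages_off = BLOCK_BITS - b->free;
	b->free -= npages;
	return (long)pages_off;
}

/* Freeing never rewinds the offset; it only grows the dirty count. */
static void block_free(struct block *b, unsigned int order)
{
	b->dirty += 1UL << order;
	if (b->free + b->dirty == BLOCK_BITS)
		printf("no live pages left: release the whole block\n");
}

int main(void)
{
	struct block b = { BLOCK_BITS, 0 };

	printf("alloc at page offset %ld\n", block_alloc(&b, 2)); /* 0 */
	printf("alloc at page offset %ld\n", block_alloc(&b, 0)); /* 4 */
	block_free(&b, 2);
	block_free(&b, 0);
	return 0;
}
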
1624
1625static void vb_free(const void *addr, unsigned long size)
1626{
1627	unsigned long offset;
1628	unsigned long vb_idx;
1629	unsigned int order;
1630	struct vmap_block *vb;
1631
1632	BUG_ON(offset_in_page(size));
1633	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1634
1635	flush_cache_vunmap((unsigned long)addr, (unsigned long)addr + size);
1636
1637	order = get_order(size);
1638
1639	offset = (unsigned long)addr & (VMAP_BLOCK_SIZE - 1);
1640	offset >>= PAGE_SHIFT;
1641
1642	vb_idx = addr_to_vb_idx((unsigned long)addr);
1643	rcu_read_lock();
1644	vb = radix_tree_lookup(&vmap_block_tree, vb_idx);
1645	rcu_read_unlock();
1646	BUG_ON(!vb);
1647
1648	vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
1649
1650	if (debug_pagealloc_enabled())
1651		flush_tlb_kernel_range((unsigned long)addr,
1652					(unsigned long)addr + size);
1653
1654	spin_lock(&vb->lock);
1655
1656	/* Expand dirty range */
1657	vb->dirty_min = min(vb->dirty_min, offset);
1658	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
1659
1660	vb->dirty += 1UL << order;
1661	if (vb->dirty == VMAP_BBMAP_BITS) {
1662		BUG_ON(vb->free);
1663		spin_unlock(&vb->lock);
1664		free_vmap_block(vb);
1665	} else
1666		spin_unlock(&vb->lock);
1667}
1668
1669static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
1670{
1671	int cpu;
1672
1673	if (unlikely(!vmap_initialized))
1674		return;
1675
1676	might_sleep();
1677
1678	for_each_possible_cpu(cpu) {
1679		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1680		struct vmap_block *vb;
1681
1682		rcu_read_lock();
1683		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1684			spin_lock(&vb->lock);
1685			if (vb->dirty) {
1686				unsigned long va_start = vb->va->va_start;
1687				unsigned long s, e;
1688
1689				s = va_start + (vb->dirty_min << PAGE_SHIFT);
1690				e = va_start + (vb->dirty_max << PAGE_SHIFT);
1691
1692				start = min(s, start);
1693				end   = max(e, end);
1694
1695				flush = 1;
1696			}
1697			spin_unlock(&vb->lock);
1698		}
1699		rcu_read_unlock();
1700	}
1701
1702	mutex_lock(&vmap_purge_lock);
1703	purge_fragmented_blocks_allcpus();
1704	if (!__purge_vmap_area_lazy(start, end) && flush)
1705		flush_tlb_kernel_range(start, end);
1706	mutex_unlock(&vmap_purge_lock);
1707}
1708
1709/**
1710 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
1711 *
1712 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
1713 * to amortize TLB flushing overheads. What this means is that any page you
1714 * have now may, in a former life, have been mapped into a kernel virtual
1715 * address by the vmap layer and so there might be some CPUs with TLB entries
1716 * still referencing that page (additional to the regular 1:1 kernel mapping).
1717 *
1718 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
1719 * be sure that none of the pages we have control over will have any aliases
1720 * from the vmap layer.
1721 */
1722void vm_unmap_aliases(void)
1723{
1724	unsigned long start = ULONG_MAX, end = 0;
1725	int flush = 0;
1726
1727	_vm_unmap_aliases(start, end, flush);
1728}
1729EXPORT_SYMBOL_GPL(vm_unmap_aliases);
1730
1731/**
1732 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
1733 * @mem: the pointer returned by vm_map_ram
1734 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
1735 */
1736void vm_unmap_ram(const void *mem, unsigned int count)
1737{
1738	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1739	unsigned long addr = (unsigned long)mem;
1740	struct vmap_area *va;
1741
1742	might_sleep();
1743	BUG_ON(!addr);
1744	BUG_ON(addr < VMALLOC_START);
1745	BUG_ON(addr > VMALLOC_END);
1746	BUG_ON(!PAGE_ALIGNED(addr));
1747
1748	if (likely(count <= VMAP_MAX_ALLOC)) {
1749		debug_check_no_locks_freed(mem, size);
1750		vb_free(mem, size);
1751		return;
1752	}
1753
1754	va = find_vmap_area(addr);
1755	BUG_ON(!va);
1756	debug_check_no_locks_freed((void *)va->va_start,
1757				    (va->va_end - va->va_start));
1758	free_unmap_vmap_area(va);
1759}
1760EXPORT_SYMBOL(vm_unmap_ram);
1761
1762/**
1763 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
1764 * @pages: an array of pointers to the pages to be mapped
1765 * @count: number of pages
1766 * @node: prefer to allocate data structures on this node
1767 * @prot: memory protection to use. PAGE_KERNEL for regular RAM
1768 *
1769 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it could be
1770 * faster than vmap, so it's good.  But if you mix long-lived and short-lived
1771 * objects with vm_map_ram(), it could consume lots of address space through
1772 * fragmentation (especially on a 32-bit machine).  You could eventually see
1773 * failures.  Please use this function for short-lived objects.
1774 *
1775 * Returns: a pointer to the address that has been mapped, or %NULL on failure
1776 */
1777void *vm_map_ram(struct page **pages, unsigned int count, int node, pgprot_t prot)
1778{
1779	unsigned long size = (unsigned long)count << PAGE_SHIFT;
1780	unsigned long addr;
1781	void *mem;
1782
1783	if (likely(count <= VMAP_MAX_ALLOC)) {
1784		mem = vb_alloc(size, GFP_KERNEL);
1785		if (IS_ERR(mem))
1786			return NULL;
1787		addr = (unsigned long)mem;
1788	} else {
1789		struct vmap_area *va;
1790		va = alloc_vmap_area(size, PAGE_SIZE,
1791				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
1792		if (IS_ERR(va))
1793			return NULL;
1794
1795		addr = va->va_start;
1796		mem = (void *)addr;
1797	}
1798	if (vmap_page_range(addr, addr + size, prot, pages) < 0) {
1799		vm_unmap_ram(mem, count);
1800		return NULL;
1801	}
1802	return mem;
1803}
1804EXPORT_SYMBOL(vm_map_ram);
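
/*
 * A hedged kernel-side usage sketch (hypothetical helper names, not
 * part of vmalloc.c): vm_map_ram() pairs with vm_unmap_ram() and, per
 * the comment above, suits short-lived mappings of scattered pages.
 */
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Map 'n' independently allocated pages at one contiguous kernel address. */
static void *map_scattered_pages(struct page **pages, unsigned int n)
{
	void *buf = vm_map_ram(pages, n, NUMA_NO_NODE, PAGE_KERNEL);

	if (!buf)
		return NULL;
	memset(buf, 0, (size_t)n * PAGE_SIZE);	/* use the linear view */
	return buf;
}

static void unmap_scattered_pages(void *buf, unsigned int n)
{
	vm_unmap_ram(buf, n);	/* count must match the vm_map_ram() call */
}
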
1805
1806static struct vm_struct *vmlist __initdata;
1807
1808/**
1809 * vm_area_add_early - add vmap area early during boot
1810 * @vm: vm_struct to add
1811 *
1812 * This function is used to add fixed kernel vm area to vmlist before
1813 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
1814 * should contain proper values and the other fields should be zero.
1815 *
1816 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1817 */
1818void __init vm_area_add_early(struct vm_struct *vm)
1819{
1820	struct vm_struct *tmp, **p;
1821
1822	BUG_ON(vmap_initialized);
1823	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
1824		if (tmp->addr >= vm->addr) {
1825			BUG_ON(tmp->addr < vm->addr + vm->size);
1826			break;
1827		} else
1828			BUG_ON(tmp->addr + tmp->size > vm->addr);
1829	}
1830	vm->next = *p;
1831	*p = vm;
1832}
1833
1834/**
1835 * vm_area_register_early - register vmap area early during boot
1836 * @vm: vm_struct to register
1837 * @align: requested alignment
1838 *
1839 * This function is used to register kernel vm area before
1840 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
1841 * proper values on entry and other fields should be zero.  On return,
1842 * vm->addr contains the allocated address.
1843 *
1844 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
1845 */
1846void __init vm_area_register_early(struct vm_struct *vm, size_t align)
1847{
1848	static size_t vm_init_off __initdata;
1849	unsigned long addr;
1850
1851	addr = ALIGN(VMALLOC_START + vm_init_off, align);
1852	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
1853
1854	vm->addr = (void *)addr;
1855
1856	vm_area_add_early(vm);
1857}
1858
1859static void vmap_init_free_space(void)
1860{
1861	unsigned long vmap_start = 1;
1862	const unsigned long vmap_end = ULONG_MAX;
1863	struct vmap_area *busy, *free;
1864
1865	/*
1866	 *     B     F     B     B     B     F
1867	 * -|-----|.....|-----|-----|-----|.....|-
1868	 *  |           The KVA space           |
1869	 *  |<--------------------------------->|
1870	 */
1871	list_for_each_entry(busy, &vmap_area_list, list) {
1872		if (busy->va_start - vmap_start > 0) {
1873			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1874			if (!WARN_ON_ONCE(!free)) {
1875				free->va_start = vmap_start;
1876				free->va_end = busy->va_start;
1877
1878				insert_vmap_area_augment(free, NULL,
1879					&free_vmap_area_root,
1880						&free_vmap_area_list);
1881			}
1882		}
1883
1884		vmap_start = busy->va_end;
1885	}
1886
1887	if (vmap_end - vmap_start > 0) {
1888		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1889		if (!WARN_ON_ONCE(!free)) {
1890			free->va_start = vmap_start;
1891			free->va_end = vmap_end;
1892
1893			insert_vmap_area_augment(free, NULL,
1894				&free_vmap_area_root,
1895					&free_vmap_area_list);
1896		}
1897	}
1898}
1899
1900void __init vmalloc_init(void)
1901{
1902	struct vmap_area *va;
1903	struct vm_struct *tmp;
1904	int i;
1905
1906	/*
1907	 * Create the cache for vmap_area objects.
1908	 */
1909	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
1910
1911	for_each_possible_cpu(i) {
1912		struct vmap_block_queue *vbq;
1913		struct vfree_deferred *p;
1914
1915		vbq = &per_cpu(vmap_block_queue, i);
1916		spin_lock_init(&vbq->lock);
1917		INIT_LIST_HEAD(&vbq->free);
1918		p = &per_cpu(vfree_deferred, i);
1919		init_llist_head(&p->list);
1920		INIT_WORK(&p->wq, free_work);
1921	}
1922
1923	/* Import existing vmlist entries. */
1924	for (tmp = vmlist; tmp; tmp = tmp->next) {
1925		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
1926		if (WARN_ON_ONCE(!va))
1927			continue;
1928
1929		va->va_start = (unsigned long)tmp->addr;
1930		va->va_end = va->va_start + tmp->size;
1931		va->vm = tmp;
1932		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1933	}
1934
1935	/*
1936	 * Now we can initialize a free vmap space.
1937	 */
1938	vmap_init_free_space();
1939	vmap_initialized = true;
1940}
1941
1942/**
1943 * map_kernel_range_noflush - map kernel VM area with the specified pages
1944 * @addr: start of the VM area to map
1945 * @size: size of the VM area to map
1946 * @prot: page protection flags to use
1947 * @pages: pages to map
1948 *
1949 * Map PFN_UP(@size) pages at @addr.  The VM area specified by @addr
1950 * and @size should have been allocated using get_vm_area() and its
1951 * friends.
1952 *
1953 * NOTE:
1954 * This function does NOT do any cache flushing.  The caller is
1955 * responsible for calling flush_cache_vmap() on to-be-mapped areas
1956 * before calling this function.
1957 *
1958 * RETURNS:
1959 * The number of pages mapped on success, -errno on failure.
1960 */
1961int map_kernel_range_noflush(unsigned long addr, unsigned long size,
1962			     pgprot_t prot, struct page **pages)
1963{
1964	return vmap_page_range_noflush(addr, addr + size, prot, pages);
1965}
1966
1967/**
1968 * unmap_kernel_range_noflush - unmap kernel VM area
1969 * @addr: start of the VM area to unmap
1970 * @size: size of the VM area to unmap
1971 *
1972 * Unmap PFN_UP(@size) pages at @addr.  The VM area specified by @addr
1973 * and @size should have been allocated using get_vm_area() and its
1974 * friends.
1975 *
1976 * NOTE:
1977 * This function does NOT do any cache flushing.  The caller is
1978 * responsible for calling flush_cache_vunmap() on to-be-unmapped areas
1979 * before calling this function and flush_tlb_kernel_range() after.
1980 */
1981void unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
1982{
1983	vunmap_page_range(addr, addr + size);
1984}
1985EXPORT_SYMBOL_GPL(unmap_kernel_range_noflush);
1986
1987/**
1988 * unmap_kernel_range - unmap kernel VM area and flush cache and TLB
1989 * @addr: start of the VM area to unmap
1990 * @size: size of the VM area to unmap
1991 *
1992 * Similar to unmap_kernel_range_noflush() but flushes the vcache before
1993 * the unmapping and the TLB after.
1994 */
1995void unmap_kernel_range(unsigned long addr, unsigned long size)
1996{
1997	unsigned long end = addr + size;
1998
1999	flush_cache_vunmap(addr, end);
2000	vunmap_page_range(addr, end);
2001	flush_tlb_kernel_range(addr, end);
2002}
2003EXPORT_SYMBOL_GPL(unmap_kernel_range);
2004
2005int map_vm_area(struct vm_struct *area, pgprot_t prot, struct page **pages)
2006{
2007	unsigned long addr = (unsigned long)area->addr;
2008	unsigned long end = addr + get_vm_area_size(area);
2009	int err;
2010
2011	err = vmap_page_range(addr, end, prot, pages);
2012
2013	return err > 0 ? 0 : err;
2014}
2015EXPORT_SYMBOL_GPL(map_vm_area);
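/*
 * Illustrative usage sketch (an assumption, not kernel code): the
 * two-step form of what vmap() below does in one call - reserve the
 * address range first, then back it with caller-provided pages. The
 * 'pages' array must cover at least get_vm_area_size(area) >> PAGE_SHIFT
 * pages; on failure, vunmap() releases both the mapping and the area.
 */
static void *example_get_and_map(struct page **pages, unsigned long size)
{
	struct vm_struct *area = get_vm_area(size, VM_MAP);

	if (!area)
		return NULL;

	if (map_vm_area(area, PAGE_KERNEL, pages)) {
		vunmap(area->addr);
		return NULL;
	}
	return area->addr;
}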
2016
2017static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2018			      unsigned long flags, const void *caller)
2019{
2020	spin_lock(&vmap_area_lock);
2021	vm->flags = flags;
2022	vm->addr = (void *)va->va_start;
2023	vm->size = va->va_end - va->va_start;
2024	vm->caller = caller;
2025	va->vm = vm;
2026	spin_unlock(&vmap_area_lock);
2027}
2028
2029static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2030{
2031	/*
2032	 * Before removing VM_UNINITIALIZED,
2033	 * we should make sure that vm has proper values.
2034	 * Pair with smp_rmb() in show_numa_info().
2035	 */
2036	smp_wmb();
2037	vm->flags &= ~VM_UNINITIALIZED;
2038}
2039
2040static struct vm_struct *__get_vm_area_node(unsigned long size,
2041		unsigned long align, unsigned long flags, unsigned long start,
2042		unsigned long end, int node, gfp_t gfp_mask, const void *caller)
2043{
2044	struct vmap_area *va;
2045	struct vm_struct *area;
2046
2047	BUG_ON(in_interrupt());
2048	size = PAGE_ALIGN(size);
2049	if (unlikely(!size))
2050		return NULL;
2051
2052	if (flags & VM_IOREMAP)
2053		align = 1ul << clamp_t(int, get_count_order_long(size),
2054				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2055
2056	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2057	if (unlikely(!area))
2058		return NULL;
2059
2060	if (!(flags & VM_NO_GUARD))
2061		size += PAGE_SIZE;
2062
2063	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2064	if (IS_ERR(va)) {
2065		kfree(area);
2066		return NULL;
2067	}
2068
2069	setup_vmalloc_vm(area, va, flags, caller);
2070
2071	return area;
2072}
2073
2074struct vm_struct *__get_vm_area(unsigned long size, unsigned long flags,
2075				unsigned long start, unsigned long end)
2076{
2077	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2078				  GFP_KERNEL, __builtin_return_address(0));
2079}
2080EXPORT_SYMBOL_GPL(__get_vm_area);
2081
2082struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2083				       unsigned long start, unsigned long end,
2084				       const void *caller)
2085{
2086	return __get_vm_area_node(size, 1, flags, start, end, NUMA_NO_NODE,
2087				  GFP_KERNEL, caller);
2088}
2089
2090/**
2091 * get_vm_area - reserve a contiguous kernel virtual area
2092 * @size:	 size of the area
2093 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2094 *
2095 * Search an area of @size in the kernel virtual mapping area,
2096 * and reserve it for our purposes.  Returns the area descriptor
2097 * on success or %NULL on failure.
2098 *
2099 * Return: the area descriptor on success or %NULL on failure.
2100 */
2101struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2102{
2103	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2104				  NUMA_NO_NODE, GFP_KERNEL,
2105				  __builtin_return_address(0));
2106}
2107
2108struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2109				const void *caller)
2110{
2111	return __get_vm_area_node(size, 1, flags, VMALLOC_START, VMALLOC_END,
2112				  NUMA_NO_NODE, GFP_KERNEL, caller);
2113}
2114
2115/**
2116 * find_vm_area - find a continuous kernel virtual area
2117 * @addr:	  base address
2118 *
2119 * Search for the kernel VM area starting at @addr, and return it.
2120 * It is up to the caller to do all required locking to keep the returned
2121 * pointer valid.
2122 *
2123 * Return: pointer to the found area or %NULL on failure
2124 */
2125struct vm_struct *find_vm_area(const void *addr)
2126{
2127	struct vmap_area *va;
2128
2129	va = find_vmap_area((unsigned long)addr);
2130	if (!va)
2131		return NULL;
2132
2133	return va->vm;
2134}
2135
2136/**
2137 * remove_vm_area - find and remove a continuous kernel virtual area
2138 * @addr:	    base address
2139 *
2140 * Search for the kernel VM area starting at @addr, and remove it.
2141 * This function returns the found VM area, but using it is NOT safe
2142 * on SMP machines, except for its size or flags.
2143 *
2144 * Return: pointer to the found area or %NULL on failure
2145 */
2146struct vm_struct *remove_vm_area(const void *addr)
2147{
2148	struct vmap_area *va;
2149
2150	might_sleep();
2151
2152	spin_lock(&vmap_area_lock);
2153	va = __find_vmap_area((unsigned long)addr);
2154	if (va && va->vm) {
2155		struct vm_struct *vm = va->vm;
2156
2157		va->vm = NULL;
2158		spin_unlock(&vmap_area_lock);
2159
2160		kasan_free_shadow(vm);
2161		free_unmap_vmap_area(va);
2162
2163		return vm;
2164	}
2165
2166	spin_unlock(&vmap_area_lock);
2167	return NULL;
2168}
2169
2170static inline void set_area_direct_map(const struct vm_struct *area,
2171				       int (*set_direct_map)(struct page *page))
2172{
2173	int i;
2174
2175	for (i = 0; i < area->nr_pages; i++)
2176		if (page_address(area->pages[i]))
2177			set_direct_map(area->pages[i]);
2178}
2179
2180/* Handle removing and resetting vm mappings related to the vm_struct. */
2181static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2182{
2183	unsigned long start = ULONG_MAX, end = 0;
2184	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2185	int flush_dmap = 0;
2186	int i;
2187
2188	remove_vm_area(area->addr);
2189
2190	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2191	if (!flush_reset)
2192		return;
2193
2194	/*
2195	 * If not deallocating pages, just do the flush of the VM area and
2196	 * return.
2197	 */
2198	if (!deallocate_pages) {
2199		vm_unmap_aliases();
2200		return;
2201	}
2202
2203	/*
2204	 * If execution gets here, flush the vm mapping and reset the direct
2205	 * map. Find the start and end range of the direct mappings to make sure
2206	 * the vm_unmap_aliases() flush includes the direct map.
2207	 */
2208	for (i = 0; i < area->nr_pages; i++) {
2209		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2210		if (addr) {
2211			start = min(addr, start);
2212			end = max(addr + PAGE_SIZE, end);
2213			flush_dmap = 1;
2214		}
2215	}
2216
2217	/*
2218	 * Set direct map to something invalid so that it won't be cached if
2219	 * there are any accesses after the TLB flush, then flush the TLB and
2220	 * reset the direct map permissions to the default.
2221	 */
2222	set_area_direct_map(area, set_direct_map_invalid_noflush);
2223	_vm_unmap_aliases(start, end, flush_dmap);
2224	set_area_direct_map(area, set_direct_map_default_noflush);
2225}
2226
2227static void __vunmap(const void *addr, int deallocate_pages)
2228{
2229	struct vm_struct *area;
2230
2231	if (!addr)
2232		return;
2233
2234	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2235			addr))
2236		return;
2237
2238	area = find_vm_area(addr);
2239	if (unlikely(!area)) {
2240		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2241				addr);
2242		return;
2243	}
2244
2245	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2246	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2247
2248	vm_remove_mappings(area, deallocate_pages);
2249
2250	if (deallocate_pages) {
2251		int i;
2252
2253		for (i = 0; i < area->nr_pages; i++) {
2254			struct page *page = area->pages[i];
2255
2256			BUG_ON(!page);
2257			__free_pages(page, 0);
2258		}
2259		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2260
2261		kvfree(area->pages);
2262	}
2263
2264	kfree(area);
2265	return;
2266}
2267
2268static inline void __vfree_deferred(const void *addr)
2269{
2270	/*
2271	 * Use raw_cpu_ptr() because this can be called from preemptible
2272	 * context. Preemption is absolutely fine here, because the llist_add()
2273	 * implementation is lockless, so it works even if we are adding to
2274	 * another cpu's list.  schedule_work() should be fine with this too.
2275	 */
2276	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2277
2278	if (llist_add((struct llist_node *)addr, &p->list))
2279		schedule_work(&p->wq);
2280}
2281
2282/**
2283 * vfree_atomic - release memory allocated by vmalloc()
2284 * @addr:	  memory base address
2285 *
2286 * This one is just like vfree() but can be called in any atomic context
2287 * except NMIs.
2288 */
2289void vfree_atomic(const void *addr)
2290{
2291	BUG_ON(in_nmi());
2292
2293	kmemleak_free(addr);
2294
2295	if (!addr)
2296		return;
2297	__vfree_deferred(addr);
2298}
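/*
 * Illustrative usage sketch (hypothetical caller): vfree() may sleep,
 * so an object that has to be freed while a spinlock is held goes
 * through vfree_atomic(), which defers the actual unmap to a workqueue.
 */
static void example_free_under_lock(spinlock_t *lock, void *obj)
{
	spin_lock(lock);
	/* ... unlink 'obj' from whatever structure tracked it ... */
	vfree_atomic(obj);
	spin_unlock(lock);
}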
2299
2300static void __vfree(const void *addr)
2301{
2302	if (unlikely(in_interrupt()))
2303		__vfree_deferred(addr);
2304	else
2305		__vunmap(addr, 1);
2306}
2307
2308/**
2309 * vfree - release memory allocated by vmalloc()
2310 * @addr:  memory base address
2311 *
2312 * Free the virtually contiguous memory area starting at @addr, as
2313 * obtained from vmalloc(), vmalloc_32() or __vmalloc(). If @addr is
2314 * NULL, no operation is performed.
2315 *
2316 * Must not be called in NMI context (strictly speaking, only if we don't
2317 * have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2318 * conventions for vfree() arch-dependent would be a really bad idea)
2319 *
2320 * May sleep if called *not* from interrupt context.
2321 *
2322 * NOTE: assumes that the object at @addr has a size >= sizeof(llist_node)
2323 */
2324void vfree(const void *addr)
2325{
2326	BUG_ON(in_nmi());
2327
2328	kmemleak_free(addr);
2329
2330	might_sleep_if(!in_interrupt());
2331
2332	if (!addr)
2333		return;
2334
2335	__vfree(addr);
2336}
2337EXPORT_SYMBOL(vfree);
2338
2339/**
2340 * vunmap - release virtual mapping obtained by vmap()
2341 * @addr:   memory base address
2342 *
2343 * Free the virtually contiguous memory area starting at @addr,
2344 * which was created from the page array passed to vmap().
2345 *
2346 * Must not be called in interrupt context.
2347 */
2348void vunmap(const void *addr)
2349{
2350	BUG_ON(in_interrupt());
2351	might_sleep();
2352	if (addr)
2353		__vunmap(addr, 0);
2354}
2355EXPORT_SYMBOL(vunmap);
2356
2357/**
2358 * vmap - map an array of pages into virtually contiguous space
2359 * @pages: array of page pointers
2360 * @count: number of pages to map
2361 * @flags: vm_area->flags
2362 * @prot: page protection for the mapping
2363 *
2364 * Maps @count pages from @pages into contiguous kernel virtual
2365 * space.
2366 *
2367 * Return: the address of the area or %NULL on failure
2368 */
2369void *vmap(struct page **pages, unsigned int count,
2370	   unsigned long flags, pgprot_t prot)
2371{
2372	struct vm_struct *area;
2373	unsigned long size;		/* In bytes */
2374
2375	might_sleep();
2376
2377	if (count > totalram_pages())
2378		return NULL;
2379
2380	size = (unsigned long)count << PAGE_SHIFT;
2381	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2382	if (!area)
2383		return NULL;
2384
2385	if (map_vm_area(area, prot, pages)) {
2386		vunmap(area->addr);
2387		return NULL;
2388	}
2389
2390	return area->addr;
2391}
2392EXPORT_SYMBOL(vmap);
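/*
 * Illustrative usage sketch (not part of this file): allocate a few
 * scattered pages, view them through one contiguous kernel mapping,
 * then unmap and free. Note that vunmap() drops only the mapping;
 * the pages themselves remain owned by the caller.
 */
static int example_vmap(unsigned int nr)
{
	struct page **pages;
	unsigned int i;
	void *va = NULL;

	pages = kcalloc(nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	for (i = 0; i < nr; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free;
	}

	va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
	if (va) {
		memset(va, 0, (unsigned long)nr << PAGE_SHIFT);
		vunmap(va);	/* unmaps, but does not free the pages */
	}
free:
	while (i--)
		__free_page(pages[i]);
	kfree(pages);
	return va ? 0 : -ENOMEM;
}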
2393
2394static void *__vmalloc_node(unsigned long size, unsigned long align,
2395			    gfp_t gfp_mask, pgprot_t prot,
2396			    int node, const void *caller);
2397static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2398				 pgprot_t prot, int node)
2399{
2400	struct page **pages;
2401	unsigned int nr_pages, array_size, i;
2402	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2403	const gfp_t alloc_mask = gfp_mask | __GFP_NOWARN;
2404	const gfp_t highmem_mask = (gfp_mask & (GFP_DMA | GFP_DMA32)) ?
2405					0 :
2406					__GFP_HIGHMEM;
2407
2408	nr_pages = get_vm_area_size(area) >> PAGE_SHIFT;
2409	array_size = (nr_pages * sizeof(struct page *));
2410
2411	/* Please note that the recursion is strictly bounded. */
2412	if (array_size > PAGE_SIZE) {
2413		pages = __vmalloc_node(array_size, 1, nested_gfp|highmem_mask,
2414				PAGE_KERNEL, node, area->caller);
2415	} else {
2416		pages = kmalloc_node(array_size, nested_gfp, node);
2417	}
2418
2419	if (!pages) {
2420		remove_vm_area(area->addr);
2421		kfree(area);
2422		return NULL;
2423	}
2424
2425	area->pages = pages;
2426	area->nr_pages = nr_pages;
2427
2428	for (i = 0; i < area->nr_pages; i++) {
2429		struct page *page;
2430
2431		if (node == NUMA_NO_NODE)
2432			page = alloc_page(alloc_mask|highmem_mask);
2433		else
2434			page = alloc_pages_node(node, alloc_mask|highmem_mask, 0);
2435
2436		if (unlikely(!page)) {
2437			/* Successfully allocated i pages, free them in __vunmap() */
2438			area->nr_pages = i;
2439			atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2440			goto fail;
2441		}
2442		area->pages[i] = page;
2443		if (gfpflags_allow_blocking(gfp_mask|highmem_mask))
2444			cond_resched();
2445	}
2446	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2447
2448	if (map_vm_area(area, prot, pages))
2449		goto fail;
2450	return area->addr;
2451
2452fail:
2453	warn_alloc(gfp_mask, NULL,
2454			  "vmalloc: allocation failure, allocated %ld of %ld bytes",
2455			  (area->nr_pages*PAGE_SIZE), area->size);
2456	__vfree(area->addr);
2457	return NULL;
2458}
2459
2460/**
2461 * __vmalloc_node_range - allocate virtually contiguous memory
2462 * @size:		  allocation size
2463 * @align:		  desired alignment
2464 * @start:		  vm area range start
2465 * @end:		  vm area range end
2466 * @gfp_mask:		  flags for the page level allocator
2467 * @prot:		  protection mask for the allocated pages
2468 * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2469 * @node:		  node to use for allocation or NUMA_NO_NODE
2470 * @caller:		  caller's return address
2471 *
2472 * Allocate enough pages to cover @size from the page level
2473 * allocator with @gfp_mask flags.  Map them into contiguous
2474 * kernel virtual space, using a pagetable protection of @prot.
2475 *
2476 * Return: the address of the area or %NULL on failure
2477 */
2478void *__vmalloc_node_range(unsigned long size, unsigned long align,
2479			unsigned long start, unsigned long end, gfp_t gfp_mask,
2480			pgprot_t prot, unsigned long vm_flags, int node,
2481			const void *caller)
2482{
2483	struct vm_struct *area;
2484	void *addr;
2485	unsigned long real_size = size;
2486
2487	size = PAGE_ALIGN(size);
2488	if (!size || (size >> PAGE_SHIFT) > totalram_pages())
2489		goto fail;
2490
2491	area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED |
2492				vm_flags, start, end, node, gfp_mask, caller);
2493	if (!area)
2494		goto fail;
2495
2496	addr = __vmalloc_area_node(area, gfp_mask, prot, node);
2497	if (!addr)
2498		return NULL;
2499
2500	/*
2501	 * In this function, a newly allocated vm_struct has the
2502	 * VM_UNINITIALIZED flag. At this point the vm_struct is fully
2503	 * initialized, so remove the flag here.
2504	 */
2505	clear_vm_uninitialized_flag(area);
2506
2507	kmemleak_vmalloc(area, size, gfp_mask);
2508
2509	return addr;
2510
2511fail:
2512	warn_alloc(gfp_mask, NULL,
2513			  "vmalloc: allocation failure: %lu bytes", real_size);
2514	return NULL;
2515}
2516
2517/*
2518 * This is only for performance analysis and stress testing of vmalloc.
2519 * It is required by the vmalloc test module; do not use it for anything
2520 * else.
2521 */
2522#ifdef CONFIG_TEST_VMALLOC_MODULE
2523EXPORT_SYMBOL_GPL(__vmalloc_node_range);
2524#endif
2525
2526/**
2527 * __vmalloc_node - allocate virtually contiguous memory
2528 * @size:	    allocation size
2529 * @align:	    desired alignment
2530 * @gfp_mask:	    flags for the page level allocator
2531 * @prot:	    protection mask for the allocated pages
2532 * @node:	    node to use for allocation or NUMA_NO_NODE
2533 * @caller:	    caller's return address
2534 *
2535 * Allocate enough pages to cover @size from the page level
2536 * allocator with @gfp_mask flags.  Map them into contiguous
2537 * kernel virtual space, using a pagetable protection of @prot.
2538 *
2539 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
2540 * and __GFP_NOFAIL are not supported
2541 *
2542 * Any use of gfp flags outside of GFP_KERNEL should be discussed
2543 * with the mm people.
2544 *
2545 * Return: pointer to the allocated memory or %NULL on error
2546 */
2547static void *__vmalloc_node(unsigned long size, unsigned long align,
2548			    gfp_t gfp_mask, pgprot_t prot,
2549			    int node, const void *caller)
2550{
2551	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
2552				gfp_mask, prot, 0, node, caller);
2553}
2554
2555void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
2556{
2557	return __vmalloc_node(size, 1, gfp_mask, prot, NUMA_NO_NODE,
2558				__builtin_return_address(0));
2559}
2560EXPORT_SYMBOL(__vmalloc);
2561
2562static inline void *__vmalloc_node_flags(unsigned long size,
2563					int node, gfp_t flags)
2564{
2565	return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
2566					node, __builtin_return_address(0));
2567}
2568
2569
2570void *__vmalloc_node_flags_caller(unsigned long size, int node, gfp_t flags,
2571				  void *caller)
2572{
2573	return __vmalloc_node(size, 1, flags, PAGE_KERNEL, node, caller);
2574}
2575
2576/**
2577 * vmalloc - allocate virtually contiguous memory
2578 * @size:    allocation size
2579 *
2580 * Allocate enough pages to cover @size from the page level
2581 * allocator and map them into contiguous kernel virtual space.
2582 *
2583 * For tight control over page level allocator and protection flags
2584 * use __vmalloc() instead.
2585 *
2586 * Return: pointer to the allocated memory or %NULL on error
2587 */
2588void *vmalloc(unsigned long size)
2589{
2590	return __vmalloc_node_flags(size, NUMA_NO_NODE,
2591				    GFP_KERNEL);
2592}
2593EXPORT_SYMBOL(vmalloc);
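/*
 * Illustrative usage sketch: a large buffer that only needs to be
 * virtually contiguous, paired with vfree(). The 1 MiB size is an
 * arbitrary example value.
 */
static int example_vmalloc(void)
{
	char *buf = vmalloc(1UL << 20);

	if (!buf)
		return -ENOMEM;

	buf[0] = 'v';	/* the pages are mapped and usable right away */
	vfree(buf);	/* may sleep, see the notes on vfree() above */
	return 0;
}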
2594
2595/**
2596 * vzalloc - allocate virtually contiguous memory with zero fill
2597 * @size:    allocation size
2598 *
2599 * Allocate enough pages to cover @size from the page level
2600 * allocator and map them into contiguous kernel virtual space.
2601 * The memory allocated is set to zero.
2602 *
2603 * For tight control over page level allocator and protection flags
2604 * use __vmalloc() instead.
2605 *
2606 * Return: pointer to the allocated memory or %NULL on error
2607 */
2608void *vzalloc(unsigned long size)
2609{
2610	return __vmalloc_node_flags(size, NUMA_NO_NODE,
2611				GFP_KERNEL | __GFP_ZERO);
2612}
2613EXPORT_SYMBOL(vzalloc);
2614
2615/**
2616 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
2617 * @size: allocation size
2618 *
2619 * The resulting memory area is zeroed so it can be mapped to userspace
2620 * without leaking data.
2621 *
2622 * Return: pointer to the allocated memory or %NULL on error
2623 */
2624void *vmalloc_user(unsigned long size)
2625{
2626	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2627				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
2628				    VM_USERMAP, NUMA_NO_NODE,
2629				    __builtin_return_address(0));
2630}
2631EXPORT_SYMBOL(vmalloc_user);
2632
2633/**
2634 * vmalloc_node - allocate memory on a specific node
2635 * @size:	  allocation size
2636 * @node:	  numa node
2637 *
2638 * Allocate enough pages to cover @size from the page level
2639 * allocator and map them into contiguous kernel virtual space.
2640 *
2641 * For tight control over page level allocator and protection flags
2642 * use __vmalloc() instead.
2643 *
2644 * Return: pointer to the allocated memory or %NULL on error
2645 */
2646void *vmalloc_node(unsigned long size, int node)
2647{
2648	return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL,
2649					node, __builtin_return_address(0));
2650}
2651EXPORT_SYMBOL(vmalloc_node);
2652
2653/**
2654 * vzalloc_node - allocate memory on a specific node with zero fill
2655 * @size:	allocation size
2656 * @node:	numa node
2657 *
2658 * Allocate enough pages to cover @size from the page level
2659 * allocator and map them into contiguous kernel virtual space.
2660 * The memory allocated is set to zero.
2661 *
2662 * For tight control over page level allocator and protection flags
2663 * use __vmalloc_node() instead.
2664 *
2665 * Return: pointer to the allocated memory or %NULL on error
2666 */
2667void *vzalloc_node(unsigned long size, int node)
2668{
2669	return __vmalloc_node_flags(size, node,
2670			 GFP_KERNEL | __GFP_ZERO);
2671}
2672EXPORT_SYMBOL(vzalloc_node);
2673
2674/**
2675 * vmalloc_exec - allocate virtually contiguous, executable memory
2676 * @size:	  allocation size
2677 *
2678 * Kernel-internal function to allocate enough pages to cover @size
2679 * from the page level allocator and map them into contiguous and
2680 * executable kernel virtual space.
2681 *
2682 * For tight control over page level allocator and protection flags
2683 * use __vmalloc() instead.
2684 *
2685 * Return: pointer to the allocated memory or %NULL on error
2686 */
2687void *vmalloc_exec(unsigned long size)
2688{
2689	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
2690			GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
2691			NUMA_NO_NODE, __builtin_return_address(0));
2692}
2693
2694#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
2695#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2696#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
2697#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
2698#else
2699/*
2700 * 64-bit systems should always have either DMA or DMA32 zones. For others
2701 * GFP_DMA32 should do the right thing and use the normal zone.
2702 */
2703#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
2704#endif
2705
2706/**
2707 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
2708 * @size:	allocation size
2709 *
2710 * Allocate enough 32bit PA addressable pages to cover @size from the
2711 * page level allocator and map them into contiguous kernel virtual space.
2712 *
2713 * Return: pointer to the allocated memory or %NULL on error
2714 */
2715void *vmalloc_32(unsigned long size)
2716{
2717	return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
2718			      NUMA_NO_NODE, __builtin_return_address(0));
2719}
2720EXPORT_SYMBOL(vmalloc_32);
2721
2722/**
2723 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
2724 * @size:	     allocation size
2725 *
2726 * The resulting memory area is 32bit addressable and zeroed so it can be
2727 * mapped to userspace without leaking data.
2728 *
2729 * Return: pointer to the allocated memory or %NULL on error
2730 */
2731void *vmalloc_32_user(unsigned long size)
2732{
2733	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
2734				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
2735				    VM_USERMAP, NUMA_NO_NODE,
2736				    __builtin_return_address(0));
2737}
2738EXPORT_SYMBOL(vmalloc_32_user);
2739
2740/*
2741 * Small helper routine: copy contents from addr to buf.
2742 * If a page is not present, fill with zeroes.
2743 */
2744
2745static int aligned_vread(char *buf, char *addr, unsigned long count)
2746{
2747	struct page *p;
2748	int copied = 0;
2749
2750	while (count) {
2751		unsigned long offset, length;
2752
2753		offset = offset_in_page(addr);
2754		length = PAGE_SIZE - offset;
2755		if (length > count)
2756			length = count;
2757		p = vmalloc_to_page(addr);
2758		/*
2759		 * To do safe access to this _mapped_ area, we need a
2760		 * lock. But adding a lock here means adding the overhead
2761		 * of vmalloc()/vfree() calls for this _debug_ interface,
2762		 * which is rarely used. Instead, we use kmap() and accept
2763		 * a small overhead in this access function.
2764		 */
2765		if (p) {
2766			/*
2767			 * we can expect USER0 is not used (see vread/vwrite's
2768			 * function description)
2769			 */
2770			void *map = kmap_atomic(p);
2771			memcpy(buf, map + offset, length);
2772			kunmap_atomic(map);
2773		} else
2774			memset(buf, 0, length);
2775
2776		addr += length;
2777		buf += length;
2778		copied += length;
2779		count -= length;
2780	}
2781	return copied;
2782}
2783
2784static int aligned_vwrite(char *buf, char *addr, unsigned long count)
2785{
2786	struct page *p;
2787	int copied = 0;
2788
2789	while (count) {
2790		unsigned long offset, length;
2791
2792		offset = offset_in_page(addr);
2793		length = PAGE_SIZE - offset;
2794		if (length > count)
2795			length = count;
2796		p = vmalloc_to_page(addr);
2797		/*
2798		 * To do safe access to this _mapped_ area, we need a
2799		 * lock. But adding a lock here means adding the overhead
2800		 * of vmalloc()/vfree() calls for this _debug_ interface,
2801		 * which is rarely used. Instead, we use kmap() and accept
2802		 * a small overhead in this access function.
2803		 */
2804		if (p) {
2805			/*
2806			 * we can expect USER0 is not used (see vread/vwrite's
2807			 * function description)
2808			 */
2809			void *map = kmap_atomic(p);
2810			memcpy(map + offset, buf, length);
2811			kunmap_atomic(map);
2812		}
2813		addr += length;
2814		buf += length;
2815		copied += length;
2816		count -= length;
2817	}
2818	return copied;
2819}
2820
2821/**
2822 * vread() - read vmalloc area in a safe way.
2823 * @buf:     buffer for reading data
2824 * @addr:    vm address.
2825 * @count:   number of bytes to be read.
2826 *
2827 * This function checks that addr is a valid vmalloc'ed area, and
2828 * copies data from that area to a given buffer. If the given memory range
2829 * of [addr...addr+count) includes some valid address, data is copied to
2830 * the proper area of @buf. If there are memory holes, they'll be zero-filled.
2831 * An IOREMAP area is treated as a memory hole and no copy is done.
2832 *
2833 * If [addr...addr+count) doesn't include any intersection with a live
2834 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
2835 *
2836 * Note: In usual ops, vread() is never necessary because the caller
2837 * should know the vmalloc() area is valid and can use memcpy().
2838 * This is for routines which have to access the vmalloc area without
2839 * any information, such as /dev/kmem.
2840 *
2841 * Return: number of bytes for which addr and buf should be increased
2842 * (same number as @count) or %0 if [addr...addr+count) doesn't
2843 * include any intersection with valid vmalloc area
2844 */
2845long vread(char *buf, char *addr, unsigned long count)
2846{
2847	struct vmap_area *va;
2848	struct vm_struct *vm;
2849	char *vaddr, *buf_start = buf;
2850	unsigned long buflen = count;
2851	unsigned long n;
2852
2853	/* Don't allow overflow */
2854	if ((unsigned long) addr + count < count)
2855		count = -(unsigned long) addr;
2856
2857	spin_lock(&vmap_area_lock);
2858	list_for_each_entry(va, &vmap_area_list, list) {
2859		if (!count)
2860			break;
2861
2862		if (!va->vm)
2863			continue;
2864
2865		vm = va->vm;
2866		vaddr = (char *) vm->addr;
2867		if (addr >= vaddr + get_vm_area_size(vm))
2868			continue;
2869		while (addr < vaddr) {
2870			if (count == 0)
2871				goto finished;
2872			*buf = '\0';
2873			buf++;
2874			addr++;
2875			count--;
2876		}
2877		n = vaddr + get_vm_area_size(vm) - addr;
2878		if (n > count)
2879			n = count;
2880		if (!(vm->flags & VM_IOREMAP))
2881			aligned_vread(buf, addr, n);
2882		else /* IOREMAP area is treated as memory hole */
2883			memset(buf, 0, n);
2884		buf += n;
2885		addr += n;
2886		count -= n;
2887	}
2888finished:
2889	spin_unlock(&vmap_area_lock);
2890
2891	if (buf == buf_start)
2892		return 0;
2893	/* zero-fill memory holes */
2894	if (buf != buf_start + buflen)
2895		memset(buf, 0, buflen - (buf - buf_start));
2896
2897	return buflen;
2898}
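/*
 * Illustrative usage sketch (hypothetical debugger-style caller):
 * unlike memcpy(), vread() tolerates mapping holes (zero-filling
 * them) and returns 0 when nothing in the range intersects a live
 * vmalloc area, so a possibly stale address can be probed safely.
 */
static long example_vread(char *kbuf, char *vaddr, unsigned long len)
{
	long ret = vread(kbuf, vaddr, len);

	if (!ret)
		pr_debug("no live vmalloc area at %p\n", vaddr);
	return ret;
}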
2899
2900/**
2901 * vwrite() - write vmalloc area in a safe way.
2902 * @buf:      buffer for source data
2903 * @addr:     vm address.
2904 * @count:    number of bytes to be written.
2905 *
2906 * This function checks that addr is a valid vmalloc'ed area, and
2907 * copies data from a buffer to the given addr. If the specified range of
2908 * [addr...addr+count) includes some valid address, data is copied from
2909 * the proper area of @buf. Memory holes are skipped, and an IOREMAP
2910 * area is treated as a memory hole: no copy is done.
2911 *
2912 * If [addr...addr+count) doesn't include any intersection with a live
2913 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
2914 *
2915 * Note: In usual ops, vwrite() is never necessary because the caller
2916 * should know the vmalloc() area is valid and can use memcpy().
2917 * This is for routines which have to access the vmalloc area without
2918 * any information, such as /dev/kmem.
2919 *
2920 * Return: number of bytes for which addr and buf should be
2921 * increased (same number as @count) or %0 if [addr...addr+count)
2922 * doesn't include any intersection with valid vmalloc area
2923 */
2924long vwrite(char *buf, char *addr, unsigned long count)
2925{
2926	struct vmap_area *va;
2927	struct vm_struct *vm;
2928	char *vaddr;
2929	unsigned long n, buflen;
2930	int copied = 0;
2931
2932	/* Don't allow overflow */
2933	if ((unsigned long) addr + count < count)
2934		count = -(unsigned long) addr;
2935	buflen = count;
2936
2937	spin_lock(&vmap_area_lock);
2938	list_for_each_entry(va, &vmap_area_list, list) {
2939		if (!count)
2940			break;
2941
2942		if (!va->vm)
2943			continue;
2944
2945		vm = va->vm;
2946		vaddr = (char *) vm->addr;
2947		if (addr >= vaddr + get_vm_area_size(vm))
2948			continue;
2949		while (addr < vaddr) {
2950			if (count == 0)
2951				goto finished;
2952			buf++;
2953			addr++;
2954			count--;
2955		}
2956		n = vaddr + get_vm_area_size(vm) - addr;
2957		if (n > count)
2958			n = count;
2959		if (!(vm->flags & VM_IOREMAP)) {
2960			aligned_vwrite(buf, addr, n);
2961			copied++;
2962		}
2963		buf += n;
2964		addr += n;
2965		count -= n;
2966	}
2967finished:
2968	spin_unlock(&vmap_area_lock);
2969	if (!copied)
2970		return 0;
2971	return buflen;
2972}
2973
2974/**
2975 * remap_vmalloc_range_partial - map vmalloc pages to userspace
2976 * @vma:		vma to cover
2977 * @uaddr:		target user address to start at
2978 * @kaddr:		virtual address of vmalloc kernel memory
2979 * @size:		size of map area
2980 *
2981 * Returns:	0 for success, -Exxx on failure
2982 *
2983 * This function checks that @kaddr is a valid vmalloc'ed area,
2984 * and that it is big enough to cover the range starting at
2985 * @uaddr in @vma. Will return failure if that criterion isn't
2986 * met.
2987 *
2988 * Similar to remap_pfn_range() (see mm/memory.c)
2989 */
2990int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
2991				void *kaddr, unsigned long size)
2992{
2993	struct vm_struct *area;
2994
2995	size = PAGE_ALIGN(size);
2996
2997	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
2998		return -EINVAL;
2999
3000	area = find_vm_area(kaddr);
3001	if (!area)
3002		return -EINVAL;
3003
3004	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3005		return -EINVAL;
3006
3007	if (kaddr + size > area->addr + get_vm_area_size(area))
3008		return -EINVAL;
3009
3010	do {
3011		struct page *page = vmalloc_to_page(kaddr);
3012		int ret;
3013
3014		ret = vm_insert_page(vma, uaddr, page);
3015		if (ret)
3016			return ret;
3017
3018		uaddr += PAGE_SIZE;
3019		kaddr += PAGE_SIZE;
3020		size -= PAGE_SIZE;
3021	} while (size > 0);
3022
3023	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3024
3025	return 0;
3026}
3027EXPORT_SYMBOL(remap_vmalloc_range_partial);
3028
3029/**
3030 * remap_vmalloc_range - map vmalloc pages to userspace
3031 * @vma:		vma to cover (map full range of vma)
3032 * @addr:		vmalloc memory
3033 * @pgoff:		number of pages into addr before first page to map
3034 *
3035 * Returns:	0 for success, -Exxx on failure
3036 *
3037 * This function checks that addr is a valid vmalloc'ed area, and
3039 * that criterion isn't met.
3039 * that criteria isn't met.
3040 *
3041 * Similar to remap_pfn_range() (see mm/memory.c)
3042 */
3043int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3044						unsigned long pgoff)
3045{
3046	return remap_vmalloc_range_partial(vma, vma->vm_start,
3047					   addr + (pgoff << PAGE_SHIFT),
3048					   vma->vm_end - vma->vm_start);
3049}
3050EXPORT_SYMBOL(remap_vmalloc_range);
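/*
 * Illustrative usage sketch: the classic pattern for exporting a
 * vmalloc_user() buffer from a driver's ->mmap() handler. The buffer
 * 'example_buf' is hypothetical and assumed to have been allocated
 * with vmalloc_user() (which sets VM_USERMAP) at init time.
 */
static void *example_buf;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}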
3051
3052/*
3053 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
3054 * have one.
3055 *
3056 * The purpose of this function is to make sure the vmalloc area
3057 * mappings are identical in all page-tables in the system.
3058 */
3059void __weak vmalloc_sync_all(void)
3060{
3061}
3062
3063
3064static int f(pte_t *pte, unsigned long addr, void *data)
3065{
3066	pte_t ***p = data;
3067
3068	if (p) {
3069		*(*p) = pte;
3070		(*p)++;
3071	}
3072	return 0;
3073}
3074
3075/**
3076 * alloc_vm_area - allocate a range of kernel address space
3077 * @size:	   size of the area
3078 * @ptes:	   returns the PTEs for the address space
3079 *
3080 * Returns:	NULL on failure, vm_struct on success
3081 *
3082 * This function reserves a range of kernel address space, and
3083 * allocates pagetables to map that range.  No actual mappings
3084 * are created.
3085 *
3086 * If @ptes is non-NULL, pointers to the PTEs (in init_mm)
3087 * allocated for the VM area are returned.
3088 */
3089struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes)
3090{
3091	struct vm_struct *area;
3092
3093	area = get_vm_area_caller(size, VM_IOREMAP,
3094				__builtin_return_address(0));
3095	if (area == NULL)
3096		return NULL;
3097
3098	/*
3099	 * This ensures that page tables are constructed for this region
3100	 * of kernel virtual address space and mapped into init_mm.
3101	 */
3102	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
3103				size, f, ptes ? &ptes : NULL)) {
3104		free_vm_area(area);
3105		return NULL;
3106	}
3107
3108	return area;
3109}
3110EXPORT_SYMBOL_GPL(alloc_vm_area);
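/*
 * Illustrative usage sketch (modeled loosely on the Xen ring-mapping
 * code): reserve one page of address space together with its page
 * table. No mapping exists afterwards; the caller is expected to
 * install PTEs itself, e.g. via a hypercall, using the returned slot.
 */
static struct vm_struct *example_alloc_vm_area(void)
{
	pte_t *ptes[1];
	struct vm_struct *area = alloc_vm_area(PAGE_SIZE, ptes);

	if (!area)
		return NULL;

	/* ptes[0] now points at the empty PTE slot backing area->addr */
	return area;	/* drop with free_vm_area() when done */
}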
3111
3112void free_vm_area(struct vm_struct *area)
3113{
3114	struct vm_struct *ret;
3115	ret = remove_vm_area(area->addr);
3116	BUG_ON(ret != area);
3117	kfree(area);
3118}
3119EXPORT_SYMBOL_GPL(free_vm_area);
3120
3121#ifdef CONFIG_SMP
3122static struct vmap_area *node_to_va(struct rb_node *n)
3123{
3124	return rb_entry_safe(n, struct vmap_area, rb_node);
3125}
3126
3127/**
3128 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3129 * @addr: target address
3130 *
3131 * Returns: the vmap_area if it is found. If there is no such area,
3132 *   the closest preceding (highest, in reverse order) vmap_area is
3133 *   returned, i.e. va->va_start < addr && va->va_end < addr, or NULL
3134 *   if there are no areas before @addr.
3135 */
3136static struct vmap_area *
3137pvm_find_va_enclose_addr(unsigned long addr)
3138{
3139	struct vmap_area *va, *tmp;
3140	struct rb_node *n;
3141
3142	n = free_vmap_area_root.rb_node;
3143	va = NULL;
3144
3145	while (n) {
3146		tmp = rb_entry(n, struct vmap_area, rb_node);
3147		if (tmp->va_start <= addr) {
3148			va = tmp;
3149			if (tmp->va_end >= addr)
3150				break;
3151
3152			n = n->rb_right;
3153		} else {
3154			n = n->rb_left;
3155		}
3156	}
3157
3158	return va;
3159}
3160
3161/**
3162 * pvm_determine_end_from_reverse - find the highest aligned address
3163 * of a free block below VMALLOC_END
3164 * @va:
3165 *   in - the VA we start the search from (reverse order);
3166 *   out - the VA with the highest aligned end address.
3167 *
3168 * Returns: determined end address within vmap_area
3169 */
3170static unsigned long
3171pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3172{
3173	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3174	unsigned long addr;
3175
3176	if (likely(*va)) {
3177		list_for_each_entry_from_reverse((*va),
3178				&free_vmap_area_list, list) {
3179			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3180			if ((*va)->va_start < addr)
3181				return addr;
3182		}
3183	}
3184
3185	return 0;
3186}
3187
3188/**
3189 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3190 * @offsets: array containing offset of each area
3191 * @sizes: array containing size of each area
3192 * @nr_vms: the number of areas to allocate
3193 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3194 *
3195 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3196 *	    vm_structs on success, %NULL on failure
3197 *
3198 * Percpu allocator wants to use congruent vm areas so that it can
3199 * maintain the offsets among percpu areas.  This function allocates
3200 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3201 * be scattered pretty far, distance between two areas easily going up
3202 * to gigabytes.  To avoid interacting with regular vmallocs, these
3203 * areas are allocated from the top.
3204 *
3205 * Despite its complicated look, this allocator is rather simple. It
3206 * does everything top-down and scans free blocks from the end looking
3207 * for a matching base. While scanning, if any of the areas do not fit,
3208 * the base address is pulled down to fit the area. Scanning is repeated
3209 * until all the areas fit and then all necessary data structures are
3210 * inserted and the result is returned.
3211 */
3212struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3213				     const size_t *sizes, int nr_vms,
3214				     size_t align)
3215{
3216	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3217	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3218	struct vmap_area **vas, *va;
3219	struct vm_struct **vms;
3220	int area, area2, last_area, term_area;
3221	unsigned long base, start, size, end, last_end;
3222	bool purged = false;
3223	enum fit_type type;
3224
3225	/* verify parameters and allocate data structures */
3226	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3227	for (last_area = 0, area = 0; area < nr_vms; area++) {
3228		start = offsets[area];
3229		end = start + sizes[area];
3230
3231		/* is everything aligned properly? */
3232		BUG_ON(!IS_ALIGNED(offsets[area], align));
3233		BUG_ON(!IS_ALIGNED(sizes[area], align));
3234
3235		/* detect the area with the highest address */
3236		if (start > offsets[last_area])
3237			last_area = area;
3238
3239		for (area2 = area + 1; area2 < nr_vms; area2++) {
3240			unsigned long start2 = offsets[area2];
3241			unsigned long end2 = start2 + sizes[area2];
3242
3243			BUG_ON(start2 < end && start < end2);
3244		}
3245	}
3246	last_end = offsets[last_area] + sizes[last_area];
3247
3248	if (vmalloc_end - vmalloc_start < last_end) {
3249		WARN_ON(true);
3250		return NULL;
3251	}
3252
3253	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3254	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3255	if (!vas || !vms)
3256		goto err_free2;
3257
3258	for (area = 0; area < nr_vms; area++) {
3259		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3260		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3261		if (!vas[area] || !vms[area])
3262			goto err_free;
3263	}
3264retry:
3265	spin_lock(&vmap_area_lock);
3266
3267	/* start scanning - we scan from the top, begin with the last area */
3268	area = term_area = last_area;
3269	start = offsets[area];
3270	end = start + sizes[area];
3271
3272	va = pvm_find_va_enclose_addr(vmalloc_end);
3273	base = pvm_determine_end_from_reverse(&va, align) - end;
3274
3275	while (true) {
3276		/*
3277		 * base might have underflowed, add last_end before
3278		 * comparing.
3279		 */
3280		if (base + last_end < vmalloc_start + last_end)
3281			goto overflow;
3282
3283		/*
3284		 * Fitting base has not been found.
3285		 */
3286		if (va == NULL)
3287			goto overflow;
3288
3289		/*
3290		 * If the required width exceeds the current VA block, move
3291		 * base downwards and then recheck.
3292		 */
3293		if (base + end > va->va_end) {
3294			base = pvm_determine_end_from_reverse(&va, align) - end;
3295			term_area = area;
3296			continue;
3297		}
3298
3299		/*
3300		 * If this VA does not fit, move base downwards and recheck.
3301		 */
3302		if (base + start < va->va_start) {
3303			va = node_to_va(rb_prev(&va->rb_node));
3304			base = pvm_determine_end_from_reverse(&va, align) - end;
3305			term_area = area;
3306			continue;
3307		}
3308
3309		/*
3310		 * This area fits, move on to the previous one.  If
3311		 * the previous one is the terminal one, we're done.
3312		 */
3313		area = (area + nr_vms - 1) % nr_vms;
3314		if (area == term_area)
3315			break;
3316
3317		start = offsets[area];
3318		end = start + sizes[area];
3319		va = pvm_find_va_enclose_addr(base + end);
3320	}
3321
3322	/* we've found a fitting base, insert all va's */
3323	for (area = 0; area < nr_vms; area++) {
3324		int ret;
3325
3326		start = base + offsets[area];
3327		size = sizes[area];
3328
3329		va = pvm_find_va_enclose_addr(start);
3330		if (WARN_ON_ONCE(va == NULL))
3331			/* It is a BUG(), but trigger recovery instead. */
3332			goto recovery;
3333
3334		type = classify_va_fit_type(va, start, size);
3335		if (WARN_ON_ONCE(type == NOTHING_FIT))
3336			/* It is a BUG(), but trigger recovery instead. */
3337			goto recovery;
3338
3339		ret = adjust_va_to_fit_type(va, start, size, type);
3340		if (unlikely(ret))
3341			goto recovery;
3342
3343		/* Allocated area. */
3344		va = vas[area];
3345		va->va_start = start;
3346		va->va_end = start + size;
3347
3348		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
3349	}
3350
3351	spin_unlock(&vmap_area_lock);
3352
3353	/* insert all vm's */
3354	for (area = 0; area < nr_vms; area++)
3355		setup_vmalloc_vm(vms[area], vas[area], VM_ALLOC,
3356				 pcpu_get_vm_areas);
3357
3358	kfree(vas);
3359	return vms;
3360
3361recovery:
3362	/* Remove previously inserted areas. */
3363	while (area--) {
3364		__free_vmap_area(vas[area]);
3365		vas[area] = NULL;
3366	}
3367
3368overflow:
3369	spin_unlock(&vmap_area_lock);
3370	if (!purged) {
3371		purge_vmap_area_lazy();
3372		purged = true;
3373
3374		/* Before "retry", check if we recover. */
3375		for (area = 0; area < nr_vms; area++) {
3376			if (vas[area])
3377				continue;
3378
3379			vas[area] = kmem_cache_zalloc(
3380				vmap_area_cachep, GFP_KERNEL);
3381			if (!vas[area])
3382				goto err_free;
3383		}
3384
3385		goto retry;
3386	}
3387
3388err_free:
3389	for (area = 0; area < nr_vms; area++) {
3390		if (vas[area])
3391			kmem_cache_free(vmap_area_cachep, vas[area]);
3392
3393		kfree(vms[area]);
3394	}
3395err_free2:
3396	kfree(vas);
3397	kfree(vms);
3398	return NULL;
3399}
3400
3401/**
3402 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3403 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3404 * @nr_vms: the number of allocated areas
3405 *
3406 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3407 */
3408void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3409{
3410	int i;
3411
3412	for (i = 0; i < nr_vms; i++)
3413		free_vm_area(vms[i]);
3414	kfree(vms);
3415}
3416#endif	/* CONFIG_SMP */
3417
3418#ifdef CONFIG_PROC_FS
3419static void *s_start(struct seq_file *m, loff_t *pos)
3420	__acquires(&vmap_area_lock)
3421{
3422	spin_lock(&vmap_area_lock);
3423	return seq_list_start(&vmap_area_list, *pos);
3424}
3425
3426static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3427{
3428	return seq_list_next(p, &vmap_area_list, pos);
3429}
3430
3431static void s_stop(struct seq_file *m, void *p)
3432	__releases(&vmap_area_lock)
3433{
3434	spin_unlock(&vmap_area_lock);
3435}
3436
3437static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3438{
3439	if (IS_ENABLED(CONFIG_NUMA)) {
3440		unsigned int nr, *counters = m->private;
3441
3442		if (!counters)
3443			return;
3444
3445		if (v->flags & VM_UNINITIALIZED)
3446			return;
3447		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3448		smp_rmb();
3449
3450		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3451
3452		for (nr = 0; nr < v->nr_pages; nr++)
3453			counters[page_to_nid(v->pages[nr])]++;
3454
3455		for_each_node_state(nr, N_HIGH_MEMORY)
3456			if (counters[nr])
3457				seq_printf(m, " N%u=%u", nr, counters[nr]);
3458	}
3459}
3460
3461static void show_purge_info(struct seq_file *m)
3462{
3463	struct llist_node *head;
3464	struct vmap_area *va;
3465
3466	head = READ_ONCE(vmap_purge_list.first);
3467	if (head == NULL)
3468		return;
3469
3470	llist_for_each_entry(va, head, purge_list) {
3471		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3472			(void *)va->va_start, (void *)va->va_end,
3473			va->va_end - va->va_start);
3474	}
3475}
3476
3477static int s_show(struct seq_file *m, void *p)
3478{
3479	struct vmap_area *va;
3480	struct vm_struct *v;
3481
3482	va = list_entry(p, struct vmap_area, list);
3483
3484	/*
3485	 * s_show can race with remove_vm_area(): a vmap area with !vm
3486	 * is either being torn down or is a vm_map_ram allocation.
3487	 */
3488	if (!va->vm) {
3489		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3490			(void *)va->va_start, (void *)va->va_end,
3491			va->va_end - va->va_start);
3492
3493		return 0;
3494	}
3495
3496	v = va->vm;
3497
3498	seq_printf(m, "0x%pK-0x%pK %7ld",
3499		v->addr, v->addr + v->size, v->size);
3500
3501	if (v->caller)
3502		seq_printf(m, " %pS", v->caller);
3503
3504	if (v->nr_pages)
3505		seq_printf(m, " pages=%d", v->nr_pages);
3506
3507	if (v->phys_addr)
3508		seq_printf(m, " phys=%pa", &v->phys_addr);
3509
3510	if (v->flags & VM_IOREMAP)
3511		seq_puts(m, " ioremap");
3512
3513	if (v->flags & VM_ALLOC)
3514		seq_puts(m, " vmalloc");
3515
3516	if (v->flags & VM_MAP)
3517		seq_puts(m, " vmap");
3518
3519	if (v->flags & VM_USERMAP)
3520		seq_puts(m, " user");
3521
3522	if (v->flags & VM_DMA_COHERENT)
3523		seq_puts(m, " dma-coherent");
3524
3525	if (is_vmalloc_addr(v->pages))
3526		seq_puts(m, " vpages");
3527
3528	show_numa_info(m, v);
3529	seq_putc(m, '\n');
3530
3531	/*
3532	 * As a final step, dump "unpurged" areas. Note that
3533	 * the entire "/proc/vmallocinfo" output will not be
3534	 * address-sorted, because the purge list is not
3535	 * sorted.
3536	 */
3537	if (list_is_last(&va->list, &vmap_area_list))
3538		show_purge_info(m);
3539
3540	return 0;
3541}
3542
3543static const struct seq_operations vmalloc_op = {
3544	.start = s_start,
3545	.next = s_next,
3546	.stop = s_stop,
3547	.show = s_show,
3548};
3549
3550static int __init proc_vmalloc_init(void)
3551{
3552	if (IS_ENABLED(CONFIG_NUMA))
3553		proc_create_seq_private("vmallocinfo", 0400, NULL,
3554				&vmalloc_op,
3555				nr_node_ids * sizeof(unsigned int), NULL);
3556	else
3557		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3558	return 0;
3559}
3560module_init(proc_vmalloc_init);
3561
3562#endif
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 *  Copyright (C) 1993  Linus Torvalds
   4 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
   5 *  SMP-safe vmalloc/vfree/ioremap, Tigran Aivazian <tigran@veritas.com>, May 2000
   6 *  Major rework to support vmap/vunmap, Christoph Hellwig, SGI, August 2002
   7 *  Numa awareness, Christoph Lameter, SGI, June 2005
   8 *  Improving global KVA allocator, Uladzislau Rezki, Sony, May 2019
   9 */
  10
  11#include <linux/vmalloc.h>
  12#include <linux/mm.h>
  13#include <linux/module.h>
  14#include <linux/highmem.h>
  15#include <linux/sched/signal.h>
  16#include <linux/slab.h>
  17#include <linux/spinlock.h>
  18#include <linux/interrupt.h>
  19#include <linux/proc_fs.h>
  20#include <linux/seq_file.h>
  21#include <linux/set_memory.h>
  22#include <linux/debugobjects.h>
  23#include <linux/kallsyms.h>
  24#include <linux/list.h>
  25#include <linux/notifier.h>
  26#include <linux/rbtree.h>
  27#include <linux/xarray.h>
  28#include <linux/io.h>
  29#include <linux/rcupdate.h>
  30#include <linux/pfn.h>
  31#include <linux/kmemleak.h>
  32#include <linux/atomic.h>
  33#include <linux/compiler.h>
  34#include <linux/llist.h>
  35#include <linux/bitops.h>
  36#include <linux/rbtree_augmented.h>
  37#include <linux/overflow.h>
  38#include <linux/pgtable.h>
  39#include <linux/uaccess.h>
  40#include <linux/hugetlb.h>
  41#include <asm/tlbflush.h>
  42#include <asm/shmparam.h>
  43
  44#include "internal.h"
  45#include "pgalloc-track.h"
  46
  47#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
  48static bool __ro_after_init vmap_allow_huge = true;
  49
  50static int __init set_nohugevmalloc(char *str)
  51{
  52	vmap_allow_huge = false;
  53	return 0;
  54}
  55early_param("nohugevmalloc", set_nohugevmalloc);
  56#else /* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
  57static const bool vmap_allow_huge = false;
  58#endif	/* CONFIG_HAVE_ARCH_HUGE_VMALLOC */
  59
  60bool is_vmalloc_addr(const void *x)
  61{
  62	unsigned long addr = (unsigned long)x;
  63
  64	return addr >= VMALLOC_START && addr < VMALLOC_END;
  65}
  66EXPORT_SYMBOL(is_vmalloc_addr);
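/*
 * Illustrative usage sketch: the canonical consumer of this helper is
 * the kvfree() pattern - releasing a buffer that may have come from
 * either kmalloc() or vmalloc().
 */
static void example_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}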
  67
  68struct vfree_deferred {
  69	struct llist_head list;
  70	struct work_struct wq;
  71};
  72static DEFINE_PER_CPU(struct vfree_deferred, vfree_deferred);
  73
  74static void __vunmap(const void *, int);
  75
  76static void free_work(struct work_struct *w)
  77{
  78	struct vfree_deferred *p = container_of(w, struct vfree_deferred, wq);
  79	struct llist_node *t, *llnode;
  80
  81	llist_for_each_safe(llnode, t, llist_del_all(&p->list))
  82		__vunmap((void *)llnode, 1);
  83}
  84
  85/*** Page table manipulation functions ***/
  86static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
  87			phys_addr_t phys_addr, pgprot_t prot,
  88			unsigned int max_page_shift, pgtbl_mod_mask *mask)
  89{
  90	pte_t *pte;
  91	u64 pfn;
  92	unsigned long size = PAGE_SIZE;
  93
  94	pfn = phys_addr >> PAGE_SHIFT;
  95	pte = pte_alloc_kernel_track(pmd, addr, mask);
  96	if (!pte)
  97		return -ENOMEM;
  98	do {
  99		BUG_ON(!pte_none(*pte));
 100
 101#ifdef CONFIG_HUGETLB_PAGE
 102		size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift);
 103		if (size != PAGE_SIZE) {
 104			pte_t entry = pfn_pte(pfn, prot);
 105
 106			entry = pte_mkhuge(entry);
 107			entry = arch_make_huge_pte(entry, ilog2(size), 0);
 108			set_huge_pte_at(&init_mm, addr, pte, entry);
 109			pfn += PFN_DOWN(size);
 110			continue;
 111		}
 112#endif
 113		set_pte_at(&init_mm, addr, pte, pfn_pte(pfn, prot));
 114		pfn++;
 115	} while (pte += PFN_DOWN(size), addr += size, addr != end);
 116	*mask |= PGTBL_PTE_MODIFIED;
 117	return 0;
 118}
 119
 120static int vmap_try_huge_pmd(pmd_t *pmd, unsigned long addr, unsigned long end,
 121			phys_addr_t phys_addr, pgprot_t prot,
 122			unsigned int max_page_shift)
 123{
 124	if (max_page_shift < PMD_SHIFT)
 125		return 0;
 126
 127	if (!arch_vmap_pmd_supported(prot))
 128		return 0;
 129
 130	if ((end - addr) != PMD_SIZE)
 131		return 0;
 132
 133	if (!IS_ALIGNED(addr, PMD_SIZE))
 134		return 0;
 135
 136	if (!IS_ALIGNED(phys_addr, PMD_SIZE))
 137		return 0;
 138
 139	if (pmd_present(*pmd) && !pmd_free_pte_page(pmd, addr))
 140		return 0;
 141
 142	return pmd_set_huge(pmd, phys_addr, prot);
 143}
 144
 145static int vmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 146			phys_addr_t phys_addr, pgprot_t prot,
 147			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 148{
 149	pmd_t *pmd;
 150	unsigned long next;
 151
 152	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
 153	if (!pmd)
 154		return -ENOMEM;
 155	do {
 156		next = pmd_addr_end(addr, end);
 157
 158		if (vmap_try_huge_pmd(pmd, addr, next, phys_addr, prot,
 159					max_page_shift)) {
 160			*mask |= PGTBL_PMD_MODIFIED;
 161			continue;
 162		}
 163
 164		if (vmap_pte_range(pmd, addr, next, phys_addr, prot, max_page_shift, mask))
 165			return -ENOMEM;
 166	} while (pmd++, phys_addr += (next - addr), addr = next, addr != end);
 167	return 0;
 168}
 169
 170static int vmap_try_huge_pud(pud_t *pud, unsigned long addr, unsigned long end,
 171			phys_addr_t phys_addr, pgprot_t prot,
 172			unsigned int max_page_shift)
 173{
 174	if (max_page_shift < PUD_SHIFT)
 175		return 0;
 176
 177	if (!arch_vmap_pud_supported(prot))
 178		return 0;
 179
 180	if ((end - addr) != PUD_SIZE)
 181		return 0;
 182
 183	if (!IS_ALIGNED(addr, PUD_SIZE))
 184		return 0;
 185
 186	if (!IS_ALIGNED(phys_addr, PUD_SIZE))
 187		return 0;
 188
 189	if (pud_present(*pud) && !pud_free_pmd_page(pud, addr))
 190		return 0;
 191
 192	return pud_set_huge(pud, phys_addr, prot);
 193}
 194
 195static int vmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 196			phys_addr_t phys_addr, pgprot_t prot,
 197			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 198{
 199	pud_t *pud;
 200	unsigned long next;
 201
 202	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
 203	if (!pud)
 204		return -ENOMEM;
 205	do {
 206		next = pud_addr_end(addr, end);
 207
 208		if (vmap_try_huge_pud(pud, addr, next, phys_addr, prot,
 209					max_page_shift)) {
 210			*mask |= PGTBL_PUD_MODIFIED;
 211			continue;
 212		}
 213
 214		if (vmap_pmd_range(pud, addr, next, phys_addr, prot,
 215					max_page_shift, mask))
 216			return -ENOMEM;
 217	} while (pud++, phys_addr += (next - addr), addr = next, addr != end);
 218	return 0;
 219}
 220
 221static int vmap_try_huge_p4d(p4d_t *p4d, unsigned long addr, unsigned long end,
 222			phys_addr_t phys_addr, pgprot_t prot,
 223			unsigned int max_page_shift)
 224{
 225	if (max_page_shift < P4D_SHIFT)
 226		return 0;
 227
 228	if (!arch_vmap_p4d_supported(prot))
 229		return 0;
 230
 231	if ((end - addr) != P4D_SIZE)
 232		return 0;
 233
 234	if (!IS_ALIGNED(addr, P4D_SIZE))
 235		return 0;
 236
 237	if (!IS_ALIGNED(phys_addr, P4D_SIZE))
 238		return 0;
 239
 240	if (p4d_present(*p4d) && !p4d_free_pud_page(p4d, addr))
 241		return 0;
 242
 243	return p4d_set_huge(p4d, phys_addr, prot);
 244}
 245
 246static int vmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 247			phys_addr_t phys_addr, pgprot_t prot,
 248			unsigned int max_page_shift, pgtbl_mod_mask *mask)
 249{
 250	p4d_t *p4d;
 251	unsigned long next;
 252
 253	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
 254	if (!p4d)
 255		return -ENOMEM;
 256	do {
 257		next = p4d_addr_end(addr, end);
 258
 259		if (vmap_try_huge_p4d(p4d, addr, next, phys_addr, prot,
 260					max_page_shift)) {
 261			*mask |= PGTBL_P4D_MODIFIED;
 262			continue;
 263		}
 264
 265		if (vmap_pud_range(p4d, addr, next, phys_addr, prot,
 266					max_page_shift, mask))
 267			return -ENOMEM;
 268	} while (p4d++, phys_addr += (next - addr), addr = next, addr != end);
 269	return 0;
 270}
 271
 272static int vmap_range_noflush(unsigned long addr, unsigned long end,
 273			phys_addr_t phys_addr, pgprot_t prot,
 274			unsigned int max_page_shift)
 275{
 276	pgd_t *pgd;
 277	unsigned long start;
 278	unsigned long next;
 279	int err;
 280	pgtbl_mod_mask mask = 0;
 281
 282	might_sleep();
 283	BUG_ON(addr >= end);
 284
 285	start = addr;
 286	pgd = pgd_offset_k(addr);
 287	do {
 288		next = pgd_addr_end(addr, end);
 289		err = vmap_p4d_range(pgd, addr, next, phys_addr, prot,
 290					max_page_shift, &mask);
 291		if (err)
 292			break;
 293	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
 294
 295	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 296		arch_sync_kernel_mappings(start, end);
 297
 298	return err;
 299}
 300
 301int vmap_range(unsigned long addr, unsigned long end,
 302			phys_addr_t phys_addr, pgprot_t prot,
 303			unsigned int max_page_shift)
 304{
 305	int err;
 306
 307	err = vmap_range_noflush(addr, end, phys_addr, prot, max_page_shift);
 308	flush_cache_vmap(addr, end);
 309
 310	return err;
 311}
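/*
 * Usage sketch (illustrative only, not part of this file): an
 * ioremap-style caller that has already reserved the kernel virtual
 * range [vaddr, vaddr + SZ_2M) can map a 2MB-aligned physical region
 * and allow PMD-sized huge mappings. "vaddr" and "phys" are
 * hypothetical:
 *
 *	err = vmap_range(vaddr, vaddr + SZ_2M, phys, PAGE_KERNEL,
 *			 PMD_SHIFT);
 *	if (err)
 *		vunmap_range(vaddr, vaddr + SZ_2M);
 */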
 312
 313static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 314			     pgtbl_mod_mask *mask)
 315{
 316	pte_t *pte;
 317
 318	pte = pte_offset_kernel(pmd, addr);
 319	do {
 320		pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
 321		WARN_ON(!pte_none(ptent) && !pte_present(ptent));
 322	} while (pte++, addr += PAGE_SIZE, addr != end);
 323	*mask |= PGTBL_PTE_MODIFIED;
 324}
 325
 326static void vunmap_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 327			     pgtbl_mod_mask *mask)
 328{
 329	pmd_t *pmd;
 330	unsigned long next;
 331	int cleared;
 332
 333	pmd = pmd_offset(pud, addr);
 334	do {
 335		next = pmd_addr_end(addr, end);
 336
 337		cleared = pmd_clear_huge(pmd);
 338		if (cleared || pmd_bad(*pmd))
 339			*mask |= PGTBL_PMD_MODIFIED;
 340
 341		if (cleared)
 342			continue;
 343		if (pmd_none_or_clear_bad(pmd))
 344			continue;
 345		vunmap_pte_range(pmd, addr, next, mask);
 346
 347		cond_resched();
 348	} while (pmd++, addr = next, addr != end);
 349}
 350
 351static void vunmap_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 352			     pgtbl_mod_mask *mask)
 353{
 354	pud_t *pud;
 355	unsigned long next;
 356	int cleared;
 357
 358	pud = pud_offset(p4d, addr);
 359	do {
 360		next = pud_addr_end(addr, end);
 361
 362		cleared = pud_clear_huge(pud);
 363		if (cleared || pud_bad(*pud))
 364			*mask |= PGTBL_PUD_MODIFIED;
 365
 366		if (cleared)
 367			continue;
 368		if (pud_none_or_clear_bad(pud))
 369			continue;
 370		vunmap_pmd_range(pud, addr, next, mask);
 371	} while (pud++, addr = next, addr != end);
 372}
 373
 374static void vunmap_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
 375			     pgtbl_mod_mask *mask)
 376{
 377	p4d_t *p4d;
 378	unsigned long next;
 379	int cleared;
 380
 381	p4d = p4d_offset(pgd, addr);
 382	do {
 383		next = p4d_addr_end(addr, end);
 384
 385		cleared = p4d_clear_huge(p4d);
 386		if (cleared || p4d_bad(*p4d))
 387			*mask |= PGTBL_P4D_MODIFIED;
 388
 389		if (cleared)
 390			continue;
 391		if (p4d_none_or_clear_bad(p4d))
 392			continue;
 393		vunmap_pud_range(p4d, addr, next, mask);
 394	} while (p4d++, addr = next, addr != end);
 395}
 396
 397/*
 398 * vunmap_range_noflush is similar to vunmap_range, but does not
 399 * flush caches or TLBs.
 400 *
 401 * The caller is responsible for calling flush_cache_vunmap() before calling
 402 * this function, and flush_tlb_kernel_range after it has returned
 403 * successfully (and before the addresses are expected to cause a page fault
 404 * or be re-mapped for something else, if TLB flushes are being delayed or
 405 * coalesced).
 406 *
 407 * This is an internal function only. Do not use outside mm/.
 408 */
 409void vunmap_range_noflush(unsigned long start, unsigned long end)
 410{
 411	unsigned long next;
 412	pgd_t *pgd;
 413	unsigned long addr = start;
 414	pgtbl_mod_mask mask = 0;
 415
 416	BUG_ON(addr >= end);
 417	pgd = pgd_offset_k(addr);
 418	do {
 419		next = pgd_addr_end(addr, end);
 420		if (pgd_bad(*pgd))
 421			mask |= PGTBL_PGD_MODIFIED;
 422		if (pgd_none_or_clear_bad(pgd))
 423			continue;
 424		vunmap_p4d_range(pgd, addr, next, &mask);
 425	} while (pgd++, addr = next, addr != end);
 426
 427	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 428		arch_sync_kernel_mappings(start, end);
 429}
 430
 431/**
 432 * vunmap_range - unmap kernel virtual addresses
 433 * @addr: start of the VM area to unmap
 434 * @end: end of the VM area to unmap (non-inclusive)
 435 *
 436 * Clears any present PTEs in the virtual address range, flushes TLBs and
 437 * caches. Any subsequent access to the address before it has been re-mapped
 438 * is a kernel bug.
 439 */
 440void vunmap_range(unsigned long addr, unsigned long end)
 441{
 442	flush_cache_vunmap(addr, end);
 443	vunmap_range_noflush(addr, end);
 444	flush_tlb_kernel_range(addr, end);
 445}
 446
 447static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
 448		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 449		pgtbl_mod_mask *mask)
 450{
 451	pte_t *pte;
 452
 453	/*
 454	 * nr is a running index into the array which helps higher level
 455	 * callers keep track of where we're up to.
 456	 */
 457
 458	pte = pte_alloc_kernel_track(pmd, addr, mask);
 459	if (!pte)
 460		return -ENOMEM;
 461	do {
 462		struct page *page = pages[*nr];
 463
 464		if (WARN_ON(!pte_none(*pte)))
 465			return -EBUSY;
 466		if (WARN_ON(!page))
 467			return -ENOMEM;
 468		set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
 469		(*nr)++;
 470	} while (pte++, addr += PAGE_SIZE, addr != end);
 471	*mask |= PGTBL_PTE_MODIFIED;
 472	return 0;
 473}
 474
 475static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
 476		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 477		pgtbl_mod_mask *mask)
 478{
 479	pmd_t *pmd;
 480	unsigned long next;
 481
 482	pmd = pmd_alloc_track(&init_mm, pud, addr, mask);
 483	if (!pmd)
 484		return -ENOMEM;
 485	do {
 486		next = pmd_addr_end(addr, end);
 487		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
 488			return -ENOMEM;
 489	} while (pmd++, addr = next, addr != end);
 490	return 0;
 491}
 492
 493static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
 494		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 495		pgtbl_mod_mask *mask)
 496{
 497	pud_t *pud;
 498	unsigned long next;
 499
 500	pud = pud_alloc_track(&init_mm, p4d, addr, mask);
 501	if (!pud)
 502		return -ENOMEM;
 503	do {
 504		next = pud_addr_end(addr, end);
 505		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
 506			return -ENOMEM;
 507	} while (pud++, addr = next, addr != end);
 508	return 0;
 509}
 510
 511static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
 512		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
 513		pgtbl_mod_mask *mask)
 514{
 515	p4d_t *p4d;
 516	unsigned long next;
 517
 518	p4d = p4d_alloc_track(&init_mm, pgd, addr, mask);
 519	if (!p4d)
 520		return -ENOMEM;
 521	do {
 522		next = p4d_addr_end(addr, end);
 523		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
 524			return -ENOMEM;
 525	} while (p4d++, addr = next, addr != end);
 526	return 0;
 527}
 528
 529static int vmap_small_pages_range_noflush(unsigned long addr, unsigned long end,
 530		pgprot_t prot, struct page **pages)
 531{
 532	unsigned long start = addr;
 533	pgd_t *pgd;
 534	unsigned long next;
 535	int err = 0;
 536	int nr = 0;
 537	pgtbl_mod_mask mask = 0;
 538
 539	BUG_ON(addr >= end);
 540	pgd = pgd_offset_k(addr);
 541	do {
 542		next = pgd_addr_end(addr, end);
 543		if (pgd_bad(*pgd))
 544			mask |= PGTBL_PGD_MODIFIED;
 545		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
 546		if (err)
 547			return err;
 548	} while (pgd++, addr = next, addr != end);
 549
 550	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 551		arch_sync_kernel_mappings(start, end);
 552
 553	return 0;
 554}
 555
 556/*
 557 * vmap_pages_range_noflush is similar to vmap_pages_range, but does not
 558 * flush caches.
 559 *
 560 * The caller is responsible for calling flush_cache_vmap() after this
 561 * function returns successfully and before the addresses are accessed.
 562 *
 563 * This is an internal function only. Do not use outside mm/.
 564 */
 565int vmap_pages_range_noflush(unsigned long addr, unsigned long end,
 566		pgprot_t prot, struct page **pages, unsigned int page_shift)
 567{
 568	unsigned int i, nr = (end - addr) >> PAGE_SHIFT;
 569
 570	WARN_ON(page_shift < PAGE_SHIFT);
 571
 572	if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMALLOC) ||
 573			page_shift == PAGE_SHIFT)
 574		return vmap_small_pages_range_noflush(addr, end, prot, pages);
 575
 576	for (i = 0; i < nr; i += 1U << (page_shift - PAGE_SHIFT)) {
 577		int err;
 578
 579		err = vmap_range_noflush(addr, addr + (1UL << page_shift),
 580					__pa(page_address(pages[i])), prot,
 581					page_shift);
 582		if (err)
 583			return err;
 584
 585		addr += 1UL << page_shift;
 586	}
 587
 588	return 0;
 589}
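/*
 * A worked example of the huge-mapping loop above, assuming 4K base
 * pages and page_shift == PMD_SHIFT == 21: each iteration advances i
 * by 1U << (21 - 12) == 512 array slots and maps 1UL << 21 == 2MB of
 * physically contiguous memory starting at pages[i], so only every
 * 512th array entry needs to be inspected.
 */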
 590
 591/**
 592 * vmap_pages_range - map pages to a kernel virtual address
 593 * @addr: start of the VM area to map
 594 * @end: end of the VM area to map (non-inclusive)
 595 * @prot: page protection flags to use
 596 * @pages: pages to map (always PAGE_SIZE pages)
 597 * @page_shift: maximum shift that the pages may be mapped with, @pages must
 598 * be aligned and contiguous up to at least this shift.
 599 *
 600 * RETURNS:
 601 * 0 on success, -errno on failure.
 602 */
 603static int vmap_pages_range(unsigned long addr, unsigned long end,
 604		pgprot_t prot, struct page **pages, unsigned int page_shift)
 605{
 606	int err;
 607
 608	err = vmap_pages_range_noflush(addr, end, prot, pages, page_shift);
 609	flush_cache_vmap(addr, end);
 610	return err;
 611}
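/*
 * Usage sketch (illustrative): map four order-0 pages back-to-back
 * into a reserved range; passing PAGE_SHIFT means no huge-mapping
 * attempt is made. "addr" and "pages" are hypothetical:
 *
 *	err = vmap_pages_range(addr, addr + 4 * PAGE_SIZE, PAGE_KERNEL,
 *			       pages, PAGE_SHIFT);
 */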
 612
 613int is_vmalloc_or_module_addr(const void *x)
 614{
 615	/*
 616	 * ARM, x86-64 and sparc64 put modules in a special place,
 617	 * and fall back on vmalloc() if that fails. Others
 618 * just put them in the vmalloc space.
 619	 */
 620#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
 621	unsigned long addr = (unsigned long)x;
 622	if (addr >= MODULES_VADDR && addr < MODULES_END)
 623		return 1;
 624#endif
 625	return is_vmalloc_addr(x);
 626}
 627
 628/*
 629 * Walk a vmap address to the struct page it maps. Huge vmap mappings will
 630 * return the tail page that corresponds to the base page address, which
 631 * matches small vmap mappings.
 632 */
 633struct page *vmalloc_to_page(const void *vmalloc_addr)
 634{
 635	unsigned long addr = (unsigned long) vmalloc_addr;
 636	struct page *page = NULL;
 637	pgd_t *pgd = pgd_offset_k(addr);
 638	p4d_t *p4d;
 639	pud_t *pud;
 640	pmd_t *pmd;
 641	pte_t *ptep, pte;
 642
 643	/*
 644	 * XXX we might need to change this if we add VIRTUAL_BUG_ON for
 645	 * architectures that do not vmalloc module space
 646	 */
 647	VIRTUAL_BUG_ON(!is_vmalloc_or_module_addr(vmalloc_addr));
 648
 649	if (pgd_none(*pgd))
 650		return NULL;
 651	if (WARN_ON_ONCE(pgd_leaf(*pgd)))
 652		return NULL; /* XXX: no allowance for huge pgd */
 653	if (WARN_ON_ONCE(pgd_bad(*pgd)))
 654		return NULL;
 655
 656	p4d = p4d_offset(pgd, addr);
 657	if (p4d_none(*p4d))
 658		return NULL;
 659	if (p4d_leaf(*p4d))
 660		return p4d_page(*p4d) + ((addr & ~P4D_MASK) >> PAGE_SHIFT);
 661	if (WARN_ON_ONCE(p4d_bad(*p4d)))
 662		return NULL;
 663
 664	pud = pud_offset(p4d, addr);
 665	if (pud_none(*pud))
 666		return NULL;
 667	if (pud_leaf(*pud))
 668		return pud_page(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
 669	if (WARN_ON_ONCE(pud_bad(*pud)))
 670		return NULL;
 671
 672	pmd = pmd_offset(pud, addr);
 673	if (pmd_none(*pmd))
 674		return NULL;
 675	if (pmd_leaf(*pmd))
 676		return pmd_page(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
 677	if (WARN_ON_ONCE(pmd_bad(*pmd)))
 678		return NULL;
 679
 680	ptep = pte_offset_map(pmd, addr);
 681	pte = *ptep;
 682	if (pte_present(pte))
 683		page = pte_page(pte);
 684	pte_unmap(ptep);
 685
 686	return page;
 687}
 688EXPORT_SYMBOL(vmalloc_to_page);
 689
 690/*
 691 * Map a vmalloc()-space virtual address to the physical page frame number.
 692 */
 693unsigned long vmalloc_to_pfn(const void *vmalloc_addr)
 694{
 695	return page_to_pfn(vmalloc_to_page(vmalloc_addr));
 696}
 697EXPORT_SYMBOL(vmalloc_to_pfn);
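/*
 * Usage sketch (illustrative): walk the pages backing a vmalloc'ed
 * buffer, e.g. to build a scatterlist. "buf" (a page-aligned char *)
 * and "size" are hypothetical:
 *
 *	unsigned long off;
 *	struct page *page;
 *
 *	for (off = 0; off < size; off += PAGE_SIZE)
 *		page = vmalloc_to_page(buf + off);
 */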
 698
 699
 700/*** Global kva allocator ***/
 701
 702#define DEBUG_AUGMENT_PROPAGATE_CHECK 0
 703#define DEBUG_AUGMENT_LOWEST_MATCH_CHECK 0
 704
 705
 706static DEFINE_SPINLOCK(vmap_area_lock);
 707static DEFINE_SPINLOCK(free_vmap_area_lock);
 708/* Export for kexec only */
 709LIST_HEAD(vmap_area_list);
 710static struct rb_root vmap_area_root = RB_ROOT;
 711static bool vmap_initialized __read_mostly;
 712
 713static struct rb_root purge_vmap_area_root = RB_ROOT;
 714static LIST_HEAD(purge_vmap_area_list);
 715static DEFINE_SPINLOCK(purge_vmap_area_lock);
 716
 717/*
 718 * This kmem_cache is used for vmap_area objects. Instead of
 719 * allocating from slab we reuse an object from this cache to
 720 * make things faster, especially in the "no edge" splitting
 721 * of a free block.
 722 */
 723static struct kmem_cache *vmap_area_cachep;
 724
 725/*
 726 * This linked list is used together with free_vmap_area_root.
 727 * It gives O(1) access to prev/next to perform fast coalescing.
 728 */
 729static LIST_HEAD(free_vmap_area_list);
 730
 731/*
 732 * This augmented red-black tree represents the free vmap space.
 733 * All vmap_area objects in this tree are sorted by va->va_start
 734 * address. It is used for allocation and merging when a vmap
 735 * object is released.
 736 *
 737 * Each vmap_area node contains a maximum available free block
 738 * of its sub-tree, right or left. Therefore it is possible to
 739 * find the lowest match of a free area.
 740 */
 741static struct rb_root free_vmap_area_root = RB_ROOT;
 742
 743/*
 744 * Preload a CPU with one object for the "no edge" split case. The
 745 * aim is to get rid of allocations from the atomic context, thus
 746 * allowing the use of more permissive allocation masks.
 747 */
 748static DEFINE_PER_CPU(struct vmap_area *, ne_fit_preload_node);
 749
 750static __always_inline unsigned long
 751va_size(struct vmap_area *va)
 752{
 753	return (va->va_end - va->va_start);
 754}
 755
 756static __always_inline unsigned long
 757get_subtree_max_size(struct rb_node *node)
 758{
 759	struct vmap_area *va;
 760
 761	va = rb_entry_safe(node, struct vmap_area, rb_node);
 762	return va ? va->subtree_max_size : 0;
 763}
 764
 765/*
 766 * Gets called when the node is removed and the tree is rotated.
 767 */
 768static __always_inline unsigned long
 769compute_subtree_max_size(struct vmap_area *va)
 770{
 771	return max3(va_size(va),
 772		get_subtree_max_size(va->rb_node.rb_left),
 773		get_subtree_max_size(va->rb_node.rb_right));
 774}
 775
 776RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb,
 777	struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size)
 778
 779static void purge_vmap_area_lazy(void);
 780static BLOCKING_NOTIFIER_HEAD(vmap_notify_list);
 781static unsigned long lazy_max_pages(void);
 782
 783static atomic_long_t nr_vmalloc_pages;
 784
 785unsigned long vmalloc_nr_pages(void)
 786{
 787	return atomic_long_read(&nr_vmalloc_pages);
 788}
 789
 790static struct vmap_area *__find_vmap_area(unsigned long addr)
 791{
 792	struct rb_node *n = vmap_area_root.rb_node;
 793
 794	while (n) {
 795		struct vmap_area *va;
 796
 797		va = rb_entry(n, struct vmap_area, rb_node);
 798		if (addr < va->va_start)
 799			n = n->rb_left;
 800		else if (addr >= va->va_end)
 801			n = n->rb_right;
 802		else
 803			return va;
 804	}
 805
 806	return NULL;
 807}
 808
 809/*
 810 * This function returns the address of the parent node
 811 * and its left or right link for further processing.
 812 *
 813 * Otherwise NULL is returned. In that case all further
 814 * steps regarding the insertion of a conflicting, overlapping
 815 * range have to be declined; it is actually considered a bug.
 816 */
 817static __always_inline struct rb_node **
 818find_va_links(struct vmap_area *va,
 819	struct rb_root *root, struct rb_node *from,
 820	struct rb_node **parent)
 821{
 822	struct vmap_area *tmp_va;
 823	struct rb_node **link;
 824
 825	if (root) {
 826		link = &root->rb_node;
 827		if (unlikely(!*link)) {
 828			*parent = NULL;
 829			return link;
 830		}
 831	} else {
 832		link = &from;
 833	}
 834
 835	/*
 836	 * Go to the bottom of the tree. When we hit the last point
 837	 * we end up with the parent rb_node and the correct direction,
 838	 * named link, where the new va->rb_node will be attached.
 839	 */
 840	do {
 841		tmp_va = rb_entry(*link, struct vmap_area, rb_node);
 842
 843		/*
 844		 * During the traversal we also do some sanity checks.
 845		 * Trigger the WARN() if there are side (left/right)
 846		 * or full overlaps.
 847		 */
 848		if (va->va_start < tmp_va->va_end &&
 849				va->va_end <= tmp_va->va_start)
 850			link = &(*link)->rb_left;
 851		else if (va->va_end > tmp_va->va_start &&
 852				va->va_start >= tmp_va->va_end)
 853			link = &(*link)->rb_right;
 854		else {
 855			WARN(1, "vmalloc bug: 0x%lx-0x%lx overlaps with 0x%lx-0x%lx\n",
 856				va->va_start, va->va_end, tmp_va->va_start, tmp_va->va_end);
 857
 858			return NULL;
 859		}
 860	} while (*link);
 861
 862	*parent = &tmp_va->rb_node;
 863	return link;
 864}
 865
 866static __always_inline struct list_head *
 867get_va_next_sibling(struct rb_node *parent, struct rb_node **link)
 868{
 869	struct list_head *list;
 870
 871	if (unlikely(!parent))
 872		/*
 873		 * The red-black tree where we try to find VA neighbors
 874		 * before merging or inserting is empty, which means
 875		 * there is no free vmap space. Normally it does not
 876		 * happen but we handle this case anyway.
 877		 */
 878		return NULL;
 879
 880	list = &rb_entry(parent, struct vmap_area, rb_node)->list;
 881	return (&parent->rb_right == link ? list->next : list);
 882}
 883
 884static __always_inline void
 885link_va(struct vmap_area *va, struct rb_root *root,
 886	struct rb_node *parent, struct rb_node **link, struct list_head *head)
 887{
 888	/*
 889	 * VA is not yet in the list, but we can
 890	 * identify its future previous list_head node.
 891	 */
 892	if (likely(parent)) {
 893		head = &rb_entry(parent, struct vmap_area, rb_node)->list;
 894		if (&parent->rb_right != link)
 895			head = head->prev;
 896	}
 897
 898	/* Insert to the rb-tree */
 899	rb_link_node(&va->rb_node, parent, link);
 900	if (root == &free_vmap_area_root) {
 901		/*
 902		 * Some explanation here. Just perform simple insertion
 903		 * to the tree. We do not set va->subtree_max_size to
 904		 * its current size before calling rb_insert_augmented().
 905		 * This is because we populate the tree from the bottom
 906		 * to parent levels when the node _is_ in the tree.
 907		 *
 908		 * Therefore we set subtree_max_size to zero after insertion,
 909		 * to let __augment_tree_propagate_from() put everything in
 910		 * the correct order later on.
 911		 */
 912		rb_insert_augmented(&va->rb_node,
 913			root, &free_vmap_area_rb_augment_cb);
 914		va->subtree_max_size = 0;
 915	} else {
 916		rb_insert_color(&va->rb_node, root);
 917	}
 918
 919	/* Address-sort this list */
 920	list_add(&va->list, head);
 921}
 922
 923static __always_inline void
 924unlink_va(struct vmap_area *va, struct rb_root *root)
 925{
 926	if (WARN_ON(RB_EMPTY_NODE(&va->rb_node)))
 927		return;
 928
 929	if (root == &free_vmap_area_root)
 930		rb_erase_augmented(&va->rb_node,
 931			root, &free_vmap_area_rb_augment_cb);
 932	else
 933		rb_erase(&va->rb_node, root);
 934
 935	list_del(&va->list);
 936	RB_CLEAR_NODE(&va->rb_node);
 937}
 938
 939#if DEBUG_AUGMENT_PROPAGATE_CHECK
 940static void
 941augment_tree_propagate_check(void)
 942{
 943	struct vmap_area *va;
 944	unsigned long computed_size;
 945
 946	list_for_each_entry(va, &free_vmap_area_list, list) {
 947		computed_size = compute_subtree_max_size(va);
 948		if (computed_size != va->subtree_max_size)
 949			pr_emerg("tree is corrupted: %lu, %lu\n",
 950				va_size(va), va->subtree_max_size);
 951	}
 952}
 953#endif
 954
 955/*
 956 * This function populates subtree_max_size from the bottom towards
 957 * upper levels, starting from the VA point. The propagation must be
 958 * done when the VA size is modified by changing its va_start/va_end,
 959 * or when a new VA is inserted into the tree.
 960 *
 961 * It means that __augment_tree_propagate_from() must be called:
 962 * - After VA has been inserted into the tree (free path);
 963 * - After VA has been shrunk (allocation path);
 964 * - After VA has been increased (merging path).
 965 *
 966 * Please note that this does not mean that upper parent nodes
 967 * and their subtree_max_size are recalculated all the time up
 968 * to the root node.
 969 *
 970 *       4--8
 971 *        /\
 972 *       /  \
 973 *      /    \
 974 *    2--2  8--8
 975 *
 976 * For example, if we modify node 4, shrinking it to 2, then
 977 * no modification is required. If we shrink node 2 to 1,
 978 * only its subtree_max_size is updated and set to 1. If we
 979 * shrink node 8 to 6, then its subtree_max_size is set to 6
 980 * and the parent node becomes 4--6.
 981 */
 982static __always_inline void
 983augment_tree_propagate_from(struct vmap_area *va)
 984{
 985	/*
 986	 * Populate the tree from bottom towards the root until
 987	 * the calculated maximum available size of a checked node
 988	 * is equal to its current one.
 989	 */
 990	free_vmap_area_rb_augment_cb_propagate(&va->rb_node, NULL);
 991
 992#if DEBUG_AUGMENT_PROPAGATE_CHECK
 993	augment_tree_propagate_check();
 994#endif
 995}
 996
 997static void
 998insert_vmap_area(struct vmap_area *va,
 999	struct rb_root *root, struct list_head *head)
1000{
1001	struct rb_node **link;
1002	struct rb_node *parent;
1003
1004	link = find_va_links(va, root, NULL, &parent);
1005	if (link)
1006		link_va(va, root, parent, link, head);
1007}
1008
1009static void
1010insert_vmap_area_augment(struct vmap_area *va,
1011	struct rb_node *from, struct rb_root *root,
1012	struct list_head *head)
1013{
1014	struct rb_node **link;
1015	struct rb_node *parent;
1016
1017	if (from)
1018		link = find_va_links(va, NULL, from, &parent);
1019	else
1020		link = find_va_links(va, root, NULL, &parent);
1021
1022	if (link) {
1023		link_va(va, root, parent, link, head);
1024		augment_tree_propagate_from(va);
1025	}
1026}
1027
1028/*
1029 * Merge a de-allocated chunk of VA memory with the previous
1030 * and next free blocks. If coalescing is not done, a new
1031 * free area is inserted. If the VA has been merged, it is
1032 * freed.
1033 *
1034 * Please note, it can return NULL in case of overlapping
1035 * ranges, following a WARN() report. Despite this being
1036 * buggy behaviour, the system can stay alive and keep
1037 * going.
1038 */
1039static __always_inline struct vmap_area *
1040merge_or_add_vmap_area(struct vmap_area *va,
1041	struct rb_root *root, struct list_head *head)
1042{
1043	struct vmap_area *sibling;
1044	struct list_head *next;
1045	struct rb_node **link;
1046	struct rb_node *parent;
1047	bool merged = false;
1048
1049	/*
1050	 * Find a place in the tree where VA potentially will be
1051	 * inserted, unless it is merged with its sibling/siblings.
1052	 */
1053	link = find_va_links(va, root, NULL, &parent);
1054	if (!link)
1055		return NULL;
1056
1057	/*
1058	 * Get the next node of VA to check if merging can be done.
1059	 */
1060	next = get_va_next_sibling(parent, link);
1061	if (unlikely(next == NULL))
1062		goto insert;
1063
1064	/*
1065	 * start            end
1066	 * |                |
1067	 * |<------VA------>|<-----Next----->|
1068	 *                  |                |
1069	 *                  start            end
1070	 */
1071	if (next != head) {
1072		sibling = list_entry(next, struct vmap_area, list);
1073		if (sibling->va_start == va->va_end) {
1074			sibling->va_start = va->va_start;
1075
1076			/* Free vmap_area object. */
1077			kmem_cache_free(vmap_area_cachep, va);
1078
1079			/* Point to the new merged area. */
1080			va = sibling;
1081			merged = true;
1082		}
1083	}
1084
1085	/*
1086	 * start            end
1087	 * |                |
1088	 * |<-----Prev----->|<------VA------>|
1089	 *                  |                |
1090	 *                  start            end
1091	 */
1092	if (next->prev != head) {
1093		sibling = list_entry(next->prev, struct vmap_area, list);
1094		if (sibling->va_end == va->va_start) {
1095			/*
1096			 * If both neighbors are coalesced, it is important
1097			 * to unlink the "next" node first, followed by merging
1098			 * with "previous" one. Otherwise the tree might not be
1099			 * fully populated if a sibling's augmented value is
1100			 * "normalized" because of rotation operations.
1101			 */
1102			if (merged)
1103				unlink_va(va, root);
1104
1105			sibling->va_end = va->va_end;
1106
1107			/* Free vmap_area object. */
1108			kmem_cache_free(vmap_area_cachep, va);
1109
1110			/* Point to the new merged area. */
1111			va = sibling;
1112			merged = true;
1113		}
1114	}
1115
1116insert:
1117	if (!merged)
1118		link_va(va, root, parent, link, head);
1119
1120	return va;
1121}
1122
1123static __always_inline struct vmap_area *
1124merge_or_add_vmap_area_augment(struct vmap_area *va,
1125	struct rb_root *root, struct list_head *head)
1126{
1127	va = merge_or_add_vmap_area(va, root, head);
1128	if (va)
1129		augment_tree_propagate_from(va);
1130
1131	return va;
1132}
1133
1134static __always_inline bool
1135is_within_this_va(struct vmap_area *va, unsigned long size,
1136	unsigned long align, unsigned long vstart)
1137{
1138	unsigned long nva_start_addr;
1139
1140	if (va->va_start > vstart)
1141		nva_start_addr = ALIGN(va->va_start, align);
1142	else
1143		nva_start_addr = ALIGN(vstart, align);
1144
1145	/* Can overflow due to a big size or alignment. */
1146	if (nva_start_addr + size < nva_start_addr ||
1147			nva_start_addr < vstart)
1148		return false;
1149
1150	return (nva_start_addr + size <= va->va_end);
1151}
1152
1153/*
1154 * Find the first free block (lowest start address) in the tree
1155 * that will satisfy the request corresponding to the passed
1156 * parameters.
1157 */
1158static __always_inline struct vmap_area *
1159find_vmap_lowest_match(unsigned long size,
1160	unsigned long align, unsigned long vstart)
1161{
1162	struct vmap_area *va;
1163	struct rb_node *node;
1164	unsigned long length;
1165
1166	/* Start from the root. */
1167	node = free_vmap_area_root.rb_node;
1168
1169	/* Adjust the search size for alignment overhead. */
1170	length = size + align - 1;
1171
1172	while (node) {
1173		va = rb_entry(node, struct vmap_area, rb_node);
1174
1175		if (get_subtree_max_size(node->rb_left) >= length &&
1176				vstart < va->va_start) {
1177			node = node->rb_left;
1178		} else {
1179			if (is_within_this_va(va, size, align, vstart))
1180				return va;
1181
1182			/*
1183			 * It does not make sense to go deeper into the right
1184			 * sub-tree if it does not have a free block that is
1185			 * equal to or bigger than the requested search length.
1186			 */
1187			if (get_subtree_max_size(node->rb_right) >= length) {
1188				node = node->rb_right;
1189				continue;
1190			}
1191
1192			/*
1193			 * OK. We roll back and find the first right sub-tree
1194			 * that will satisfy the search criteria. It can happen
1195			 * only once due to the "vstart" restriction.
1196			 */
1197			while ((node = rb_parent(node))) {
1198				va = rb_entry(node, struct vmap_area, rb_node);
1199				if (is_within_this_va(va, size, align, vstart))
1200					return va;
1201
1202				if (get_subtree_max_size(node->rb_right) >= length &&
1203						vstart <= va->va_start) {
1204					node = node->rb_right;
1205					break;
1206				}
1207			}
1208		}
1209	}
1210
1211	return NULL;
1212}
1213
1214#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1215#include <linux/random.h>
1216
1217static struct vmap_area *
1218find_vmap_lowest_linear_match(unsigned long size,
1219	unsigned long align, unsigned long vstart)
1220{
1221	struct vmap_area *va;
1222
1223	list_for_each_entry(va, &free_vmap_area_list, list) {
1224		if (!is_within_this_va(va, size, align, vstart))
1225			continue;
1226
1227		return va;
1228	}
1229
1230	return NULL;
1231}
1232
1233static void
1234find_vmap_lowest_match_check(unsigned long size)
1235{
1236	struct vmap_area *va_1, *va_2;
1237	unsigned long vstart;
1238	unsigned int rnd;
1239
1240	get_random_bytes(&rnd, sizeof(rnd));
1241	vstart = VMALLOC_START + rnd;
1242
1243	va_1 = find_vmap_lowest_match(size, 1, vstart);
1244	va_2 = find_vmap_lowest_linear_match(size, 1, vstart);
1245
1246	if (va_1 != va_2)
1247		pr_emerg("not lowest: t: 0x%p, l: 0x%p, v: 0x%lx\n",
1248			va_1, va_2, vstart);
1249}
1250#endif
1251
1252enum fit_type {
1253	NOTHING_FIT = 0,
1254	FL_FIT_TYPE = 1,	/* full fit */
1255	LE_FIT_TYPE = 2,	/* left edge fit */
1256	RE_FIT_TYPE = 3,	/* right edge fit */
1257	NE_FIT_TYPE = 4		/* no edge fit */
1258};
1259
1260static __always_inline enum fit_type
1261classify_va_fit_type(struct vmap_area *va,
1262	unsigned long nva_start_addr, unsigned long size)
1263{
1264	enum fit_type type;
1265
1266	/* Check if it is within VA. */
1267	if (nva_start_addr < va->va_start ||
1268			nva_start_addr + size > va->va_end)
1269		return NOTHING_FIT;
1270
1271	/* Now classify. */
1272	if (va->va_start == nva_start_addr) {
1273		if (va->va_end == nva_start_addr + size)
1274			type = FL_FIT_TYPE;
1275		else
1276			type = LE_FIT_TYPE;
1277	} else if (va->va_end == nva_start_addr + size) {
1278		type = RE_FIT_TYPE;
1279	} else {
1280		type = NE_FIT_TYPE;
1281	}
1282
1283	return type;
1284}
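/*
 * A worked example of the classification above, for a free VA spanning
 * [0x1000, 0x9000) (addresses are illustrative only):
 *
 *	nva_start_addr 0x1000, size 0x8000 -> FL_FIT_TYPE (exact fit)
 *	nva_start_addr 0x1000, size 0x4000 -> LE_FIT_TYPE (left edge)
 *	nva_start_addr 0x5000, size 0x4000 -> RE_FIT_TYPE (right edge)
 *	nva_start_addr 0x3000, size 0x2000 -> NE_FIT_TYPE (middle split)
 */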
1285
1286static __always_inline int
1287adjust_va_to_fit_type(struct vmap_area *va,
1288	unsigned long nva_start_addr, unsigned long size,
1289	enum fit_type type)
1290{
1291	struct vmap_area *lva = NULL;
1292
1293	if (type == FL_FIT_TYPE) {
1294		/*
1295		 * No need to split VA, it fully fits.
1296		 *
1297		 * |               |
1298		 * V      NVA      V
1299		 * |---------------|
1300		 */
1301		unlink_va(va, &free_vmap_area_root);
1302		kmem_cache_free(vmap_area_cachep, va);
1303	} else if (type == LE_FIT_TYPE) {
1304		/*
1305		 * Split left edge of fit VA.
1306		 *
1307		 * |       |
1308		 * V  NVA  V   R
1309		 * |-------|-------|
1310		 */
1311		va->va_start += size;
1312	} else if (type == RE_FIT_TYPE) {
1313		/*
1314		 * Split right edge of fit VA.
1315		 *
1316		 *         |       |
1317		 *     L   V  NVA  V
1318		 * |-------|-------|
1319		 */
1320		va->va_end = nva_start_addr;
1321	} else if (type == NE_FIT_TYPE) {
1322		/*
1323		 * Split no edge of fit VA.
1324		 *
1325		 *     |       |
1326		 *   L V  NVA  V R
1327		 * |---|-------|---|
1328		 */
1329		lva = __this_cpu_xchg(ne_fit_preload_node, NULL);
1330		if (unlikely(!lva)) {
1331			/*
1332			 * For the percpu allocator we do not do any pre-allocation
1333			 * and leave it as it is. The reason is that it most likely
1334			 * never ends up with NE_FIT_TYPE splitting. For percpu
1335			 * allocations, offsets and sizes are aligned to a fixed
1336			 * align request, i.e. RE_FIT_TYPE and FL_FIT_TYPE are its
1337			 * main fitting cases.
1338			 *
1339			 * There are a few exceptions though; one example is the
1340			 * first allocation (early boot up), when we have "one"
1341			 * big free space that has to be split.
1342			 *
1343			 * We can also hit this path in case of regular "vmap"
1344			 * allocations, if "this" current CPU was not preloaded.
1345			 * See the comment in alloc_vmap_area() for why. If so,
1346			 * GFP_NOWAIT is used instead to get an extra object for
1347			 * split purposes. That is rare and most of the time does
1348			 * not occur.
1349			 *
1350			 * What happens if an allocation fails? Basically, an
1351			 * "overflow" path is triggered to purge lazily freed
1352			 * areas to free some memory, then the "retry" path is
1353			 * triggered to repeat one more time. See more details
1354			 * in the alloc_vmap_area() function.
1355			 */
1356			lva = kmem_cache_alloc(vmap_area_cachep, GFP_NOWAIT);
1357			if (!lva)
1358				return -1;
1359		}
1360
1361		/*
1362		 * Build the remainder.
1363		 */
1364		lva->va_start = va->va_start;
1365		lva->va_end = nva_start_addr;
1366
1367		/*
1368		 * Shrink this VA to remaining size.
1369		 */
1370		va->va_start = nva_start_addr + size;
1371	} else {
1372		return -1;
1373	}
1374
1375	if (type != FL_FIT_TYPE) {
1376		augment_tree_propagate_from(va);
1377
1378		if (lva)	/* type == NE_FIT_TYPE */
1379			insert_vmap_area_augment(lva, &va->rb_node,
1380				&free_vmap_area_root, &free_vmap_area_list);
1381	}
1382
1383	return 0;
1384}
1385
1386/*
1387 * Returns the start address of the newly allocated area on success.
1388 * Otherwise, vend is returned to indicate failure.
1389 */
1390static __always_inline unsigned long
1391__alloc_vmap_area(unsigned long size, unsigned long align,
1392	unsigned long vstart, unsigned long vend)
1393{
1394	unsigned long nva_start_addr;
1395	struct vmap_area *va;
1396	enum fit_type type;
1397	int ret;
1398
1399	va = find_vmap_lowest_match(size, align, vstart);
1400	if (unlikely(!va))
1401		return vend;
1402
1403	if (va->va_start > vstart)
1404		nva_start_addr = ALIGN(va->va_start, align);
1405	else
1406		nva_start_addr = ALIGN(vstart, align);
1407
1408	/* Check the "vend" restriction. */
1409	if (nva_start_addr + size > vend)
1410		return vend;
1411
1412	/* Classify what we have found. */
1413	type = classify_va_fit_type(va, nva_start_addr, size);
1414	if (WARN_ON_ONCE(type == NOTHING_FIT))
1415		return vend;
1416
1417	/* Update the free vmap_area. */
1418	ret = adjust_va_to_fit_type(va, nva_start_addr, size, type);
1419	if (ret)
1420		return vend;
1421
1422#if DEBUG_AUGMENT_LOWEST_MATCH_CHECK
1423	find_vmap_lowest_match_check(size);
1424#endif
1425
1426	return nva_start_addr;
1427}
1428
1429/*
1430 * Free a region of KVA allocated by alloc_vmap_area
1431 */
1432static void free_vmap_area(struct vmap_area *va)
1433{
1434	/*
1435	 * Remove from the busy tree/list.
1436	 */
1437	spin_lock(&vmap_area_lock);
1438	unlink_va(va, &vmap_area_root);
1439	spin_unlock(&vmap_area_lock);
1440
1441	/*
1442	 * Insert/Merge it back to the free tree/list.
1443	 */
1444	spin_lock(&free_vmap_area_lock);
1445	merge_or_add_vmap_area_augment(va, &free_vmap_area_root, &free_vmap_area_list);
1446	spin_unlock(&free_vmap_area_lock);
1447}
1448
1449static inline void
1450preload_this_cpu_lock(spinlock_t *lock, gfp_t gfp_mask, int node)
1451{
1452	struct vmap_area *va = NULL;
1453
1454	/*
1455	 * Preload this CPU with one extra vmap_area object. It is used
1456	 * when the fit type of a free area is NE_FIT_TYPE. It guarantees
1457	 * that a CPU doing an allocation is preloaded.
1458	 *
1459	 * We do it in a non-atomic context, which allows us to use more
1460	 * permissive allocation masks and thus be more stable under low
1461	 * memory conditions and high memory pressure.
1462	 */
1463	if (!this_cpu_read(ne_fit_preload_node))
1464		va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1465
1466	spin_lock(lock);
1467
1468	if (va && __this_cpu_cmpxchg(ne_fit_preload_node, NULL, va))
1469		kmem_cache_free(vmap_area_cachep, va);
1470}
1471
1472/*
1473 * Allocate a region of KVA of the specified size and alignment, within the
1474 * vstart and vend.
1475 */
1476static struct vmap_area *alloc_vmap_area(unsigned long size,
1477				unsigned long align,
1478				unsigned long vstart, unsigned long vend,
1479				int node, gfp_t gfp_mask)
1480{
1481	struct vmap_area *va;
1482	unsigned long addr;
1483	int purged = 0;
1484	int ret;
1485
1486	BUG_ON(!size);
1487	BUG_ON(offset_in_page(size));
1488	BUG_ON(!is_power_of_2(align));
1489
1490	if (unlikely(!vmap_initialized))
1491		return ERR_PTR(-EBUSY);
1492
1493	might_sleep();
1494	gfp_mask = gfp_mask & GFP_RECLAIM_MASK;
1495
1496	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
1497	if (unlikely(!va))
1498		return ERR_PTR(-ENOMEM);
1499
1500	/*
1501	 * Only scan the relevant parts containing pointers to other objects
1502	 * to avoid false negatives.
1503	 */
1504	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
1505
1506retry:
1507	preload_this_cpu_lock(&free_vmap_area_lock, gfp_mask, node);
1508	addr = __alloc_vmap_area(size, align, vstart, vend);
1509	spin_unlock(&free_vmap_area_lock);
1510
1511	/*
1512	 * If an allocation fails, the "vend" address is
1513	 * returned. Therefore trigger the overflow path.
1514	 */
1515	if (unlikely(addr == vend))
1516		goto overflow;
1517
1518	va->va_start = addr;
1519	va->va_end = addr + size;
1520	va->vm = NULL;
1521
1522	spin_lock(&vmap_area_lock);
1523	insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
1524	spin_unlock(&vmap_area_lock);
1525
1526	BUG_ON(!IS_ALIGNED(va->va_start, align));
1527	BUG_ON(va->va_start < vstart);
1528	BUG_ON(va->va_end > vend);
1529
1530	ret = kasan_populate_vmalloc(addr, size);
1531	if (ret) {
1532		free_vmap_area(va);
1533		return ERR_PTR(ret);
1534	}
1535
1536	return va;
1537
1538overflow:
1539	if (!purged) {
1540		purge_vmap_area_lazy();
1541		purged = 1;
1542		goto retry;
1543	}
1544
1545	if (gfpflags_allow_blocking(gfp_mask)) {
1546		unsigned long freed = 0;
1547		blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
1548		if (freed > 0) {
1549			purged = 0;
1550			goto retry;
1551		}
1552	}
1553
1554	if (!(gfp_mask & __GFP_NOWARN) && printk_ratelimit())
1555		pr_warn("vmap allocation for size %lu failed: use vmalloc=<size> to increase size\n",
1556			size);
1557
1558	kmem_cache_free(vmap_area_cachep, va);
1559	return ERR_PTR(-EBUSY);
1560}
1561
1562int register_vmap_purge_notifier(struct notifier_block *nb)
1563{
1564	return blocking_notifier_chain_register(&vmap_notify_list, nb);
1565}
1566EXPORT_SYMBOL_GPL(register_vmap_purge_notifier);
1567
1568int unregister_vmap_purge_notifier(struct notifier_block *nb)
1569{
1570	return blocking_notifier_chain_unregister(&vmap_notify_list, nb);
1571}
1572EXPORT_SYMBOL_GPL(unregister_vmap_purge_notifier);
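/*
 * Usage sketch (illustrative): a subsystem that caches vmap space can
 * hook the purge path and report how much it released; a non-zero
 * "freed" count makes alloc_vmap_area() retry. "my_shrink_cache" is a
 * hypothetical helper:
 *
 *	static int my_vmap_purge(struct notifier_block *nb,
 *				 unsigned long event, void *data)
 *	{
 *		unsigned long *freed = data;
 *
 *		*freed += my_shrink_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_vmap_nb = {
 *		.notifier_call = my_vmap_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&my_vmap_nb);
 */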
1573
1574/*
1575 * lazy_max_pages is the maximum amount of virtual address space we gather up
1576 * before attempting to purge with a TLB flush.
1577 *
1578 * There is a tradeoff here: a larger number will cover more kernel page tables
1579 * and take slightly longer to purge, but it will linearly reduce the number of
1580 * global TLB flushes that must be performed. It would seem natural to scale
1581 * this number up linearly with the number of CPUs (because vmapping activity
1582 * could also scale linearly with the number of CPUs), however it is likely
1583 * that in practice, workloads might be constrained in other ways that mean
1584 * vmap activity will not scale linearly with CPUs. Also, I want to be
1585 * conservative and not introduce a big latency on huge systems, so go with
1586 * a less aggressive log scale. It will still be an improvement over the old
1587 * code, and it will be simple to change the scale factor if we find that it
1588 * becomes a problem on bigger systems.
1589 */
1590static unsigned long lazy_max_pages(void)
1591{
1592	unsigned int log;
1593
1594	log = fls(num_online_cpus());
1595
1596	return log * (32UL * 1024 * 1024 / PAGE_SIZE);
1597}
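/*
 * For example, assuming 4K pages: with 16 online CPUs, fls(16) == 5,
 * so lazy_max_pages() == 5 * (32MB / 4K) == 40960 pages; up to 160MB
 * of lazily freed virtual address space may accumulate before a purge
 * is attempted.
 */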
1598
1599static atomic_long_t vmap_lazy_nr = ATOMIC_LONG_INIT(0);
1600
1601/*
1602 * Serialize vmap purging.  There is no actual critical section protected
1603 * by this lock, but we want to avoid concurrent calls for performance
1604 * reasons and to make the pcpu_get_vm_areas more deterministic.
1605 */
1606static DEFINE_MUTEX(vmap_purge_lock);
1607
1608/* for per-CPU blocks */
1609static void purge_fragmented_blocks_allcpus(void);
1610
1611#ifdef CONFIG_X86_64
1612/*
1613 * Called before a call to iounmap() if the caller wants the
1614 * vm_area_struct's freed immediately.
1615 */
1616void set_iounmap_nonlazy(void)
1617{
1618	atomic_long_set(&vmap_lazy_nr, lazy_max_pages()+1);
1619}
1620#endif /* CONFIG_X86_64 */
1621
1622/*
1623 * Purges all lazily-freed vmap areas.
1624 */
1625static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end)
1626{
1627	unsigned long resched_threshold;
1628	struct list_head local_purge_list;
1629	struct vmap_area *va, *n_va;
1630
1631	lockdep_assert_held(&vmap_purge_lock);
1632
1633	spin_lock(&purge_vmap_area_lock);
1634	purge_vmap_area_root = RB_ROOT;
1635	list_replace_init(&purge_vmap_area_list, &local_purge_list);
1636	spin_unlock(&purge_vmap_area_lock);
1637
1638	if (unlikely(list_empty(&local_purge_list)))
1639		return false;
1640
1641	start = min(start,
1642		list_first_entry(&local_purge_list,
1643			struct vmap_area, list)->va_start);
1644
1645	end = max(end,
1646		list_last_entry(&local_purge_list,
1647			struct vmap_area, list)->va_end);
1648
1649	flush_tlb_kernel_range(start, end);
1650	resched_threshold = lazy_max_pages() << 1;
1651
1652	spin_lock(&free_vmap_area_lock);
1653	list_for_each_entry_safe(va, n_va, &local_purge_list, list) {
1654		unsigned long nr = (va->va_end - va->va_start) >> PAGE_SHIFT;
1655		unsigned long orig_start = va->va_start;
1656		unsigned long orig_end = va->va_end;
1657
1658		/*
1659		 * Finally insert or merge lazily-freed area. It is
1660		 * detached and there is no need to "unlink" it from
1661		 * anything.
1662		 */
1663		va = merge_or_add_vmap_area_augment(va, &free_vmap_area_root,
1664				&free_vmap_area_list);
1665
1666		if (!va)
1667			continue;
1668
1669		if (is_vmalloc_or_module_addr((void *)orig_start))
1670			kasan_release_vmalloc(orig_start, orig_end,
1671					      va->va_start, va->va_end);
1672
1673		atomic_long_sub(nr, &vmap_lazy_nr);
1674
1675		if (atomic_long_read(&vmap_lazy_nr) < resched_threshold)
1676			cond_resched_lock(&free_vmap_area_lock);
1677	}
1678	spin_unlock(&free_vmap_area_lock);
1679	return true;
1680}
1681
1682/*
1683 * Kick off a purge of the outstanding lazy areas. Don't bother if somebody
1684 * is already purging.
1685 */
1686static void try_purge_vmap_area_lazy(void)
1687{
1688	if (mutex_trylock(&vmap_purge_lock)) {
1689		__purge_vmap_area_lazy(ULONG_MAX, 0);
1690		mutex_unlock(&vmap_purge_lock);
1691	}
1692}
1693
1694/*
1695 * Kick off a purge of the outstanding lazy areas.
1696 */
1697static void purge_vmap_area_lazy(void)
1698{
1699	mutex_lock(&vmap_purge_lock);
1700	purge_fragmented_blocks_allcpus();
1701	__purge_vmap_area_lazy(ULONG_MAX, 0);
1702	mutex_unlock(&vmap_purge_lock);
1703}
1704
1705/*
1706 * Free a vmap area, caller ensuring that the area has been unmapped
1707 * and that flush_cache_vunmap() has been called for the correct
1708 * range beforehand.
1709 */
1710static void free_vmap_area_noflush(struct vmap_area *va)
1711{
1712	unsigned long nr_lazy;
1713
1714	spin_lock(&vmap_area_lock);
1715	unlink_va(va, &vmap_area_root);
1716	spin_unlock(&vmap_area_lock);
1717
1718	nr_lazy = atomic_long_add_return((va->va_end - va->va_start) >>
1719				PAGE_SHIFT, &vmap_lazy_nr);
1720
1721	/*
1722	 * Merge or place it into the purge tree/list.
1723	 */
1724	spin_lock(&purge_vmap_area_lock);
1725	merge_or_add_vmap_area(va,
1726		&purge_vmap_area_root, &purge_vmap_area_list);
1727	spin_unlock(&purge_vmap_area_lock);
1728
1729	/* After this point, we may free va at any time */
1730	if (unlikely(nr_lazy > lazy_max_pages()))
1731		try_purge_vmap_area_lazy();
1732}
1733
1734/*
1735 * Free and unmap a vmap area
1736 */
1737static void free_unmap_vmap_area(struct vmap_area *va)
1738{
1739	flush_cache_vunmap(va->va_start, va->va_end);
1740	vunmap_range_noflush(va->va_start, va->va_end);
1741	if (debug_pagealloc_enabled_static())
1742		flush_tlb_kernel_range(va->va_start, va->va_end);
1743
1744	free_vmap_area_noflush(va);
1745}
1746
1747static struct vmap_area *find_vmap_area(unsigned long addr)
1748{
1749	struct vmap_area *va;
1750
1751	spin_lock(&vmap_area_lock);
1752	va = __find_vmap_area(addr);
1753	spin_unlock(&vmap_area_lock);
1754
1755	return va;
1756}
1757
1758/*** Per cpu kva allocator ***/
1759
1760/*
1761 * vmap space is limited especially on 32-bit architectures. Ensure there is
1762 * room for at least 16 percpu vmap blocks per CPU.
1763 */
1764/*
1765 * If we had a constant VMALLOC_START and VMALLOC_END, we'd like to be able
1766 * to #define VMALLOC_SPACE		(VMALLOC_END-VMALLOC_START). Guess
1767 * instead (we just need a rough idea)
1768 */
1769#if BITS_PER_LONG == 32
1770#define VMALLOC_SPACE		(128UL*1024*1024)
1771#else
1772#define VMALLOC_SPACE		(128UL*1024*1024*1024)
1773#endif
1774
1775#define VMALLOC_PAGES		(VMALLOC_SPACE / PAGE_SIZE)
1776#define VMAP_MAX_ALLOC		BITS_PER_LONG	/* 256K with 4K pages */
1777#define VMAP_BBMAP_BITS_MAX	1024	/* 4MB with 4K pages */
1778#define VMAP_BBMAP_BITS_MIN	(VMAP_MAX_ALLOC*2)
1779#define VMAP_MIN(x, y)		((x) < (y) ? (x) : (y)) /* can't use min() */
1780#define VMAP_MAX(x, y)		((x) > (y) ? (x) : (y)) /* can't use max() */
1781#define VMAP_BBMAP_BITS		\
1782		VMAP_MIN(VMAP_BBMAP_BITS_MAX,	\
1783		VMAP_MAX(VMAP_BBMAP_BITS_MIN,	\
1784			VMALLOC_PAGES / roundup_pow_of_two(NR_CPUS) / 16))
1785
1786#define VMAP_BLOCK_SIZE		(VMAP_BBMAP_BITS * PAGE_SIZE)
1787
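/*
 * A worked example of the sizing above, assuming 4K pages, 64-bit and
 * NR_CPUS == 4: VMALLOC_PAGES == 128G / 4K == 32M pages, so the last
 * VMAP_MAX() argument is 32M / 4 / 16 == 524288 bits; clamping by
 * VMAP_BBMAP_BITS_MAX gives VMAP_BBMAP_BITS == 1024 and thus a
 * VMAP_BLOCK_SIZE of 4MB.
 */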
1788struct vmap_block_queue {
1789	spinlock_t lock;
1790	struct list_head free;
1791};
1792
1793struct vmap_block {
1794	spinlock_t lock;
1795	struct vmap_area *va;
1796	unsigned long free, dirty;
1797	unsigned long dirty_min, dirty_max; /* dirty range */
1798	struct list_head free_list;
1799	struct rcu_head rcu_head;
1800	struct list_head purge;
1801};
1802
1803/* Queue of free and dirty vmap blocks, for allocation and flushing purposes */
1804static DEFINE_PER_CPU(struct vmap_block_queue, vmap_block_queue);
1805
1806/*
1807 * XArray of vmap blocks, indexed by address, to quickly find a vmap block
1808 * in the free path. Could get rid of this if we change the API to return a
1809 * "cookie" from alloc, to be passed to free. But no big deal yet.
1810 */
1811static DEFINE_XARRAY(vmap_blocks);
1812
1813/*
1814 * We should probably have a fallback mechanism to allocate virtual memory
1815 * out of partially filled vmap blocks. However vmap block sizing should be
1816 * fairly reasonable according to the vmalloc size, so it shouldn't be a
1817 * big problem.
1818 */
1819
1820static unsigned long addr_to_vb_idx(unsigned long addr)
1821{
1822	addr -= VMALLOC_START & ~(VMAP_BLOCK_SIZE-1);
1823	addr /= VMAP_BLOCK_SIZE;
1824	return addr;
1825}
1826
1827static void *vmap_block_vaddr(unsigned long va_start, unsigned long pages_off)
1828{
1829	unsigned long addr;
1830
1831	addr = va_start + (pages_off << PAGE_SHIFT);
1832	BUG_ON(addr_to_vb_idx(addr) != addr_to_vb_idx(va_start));
1833	return (void *)addr;
1834}
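/*
 * For example, with a 4MB VMAP_BLOCK_SIZE every address inside one
 * block collapses to the same index: addr_to_vb_idx(vb->va->va_start)
 * equals addr_to_vb_idx(vb->va->va_start + VMAP_BLOCK_SIZE - 1), which
 * is what lets vb_free() find the owning block from any address inside
 * it with a single xa_load().
 */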
1835
1836/**
1837 * new_vmap_block - allocate a new vmap_block and occupy 2^order pages in
1838 *                  this block; the number of pages can't exceed VMAP_BBMAP_BITS
1839 * @order:    how many 2^order pages should be occupied in the newly allocated block
1840 * @gfp_mask: flags for the page level allocator
1841 *
1842 * Return: virtual address in a newly allocated block or ERR_PTR(-errno)
1843 */
1844static void *new_vmap_block(unsigned int order, gfp_t gfp_mask)
1845{
1846	struct vmap_block_queue *vbq;
1847	struct vmap_block *vb;
1848	struct vmap_area *va;
1849	unsigned long vb_idx;
1850	int node, err;
1851	void *vaddr;
1852
1853	node = numa_node_id();
1854
1855	vb = kmalloc_node(sizeof(struct vmap_block),
1856			gfp_mask & GFP_RECLAIM_MASK, node);
1857	if (unlikely(!vb))
1858		return ERR_PTR(-ENOMEM);
1859
1860	va = alloc_vmap_area(VMAP_BLOCK_SIZE, VMAP_BLOCK_SIZE,
1861					VMALLOC_START, VMALLOC_END,
1862					node, gfp_mask);
1863	if (IS_ERR(va)) {
1864		kfree(vb);
1865		return ERR_CAST(va);
1866	}
1867
1868	vaddr = vmap_block_vaddr(va->va_start, 0);
1869	spin_lock_init(&vb->lock);
1870	vb->va = va;
1871	/* At least something should be left free */
1872	BUG_ON(VMAP_BBMAP_BITS <= (1UL << order));
1873	vb->free = VMAP_BBMAP_BITS - (1UL << order);
1874	vb->dirty = 0;
1875	vb->dirty_min = VMAP_BBMAP_BITS;
1876	vb->dirty_max = 0;
1877	INIT_LIST_HEAD(&vb->free_list);
1878
1879	vb_idx = addr_to_vb_idx(va->va_start);
1880	err = xa_insert(&vmap_blocks, vb_idx, vb, gfp_mask);
1881	if (err) {
1882		kfree(vb);
1883		free_vmap_area(va);
1884		return ERR_PTR(err);
1885	}
1886
1887	vbq = &get_cpu_var(vmap_block_queue);
1888	spin_lock(&vbq->lock);
1889	list_add_tail_rcu(&vb->free_list, &vbq->free);
1890	spin_unlock(&vbq->lock);
1891	put_cpu_var(vmap_block_queue);
1892
1893	return vaddr;
1894}
1895
1896static void free_vmap_block(struct vmap_block *vb)
1897{
1898	struct vmap_block *tmp;
1899
1900	tmp = xa_erase(&vmap_blocks, addr_to_vb_idx(vb->va->va_start));
1901	BUG_ON(tmp != vb);
1902
1903	free_vmap_area_noflush(vb->va);
1904	kfree_rcu(vb, rcu_head);
1905}
1906
1907static void purge_fragmented_blocks(int cpu)
1908{
1909	LIST_HEAD(purge);
1910	struct vmap_block *vb;
1911	struct vmap_block *n_vb;
1912	struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
1913
1914	rcu_read_lock();
1915	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1916
1917		if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS))
1918			continue;
1919
1920		spin_lock(&vb->lock);
1921		if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) {
1922			vb->free = 0; /* prevent further allocs after releasing lock */
1923			vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */
1924			vb->dirty_min = 0;
1925			vb->dirty_max = VMAP_BBMAP_BITS;
1926			spin_lock(&vbq->lock);
1927			list_del_rcu(&vb->free_list);
1928			spin_unlock(&vbq->lock);
1929			spin_unlock(&vb->lock);
1930			list_add_tail(&vb->purge, &purge);
1931		} else
1932			spin_unlock(&vb->lock);
1933	}
1934	rcu_read_unlock();
1935
1936	list_for_each_entry_safe(vb, n_vb, &purge, purge) {
1937		list_del(&vb->purge);
1938		free_vmap_block(vb);
1939	}
1940}
1941
1942static void purge_fragmented_blocks_allcpus(void)
1943{
1944	int cpu;
1945
1946	for_each_possible_cpu(cpu)
1947		purge_fragmented_blocks(cpu);
1948}
1949
1950static void *vb_alloc(unsigned long size, gfp_t gfp_mask)
1951{
1952	struct vmap_block_queue *vbq;
1953	struct vmap_block *vb;
1954	void *vaddr = NULL;
1955	unsigned int order;
1956
1957	BUG_ON(offset_in_page(size));
1958	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
1959	if (WARN_ON(size == 0)) {
1960		/*
1961		 * Allocating 0 bytes isn't what the caller wants since
1962		 * get_order(0) returns a funny result. Just warn and terminate
1963		 * early.
1964		 */
1965		return NULL;
1966	}
1967	order = get_order(size);
1968
1969	rcu_read_lock();
1970	vbq = &get_cpu_var(vmap_block_queue);
1971	list_for_each_entry_rcu(vb, &vbq->free, free_list) {
1972		unsigned long pages_off;
1973
1974		spin_lock(&vb->lock);
1975		if (vb->free < (1UL << order)) {
1976			spin_unlock(&vb->lock);
1977			continue;
1978		}
1979
1980		pages_off = VMAP_BBMAP_BITS - vb->free;
1981		vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
1982		vb->free -= 1UL << order;
1983		if (vb->free == 0) {
1984			spin_lock(&vbq->lock);
1985			list_del_rcu(&vb->free_list);
1986			spin_unlock(&vbq->lock);
1987		}
1988
1989		spin_unlock(&vb->lock);
1990		break;
1991	}
1992
1993	put_cpu_var(vmap_block_queue);
1994	rcu_read_unlock();
1995
1996	/* Allocate new block if nothing was found */
1997	if (!vaddr)
1998		vaddr = new_vmap_block(order, gfp_mask);
1999
2000	return vaddr;
2001}
2002
2003static void vb_free(unsigned long addr, unsigned long size)
2004{
2005	unsigned long offset;
2006	unsigned int order;
2007	struct vmap_block *vb;
2008
2009	BUG_ON(offset_in_page(size));
2010	BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
2011
2012	flush_cache_vunmap(addr, addr + size);
2013
2014	order = get_order(size);
2015	offset = (addr & (VMAP_BLOCK_SIZE - 1)) >> PAGE_SHIFT;
2016	vb = xa_load(&vmap_blocks, addr_to_vb_idx(addr));
2017
2018	vunmap_range_noflush(addr, addr + size);
2019
2020	if (debug_pagealloc_enabled_static())
2021		flush_tlb_kernel_range(addr, addr + size);
2022
2023	spin_lock(&vb->lock);
2024
2025	/* Expand dirty range */
2026	vb->dirty_min = min(vb->dirty_min, offset);
2027	vb->dirty_max = max(vb->dirty_max, offset + (1UL << order));
2028
2029	vb->dirty += 1UL << order;
2030	if (vb->dirty == VMAP_BBMAP_BITS) {
2031		BUG_ON(vb->free);
2032		spin_unlock(&vb->lock);
2033		free_vmap_block(vb);
2034	} else
2035		spin_unlock(&vb->lock);
2036}
2037
2038static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
2039{
2040	int cpu;
2041
2042	if (unlikely(!vmap_initialized))
2043		return;
2044
2045	might_sleep();
2046
2047	for_each_possible_cpu(cpu) {
2048		struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu);
2049		struct vmap_block *vb;
2050
2051		rcu_read_lock();
2052		list_for_each_entry_rcu(vb, &vbq->free, free_list) {
2053			spin_lock(&vb->lock);
2054			if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) {
2055				unsigned long va_start = vb->va->va_start;
2056				unsigned long s, e;
2057
2058				s = va_start + (vb->dirty_min << PAGE_SHIFT);
2059				e = va_start + (vb->dirty_max << PAGE_SHIFT);
2060
2061				start = min(s, start);
2062				end   = max(e, end);
2063
2064				flush = 1;
2065			}
2066			spin_unlock(&vb->lock);
2067		}
2068		rcu_read_unlock();
2069	}
2070
2071	mutex_lock(&vmap_purge_lock);
2072	purge_fragmented_blocks_allcpus();
2073	if (!__purge_vmap_area_lazy(start, end) && flush)
2074		flush_tlb_kernel_range(start, end);
2075	mutex_unlock(&vmap_purge_lock);
2076}
2077
2078/**
2079 * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
2080 *
2081 * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
2082 * to amortize TLB flushing overheads. What this means is that any page you
2083 * have now may, in a former life, have been mapped into a kernel virtual
2084 * address by the vmap layer, and so there might be some CPUs with TLB entries
2085 * still referencing that page (additional to the regular 1:1 kernel mapping).
2086 *
2087 * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
2088 * be sure that none of the pages we have control over will have any aliases
2089 * from the vmap layer.
2090 */
2091void vm_unmap_aliases(void)
2092{
2093	unsigned long start = ULONG_MAX, end = 0;
2094	int flush = 0;
2095
2096	_vm_unmap_aliases(start, end, flush);
2097}
2098EXPORT_SYMBOL_GPL(vm_unmap_aliases);
2099
2100/**
2101 * vm_unmap_ram - unmap linear kernel address space set up by vm_map_ram
2102 * @mem: the pointer returned by vm_map_ram
2103 * @count: the count passed to that vm_map_ram call (cannot unmap partial)
2104 */
2105void vm_unmap_ram(const void *mem, unsigned int count)
2106{
2107	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2108	unsigned long addr = (unsigned long)mem;
2109	struct vmap_area *va;
2110
2111	might_sleep();
2112	BUG_ON(!addr);
2113	BUG_ON(addr < VMALLOC_START);
2114	BUG_ON(addr > VMALLOC_END);
2115	BUG_ON(!PAGE_ALIGNED(addr));
2116
2117	kasan_poison_vmalloc(mem, size);
2118
2119	if (likely(count <= VMAP_MAX_ALLOC)) {
2120		debug_check_no_locks_freed(mem, size);
2121		vb_free(addr, size);
2122		return;
2123	}
2124
2125	va = find_vmap_area(addr);
2126	BUG_ON(!va);
2127	debug_check_no_locks_freed((void *)va->va_start,
2128				    (va->va_end - va->va_start));
2129	free_unmap_vmap_area(va);
2130}
2131EXPORT_SYMBOL(vm_unmap_ram);
2132
2133/**
2134 * vm_map_ram - map pages linearly into kernel virtual address (vmalloc space)
2135 * @pages: an array of pointers to the pages to be mapped
2136 * @count: number of pages
2137 * @node: prefer to allocate data structures on this node
2138 *
2139 * If you use this function for fewer than VMAP_MAX_ALLOC pages, it can be
2140 * faster than vmap().  But if you mix long-lived and short-lived objects
2141 * with vm_map_ram(), it can consume lots of address space through
2142 * fragmentation (especially on a 32bit machine) and eventually fail.
2143 * Please use this function only for short-lived objects.
2144 *
2145 * Returns: a pointer to the address that has been mapped, or %NULL on failure
2146 */
2147void *vm_map_ram(struct page **pages, unsigned int count, int node)
2148{
2149	unsigned long size = (unsigned long)count << PAGE_SHIFT;
2150	unsigned long addr;
2151	void *mem;
2152
2153	if (likely(count <= VMAP_MAX_ALLOC)) {
2154		mem = vb_alloc(size, GFP_KERNEL);
2155		if (IS_ERR(mem))
2156			return NULL;
2157		addr = (unsigned long)mem;
2158	} else {
2159		struct vmap_area *va;
2160		va = alloc_vmap_area(size, PAGE_SIZE,
2161				VMALLOC_START, VMALLOC_END, node, GFP_KERNEL);
2162		if (IS_ERR(va))
2163			return NULL;
2164
2165		addr = va->va_start;
2166		mem = (void *)addr;
2167	}
2168
2169	kasan_unpoison_vmalloc(mem, size);
2170
2171	if (vmap_pages_range(addr, addr + size, PAGE_KERNEL,
2172				pages, PAGE_SHIFT) < 0) {
2173		vm_unmap_ram(mem, count);
2174		return NULL;
2175	}
2176
2177	return mem;
2178}
2179EXPORT_SYMBOL(vm_map_ram);
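
/*
 * Editor's sketch: the intended short-lived usage pattern — map a small
 * batch of pages, touch them through the linear view, and unmap with
 * the same @count.  The helper name is illustrative only.
 */
static int example_zero_batch(struct page **pages, unsigned int count)
{
	void *va = vm_map_ram(pages, count, NUMA_NO_NODE);

	if (!va)
		return -ENOMEM;
	memset(va, 0, (unsigned long)count << PAGE_SHIFT);
	vm_unmap_ram(va, count);	/* partial unmap is not allowed */
	return 0;
}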
2180
2181static struct vm_struct *vmlist __initdata;
2182
2183static inline unsigned int vm_area_page_order(struct vm_struct *vm)
2184{
2185#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2186	return vm->page_order;
2187#else
2188	return 0;
2189#endif
2190}
2191
2192static inline void set_vm_area_page_order(struct vm_struct *vm, unsigned int order)
2193{
2194#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
2195	vm->page_order = order;
2196#else
2197	BUG_ON(order != 0);
2198#endif
2199}
2200
2201/**
2202 * vm_area_add_early - add vmap area early during boot
2203 * @vm: vm_struct to add
2204 *
2205 * This function is used to add a fixed kernel vm area to vmlist before
2206 * vmalloc_init() is called.  @vm->addr, @vm->size, and @vm->flags
2207 * should contain proper values and the other fields should be zero.
2208 *
2209 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2210 */
2211void __init vm_area_add_early(struct vm_struct *vm)
2212{
2213	struct vm_struct *tmp, **p;
2214
2215	BUG_ON(vmap_initialized);
2216	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
2217		if (tmp->addr >= vm->addr) {
2218			BUG_ON(tmp->addr < vm->addr + vm->size);
2219			break;
2220		} else
2221			BUG_ON(tmp->addr + tmp->size > vm->addr);
2222	}
2223	vm->next = *p;
2224	*p = vm;
2225}
2226
2227/**
2228 * vm_area_register_early - register vmap area early during boot
2229 * @vm: vm_struct to register
2230 * @align: requested alignment
2231 *
2232 * This function is used to register a kernel vm area before
2233 * vmalloc_init() is called.  @vm->size and @vm->flags should contain
2234 * proper values on entry and other fields should be zero.  On return,
2235 * vm->addr contains the allocated address.
2236 *
2237 * DO NOT USE THIS FUNCTION UNLESS YOU KNOW WHAT YOU'RE DOING.
2238 */
2239void __init vm_area_register_early(struct vm_struct *vm, size_t align)
2240{
2241	static size_t vm_init_off __initdata;
2242	unsigned long addr;
2243
2244	addr = ALIGN(VMALLOC_START + vm_init_off, align);
2245	vm_init_off = PFN_ALIGN(addr + vm->size) - VMALLOC_START;
2246
2247	vm->addr = (void *)addr;
2248
2249	vm_area_add_early(vm);
2250}
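
/*
 * Editor's sketch of the boot-time protocol: fill in ->size and
 * ->flags, call vm_area_register_early(), and read the reserved
 * address back from ->addr.  This mirrors what the percpu first-chunk
 * setup does; the names and sizes here are illustrative.  The
 * vm_struct must stay live, since vmalloc_init() later imports it.
 */
static struct vm_struct example_early_vm;

static void __init example_reserve_early(void)
{
	example_early_vm.flags = VM_ALLOC;
	example_early_vm.size = SZ_64K;
	vm_area_register_early(&example_early_vm, PAGE_SIZE);
	/* example_early_vm.addr now holds the chosen address */
}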
2251
2252static void vmap_init_free_space(void)
2253{
2254	unsigned long vmap_start = 1;
2255	const unsigned long vmap_end = ULONG_MAX;
2256	struct vmap_area *busy, *free;
2257
2258	/*
2259	 *     B     F     B     B     B     F
2260	 * -|-----|.....|-----|-----|-----|.....|-
2261	 *  |           The KVA space           |
2262	 *  |<--------------------------------->|
2263	 */
2264	list_for_each_entry(busy, &vmap_area_list, list) {
2265		if (busy->va_start - vmap_start > 0) {
2266			free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2267			if (!WARN_ON_ONCE(!free)) {
2268				free->va_start = vmap_start;
2269				free->va_end = busy->va_start;
2270
2271				insert_vmap_area_augment(free, NULL,
2272					&free_vmap_area_root,
2273						&free_vmap_area_list);
2274			}
2275		}
2276
2277		vmap_start = busy->va_end;
2278	}
2279
2280	if (vmap_end - vmap_start > 0) {
2281		free = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2282		if (!WARN_ON_ONCE(!free)) {
2283			free->va_start = vmap_start;
2284			free->va_end = vmap_end;
2285
2286			insert_vmap_area_augment(free, NULL,
2287				&free_vmap_area_root,
2288					&free_vmap_area_list);
2289		}
2290	}
2291}
2292
2293void __init vmalloc_init(void)
2294{
2295	struct vmap_area *va;
2296	struct vm_struct *tmp;
2297	int i;
2298
2299	/*
2300	 * Create the cache for vmap_area objects.
2301	 */
2302	vmap_area_cachep = KMEM_CACHE(vmap_area, SLAB_PANIC);
2303
2304	for_each_possible_cpu(i) {
2305		struct vmap_block_queue *vbq;
2306		struct vfree_deferred *p;
2307
2308		vbq = &per_cpu(vmap_block_queue, i);
2309		spin_lock_init(&vbq->lock);
2310		INIT_LIST_HEAD(&vbq->free);
2311		p = &per_cpu(vfree_deferred, i);
2312		init_llist_head(&p->list);
2313		INIT_WORK(&p->wq, free_work);
2314	}
2315
2316	/* Import existing vmlist entries. */
2317	for (tmp = vmlist; tmp; tmp = tmp->next) {
2318		va = kmem_cache_zalloc(vmap_area_cachep, GFP_NOWAIT);
2319		if (WARN_ON_ONCE(!va))
2320			continue;
2321
2322		va->va_start = (unsigned long)tmp->addr;
2323		va->va_end = va->va_start + tmp->size;
2324		va->vm = tmp;
2325		insert_vmap_area(va, &vmap_area_root, &vmap_area_list);
2326	}
2327
2328	/*
2329	 * Now we can initialize a free vmap space.
2330	 */
2331	vmap_init_free_space();
2332	vmap_initialized = true;
2333}
2334
2335static inline void setup_vmalloc_vm_locked(struct vm_struct *vm,
2336	struct vmap_area *va, unsigned long flags, const void *caller)
2337{
2338	vm->flags = flags;
2339	vm->addr = (void *)va->va_start;
2340	vm->size = va->va_end - va->va_start;
2341	vm->caller = caller;
2342	va->vm = vm;
2343}
2344
2345static void setup_vmalloc_vm(struct vm_struct *vm, struct vmap_area *va,
2346			      unsigned long flags, const void *caller)
2347{
2348	spin_lock(&vmap_area_lock);
2349	setup_vmalloc_vm_locked(vm, va, flags, caller);
2350	spin_unlock(&vmap_area_lock);
2351}
2352
2353static void clear_vm_uninitialized_flag(struct vm_struct *vm)
2354{
2355	/*
2356	 * Before removing VM_UNINITIALIZED,
2357	 * we should make sure that vm has proper values.
2358	 * Pair with smp_rmb() in show_numa_info().
2359	 */
2360	smp_wmb();
2361	vm->flags &= ~VM_UNINITIALIZED;
2362}
2363
2364static struct vm_struct *__get_vm_area_node(unsigned long size,
2365		unsigned long align, unsigned long shift, unsigned long flags,
2366		unsigned long start, unsigned long end, int node,
2367		gfp_t gfp_mask, const void *caller)
2368{
2369	struct vmap_area *va;
2370	struct vm_struct *area;
2371	unsigned long requested_size = size;
2372
2373	BUG_ON(in_interrupt());
2374	size = ALIGN(size, 1ul << shift);
2375	if (unlikely(!size))
2376		return NULL;
2377
2378	if (flags & VM_IOREMAP)
2379		align = 1ul << clamp_t(int, get_count_order_long(size),
2380				       PAGE_SHIFT, IOREMAP_MAX_ORDER);
2381
2382	area = kzalloc_node(sizeof(*area), gfp_mask & GFP_RECLAIM_MASK, node);
2383	if (unlikely(!area))
2384		return NULL;
2385
2386	if (!(flags & VM_NO_GUARD))
2387		size += PAGE_SIZE;
2388
2389	va = alloc_vmap_area(size, align, start, end, node, gfp_mask);
2390	if (IS_ERR(va)) {
2391		kfree(area);
2392		return NULL;
2393	}
2394
2395	kasan_unpoison_vmalloc((void *)va->va_start, requested_size);
2396
2397	setup_vmalloc_vm(area, va, flags, caller);
2398
2399	return area;
2400}
2401
2402struct vm_struct *__get_vm_area_caller(unsigned long size, unsigned long flags,
2403				       unsigned long start, unsigned long end,
2404				       const void *caller)
2405{
2406	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags, start, end,
2407				  NUMA_NO_NODE, GFP_KERNEL, caller);
2408}
2409
2410/**
2411 * get_vm_area - reserve a contiguous kernel virtual area
2412 * @size:	 size of the area
2413 * @flags:	 %VM_IOREMAP for I/O mappings or VM_ALLOC
2414 *
2415 * Search for an area of @size in the kernel virtual mapping area,
2416 * and reserve it for our purposes.  Returns the area descriptor
2417 * on success or %NULL on failure.
2418 *
2419 * Return: the area descriptor on success or %NULL on failure.
2420 */
2421struct vm_struct *get_vm_area(unsigned long size, unsigned long flags)
2422{
2423	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2424				  VMALLOC_START, VMALLOC_END,
2425				  NUMA_NO_NODE, GFP_KERNEL,
2426				  __builtin_return_address(0));
2427}
2428
2429struct vm_struct *get_vm_area_caller(unsigned long size, unsigned long flags,
2430				const void *caller)
2431{
2432	return __get_vm_area_node(size, 1, PAGE_SHIFT, flags,
2433				  VMALLOC_START, VMALLOC_END,
2434				  NUMA_NO_NODE, GFP_KERNEL, caller);
2435}
2436
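/*
 * Editor's sketch: reserve address space without backing pages, for a
 * caller that establishes its own mappings (ioremap-style), then give
 * it back.  SZ_64K and the helper name are illustrative.
 */
static int example_reserve_range(void)
{
	struct vm_struct *area = get_vm_area(SZ_64K, VM_IOREMAP);

	if (!area)
		return -ENOMEM;
	/* ... map something at area->addr .. area->addr + area->size ... */
	free_vm_area(area);
	return 0;
}
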
2437/**
2438 * find_vm_area - find a continuous kernel virtual area
2439 * @addr:	  base address
2440 *
2441 * Search for the kernel VM area starting at @addr, and return it.
2442 * It is up to the caller to do all required locking to keep the returned
2443 * pointer valid.
2444 *
2445 * Return: the area descriptor on success or %NULL on failure.
2446 */
2447struct vm_struct *find_vm_area(const void *addr)
2448{
2449	struct vmap_area *va;
2450
2451	va = find_vmap_area((unsigned long)addr);
2452	if (!va)
2453		return NULL;
2454
2455	return va->vm;
2456}
2457
2458/**
2459 * remove_vm_area - find and remove a continuous kernel virtual area
2460 * @addr:	    base address
2461 *
2462 * Search for the kernel VM area starting at @addr, and remove it.
2463 * This function returns the found VM area, but using it is NOT safe
2464 * on SMP machines, except for its size or flags.
2465 *
2466 * Return: the area descriptor on success or %NULL on failure.
2467 */
2468struct vm_struct *remove_vm_area(const void *addr)
2469{
2470	struct vmap_area *va;
2471
2472	might_sleep();
2473
2474	spin_lock(&vmap_area_lock);
2475	va = __find_vmap_area((unsigned long)addr);
2476	if (va && va->vm) {
2477		struct vm_struct *vm = va->vm;
2478
2479		va->vm = NULL;
2480		spin_unlock(&vmap_area_lock);
2481
2482		kasan_free_shadow(vm);
2483		free_unmap_vmap_area(va);
2484
2485		return vm;
2486	}
2487
2488	spin_unlock(&vmap_area_lock);
2489	return NULL;
2490}
2491
2492static inline void set_area_direct_map(const struct vm_struct *area,
2493				       int (*set_direct_map)(struct page *page))
2494{
2495	int i;
2496
2497	/* HUGE_VMALLOC passes small pages to set_direct_map */
2498	for (i = 0; i < area->nr_pages; i++)
2499		if (page_address(area->pages[i]))
2500			set_direct_map(area->pages[i]);
2501}
2502
2503/* Handle removing and resetting vm mappings related to the vm_struct. */
2504static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
2505{
2506	unsigned long start = ULONG_MAX, end = 0;
2507	unsigned int page_order = vm_area_page_order(area);
2508	int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
2509	int flush_dmap = 0;
2510	int i;
2511
2512	remove_vm_area(area->addr);
2513
2514	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
2515	if (!flush_reset)
2516		return;
2517
2518	/*
2519	 * If not deallocating pages, just do the flush of the VM area and
2520	 * return.
2521	 */
2522	if (!deallocate_pages) {
2523		vm_unmap_aliases();
2524		return;
2525	}
2526
2527	/*
2528	 * If execution gets here, flush the vm mapping and reset the direct
2529	 * map. Find the start and end range of the direct mappings to make sure
2530	 * the vm_unmap_aliases() flush includes the direct map.
2531	 */
2532	for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2533		unsigned long addr = (unsigned long)page_address(area->pages[i]);
2534		if (addr) {
2535			unsigned long page_size;
2536
2537			page_size = PAGE_SIZE << page_order;
2538			start = min(addr, start);
2539			end = max(addr + page_size, end);
2540			flush_dmap = 1;
2541		}
2542	}
2543
2544	/*
2545	 * Set direct map to something invalid so that it won't be cached if
2546	 * there are any accesses after the TLB flush, then flush the TLB and
2547	 * reset the direct map permissions to the default.
2548	 */
2549	set_area_direct_map(area, set_direct_map_invalid_noflush);
2550	_vm_unmap_aliases(start, end, flush_dmap);
2551	set_area_direct_map(area, set_direct_map_default_noflush);
2552}
2553
2554static void __vunmap(const void *addr, int deallocate_pages)
2555{
2556	struct vm_struct *area;
2557
2558	if (!addr)
2559		return;
2560
2561	if (WARN(!PAGE_ALIGNED(addr), "Trying to vfree() bad address (%p)\n",
2562			addr))
2563		return;
2564
2565	area = find_vm_area(addr);
2566	if (unlikely(!area)) {
2567		WARN(1, KERN_ERR "Trying to vfree() nonexistent vm area (%p)\n",
2568				addr);
2569		return;
2570	}
2571
2572	debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
2573	debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
2574
2575	kasan_poison_vmalloc(area->addr, get_vm_area_size(area));
2576
2577	vm_remove_mappings(area, deallocate_pages);
2578
2579	if (deallocate_pages) {
2580		unsigned int page_order = vm_area_page_order(area);
2581		int i;
2582
2583		for (i = 0; i < area->nr_pages; i += 1U << page_order) {
2584			struct page *page = area->pages[i];
2585
2586			BUG_ON(!page);
2587			__free_pages(page, page_order);
2588			cond_resched();
2589		}
2590		atomic_long_sub(area->nr_pages, &nr_vmalloc_pages);
2591
2592		kvfree(area->pages);
2593	}
2594
2595	kfree(area);
2596}
2597
2598static inline void __vfree_deferred(const void *addr)
2599{
2600	/*
2601	 * Use raw_cpu_ptr() because this can be called from preemptible
2602	 * context. Preemption is absolutely fine here, because the llist_add()
2603	 * implementation is lockless, so it works even if we are adding to
2604	 * another cpu's list. schedule_work() should be fine with this too.
2605	 */
2606	struct vfree_deferred *p = raw_cpu_ptr(&vfree_deferred);
2607
2608	if (llist_add((struct llist_node *)addr, &p->list))
2609		schedule_work(&p->wq);
2610}
2611
2612/**
2613 * vfree_atomic - release memory allocated by vmalloc()
2614 * @addr:	  memory base address
2615 *
2616 * This one is just like vfree() but can be called in any atomic context
2617 * except NMIs.
2618 */
2619void vfree_atomic(const void *addr)
2620{
2621	BUG_ON(in_nmi());
2622
2623	kmemleak_free(addr);
2624
2625	if (!addr)
2626		return;
2627	__vfree_deferred(addr);
2628}
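
/*
 * Editor's sketch: freeing vmalloc'ed memory from atomic context,
 * where plain vfree() could sleep.  The timer callback and the buffer
 * pointer are hypothetical driver state.
 */
static void *example_deferred_buf;

static void example_timer_fn(struct timer_list *unused)
{
	vfree_atomic(example_deferred_buf);	/* NULL is a no-op */
	example_deferred_buf = NULL;
}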
2629
2630static void __vfree(const void *addr)
2631{
2632	if (unlikely(in_interrupt()))
2633		__vfree_deferred(addr);
2634	else
2635		__vunmap(addr, 1);
2636}
2637
2638/**
2639 * vfree - Release memory allocated by vmalloc()
2640 * @addr:  Memory base address
2641 *
2642 * Free the virtually continuous memory area starting at @addr, as obtained
2643 * from one of the vmalloc() family of APIs.  This will usually also free the
2644 * physical memory underlying the virtual allocation, but that memory is
2645 * reference counted, so it will not be freed until the last user goes away.
2646 *
2647 * If @addr is NULL, no operation is performed.
2648 *
2649 * Context:
2650 * May sleep if called *not* from interrupt context.
2651 * Must not be called in NMI context (strictly speaking, it could be
2652 * if we have CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG, but making the calling
2653 * conventions for vfree() arch-dependent would be a really bad idea).
2654 */
2655void vfree(const void *addr)
2656{
2657	BUG_ON(in_nmi());
2658
2659	kmemleak_free(addr);
2660
2661	might_sleep_if(!in_interrupt());
2662
2663	if (!addr)
2664		return;
2665
2666	__vfree(addr);
2667}
2668EXPORT_SYMBOL(vfree);
2669
2670/**
2671 * vunmap - release virtual mapping obtained by vmap()
2672 * @addr:   memory base address
2673 *
2674 * Free the virtually contiguous memory area starting at @addr,
2675 * which was created from the page array passed to vmap().
2676 *
2677 * Must not be called in interrupt context.
2678 */
2679void vunmap(const void *addr)
2680{
2681	BUG_ON(in_interrupt());
2682	might_sleep();
2683	if (addr)
2684		__vunmap(addr, 0);
2685}
2686EXPORT_SYMBOL(vunmap);
2687
2688/**
2689 * vmap - map an array of pages into virtually contiguous space
2690 * @pages: array of page pointers
2691 * @count: number of pages to map
2692 * @flags: vm_area->flags
2693 * @prot: page protection for the mapping
2694 *
2695 * Maps @count pages from @pages into contiguous kernel virtual space.
2696 * If @flags contains %VM_MAP_PUT_PAGES the ownership of the pages array itself
2697 * (which must be kmalloc or vmalloc memory) and one reference per pages in it
2698 * are transferred from the caller to vmap(), and will be freed / dropped when
2699 * vfree() is called on the return value.
2700 *
2701 * Return: the address of the area or %NULL on failure
2702 */
2703void *vmap(struct page **pages, unsigned int count,
2704	   unsigned long flags, pgprot_t prot)
2705{
2706	struct vm_struct *area;
2707	unsigned long addr;
2708	unsigned long size;		/* In bytes */
2709
2710	might_sleep();
2711
2712	if (count > totalram_pages())
2713		return NULL;
2714
2715	size = (unsigned long)count << PAGE_SHIFT;
2716	area = get_vm_area_caller(size, flags, __builtin_return_address(0));
2717	if (!area)
2718		return NULL;
2719
2720	addr = (unsigned long)area->addr;
2721	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
2722				pages, PAGE_SHIFT) < 0) {
2723		vunmap(area->addr);
2724		return NULL;
2725	}
2726
2727	if (flags & VM_MAP_PUT_PAGES) {
2728		area->pages = pages;
2729		area->nr_pages = count;
2730	}
2731	return area->addr;
2732}
2733EXPORT_SYMBOL(vmap);
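
/*
 * Editor's sketch: stitch two discontiguous pages into one virtually
 * contiguous window.  Error handling is abbreviated and the helper
 * name is illustrative only.
 */
static void *example_vmap_pair(struct page *pg[2])
{
	void *va;

	pg[0] = alloc_page(GFP_KERNEL);
	pg[1] = alloc_page(GFP_KERNEL);
	if (!pg[0] || !pg[1])
		goto err;

	va = vmap(pg, 2, VM_MAP, PAGE_KERNEL);
	if (va)
		return va;	/* caller later: vunmap(va) and __free_page()s */
err:
	if (pg[0])
		__free_page(pg[0]);
	if (pg[1])
		__free_page(pg[1]);
	return NULL;
}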
2734
2735#ifdef CONFIG_VMAP_PFN
2736struct vmap_pfn_data {
2737	unsigned long	*pfns;
2738	pgprot_t	prot;
2739	unsigned int	idx;
2740};
2741
2742static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private)
2743{
2744	struct vmap_pfn_data *data = private;
2745
2746	if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx])))
2747		return -EINVAL;
2748	*pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot));
2749	return 0;
2750}
2751
2752/**
2753 * vmap_pfn - map an array of PFNs into virtually contiguous space
2754 * @pfns: array of PFNs
2755 * @count: number of pages to map
2756 * @prot: page protection for the mapping
2757 *
2758 * Maps @count PFNs from @pfns into contiguous kernel virtual space and returns
2759 * the start address of the mapping.
2760 */
2761void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot)
2762{
2763	struct vmap_pfn_data data = { .pfns = pfns, .prot = pgprot_nx(prot) };
2764	struct vm_struct *area;
2765
2766	area = get_vm_area_caller(count * PAGE_SIZE, VM_IOREMAP,
2767			__builtin_return_address(0));
2768	if (!area)
2769		return NULL;
2770	if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
2771			count * PAGE_SIZE, vmap_pfn_apply, &data)) {
2772		free_vm_area(area);
2773		return NULL;
2774	}
2775	return area->addr;
2776}
2777EXPORT_SYMBOL_GPL(vmap_pfn);
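
/*
 * Editor's sketch: vmap_pfn() is for PFNs that have no struct page
 * (pfn_valid() must be false for each entry, or vmap_pfn_apply()
 * above rejects it with -EINVAL).  A driver mapping device memory
 * might do something like this; the prot choice is illustrative.
 */
static void __iomem *example_map_device_pfns(unsigned long *pfns,
					     unsigned int count)
{
	return (void __iomem *)vmap_pfn(pfns, count,
					pgprot_noncached(PAGE_KERNEL));
}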
2778#endif /* CONFIG_VMAP_PFN */
2779
2780static inline unsigned int
2781vm_area_alloc_pages(gfp_t gfp, int nid,
2782		unsigned int order, unsigned long nr_pages, struct page **pages)
2783{
2784	unsigned int nr_allocated = 0;
2785
2786	/*
2787	 * For order-0 pages we make use of the bulk allocator; if
2788	 * the page array is only partially populated (or not at all)
2789	 * due to failures, fall back to the single page allocator,
2790	 * which is more permissive.
2791	 */
2792	if (!order)
2793		nr_allocated = alloc_pages_bulk_array_node(
2794			gfp, nid, nr_pages, pages);
2795	else
2796		/*
2797		 * Compound pages are required when high-order pages
2798		 * are remapped to userspace via remap_vmalloc_range().
2799		 */
2800		gfp |= __GFP_COMP;
2801
2802	/* High-order pages or fallback path if "bulk" fails. */
2803	while (nr_allocated < nr_pages) {
2804		struct page *page;
2805		int i;
2806
2807		page = alloc_pages_node(nid, gfp, order);
2808		if (unlikely(!page))
2809			break;
2810
2811		/*
2812		 * Careful, we allocate and map page-order pages, but
2813		 * tracking is done per PAGE_SIZE page so as to keep the
2814		 * vm_struct APIs independent of the physical/mapped size.
2815		 */
2816		for (i = 0; i < (1U << order); i++)
2817			pages[nr_allocated + i] = page + i;
2818
2819		if (gfpflags_allow_blocking(gfp))
2820			cond_resched();
2821
2822		nr_allocated += 1U << order;
2823	}
2824
2825	return nr_allocated;
2826}
2827
2828static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
2829				 pgprot_t prot, unsigned int page_shift,
2830				 int node)
2831{
2832	const gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
2833	unsigned long addr = (unsigned long)area->addr;
2834	unsigned long size = get_vm_area_size(area);
2835	unsigned long array_size;
2836	unsigned int nr_small_pages = size >> PAGE_SHIFT;
2837	unsigned int page_order;
2838
2839	array_size = (unsigned long)nr_small_pages * sizeof(struct page *);
2840	gfp_mask |= __GFP_NOWARN;
2841	if (!(gfp_mask & (GFP_DMA | GFP_DMA32)))
2842		gfp_mask |= __GFP_HIGHMEM;
2843
2844	/* Please note that the recursion is strictly bounded. */
2845	if (array_size > PAGE_SIZE) {
2846		area->pages = __vmalloc_node(array_size, 1, nested_gfp, node,
2847					area->caller);
2848	} else {
2849		area->pages = kmalloc_node(array_size, nested_gfp, node);
2850	}
2851
2852	if (!area->pages) {
2853		warn_alloc(gfp_mask, NULL,
2854			"vmalloc error: size %lu, failed to allocate page array size %lu",
2855			nr_small_pages * PAGE_SIZE, array_size);
2856		free_vm_area(area);
2857		return NULL;
2858	}
2859
2860	set_vm_area_page_order(area, page_shift - PAGE_SHIFT);
2861	page_order = vm_area_page_order(area);
2862
2863	area->nr_pages = vm_area_alloc_pages(gfp_mask, node,
2864		page_order, nr_small_pages, area->pages);
2865
2866	atomic_long_add(area->nr_pages, &nr_vmalloc_pages);
2867
2868	/*
2869	 * If not enough pages were obtained to satisfy the
2870	 * allocation request, free whatever was obtained via __vfree().
2871	 */
2872	if (area->nr_pages != nr_small_pages) {
2873		warn_alloc(gfp_mask, NULL,
2874			"vmalloc error: size %lu, page order %u, failed to allocate pages",
2875			area->nr_pages * PAGE_SIZE, page_order);
2876		goto fail;
2877	}
2878
2879	if (vmap_pages_range(addr, addr + size, prot, area->pages,
2880			page_shift) < 0) {
2881		warn_alloc(gfp_mask, NULL,
2882			"vmalloc error: size %lu, failed to map pages",
2883			area->nr_pages * PAGE_SIZE);
2884		goto fail;
2885	}
2886
2887	return area->addr;
2888
2889fail:
2890	__vfree(area->addr);
2891	return NULL;
2892}
2893
2894/**
2895 * __vmalloc_node_range - allocate virtually contiguous memory
2896 * @size:		  allocation size
2897 * @align:		  desired alignment
2898 * @start:		  vm area range start
2899 * @end:		  vm area range end
2900 * @gfp_mask:		  flags for the page level allocator
2901 * @prot:		  protection mask for the allocated pages
2902 * @vm_flags:		  additional vm area flags (e.g. %VM_NO_GUARD)
2903 * @node:		  node to use for allocation or NUMA_NO_NODE
2904 * @caller:		  caller's return address
2905 *
2906 * Allocate enough pages to cover @size from the page level
2907 * allocator with @gfp_mask flags.  Map them into contiguous
2908 * kernel virtual space, using a pagetable protection of @prot.
2909 *
2910 * Return: the address of the area or %NULL on failure
2911 */
2912void *__vmalloc_node_range(unsigned long size, unsigned long align,
2913			unsigned long start, unsigned long end, gfp_t gfp_mask,
2914			pgprot_t prot, unsigned long vm_flags, int node,
2915			const void *caller)
2916{
2917	struct vm_struct *area;
2918	void *addr;
2919	unsigned long real_size = size;
2920	unsigned long real_align = align;
2921	unsigned int shift = PAGE_SHIFT;
2922
2923	if (WARN_ON_ONCE(!size))
2924		return NULL;
2925
2926	if ((size >> PAGE_SHIFT) > totalram_pages()) {
2927		warn_alloc(gfp_mask, NULL,
2928			"vmalloc error: size %lu, exceeds total pages",
2929			real_size);
2930		return NULL;
2931	}
2932
2933	if (vmap_allow_huge && !(vm_flags & VM_NO_HUGE_VMAP)) {
2934		unsigned long size_per_node;
2935
2936		/*
2937		 * Try huge pages. Only try for PAGE_KERNEL allocations,
2938		 * others like modules don't yet expect huge pages in
2939		 * their allocations due to apply_to_page_range not
2940		 * supporting them.
2941		 */
2942
2943		size_per_node = size;
2944		if (node == NUMA_NO_NODE)
2945			size_per_node /= num_online_nodes();
2946		if (arch_vmap_pmd_supported(prot) && size_per_node >= PMD_SIZE)
2947			shift = PMD_SHIFT;
2948		else
2949			shift = arch_vmap_pte_supported_shift(size_per_node);
2950
2951		align = max(real_align, 1UL << shift);
2952		size = ALIGN(real_size, 1UL << shift);
2953	}
2954
2955again:
2956	area = __get_vm_area_node(real_size, align, shift, VM_ALLOC |
2957				  VM_UNINITIALIZED | vm_flags, start, end, node,
2958				  gfp_mask, caller);
2959	if (!area) {
2960		warn_alloc(gfp_mask, NULL,
2961			"vmalloc error: size %lu, vm_struct allocation failed",
2962			real_size);
2963		goto fail;
2964	}
2965
2966	addr = __vmalloc_area_node(area, gfp_mask, prot, shift, node);
2967	if (!addr)
2968		goto fail;
2969
2970	/*
2971	 * In this function, newly allocated vm_struct has VM_UNINITIALIZED
2972	 * flag. It means that vm_struct is not fully initialized.
2973	 * Now, it is fully initialized, so remove this flag here.
2974	 */
2975	clear_vm_uninitialized_flag(area);
2976
2977	size = PAGE_ALIGN(size);
2978	kmemleak_vmalloc(area, size, gfp_mask);
2979
2980	return addr;
2981
2982fail:
2983	if (shift > PAGE_SHIFT) {
2984		shift = PAGE_SHIFT;
2985		align = real_align;
2986		size = real_size;
2987		goto again;
2988	}
2989
2990	return NULL;
2991}
2992
2993/**
2994 * __vmalloc_node - allocate virtually contiguous memory
2995 * @size:	    allocation size
2996 * @align:	    desired alignment
2997 * @gfp_mask:	    flags for the page level allocator
2998 * @node:	    node to use for allocation or NUMA_NO_NODE
2999 * @caller:	    caller's return address
3000 *
3001 * Allocate enough pages to cover @size from the page level allocator with
3002 * @gfp_mask flags.  Map them into contiguous kernel virtual space.
3003 *
3004 * Reclaim modifiers in @gfp_mask - __GFP_NORETRY, __GFP_RETRY_MAYFAIL
3005 * and __GFP_NOFAIL are not supported
3006 *
3007 * Any use of gfp flags outside of GFP_KERNEL should be discussed
3008 * with the mm people.
3009 *
3010 * Return: pointer to the allocated memory or %NULL on error
3011 */
3012void *__vmalloc_node(unsigned long size, unsigned long align,
3013			    gfp_t gfp_mask, int node, const void *caller)
3014{
3015	return __vmalloc_node_range(size, align, VMALLOC_START, VMALLOC_END,
3016				gfp_mask, PAGE_KERNEL, 0, node, caller);
3017}
3018/*
3019 * This is only for performance analysis and stress testing of vmalloc.
3020 * It is required by the vmalloc test module; do not use it for
3021 * anything else.
3022 */
3023#ifdef CONFIG_TEST_VMALLOC_MODULE
3024EXPORT_SYMBOL_GPL(__vmalloc_node);
3025#endif
3026
3027void *__vmalloc(unsigned long size, gfp_t gfp_mask)
3028{
3029	return __vmalloc_node(size, 1, gfp_mask, NUMA_NO_NODE,
3030				__builtin_return_address(0));
3031}
3032EXPORT_SYMBOL(__vmalloc);
3033
3034/**
3035 * vmalloc - allocate virtually contiguous memory
3036 * @size:    allocation size
3037 *
3038 * Allocate enough pages to cover @size from the page level
3039 * allocator and map them into contiguous kernel virtual space.
3040 *
3041 * For tight control over page level allocator and protection flags
3042 * use __vmalloc() instead.
3043 *
3044 * Return: pointer to the allocated memory or %NULL on error
3045 */
3046void *vmalloc(unsigned long size)
3047{
3048	return __vmalloc_node(size, 1, GFP_KERNEL, NUMA_NO_NODE,
3049				__builtin_return_address(0));
3050}
3051EXPORT_SYMBOL(vmalloc);
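
/*
 * Editor's sketch: the canonical round trip.  The buffer is virtually
 * contiguous but may span many discontiguous physical pages.
 */
static int example_vmalloc_roundtrip(void)
{
	u32 *buf = vmalloc(1024 * sizeof(*buf));

	if (!buf)
		return -ENOMEM;
	buf[0] = 1;		/* use like any other kernel buffer */
	vfree(buf);
	return 0;
}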
3052
3053/**
3054 * vmalloc_no_huge - allocate virtually contiguous memory using small pages
3055 * @size:    allocation size
3056 *
3057 * Allocate enough non-huge pages to cover @size from the page level
3058 * allocator and map them into contiguous kernel virtual space.
3059 *
3060 * Return: pointer to the allocated memory or %NULL on error
3061 */
3062void *vmalloc_no_huge(unsigned long size)
3063{
3064	return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
3065				    GFP_KERNEL, PAGE_KERNEL, VM_NO_HUGE_VMAP,
3066				    NUMA_NO_NODE, __builtin_return_address(0));
3067}
3068EXPORT_SYMBOL(vmalloc_no_huge);
3069
3070/**
3071 * vzalloc - allocate virtually contiguous memory with zero fill
3072 * @size:    allocation size
3073 *
3074 * Allocate enough pages to cover @size from the page level
3075 * allocator and map them into contiguous kernel virtual space.
3076 * The memory allocated is set to zero.
3077 *
3078 * For tight control over page level allocator and protection flags
3079 * use __vmalloc() instead.
3080 *
3081 * Return: pointer to the allocated memory or %NULL on error
3082 */
3083void *vzalloc(unsigned long size)
3084{
3085	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE,
3086				__builtin_return_address(0));
3087}
3088EXPORT_SYMBOL(vzalloc);
3089
3090/**
3091 * vmalloc_user - allocate zeroed virtually contiguous memory for userspace
3092 * @size: allocation size
3093 *
3094 * The resulting memory area is zeroed so it can be mapped to userspace
3095 * without leaking data.
3096 *
3097 * Return: pointer to the allocated memory or %NULL on error
3098 */
3099void *vmalloc_user(unsigned long size)
3100{
3101	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3102				    GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL,
3103				    VM_USERMAP, NUMA_NO_NODE,
3104				    __builtin_return_address(0));
3105}
3106EXPORT_SYMBOL(vmalloc_user);
3107
3108/**
3109 * vmalloc_node - allocate memory on a specific node
3110 * @size:	  allocation size
3111 * @node:	  numa node
3112 *
3113 * Allocate enough pages to cover @size from the page level
3114 * allocator and map them into contiguous kernel virtual space.
3115 *
3116 * For tight control over page level allocator and protection flags
3117 * use __vmalloc() instead.
3118 *
3119 * Return: pointer to the allocated memory or %NULL on error
3120 */
3121void *vmalloc_node(unsigned long size, int node)
3122{
3123	return __vmalloc_node(size, 1, GFP_KERNEL, node,
3124			__builtin_return_address(0));
3125}
3126EXPORT_SYMBOL(vmalloc_node);
3127
3128/**
3129 * vzalloc_node - allocate memory on a specific node with zero fill
3130 * @size:	allocation size
3131 * @node:	numa node
3132 *
3133 * Allocate enough pages to cover @size from the page level
3134 * allocator and map them into contiguous kernel virtual space.
3135 * The memory allocated is set to zero.
3136 *
3137 * Return: pointer to the allocated memory or %NULL on error
3138 */
3139void *vzalloc_node(unsigned long size, int node)
3140{
3141	return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_ZERO, node,
3142				__builtin_return_address(0));
3143}
3144EXPORT_SYMBOL(vzalloc_node);
3145
3146#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
3147#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3148#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
3149#define GFP_VMALLOC32 (GFP_DMA | GFP_KERNEL)
3150#else
3151/*
3152 * 64b systems should always have either DMA or DMA32 zones. For others
3153 * GFP_DMA32 should do the right thing and use the normal zone.
3154 */
3155#define GFP_VMALLOC32 (GFP_DMA32 | GFP_KERNEL)
3156#endif
3157
3158/**
3159 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
3160 * @size:	allocation size
3161 *
3162 * Allocate enough 32bit PA addressable pages to cover @size from the
3163 * page level allocator and map them into contiguous kernel virtual space.
3164 *
3165 * Return: pointer to the allocated memory or %NULL on error
3166 */
3167void *vmalloc_32(unsigned long size)
3168{
3169	return __vmalloc_node(size, 1, GFP_VMALLOC32, NUMA_NO_NODE,
3170			__builtin_return_address(0));
3171}
3172EXPORT_SYMBOL(vmalloc_32);
3173
3174/**
3175 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
3176 * @size:	     allocation size
3177 *
3178 * The resulting memory area is 32bit addressable and zeroed so it can be
3179 * mapped to userspace without leaking data.
3180 *
3181 * Return: pointer to the allocated memory or %NULL on error
3182 */
3183void *vmalloc_32_user(unsigned long size)
3184{
3185	return __vmalloc_node_range(size, SHMLBA,  VMALLOC_START, VMALLOC_END,
3186				    GFP_VMALLOC32 | __GFP_ZERO, PAGE_KERNEL,
3187				    VM_USERMAP, NUMA_NO_NODE,
3188				    __builtin_return_address(0));
3189}
3190EXPORT_SYMBOL(vmalloc_32_user);
3191
3192/*
3193 * Small helper routine: copy contents from addr to buf.
3194 * If a page is not present, fill with zeroes.
3195 */
3196
3197static int aligned_vread(char *buf, char *addr, unsigned long count)
3198{
3199	struct page *p;
3200	int copied = 0;
3201
3202	while (count) {
3203		unsigned long offset, length;
3204
3205		offset = offset_in_page(addr);
3206		length = PAGE_SIZE - offset;
3207		if (length > count)
3208			length = count;
3209		p = vmalloc_to_page(addr);
3210		/*
3211		 * To access this _mapped_ area safely we would need a
3212		 * lock, but taking one here would add vmalloc()/vfree()
3213		 * overhead to this rarely used _debug_ interface.
3214		 * Instead, we use kmap_atomic() and accept a small
3215		 * overhead in this access function.
3216		 */
3217		if (p) {
3218			/* We can expect USER0 is not used -- see vread() */
3219			void *map = kmap_atomic(p);
3220			memcpy(buf, map + offset, length);
3221			kunmap_atomic(map);
3222		} else
3223			memset(buf, 0, length);
3224
3225		addr += length;
3226		buf += length;
3227		copied += length;
3228		count -= length;
3229	}
3230	return copied;
3231}
3232
3233/**
3234 * vread() - read vmalloc area in a safe way.
3235 * @buf:     buffer for reading data
3236 * @addr:    vm address.
3237 * @count:   number of bytes to be read.
3238 *
3239 * This function checks that addr is a valid vmalloc'ed area, and
3240 * copies data from that area to the given buffer. If the given memory
3241 * range of [addr...addr+count) includes some valid address, data is
3242 * copied to the proper area of @buf; memory holes are zero-filled.
3243 * An IOREMAP area is treated as a memory hole and no copy is done.
3244 *
3245 * If [addr...addr+count) doesn't include any intersection with an alive
3246 * vm_struct area, 0 is returned. @buf should be a kernel buffer.
3247 *
3248 * Note: In usual ops, vread() is never necessary because the caller
3249 * should know the vmalloc() area is valid and can use memcpy().
3250 * This is for routines which have to access the vmalloc area without
3251 * any prior information, such as /proc/kcore.
3252 *
3253 * Return: number of bytes for which addr and buf should be increased
3254 * (same number as @count) or %0 if [addr...addr+count) doesn't
3255 * include any intersection with valid vmalloc area
3256 */
3257long vread(char *buf, char *addr, unsigned long count)
3258{
3259	struct vmap_area *va;
3260	struct vm_struct *vm;
3261	char *vaddr, *buf_start = buf;
3262	unsigned long buflen = count;
3263	unsigned long n;
3264
3265	/* Don't allow overflow */
3266	if ((unsigned long) addr + count < count)
3267		count = -(unsigned long) addr;
3268
3269	spin_lock(&vmap_area_lock);
3270	va = __find_vmap_area((unsigned long)addr);
3271	if (!va)
3272		goto finished;
3273	list_for_each_entry_from(va, &vmap_area_list, list) {
3274		if (!count)
3275			break;
3276
3277		if (!va->vm)
3278			continue;
3279
3280		vm = va->vm;
3281		vaddr = (char *) vm->addr;
3282		if (addr >= vaddr + get_vm_area_size(vm))
3283			continue;
3284		while (addr < vaddr) {
3285			if (count == 0)
3286				goto finished;
3287			*buf = '\0';
3288			buf++;
3289			addr++;
3290			count--;
3291		}
3292		n = vaddr + get_vm_area_size(vm) - addr;
3293		if (n > count)
3294			n = count;
3295		if (!(vm->flags & VM_IOREMAP))
3296			aligned_vread(buf, addr, n);
3297		else /* IOREMAP area is treated as memory hole */
3298			memset(buf, 0, n);
3299		buf += n;
3300		addr += n;
3301		count -= n;
3302	}
3303finished:
3304	spin_unlock(&vmap_area_lock);
3305
3306	if (buf == buf_start)
3307		return 0;
3308	/* zero-fill memory holes */
3309	if (buf != buf_start + buflen)
3310		memset(buf, 0, buflen - (buf - buf_start));
3311
3312	return buflen;
3313}
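
/*
 * Editor's sketch of a vread() consumer in the /proc/kcore mould:
 * copy a window of vmalloc space into @kbuf, with holes arriving
 * zero-filled.  The helper name and error convention are illustrative.
 */
static long example_read_vmalloc_window(char *kbuf, char *addr,
					unsigned long len)
{
	long n = vread(kbuf, addr, len);

	return n ? n : -ENXIO;	/* 0: no vmalloc area intersected */
}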
3314
3315/**
3316 * remap_vmalloc_range_partial - map vmalloc pages to userspace
3317 * @vma:		vma to cover
3318 * @uaddr:		target user address to start at
3319 * @kaddr:		virtual address of vmalloc kernel memory
3320 * @pgoff:		offset from @kaddr to start at
3321 * @size:		size of map area
3322 *
3323 * Returns:	0 for success, -Exxx on failure
3324 *
3325 * This function checks that @kaddr is a valid vmalloc'ed area,
3326 * and that it is big enough to cover the range starting at
3327 * @uaddr in @vma. Will return failure if those criteria aren't
3328 * met.
3329 *
3330 * Similar to remap_pfn_range() (see mm/memory.c)
3331 */
3332int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
3333				void *kaddr, unsigned long pgoff,
3334				unsigned long size)
3335{
3336	struct vm_struct *area;
3337	unsigned long off;
3338	unsigned long end_index;
3339
3340	if (check_shl_overflow(pgoff, PAGE_SHIFT, &off))
3341		return -EINVAL;
3342
3343	size = PAGE_ALIGN(size);
3344
3345	if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
3346		return -EINVAL;
3347
3348	area = find_vm_area(kaddr);
3349	if (!area)
3350		return -EINVAL;
3351
3352	if (!(area->flags & (VM_USERMAP | VM_DMA_COHERENT)))
3353		return -EINVAL;
3354
3355	if (check_add_overflow(size, off, &end_index) ||
3356	    end_index > get_vm_area_size(area))
3357		return -EINVAL;
3358	kaddr += off;
3359
3360	do {
3361		struct page *page = vmalloc_to_page(kaddr);
3362		int ret;
3363
3364		ret = vm_insert_page(vma, uaddr, page);
3365		if (ret)
3366			return ret;
3367
3368		uaddr += PAGE_SIZE;
3369		kaddr += PAGE_SIZE;
3370		size -= PAGE_SIZE;
3371	} while (size > 0);
3372
3373	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
3374
3375	return 0;
3376}
3377
3378/**
3379 * remap_vmalloc_range - map vmalloc pages to userspace
3380 * @vma:		vma to cover (map full range of vma)
3381 * @addr:		vmalloc memory
3382 * @pgoff:		number of pages into addr before first page to map
3383 *
3384 * Returns:	0 for success, -Exxx on failure
3385 *
3386 * This function checks that addr is a valid vmalloc'ed area, and
3387 * that it is big enough to cover the vma. Will return failure if
3388 * those criteria aren't met.
3389 *
3390 * Similar to remap_pfn_range() (see mm/memory.c)
3391 */
3392int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
3393						unsigned long pgoff)
3394{
3395	return remap_vmalloc_range_partial(vma, vma->vm_start,
3396					   addr, pgoff,
3397					   vma->vm_end - vma->vm_start);
3398}
3399EXPORT_SYMBOL(remap_vmalloc_range);
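
/*
 * Editor's sketch: the usual pairing with vmalloc_user().  Only
 * buffers carrying VM_USERMAP (or VM_DMA_COHERENT) pass the check in
 * remap_vmalloc_range_partial(); example_buf is hypothetical driver
 * state assumed to come from vmalloc_user().
 */
static void *example_buf;

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	return remap_vmalloc_range(vma, example_buf, vma->vm_pgoff);
}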
3400
3401void free_vm_area(struct vm_struct *area)
3402{
3403	struct vm_struct *ret;
3404	ret = remove_vm_area(area->addr);
3405	BUG_ON(ret != area);
3406	kfree(area);
3407}
3408EXPORT_SYMBOL_GPL(free_vm_area);
3409
3410#ifdef CONFIG_SMP
3411static struct vmap_area *node_to_va(struct rb_node *n)
3412{
3413	return rb_entry_safe(n, struct vmap_area, rb_node);
3414}
3415
3416/**
3417 * pvm_find_va_enclose_addr - find the vmap_area @addr belongs to
3418 * @addr: target address
3419 *
3420 * Returns: the vmap_area if it is found. If there is no such area,
3421 *   the highest preceding (in reverse order) vmap_area is returned,
3422 *   i.e. va->va_start < addr && va->va_end < addr, or NULL
3423 *   if there are no areas before @addr.
3424 */
3425static struct vmap_area *
3426pvm_find_va_enclose_addr(unsigned long addr)
3427{
3428	struct vmap_area *va, *tmp;
3429	struct rb_node *n;
3430
3431	n = free_vmap_area_root.rb_node;
3432	va = NULL;
3433
3434	while (n) {
3435		tmp = rb_entry(n, struct vmap_area, rb_node);
3436		if (tmp->va_start <= addr) {
3437			va = tmp;
3438			if (tmp->va_end >= addr)
3439				break;
3440
3441			n = n->rb_right;
3442		} else {
3443			n = n->rb_left;
3444		}
3445	}
3446
3447	return va;
3448}
3449
3450/**
3451 * pvm_determine_end_from_reverse - find the highest aligned address
3452 * of a free block below VMALLOC_END
3453 * @va:
3454 *   in - the VA where the search starts (in reverse order);
3455 *   out - the VA with the highest aligned end address.
3456 * @align: alignment for required highest address
3457 *
3458 * Returns: determined end address within vmap_area
3459 */
3460static unsigned long
3461pvm_determine_end_from_reverse(struct vmap_area **va, unsigned long align)
3462{
3463	unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3464	unsigned long addr;
3465
3466	if (likely(*va)) {
3467		list_for_each_entry_from_reverse((*va),
3468				&free_vmap_area_list, list) {
3469			addr = min((*va)->va_end & ~(align - 1), vmalloc_end);
3470			if ((*va)->va_start < addr)
3471				return addr;
3472		}
3473	}
3474
3475	return 0;
3476}
3477
3478/**
3479 * pcpu_get_vm_areas - allocate vmalloc areas for percpu allocator
3480 * @offsets: array containing offset of each area
3481 * @sizes: array containing size of each area
3482 * @nr_vms: the number of areas to allocate
3483 * @align: alignment, all entries in @offsets and @sizes must be aligned to this
3484 *
3485 * Returns: kmalloc'd vm_struct pointer array pointing to allocated
3486 *	    vm_structs on success, %NULL on failure
3487 *
3488 * Percpu allocator wants to use congruent vm areas so that it can
3489 * maintain the offsets among percpu areas.  This function allocates
3490 * congruent vmalloc areas for it with GFP_KERNEL.  These areas tend to
3491 * be scattered pretty far apart, with the distance between two areas
3492 * easily reaching gigabytes.  To avoid interacting with regular
3493 * vmallocs, these areas are allocated from the top.
3494 *
3495 * Despite its complicated look, this allocator is rather simple.  It
3496 * does everything top-down and scans free blocks from the end looking
3497 * for a matching base.  While scanning, if any of the areas does not
3498 * fit, the base address is pulled down to fit that area.  Scanning is
3499 * repeated until all the areas fit, at which point all necessary data
3500 * structures are inserted and the result is returned.
3501 */
3502struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
3503				     const size_t *sizes, int nr_vms,
3504				     size_t align)
3505{
3506	const unsigned long vmalloc_start = ALIGN(VMALLOC_START, align);
3507	const unsigned long vmalloc_end = VMALLOC_END & ~(align - 1);
3508	struct vmap_area **vas, *va;
3509	struct vm_struct **vms;
3510	int area, area2, last_area, term_area;
3511	unsigned long base, start, size, end, last_end, orig_start, orig_end;
3512	bool purged = false;
3513	enum fit_type type;
3514
3515	/* verify parameters and allocate data structures */
3516	BUG_ON(offset_in_page(align) || !is_power_of_2(align));
3517	for (last_area = 0, area = 0; area < nr_vms; area++) {
3518		start = offsets[area];
3519		end = start + sizes[area];
3520
3521		/* is everything aligned properly? */
3522		BUG_ON(!IS_ALIGNED(offsets[area], align));
3523		BUG_ON(!IS_ALIGNED(sizes[area], align));
3524
3525		/* detect the area with the highest address */
3526		if (start > offsets[last_area])
3527			last_area = area;
3528
3529		for (area2 = area + 1; area2 < nr_vms; area2++) {
3530			unsigned long start2 = offsets[area2];
3531			unsigned long end2 = start2 + sizes[area2];
3532
3533			BUG_ON(start2 < end && start < end2);
3534		}
3535	}
3536	last_end = offsets[last_area] + sizes[last_area];
3537
3538	if (vmalloc_end - vmalloc_start < last_end) {
3539		WARN_ON(true);
3540		return NULL;
3541	}
3542
3543	vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
3544	vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
3545	if (!vas || !vms)
3546		goto err_free2;
3547
3548	for (area = 0; area < nr_vms; area++) {
3549		vas[area] = kmem_cache_zalloc(vmap_area_cachep, GFP_KERNEL);
3550		vms[area] = kzalloc(sizeof(struct vm_struct), GFP_KERNEL);
3551		if (!vas[area] || !vms[area])
3552			goto err_free;
3553	}
3554retry:
3555	spin_lock(&free_vmap_area_lock);
3556
3557	/* start scanning - we scan from the top, begin with the last area */
3558	area = term_area = last_area;
3559	start = offsets[area];
3560	end = start + sizes[area];
3561
3562	va = pvm_find_va_enclose_addr(vmalloc_end);
3563	base = pvm_determine_end_from_reverse(&va, align) - end;
3564
3565	while (true) {
3566		/*
3567		 * base might have underflowed, add last_end before
3568		 * comparing.
3569		 */
3570		if (base + last_end < vmalloc_start + last_end)
3571			goto overflow;
3572
3573		/*
3574		 * Fitting base has not been found.
3575		 */
3576		if (va == NULL)
3577			goto overflow;
3578
3579		/*
3580		 * If required width exceeds current VA block, move
3581		 * base downwards and then recheck.
3582		 */
3583		if (base + end > va->va_end) {
3584			base = pvm_determine_end_from_reverse(&va, align) - end;
3585			term_area = area;
3586			continue;
3587		}
3588
3589		/*
3590		 * If this VA does not fit, move base downwards and recheck.
3591		 */
3592		if (base + start < va->va_start) {
3593			va = node_to_va(rb_prev(&va->rb_node));
3594			base = pvm_determine_end_from_reverse(&va, align) - end;
3595			term_area = area;
3596			continue;
3597		}
3598
3599		/*
3600		 * This area fits, move on to the previous one.  If
3601		 * the previous one is the terminal one, we're done.
3602		 */
3603		area = (area + nr_vms - 1) % nr_vms;
3604		if (area == term_area)
3605			break;
3606
3607		start = offsets[area];
3608		end = start + sizes[area];
3609		va = pvm_find_va_enclose_addr(base + end);
3610	}
3611
3612	/* we've found a fitting base, insert all va's */
3613	for (area = 0; area < nr_vms; area++) {
3614		int ret;
3615
3616		start = base + offsets[area];
3617		size = sizes[area];
3618
3619		va = pvm_find_va_enclose_addr(start);
3620		if (WARN_ON_ONCE(va == NULL))
3621			/* It is a BUG(), but trigger recovery instead. */
3622			goto recovery;
3623
3624		type = classify_va_fit_type(va, start, size);
3625		if (WARN_ON_ONCE(type == NOTHING_FIT))
3626			/* It is a BUG(), but trigger recovery instead. */
3627			goto recovery;
3628
3629		ret = adjust_va_to_fit_type(va, start, size, type);
3630		if (unlikely(ret))
3631			goto recovery;
3632
3633		/* Allocated area. */
3634		va = vas[area];
3635		va->va_start = start;
3636		va->va_end = start + size;
3637	}
3638
3639	spin_unlock(&free_vmap_area_lock);
3640
3641	/* populate the kasan shadow space */
3642	for (area = 0; area < nr_vms; area++) {
3643		if (kasan_populate_vmalloc(vas[area]->va_start, sizes[area]))
3644			goto err_free_shadow;
3645
3646		kasan_unpoison_vmalloc((void *)vas[area]->va_start,
3647				       sizes[area]);
3648	}
3649
3650	/* insert all vm's */
3651	spin_lock(&vmap_area_lock);
3652	for (area = 0; area < nr_vms; area++) {
3653		insert_vmap_area(vas[area], &vmap_area_root, &vmap_area_list);
3654
3655		setup_vmalloc_vm_locked(vms[area], vas[area], VM_ALLOC,
3656				 pcpu_get_vm_areas);
3657	}
3658	spin_unlock(&vmap_area_lock);
3659
3660	kfree(vas);
3661	return vms;
3662
3663recovery:
3664	/*
3665	 * Remove previously allocated areas. There is no
3666	 * need to remove these areas from the busy tree,
3667	 * because they are inserted only in the final step,
3668	 * and only when pcpu_get_vm_areas() succeeds.
3669	 */
3670	while (area--) {
3671		orig_start = vas[area]->va_start;
3672		orig_end = vas[area]->va_end;
3673		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3674				&free_vmap_area_list);
3675		if (va)
3676			kasan_release_vmalloc(orig_start, orig_end,
3677				va->va_start, va->va_end);
3678		vas[area] = NULL;
3679	}
3680
3681overflow:
3682	spin_unlock(&free_vmap_area_lock);
3683	if (!purged) {
3684		purge_vmap_area_lazy();
3685		purged = true;
3686
3687		/* Before "retry", check if we recover. */
3688		for (area = 0; area < nr_vms; area++) {
3689			if (vas[area])
3690				continue;
3691
3692			vas[area] = kmem_cache_zalloc(
3693				vmap_area_cachep, GFP_KERNEL);
3694			if (!vas[area])
3695				goto err_free;
3696		}
3697
3698		goto retry;
3699	}
3700
3701err_free:
3702	for (area = 0; area < nr_vms; area++) {
3703		if (vas[area])
3704			kmem_cache_free(vmap_area_cachep, vas[area]);
3705
3706		kfree(vms[area]);
3707	}
3708err_free2:
3709	kfree(vas);
3710	kfree(vms);
3711	return NULL;
3712
3713err_free_shadow:
3714	spin_lock(&free_vmap_area_lock);
3715	/*
3716	 * We release all the vmalloc shadows, even the ones for regions that
3717	 * hadn't been successfully added. This relies on kasan_release_vmalloc
3718	 * being able to tolerate this case.
3719	 */
3720	for (area = 0; area < nr_vms; area++) {
3721		orig_start = vas[area]->va_start;
3722		orig_end = vas[area]->va_end;
3723		va = merge_or_add_vmap_area_augment(vas[area], &free_vmap_area_root,
3724				&free_vmap_area_list);
3725		if (va)
3726			kasan_release_vmalloc(orig_start, orig_end,
3727				va->va_start, va->va_end);
3728		vas[area] = NULL;
3729		kfree(vms[area]);
3730	}
3731	spin_unlock(&free_vmap_area_lock);
3732	kfree(vas);
3733	kfree(vms);
3734	return NULL;
3735}
3736
3737/**
3738 * pcpu_free_vm_areas - free vmalloc areas for percpu allocator
3739 * @vms: vm_struct pointer array returned by pcpu_get_vm_areas()
3740 * @nr_vms: the number of allocated areas
3741 *
3742 * Free vm_structs and the array allocated by pcpu_get_vm_areas().
3743 */
3744void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
3745{
3746	int i;
3747
3748	for (i = 0; i < nr_vms; i++)
3749		free_vm_area(vms[i]);
3750	kfree(vms);
3751}
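
/*
 * Editor's sketch: the percpu allocator is the only intended caller.
 * It requests nr_vms congruent areas whose pairwise offsets are
 * preserved, and frees them as a group.  The numbers are illustrative
 * and satisfy the alignment/overlap checks above.
 */
static void example_pcpu_areas(void)
{
	const unsigned long offsets[] = { 0, SZ_1M };
	const size_t sizes[] = { SZ_64K, SZ_64K };
	struct vm_struct **vms;

	vms = pcpu_get_vm_areas(offsets, sizes, 2, SZ_64K);
	if (vms)
		pcpu_free_vm_areas(vms, 2);
}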
3752#endif	/* CONFIG_SMP */
3753
3754#ifdef CONFIG_PRINTK
3755bool vmalloc_dump_obj(void *object)
3756{
3757	struct vm_struct *vm;
3758	void *objp = (void *)PAGE_ALIGN((unsigned long)object);
3759
3760	vm = find_vm_area(objp);
3761	if (!vm)
3762		return false;
3763	pr_cont(" %u-page vmalloc region starting at %#lx allocated at %pS\n",
3764		vm->nr_pages, (unsigned long)vm->addr, vm->caller);
3765	return true;
3766}
3767#endif
3768
3769#ifdef CONFIG_PROC_FS
3770static void *s_start(struct seq_file *m, loff_t *pos)
3771	__acquires(&vmap_purge_lock)
3772	__acquires(&vmap_area_lock)
3773{
3774	mutex_lock(&vmap_purge_lock);
3775	spin_lock(&vmap_area_lock);
3776
3777	return seq_list_start(&vmap_area_list, *pos);
3778}
3779
3780static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3781{
3782	return seq_list_next(p, &vmap_area_list, pos);
3783}
3784
3785static void s_stop(struct seq_file *m, void *p)
3786	__releases(&vmap_area_lock)
3787	__releases(&vmap_purge_lock)
3788{
3789	spin_unlock(&vmap_area_lock);
3790	mutex_unlock(&vmap_purge_lock);
3791}
3792
3793static void show_numa_info(struct seq_file *m, struct vm_struct *v)
3794{
3795	if (IS_ENABLED(CONFIG_NUMA)) {
3796		unsigned int nr, *counters = m->private;
3797
3798		if (!counters)
3799			return;
3800
3801		if (v->flags & VM_UNINITIALIZED)
3802			return;
3803		/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
3804		smp_rmb();
3805
3806		memset(counters, 0, nr_node_ids * sizeof(unsigned int));
3807
3808		for (nr = 0; nr < v->nr_pages; nr++)
3809			counters[page_to_nid(v->pages[nr])]++;
3810
3811		for_each_node_state(nr, N_HIGH_MEMORY)
3812			if (counters[nr])
3813				seq_printf(m, " N%u=%u", nr, counters[nr]);
3814	}
3815}
3816
3817static void show_purge_info(struct seq_file *m)
3818{
3819	struct vmap_area *va;
3820
3821	spin_lock(&purge_vmap_area_lock);
3822	list_for_each_entry(va, &purge_vmap_area_list, list) {
3823		seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
3824			(void *)va->va_start, (void *)va->va_end,
3825			va->va_end - va->va_start);
3826	}
3827	spin_unlock(&purge_vmap_area_lock);
3828}
3829
3830static int s_show(struct seq_file *m, void *p)
3831{
3832	struct vmap_area *va;
3833	struct vm_struct *v;
3834
3835	va = list_entry(p, struct vmap_area, list);
3836
3837	/*
3838	 * s_show can race with remove_vm_area(): a NULL va->vm means the
3839	 * vmap area is being torn down or is a vm_map_ram allocation.
3840	 */
3841	if (!va->vm) {
3842		seq_printf(m, "0x%pK-0x%pK %7ld vm_map_ram\n",
3843			(void *)va->va_start, (void *)va->va_end,
3844			va->va_end - va->va_start);
3845
3846		return 0;
3847	}
3848
3849	v = va->vm;
3850
3851	seq_printf(m, "0x%pK-0x%pK %7ld",
3852		v->addr, v->addr + v->size, v->size);
3853
3854	if (v->caller)
3855		seq_printf(m, " %pS", v->caller);
3856
3857	if (v->nr_pages)
3858		seq_printf(m, " pages=%d", v->nr_pages);
3859
3860	if (v->phys_addr)
3861		seq_printf(m, " phys=%pa", &v->phys_addr);
3862
3863	if (v->flags & VM_IOREMAP)
3864		seq_puts(m, " ioremap");
3865
3866	if (v->flags & VM_ALLOC)
3867		seq_puts(m, " vmalloc");
3868
3869	if (v->flags & VM_MAP)
3870		seq_puts(m, " vmap");
3871
3872	if (v->flags & VM_USERMAP)
3873		seq_puts(m, " user");
3874
3875	if (v->flags & VM_DMA_COHERENT)
3876		seq_puts(m, " dma-coherent");
3877
3878	if (is_vmalloc_addr(v->pages))
3879		seq_puts(m, " vpages");
3880
3881	show_numa_info(m, v);
3882	seq_putc(m, '\n');
3883
3884	/*
3885	 * As a final step, dump "unpurged" areas.
3886	 */
3887	if (list_is_last(&va->list, &vmap_area_list))
3888		show_purge_info(m);
3889
3890	return 0;
3891}
3892
3893static const struct seq_operations vmalloc_op = {
3894	.start = s_start,
3895	.next = s_next,
3896	.stop = s_stop,
3897	.show = s_show,
3898};
3899
3900static int __init proc_vmalloc_init(void)
3901{
3902	if (IS_ENABLED(CONFIG_NUMA))
3903		proc_create_seq_private("vmallocinfo", 0400, NULL,
3904				&vmalloc_op,
3905				nr_node_ids * sizeof(unsigned int), NULL);
3906	else
3907		proc_create_seq("vmallocinfo", 0400, NULL, &vmalloc_op);
3908	return 0;
3909}
3910module_init(proc_vmalloc_init);
3911
3912#endif