v3.1
   1/*
   2 *  linux/mm/memory_hotplug.c
   3 *
   4 *  Copyright (C)
   5 */
   6
   7#include <linux/stddef.h>
   8#include <linux/mm.h>
   9#include <linux/swap.h>
  10#include <linux/interrupt.h>
  11#include <linux/pagemap.h>
  12#include <linux/bootmem.h>
  13#include <linux/compiler.h>
  14#include <linux/module.h>
  15#include <linux/pagevec.h>
  16#include <linux/writeback.h>
  17#include <linux/slab.h>
  18#include <linux/sysctl.h>
  19#include <linux/cpu.h>
  20#include <linux/memory.h>
  21#include <linux/memory_hotplug.h>
  22#include <linux/highmem.h>
  23#include <linux/vmalloc.h>
  24#include <linux/ioport.h>
  25#include <linux/delay.h>
  26#include <linux/migrate.h>
  27#include <linux/page-isolation.h>
  28#include <linux/pfn.h>
  29#include <linux/suspend.h>
  30#include <linux/mm_inline.h>
  31#include <linux/firmware-map.h>
  32
  33#include <asm/tlbflush.h>
  34
  35#include "internal.h"
  36
   37/*
   38 * online_page_callback contains a pointer to the current page onlining
   39 * function. Initially it is generic_online_page(). If required, it can be
   40 * changed by calling set_online_page_callback() to register a callback
   41 * and restore_online_page_callback() to restore the generic callback.
   42 */
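/*
 * Editor's sketch (not part of this file): how a driver might take over
 * page onlining through this hook.  my_online_page() and my_claim_page()
 * are hypothetical names; the Xen balloon driver overrides onlining in a
 * similar way.
 *
 *	static void my_online_page(struct page *page)
 *	{
 *		__online_page_set_limits(page);
 *		my_claim_page(page);	(keep the page instead of freeing it)
 *	}
 *
 *	err = set_online_page_callback(&my_online_page);
 *	...
 *	restore_online_page_callback(&my_online_page);
 */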
  43
  44static void generic_online_page(struct page *page);
  45
  46static online_page_callback_t online_page_callback = generic_online_page;
  47
  48DEFINE_MUTEX(mem_hotplug_mutex);
  49
  50void lock_memory_hotplug(void)
  51{
  52	mutex_lock(&mem_hotplug_mutex);
  53
  54	/* for exclusive hibernation if CONFIG_HIBERNATION=y */
  55	lock_system_sleep();
  56}
  57
  58void unlock_memory_hotplug(void)
  59{
  60	unlock_system_sleep();
  61	mutex_unlock(&mem_hotplug_mutex);
  62}
  63
  64
  65/* add this memory to iomem resource */
  66static struct resource *register_memory_resource(u64 start, u64 size)
  67{
  68	struct resource *res;
  69	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
  70	BUG_ON(!res);
  71
  72	res->name = "System RAM";
  73	res->start = start;
  74	res->end = start + size - 1;
  75	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
  76	if (request_resource(&iomem_resource, res) < 0) {
  77		printk("System RAM resource %llx - %llx cannot be added\n",
  78		(unsigned long long)res->start, (unsigned long long)res->end);
  79		kfree(res);
  80		res = NULL;
  81	}
  82	return res;
  83}
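/*
 * Editor's note: a successfully registered range appears as a "System RAM"
 * entry under /proc/iomem; the addresses below are illustrative only:
 *
 *	100000000-17fffffff : System RAM
 */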
  84
  85static void release_memory_resource(struct resource *res)
  86{
  87	if (!res)
  88		return;
  89	release_resource(res);
  90	kfree(res);
  91	return;
  92}
  93
  94#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
  95#ifndef CONFIG_SPARSEMEM_VMEMMAP
  96static void get_page_bootmem(unsigned long info,  struct page *page,
  97			     unsigned long type)
  98{
  99	page->lru.next = (struct list_head *) type;
 100	SetPagePrivate(page);
 101	set_page_private(page, info);
 102	atomic_inc(&page->_count);
 103}
 104
 105/* reference to __meminit __free_pages_bootmem is valid
 106 * so use __ref to tell modpost not to generate a warning */
 107void __ref put_page_bootmem(struct page *page)
 108{
 109	unsigned long type;
 110
 111	type = (unsigned long) page->lru.next;
 112	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
 113	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
 114
 115	if (atomic_dec_return(&page->_count) == 1) {
 116		ClearPagePrivate(page);
 117		set_page_private(page, 0);
 118		INIT_LIST_HEAD(&page->lru);
 119		__free_pages_bootmem(page, 0);
 120	}
 121
 122}
 123
 124static void register_page_bootmem_info_section(unsigned long start_pfn)
 125{
 126	unsigned long *usemap, mapsize, section_nr, i;
 127	struct mem_section *ms;
 128	struct page *page, *memmap;
 129
 130	if (!pfn_valid(start_pfn))
 131		return;
 132
 133	section_nr = pfn_to_section_nr(start_pfn);
 134	ms = __nr_to_section(section_nr);
 135
 136	/* Get section's memmap address */
 137	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
 138
 139	/*
 140	 * Get page for the memmap's phys address
 141	 * XXX: need more consideration for sparse_vmemmap...
 142	 */
 143	page = virt_to_page(memmap);
 144	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
 145	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
 146
 147	/* remember memmap's page */
 148	for (i = 0; i < mapsize; i++, page++)
 149		get_page_bootmem(section_nr, page, SECTION_INFO);
 150
 151	usemap = __nr_to_section(section_nr)->pageblock_flags;
 152	page = virt_to_page(usemap);
 153
 154	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
 155
 156	for (i = 0; i < mapsize; i++, page++)
 157		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
 158
 159}
 160
 161void register_page_bootmem_info_node(struct pglist_data *pgdat)
 162{
 163	unsigned long i, pfn, end_pfn, nr_pages;
 164	int node = pgdat->node_id;
 165	struct page *page;
 166	struct zone *zone;
 167
 168	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
 169	page = virt_to_page(pgdat);
 170
 171	for (i = 0; i < nr_pages; i++, page++)
 172		get_page_bootmem(node, page, NODE_INFO);
 173
 174	zone = &pgdat->node_zones[0];
 175	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
 176		if (zone->wait_table) {
 177			nr_pages = zone->wait_table_hash_nr_entries
 178				* sizeof(wait_queue_head_t);
 179			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
 180			page = virt_to_page(zone->wait_table);
 181
 182			for (i = 0; i < nr_pages; i++, page++)
 183				get_page_bootmem(node, page, NODE_INFO);
 184		}
 185	}
 186
 187	pfn = pgdat->node_start_pfn;
 188	end_pfn = pfn + pgdat->node_spanned_pages;
 189
 190	/* register_section info */
 191	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
 192		register_page_bootmem_info_section(pfn);
 193
 194}
 195#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 196
 197static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
 198			   unsigned long end_pfn)
 199{
 200	unsigned long old_zone_end_pfn;
 201
 202	zone_span_writelock(zone);
 203
 204	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 205	if (start_pfn < zone->zone_start_pfn)
 206		zone->zone_start_pfn = start_pfn;
 207
 208	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
 209				zone->zone_start_pfn;
 210
 211	zone_span_writeunlock(zone);
 212}
 213
 214static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
 215			    unsigned long end_pfn)
 216{
 217	unsigned long old_pgdat_end_pfn =
 218		pgdat->node_start_pfn + pgdat->node_spanned_pages;
 219
 220	if (start_pfn < pgdat->node_start_pfn)
 221		pgdat->node_start_pfn = start_pfn;
 222
 223	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
 224					pgdat->node_start_pfn;
 225}
 226
 227static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 228{
 229	struct pglist_data *pgdat = zone->zone_pgdat;
 230	int nr_pages = PAGES_PER_SECTION;
 231	int nid = pgdat->node_id;
 232	int zone_type;
 233	unsigned long flags;
 234
 235	zone_type = zone - pgdat->node_zones;
 236	if (!zone->wait_table) {
 237		int ret;
 238
 239		ret = init_currently_empty_zone(zone, phys_start_pfn,
 240						nr_pages, MEMMAP_HOTPLUG);
 241		if (ret)
 242			return ret;
 243	}
 244	pgdat_resize_lock(zone->zone_pgdat, &flags);
 245	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
 246	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
 247			phys_start_pfn + nr_pages);
 248	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 249	memmap_init_zone(nr_pages, nid, zone_type,
 250			 phys_start_pfn, MEMMAP_HOTPLUG);
 251	return 0;
 252}
 253
 254static int __meminit __add_section(int nid, struct zone *zone,
 255					unsigned long phys_start_pfn)
 256{
 257	int nr_pages = PAGES_PER_SECTION;
 258	int ret;
 259
 260	if (pfn_valid(phys_start_pfn))
 261		return -EEXIST;
 262
 263	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 264
 265	if (ret < 0)
 266		return ret;
 267
 268	ret = __add_zone(zone, phys_start_pfn);
 269
 270	if (ret < 0)
 271		return ret;
 272
 273	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
 274}
 275
 276#ifdef CONFIG_SPARSEMEM_VMEMMAP
 277static int __remove_section(struct zone *zone, struct mem_section *ms)
 278{
 279	/*
  280	 * XXX: Freeing memmap with vmemmap is not implemented yet.
 281	 *      This should be removed later.
 282	 */
 283	return -EBUSY;
 284}
 285#else
 286static int __remove_section(struct zone *zone, struct mem_section *ms)
 287{
 288	unsigned long flags;
 289	struct pglist_data *pgdat = zone->zone_pgdat;
 290	int ret = -EINVAL;
 291
 292	if (!valid_section(ms))
 293		return ret;
 294
 295	ret = unregister_memory_section(ms);
 296	if (ret)
 297		return ret;
 298
 299	pgdat_resize_lock(pgdat, &flags);
 300	sparse_remove_one_section(zone, ms);
 301	pgdat_resize_unlock(pgdat, &flags);
 302	return 0;
 303}
 304#endif
 305
 306/*
 307 * Reasonably generic function for adding memory.  It is
 308 * expected that archs that support memory hotplug will
 309 * call this function after deciding the zone to which to
 310 * add the new pages.
 311 */
 312int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
 313			unsigned long nr_pages)
 314{
 315	unsigned long i;
 316	int err = 0;
 317	int start_sec, end_sec;
  318	/* while initializing the mem_map, align the hot-added range to sections */
 319	start_sec = pfn_to_section_nr(phys_start_pfn);
 320	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
 321
 322	for (i = start_sec; i <= end_sec; i++) {
 323		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
 324
 325		/*
  326		 * -EEXIST is finally dealt with by the ioresource collision
  327		 * check; see add_memory() => register_memory_resource().
  328		 * A warning is printed if there is a collision.
 329		 */
 330		if (err && (err != -EEXIST))
 331			break;
 332		err = 0;
 333	}
 334
 335	return err;
 336}
 337EXPORT_SYMBOL_GPL(__add_pages);
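/*
 * Editor's sketch (not part of this file): an architecture's
 * arch_add_memory() picks a zone and hands the new range to __add_pages().
 * Roughly what x86-64 did at the time, simplified; error handling and the
 * details of init_memory_mapping() are omitted:
 *
 *	int arch_add_memory(int nid, u64 start, u64 size)
 *	{
 *		struct pglist_data *pgdat = NODE_DATA(nid);
 *		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
 *		unsigned long start_pfn = start >> PAGE_SHIFT;
 *		unsigned long nr_pages = size >> PAGE_SHIFT;
 *
 *		init_memory_mapping(start, start + size);
 *		return __add_pages(nid, zone, start_pfn, nr_pages);
 *	}
 */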
 338
 339/**
 340 * __remove_pages() - remove sections of pages from a zone
 341 * @zone: zone from which pages need to be removed
 342 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 343 * @nr_pages: number of pages to remove (must be multiple of section size)
 344 *
 345 * Generic helper function to remove section mappings and sysfs entries
 346 * for the section of the memory we are removing. Caller needs to make
  347 * sure that pages are marked reserved and zones are adjusted properly by
 348 * calling offline_pages().
 349 */
 350int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 351		 unsigned long nr_pages)
 352{
 353	unsigned long i, ret = 0;
 354	int sections_to_remove;
 355
 356	/*
 357	 * We can only remove entire sections
 358	 */
 359	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
 360	BUG_ON(nr_pages % PAGES_PER_SECTION);
 361
 362	sections_to_remove = nr_pages / PAGES_PER_SECTION;
 363	for (i = 0; i < sections_to_remove; i++) {
 364		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
 365		release_mem_region(pfn << PAGE_SHIFT,
 366				   PAGES_PER_SECTION << PAGE_SHIFT);
 367		ret = __remove_section(zone, __pfn_to_section(pfn));
 368		if (ret)
 369			break;
 370	}
 371	return ret;
 372}
 373EXPORT_SYMBOL_GPL(__remove_pages);
 374
 375int set_online_page_callback(online_page_callback_t callback)
 376{
 377	int rc = -EINVAL;
 378
 379	lock_memory_hotplug();
 380
 381	if (online_page_callback == generic_online_page) {
 382		online_page_callback = callback;
 383		rc = 0;
 384	}
 385
 386	unlock_memory_hotplug();
 387
 388	return rc;
 389}
 390EXPORT_SYMBOL_GPL(set_online_page_callback);
 391
 392int restore_online_page_callback(online_page_callback_t callback)
 393{
 394	int rc = -EINVAL;
 395
 396	lock_memory_hotplug();
 397
 398	if (online_page_callback == callback) {
 399		online_page_callback = generic_online_page;
 400		rc = 0;
 401	}
 402
 403	unlock_memory_hotplug();
 404
 405	return rc;
 406}
 407EXPORT_SYMBOL_GPL(restore_online_page_callback);
 408
 409void __online_page_set_limits(struct page *page)
 410{
 411	unsigned long pfn = page_to_pfn(page);
 412
 413	if (pfn >= num_physpages)
 414		num_physpages = pfn + 1;
 415}
 416EXPORT_SYMBOL_GPL(__online_page_set_limits);
 417
 418void __online_page_increment_counters(struct page *page)
 419{
 420	totalram_pages++;
 421
 422#ifdef CONFIG_HIGHMEM
 423	if (PageHighMem(page))
 424		totalhigh_pages++;
 425#endif
 426}
 427EXPORT_SYMBOL_GPL(__online_page_increment_counters);
 428
 429void __online_page_free(struct page *page)
 430{
 431	ClearPageReserved(page);
 432	init_page_count(page);
 433	__free_page(page);
 434}
 435EXPORT_SYMBOL_GPL(__online_page_free);
 436
 437static void generic_online_page(struct page *page)
 438{
 439	__online_page_set_limits(page);
 440	__online_page_increment_counters(page);
 441	__online_page_free(page);
 442}
 443
 444static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 445			void *arg)
 446{
 447	unsigned long i;
 448	unsigned long onlined_pages = *(unsigned long *)arg;
 449	struct page *page;
 450	if (PageReserved(pfn_to_page(start_pfn)))
 451		for (i = 0; i < nr_pages; i++) {
 452			page = pfn_to_page(start_pfn + i);
 453			(*online_page_callback)(page);
 454			onlined_pages++;
 455		}
 456	*(unsigned long *)arg = onlined_pages;
 457	return 0;
 458}
 459
 460
 461int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
 462{
 463	unsigned long onlined_pages = 0;
 464	struct zone *zone;
 465	int need_zonelists_rebuild = 0;
 466	int nid;
 467	int ret;
 468	struct memory_notify arg;
 469
 470	lock_memory_hotplug();
 471	arg.start_pfn = pfn;
 472	arg.nr_pages = nr_pages;
 473	arg.status_change_nid = -1;
 474
 475	nid = page_to_nid(pfn_to_page(pfn));
 476	if (node_present_pages(nid) == 0)
 477		arg.status_change_nid = nid;
 478
 479	ret = memory_notify(MEM_GOING_ONLINE, &arg);
 480	ret = notifier_to_errno(ret);
 481	if (ret) {
 482		memory_notify(MEM_CANCEL_ONLINE, &arg);
 483		unlock_memory_hotplug();
 484		return ret;
 485	}
 486	/*
 487	 * This doesn't need a lock to do pfn_to_page().
 488	 * The section can't be removed here because of the
 489	 * memory_block->state_mutex.
 490	 */
 491	zone = page_zone(pfn_to_page(pfn));
 492	/*
  493	 * If this zone is not populated, then it is not in the zonelist,
  494	 * which means the page allocator ignores it. So the zonelist
  495	 * must be updated after the pages are onlined.
 496	 */
 497	mutex_lock(&zonelists_mutex);
 498	if (!populated_zone(zone))
 499		need_zonelists_rebuild = 1;
 500
 501	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
 502		online_pages_range);
 503	if (ret) {
 504		mutex_unlock(&zonelists_mutex);
 505		printk(KERN_DEBUG "online_pages %lx at %lx failed\n",
 506			nr_pages, pfn);
 507		memory_notify(MEM_CANCEL_ONLINE, &arg);
 508		unlock_memory_hotplug();
 509		return ret;
 510	}
 511
 512	zone->present_pages += onlined_pages;
 513	zone->zone_pgdat->node_present_pages += onlined_pages;
 514	if (need_zonelists_rebuild)
 515		build_all_zonelists(zone);
 516	else
 517		zone_pcp_update(zone);
 518
 519	mutex_unlock(&zonelists_mutex);
 520
 521	init_per_zone_wmark_min();
 522
 523	if (onlined_pages) {
 524		kswapd_run(zone_to_nid(zone));
 525		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
 526	}
 527
 528	vm_total_pages = nr_free_pagecache_pages();
 529
 530	writeback_set_ratelimit();
 531
 532	if (onlined_pages)
 533		memory_notify(MEM_ONLINE, &arg);
 534	unlock_memory_hotplug();
 535
 536	return 0;
 537}
 538#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 539
 540/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 541static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 542{
 543	struct pglist_data *pgdat;
 544	unsigned long zones_size[MAX_NR_ZONES] = {0};
 545	unsigned long zholes_size[MAX_NR_ZONES] = {0};
 546	unsigned long start_pfn = start >> PAGE_SHIFT;
 547
 548	pgdat = arch_alloc_nodedata(nid);
 549	if (!pgdat)
 550		return NULL;
 551
 552	arch_refresh_nodedata(nid, pgdat);
 553
 554	/* we can use NODE_DATA(nid) from here */
 555
  556	/* init the node's zones as empty zones; we don't have any present pages */
  557	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
  558
  559	/*
  560	 * The node we allocated has no zone fallback lists. To avoid
  561	 * accessing an uninitialized zonelist, build it here.
 562	 */
 563	mutex_lock(&zonelists_mutex);
 564	build_all_zonelists(NULL);
 565	mutex_unlock(&zonelists_mutex);
 566
 567	return pgdat;
 568}
 569
 570static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 571{
 572	arch_refresh_nodedata(nid, NULL);
 573	arch_free_nodedata(pgdat);
 574	return;
 575}
 576
 577
 578/*
 579 * called by cpu_up() to online a node without onlined memory.
 580 */
 581int mem_online_node(int nid)
 582{
 583	pg_data_t	*pgdat;
 584	int	ret;
 585
 586	lock_memory_hotplug();
 587	pgdat = hotadd_new_pgdat(nid, 0);
 588	if (!pgdat) {
 589		ret = -ENOMEM;
 590		goto out;
 591	}
 592	node_set_online(nid);
 593	ret = register_one_node(nid);
 594	BUG_ON(ret);
 595
 596out:
 597	unlock_memory_hotplug();
 598	return ret;
 599}
 600
 601/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 602int __ref add_memory(int nid, u64 start, u64 size)
 603{
 604	pg_data_t *pgdat = NULL;
 605	int new_pgdat = 0;
 606	struct resource *res;
 607	int ret;
 608
 609	lock_memory_hotplug();
 610
 611	res = register_memory_resource(start, size);
 612	ret = -EEXIST;
 613	if (!res)
 614		goto out;
 615
 616	if (!node_online(nid)) {
 617		pgdat = hotadd_new_pgdat(nid, start);
 618		ret = -ENOMEM;
 619		if (!pgdat)
 620			goto out;
 621		new_pgdat = 1;
 622	}
 623
 624	/* call arch's memory hotadd */
 625	ret = arch_add_memory(nid, start, size);
 626
 627	if (ret < 0)
 628		goto error;
 629
 630	/* we online node here. we can't roll back from here. */
 631	node_set_online(nid);
 632
 633	if (new_pgdat) {
 634		ret = register_one_node(nid);
 635		/*
  636		 * If the sysfs file of the new node can't be created, CPUs
  637		 * on the node can't be hot-added. There is no way to roll
  638		 * back now, so catch it with BUG_ON(), reluctantly.
 639		 */
 640		BUG_ON(ret);
 641	}
 642
 643	/* create new memmap entry */
 644	firmware_map_add_hotplug(start, start + size, "System RAM");
 645
 646	goto out;
 647
 648error:
 649	/* rollback pgdat allocation and others */
 650	if (new_pgdat)
 651		rollback_node_hotadd(nid, pgdat);
 652	if (res)
 653		release_memory_resource(res);
 654
 655out:
 656	unlock_memory_hotplug();
 657	return ret;
 658}
 659EXPORT_SYMBOL_GPL(add_memory);
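/*
 * Editor's sketch (not part of this file): a typical caller is the ACPI
 * memory hotplug driver, which resolves a node id for the newly reported
 * range and then calls add_memory(); -EEXIST is tolerated there because
 * firmware may have already registered the range.  Roughly:
 *
 *	node = memory_add_physaddr_to_nid(info->start_addr);
 *	result = add_memory(node, info->start_addr, info->length);
 *	if (result && result != -EEXIST)
 *		goto fail;	(hypothetical error label)
 */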
 660
 661#ifdef CONFIG_MEMORY_HOTREMOVE
 662/*
 663 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 664 * set and the size of the free page is given by page_order(). Using this,
 665 * the function determines if the pageblock contains only free pages.
  666 * Due to buddy constraints, a free page at least the size of a pageblock
  667 * will be located at the start of the pageblock.
 668 */
 669static inline int pageblock_free(struct page *page)
 670{
 671	return PageBuddy(page) && page_order(page) >= pageblock_order;
 672}
 673
 674/* Return the start of the next active pageblock after a given page */
 675static struct page *next_active_pageblock(struct page *page)
 676{
 677	/* Ensure the starting page is pageblock-aligned */
 678	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
 679
  680	/* If the entire pageblock is free, move to the end of the free page */
 681	if (pageblock_free(page)) {
 682		int order;
  683		/* be careful: we don't hold any locks, so page_order() can change */
 684		order = page_order(page);
 685		if ((order < MAX_ORDER) && (order >= pageblock_order))
 686			return page + (1 << order);
 687	}
 688
 689	return page + pageblock_nr_pages;
 690}
 691
 692/* Checks if this range of memory is likely to be hot-removable. */
 693int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 694{
 695	struct page *page = pfn_to_page(start_pfn);
 696	struct page *end_page = page + nr_pages;
 697
 698	/* Check the starting page of each pageblock within the range */
 699	for (; page < end_page; page = next_active_pageblock(page)) {
 700		if (!is_pageblock_removable_nolock(page))
 701			return 0;
 702		cond_resched();
 703	}
 704
 705	/* All pageblocks in the memory block are likely to be hot-removable */
 706	return 1;
 707}
 708
 709/*
  710 * Confirm that all pages in the range [start, end) belong to the same zone.
 711 */
 712static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
 713{
 714	unsigned long pfn;
 715	struct zone *zone = NULL;
 716	struct page *page;
 717	int i;
 718	for (pfn = start_pfn;
 719	     pfn < end_pfn;
 720	     pfn += MAX_ORDER_NR_PAGES) {
 721		i = 0;
 722		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
 723		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
 724			i++;
 725		if (i == MAX_ORDER_NR_PAGES)
 726			continue;
 727		page = pfn_to_page(pfn + i);
 728		if (zone && page_zone(page) != zone)
 729			return 0;
 730		zone = page_zone(page);
 731	}
 732	return 1;
 733}
 734
 735/*
  736 * Scanning pfns is much easier than scanning the LRU list.
  737 * Scan pfns from start to end and return the pfn of the first LRU page found.
 738 */
 739static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
 740{
 741	unsigned long pfn;
 742	struct page *page;
 743	for (pfn = start; pfn < end; pfn++) {
 744		if (pfn_valid(pfn)) {
 745			page = pfn_to_page(pfn);
 746			if (PageLRU(page))
 747				return pfn;
 748		}
 749	}
 750	return 0;
 751}
 752
 753static struct page *
 754hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
 755{
 756	/* This should be improooooved!! */
 757	return alloc_page(GFP_HIGHUSER_MOVABLE);
 758}
 759
 760#define NR_OFFLINE_AT_ONCE_PAGES	(256)
 761static int
 762do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 763{
 764	unsigned long pfn;
 765	struct page *page;
 766	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
 767	int not_managed = 0;
 768	int ret = 0;
 769	LIST_HEAD(source);
 770
 771	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
 772		if (!pfn_valid(pfn))
 773			continue;
 774		page = pfn_to_page(pfn);
 775		if (!get_page_unless_zero(page))
 776			continue;
 777		/*
 778		 * We can skip free pages. And we can only deal with pages on
 779		 * LRU.
 780		 */
 781		ret = isolate_lru_page(page);
 782		if (!ret) { /* Success */
 783			put_page(page);
 784			list_add_tail(&page->lru, &source);
 785			move_pages--;
 786			inc_zone_page_state(page, NR_ISOLATED_ANON +
 787					    page_is_file_cache(page));
 788
 789		} else {
 790#ifdef CONFIG_DEBUG_VM
 791			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
 792			       pfn);
 793			dump_page(page);
 794#endif
 795			put_page(page);
  796			/* Because we don't hold the big zone->lock, we should
  797			   check this again here. */
 798			if (page_count(page)) {
 799				not_managed++;
 800				ret = -EBUSY;
 801				break;
 802			}
 803		}
 804	}
 805	if (!list_empty(&source)) {
 806		if (not_managed) {
 807			putback_lru_pages(&source);
 808			goto out;
 809		}
 810		/* this function returns # of failed pages */
 811		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
 812								true, true);
 813		if (ret)
 814			putback_lru_pages(&source);
 815	}
 816out:
 817	return ret;
 818}
 819
 820/*
 821 * remove from free_area[] and mark all as Reserved.
 822 */
 823static int
 824offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
 825			void *data)
 826{
 827	__offline_isolated_pages(start, start + nr_pages);
 828	return 0;
 829}
 830
 831static void
 832offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 833{
 834	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
 835				offline_isolated_pages_cb);
 836}
 837
 838/*
  839 * Check that all pages in the range, recorded as a memory resource, are isolated.
 840 */
 841static int
 842check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
 843			void *data)
 844{
 845	int ret;
 846	long offlined = *(long *)data;
 847	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
 848	offlined = nr_pages;
 849	if (!ret)
 850		*(long *)data += offlined;
 851	return ret;
 852}
 853
 854static long
 855check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 856{
 857	long offlined = 0;
 858	int ret;
 859
 860	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
 861			check_pages_isolated_cb);
 862	if (ret < 0)
 863		offlined = (long)ret;
 864	return offlined;
 865}
 866
 867static int __ref offline_pages(unsigned long start_pfn,
 868		  unsigned long end_pfn, unsigned long timeout)
 869{
 870	unsigned long pfn, nr_pages, expire;
 871	long offlined_pages;
 872	int ret, drain, retry_max, node;
 873	struct zone *zone;
 874	struct memory_notify arg;
 875
 876	BUG_ON(start_pfn >= end_pfn);
 877	/* at least, alignment against pageblock is necessary */
 878	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
 879		return -EINVAL;
 880	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
 881		return -EINVAL;
  882	/* This makes hotplug much easier... and readable.
  883	   We assume this for now. */
 884	if (!test_pages_in_a_zone(start_pfn, end_pfn))
 885		return -EINVAL;
 886
 887	lock_memory_hotplug();
 888
 889	zone = page_zone(pfn_to_page(start_pfn));
 890	node = zone_to_nid(zone);
 891	nr_pages = end_pfn - start_pfn;
 892
 893	/* set above range as isolated */
 894	ret = start_isolate_page_range(start_pfn, end_pfn);
 895	if (ret)
 896		goto out;
 897
 898	arg.start_pfn = start_pfn;
 899	arg.nr_pages = nr_pages;
 900	arg.status_change_nid = -1;
 901	if (nr_pages >= node_present_pages(node))
 902		arg.status_change_nid = node;
 903
 904	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
 905	ret = notifier_to_errno(ret);
 906	if (ret)
 907		goto failed_removal;
 908
 909	pfn = start_pfn;
 910	expire = jiffies + timeout;
 911	drain = 0;
 912	retry_max = 5;
 913repeat:
 914	/* start memory hot removal */
 915	ret = -EAGAIN;
 916	if (time_after(jiffies, expire))
 917		goto failed_removal;
 918	ret = -EINTR;
 919	if (signal_pending(current))
 920		goto failed_removal;
 921	ret = 0;
 922	if (drain) {
 923		lru_add_drain_all();
 924		cond_resched();
 925		drain_all_pages();
 926	}
 927
 928	pfn = scan_lru_pages(start_pfn, end_pfn);
 929	if (pfn) { /* We have page on LRU */
 930		ret = do_migrate_range(pfn, end_pfn);
 931		if (!ret) {
 932			drain = 1;
 933			goto repeat;
 934		} else {
 935			if (ret < 0)
 936				if (--retry_max == 0)
 937					goto failed_removal;
 938			yield();
 939			drain = 1;
 940			goto repeat;
 941		}
 942	}
  943	/* drain all zones' lru pagevecs; this is asynchronous... */
  944	lru_add_drain_all();
  945	yield();
  946	/* drain pcp pages; this is synchronous */
 947	drain_all_pages();
 948	/* check again */
 949	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 950	if (offlined_pages < 0) {
 951		ret = -EBUSY;
 952		goto failed_removal;
 953	}
 954	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
  955	/* OK, all of our target pages are isolated.
  956	   We cannot roll back at this point. */
 957	offline_isolated_pages(start_pfn, end_pfn);
  958	/* reset pageblock flags, making the migrate type MOVABLE again */
 959	undo_isolate_page_range(start_pfn, end_pfn);
 960	/* removal success */
 961	zone->present_pages -= offlined_pages;
 962	zone->zone_pgdat->node_present_pages -= offlined_pages;
 963	totalram_pages -= offlined_pages;
 964
 965	init_per_zone_wmark_min();
 966
 967	if (!node_present_pages(node)) {
 968		node_clear_state(node, N_HIGH_MEMORY);
 969		kswapd_stop(node);
 970	}
 971
 972	vm_total_pages = nr_free_pagecache_pages();
 973	writeback_set_ratelimit();
 974
 975	memory_notify(MEM_OFFLINE, &arg);
 976	unlock_memory_hotplug();
 977	return 0;
 978
 979failed_removal:
 980	printk(KERN_INFO "memory offlining %lx to %lx failed\n",
 981		start_pfn, end_pfn);
 982	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 983	/* pushback to free area */
 984	undo_isolate_page_range(start_pfn, end_pfn);
 985
 986out:
 987	unlock_memory_hotplug();
 988	return ret;
 989}
 990
 991int remove_memory(u64 start, u64 size)
 992{
 993	unsigned long start_pfn, end_pfn;
 994
 995	start_pfn = PFN_DOWN(start);
 996	end_pfn = start_pfn + PFN_DOWN(size);
 997	return offline_pages(start_pfn, end_pfn, 120 * HZ);
 998}
 999#else
1000int remove_memory(u64 start, u64 size)
1001{
1002	return -EINVAL;
1003}
1004#endif /* CONFIG_MEMORY_HOTREMOVE */
1005EXPORT_SYMBOL_GPL(remove_memory);
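Editor's note: in both versions shown here these entry points are normally exercised through the memory sysfs interface: writing "online" or "offline" to /sys/devices/system/memory/memoryN/state ends up in online_pages() above or, with CONFIG_MEMORY_HOTREMOVE enabled, in remove_memory()/offline_pages().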
v3.5.6
   1/*
   2 *  linux/mm/memory_hotplug.c
   3 *
   4 *  Copyright (C)
   5 */
   6
   7#include <linux/stddef.h>
   8#include <linux/mm.h>
   9#include <linux/swap.h>
  10#include <linux/interrupt.h>
  11#include <linux/pagemap.h>
  12#include <linux/bootmem.h>
  13#include <linux/compiler.h>
  14#include <linux/export.h>
  15#include <linux/pagevec.h>
  16#include <linux/writeback.h>
  17#include <linux/slab.h>
  18#include <linux/sysctl.h>
  19#include <linux/cpu.h>
  20#include <linux/memory.h>
  21#include <linux/memory_hotplug.h>
  22#include <linux/highmem.h>
  23#include <linux/vmalloc.h>
  24#include <linux/ioport.h>
  25#include <linux/delay.h>
  26#include <linux/migrate.h>
  27#include <linux/page-isolation.h>
  28#include <linux/pfn.h>
  29#include <linux/suspend.h>
  30#include <linux/mm_inline.h>
  31#include <linux/firmware-map.h>
  32
  33#include <asm/tlbflush.h>
  34
  35#include "internal.h"
  36
   37/*
   38 * online_page_callback contains a pointer to the current page onlining
   39 * function. Initially it is generic_online_page(). If required, it can be
   40 * changed by calling set_online_page_callback() to register a callback
   41 * and restore_online_page_callback() to restore the generic callback.
   42 */
  43
  44static void generic_online_page(struct page *page);
  45
  46static online_page_callback_t online_page_callback = generic_online_page;
  47
  48DEFINE_MUTEX(mem_hotplug_mutex);
  49
  50void lock_memory_hotplug(void)
  51{
  52	mutex_lock(&mem_hotplug_mutex);
  53
  54	/* for exclusive hibernation if CONFIG_HIBERNATION=y */
  55	lock_system_sleep();
  56}
  57
  58void unlock_memory_hotplug(void)
  59{
  60	unlock_system_sleep();
  61	mutex_unlock(&mem_hotplug_mutex);
  62}
  63
  64
  65/* add this memory to iomem resource */
  66static struct resource *register_memory_resource(u64 start, u64 size)
  67{
  68	struct resource *res;
  69	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
  70	BUG_ON(!res);
  71
  72	res->name = "System RAM";
  73	res->start = start;
  74	res->end = start + size - 1;
  75	res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
  76	if (request_resource(&iomem_resource, res) < 0) {
  77		printk("System RAM resource %pR cannot be added\n", res);
  78		kfree(res);
  79		res = NULL;
  80	}
  81	return res;
  82}
  83
  84static void release_memory_resource(struct resource *res)
  85{
  86	if (!res)
  87		return;
  88	release_resource(res);
  89	kfree(res);
  90	return;
  91}
  92
  93#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
  94#ifndef CONFIG_SPARSEMEM_VMEMMAP
  95static void get_page_bootmem(unsigned long info,  struct page *page,
  96			     unsigned long type)
  97{
  98	page->lru.next = (struct list_head *) type;
  99	SetPagePrivate(page);
 100	set_page_private(page, info);
 101	atomic_inc(&page->_count);
 102}
 103
 104/* reference to __meminit __free_pages_bootmem is valid
 105 * so use __ref to tell modpost not to generate a warning */
 106void __ref put_page_bootmem(struct page *page)
 107{
 108	unsigned long type;
 109
 110	type = (unsigned long) page->lru.next;
 111	BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
 112	       type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);
 113
 114	if (atomic_dec_return(&page->_count) == 1) {
 115		ClearPagePrivate(page);
 116		set_page_private(page, 0);
 117		INIT_LIST_HEAD(&page->lru);
 118		__free_pages_bootmem(page, 0);
 119	}
 120
 121}
 122
 123static void register_page_bootmem_info_section(unsigned long start_pfn)
 124{
 125	unsigned long *usemap, mapsize, section_nr, i;
 126	struct mem_section *ms;
 127	struct page *page, *memmap;
 128
 129	section_nr = pfn_to_section_nr(start_pfn);
 130	ms = __nr_to_section(section_nr);
 131
 132	/* Get section's memmap address */
 133	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
 134
 135	/*
 136	 * Get page for the memmap's phys address
 137	 * XXX: need more consideration for sparse_vmemmap...
 138	 */
 139	page = virt_to_page(memmap);
 140	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
 141	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
 142
 143	/* remember memmap's page */
 144	for (i = 0; i < mapsize; i++, page++)
 145		get_page_bootmem(section_nr, page, SECTION_INFO);
 146
 147	usemap = __nr_to_section(section_nr)->pageblock_flags;
 148	page = virt_to_page(usemap);
 149
 150	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
 151
 152	for (i = 0; i < mapsize; i++, page++)
 153		get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
 154
 155}
 156
 157void register_page_bootmem_info_node(struct pglist_data *pgdat)
 158{
 159	unsigned long i, pfn, end_pfn, nr_pages;
 160	int node = pgdat->node_id;
 161	struct page *page;
 162	struct zone *zone;
 163
 164	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
 165	page = virt_to_page(pgdat);
 166
 167	for (i = 0; i < nr_pages; i++, page++)
 168		get_page_bootmem(node, page, NODE_INFO);
 169
 170	zone = &pgdat->node_zones[0];
 171	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
 172		if (zone->wait_table) {
 173			nr_pages = zone->wait_table_hash_nr_entries
 174				* sizeof(wait_queue_head_t);
 175			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
 176			page = virt_to_page(zone->wait_table);
 177
 178			for (i = 0; i < nr_pages; i++, page++)
 179				get_page_bootmem(node, page, NODE_INFO);
 180		}
 181	}
 182
 183	pfn = pgdat->node_start_pfn;
 184	end_pfn = pfn + pgdat->node_spanned_pages;
 185
 186	/* register_section info */
 187	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
 188		/*
 189		 * Some platforms can assign the same pfn to multiple nodes - on
 190		 * node0 as well as nodeN.  To avoid registering a pfn against
 191		 * multiple nodes we check that this pfn does not already
 192		 * reside in some other node.
 193		 */
 194		if (pfn_valid(pfn) && (pfn_to_nid(pfn) == node))
 195			register_page_bootmem_info_section(pfn);
 196	}
 197}
 198#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
 199
 200static void grow_zone_span(struct zone *zone, unsigned long start_pfn,
 201			   unsigned long end_pfn)
 202{
 203	unsigned long old_zone_end_pfn;
 204
 205	zone_span_writelock(zone);
 206
 207	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 208	if (start_pfn < zone->zone_start_pfn)
 209		zone->zone_start_pfn = start_pfn;
 210
 211	zone->spanned_pages = max(old_zone_end_pfn, end_pfn) -
 212				zone->zone_start_pfn;
 213
 214	zone_span_writeunlock(zone);
 215}
 216
 217static void grow_pgdat_span(struct pglist_data *pgdat, unsigned long start_pfn,
 218			    unsigned long end_pfn)
 219{
 220	unsigned long old_pgdat_end_pfn =
 221		pgdat->node_start_pfn + pgdat->node_spanned_pages;
 222
 223	if (start_pfn < pgdat->node_start_pfn)
 224		pgdat->node_start_pfn = start_pfn;
 225
 226	pgdat->node_spanned_pages = max(old_pgdat_end_pfn, end_pfn) -
 227					pgdat->node_start_pfn;
 228}
 229
 230static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 231{
 232	struct pglist_data *pgdat = zone->zone_pgdat;
 233	int nr_pages = PAGES_PER_SECTION;
 234	int nid = pgdat->node_id;
 235	int zone_type;
 236	unsigned long flags;
 237
 238	zone_type = zone - pgdat->node_zones;
 239	if (!zone->wait_table) {
 240		int ret;
 241
 242		ret = init_currently_empty_zone(zone, phys_start_pfn,
 243						nr_pages, MEMMAP_HOTPLUG);
 244		if (ret)
 245			return ret;
 246	}
 247	pgdat_resize_lock(zone->zone_pgdat, &flags);
 248	grow_zone_span(zone, phys_start_pfn, phys_start_pfn + nr_pages);
 249	grow_pgdat_span(zone->zone_pgdat, phys_start_pfn,
 250			phys_start_pfn + nr_pages);
 251	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 252	memmap_init_zone(nr_pages, nid, zone_type,
 253			 phys_start_pfn, MEMMAP_HOTPLUG);
 254	return 0;
 255}
 256
 257static int __meminit __add_section(int nid, struct zone *zone,
 258					unsigned long phys_start_pfn)
 259{
 260	int nr_pages = PAGES_PER_SECTION;
 261	int ret;
 262
 263	if (pfn_valid(phys_start_pfn))
 264		return -EEXIST;
 265
 266	ret = sparse_add_one_section(zone, phys_start_pfn, nr_pages);
 267
 268	if (ret < 0)
 269		return ret;
 270
 271	ret = __add_zone(zone, phys_start_pfn);
 272
 273	if (ret < 0)
 274		return ret;
 275
 276	return register_new_memory(nid, __pfn_to_section(phys_start_pfn));
 277}
 278
 279#ifdef CONFIG_SPARSEMEM_VMEMMAP
 280static int __remove_section(struct zone *zone, struct mem_section *ms)
 281{
 282	/*
  283	 * XXX: Freeing memmap with vmemmap is not implemented yet.
 284	 *      This should be removed later.
 285	 */
 286	return -EBUSY;
 287}
 288#else
 289static int __remove_section(struct zone *zone, struct mem_section *ms)
 290{
 291	unsigned long flags;
 292	struct pglist_data *pgdat = zone->zone_pgdat;
 293	int ret = -EINVAL;
 294
 295	if (!valid_section(ms))
 296		return ret;
 297
 298	ret = unregister_memory_section(ms);
 299	if (ret)
 300		return ret;
 301
 302	pgdat_resize_lock(pgdat, &flags);
 303	sparse_remove_one_section(zone, ms);
 304	pgdat_resize_unlock(pgdat, &flags);
 305	return 0;
 306}
 307#endif
 308
 309/*
 310 * Reasonably generic function for adding memory.  It is
 311 * expected that archs that support memory hotplug will
 312 * call this function after deciding the zone to which to
 313 * add the new pages.
 314 */
 315int __ref __add_pages(int nid, struct zone *zone, unsigned long phys_start_pfn,
 316			unsigned long nr_pages)
 317{
 318	unsigned long i;
 319	int err = 0;
 320	int start_sec, end_sec;
  321	/* while initializing the mem_map, align the hot-added range to sections */
 322	start_sec = pfn_to_section_nr(phys_start_pfn);
 323	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
 324
 325	for (i = start_sec; i <= end_sec; i++) {
 326		err = __add_section(nid, zone, i << PFN_SECTION_SHIFT);
 327
 328		/*
  329		 * -EEXIST is finally dealt with by the ioresource collision
  330		 * check; see add_memory() => register_memory_resource().
  331		 * A warning is printed if there is a collision.
 332		 */
 333		if (err && (err != -EEXIST))
 334			break;
 335		err = 0;
 336	}
 337
 338	return err;
 339}
 340EXPORT_SYMBOL_GPL(__add_pages);
 341
 342/**
 343 * __remove_pages() - remove sections of pages from a zone
 344 * @zone: zone from which pages need to be removed
 345 * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
 346 * @nr_pages: number of pages to remove (must be multiple of section size)
 347 *
 348 * Generic helper function to remove section mappings and sysfs entries
 349 * for the section of the memory we are removing. Caller needs to make
  350 * sure that pages are marked reserved and zones are adjusted properly by
 351 * calling offline_pages().
 352 */
 353int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
 354		 unsigned long nr_pages)
 355{
 356	unsigned long i, ret = 0;
 357	int sections_to_remove;
 358
 359	/*
 360	 * We can only remove entire sections
 361	 */
 362	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
 363	BUG_ON(nr_pages % PAGES_PER_SECTION);
 364
 365	sections_to_remove = nr_pages / PAGES_PER_SECTION;
 366	for (i = 0; i < sections_to_remove; i++) {
 367		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
 368		release_mem_region(pfn << PAGE_SHIFT,
 369				   PAGES_PER_SECTION << PAGE_SHIFT);
 370		ret = __remove_section(zone, __pfn_to_section(pfn));
 371		if (ret)
 372			break;
 373	}
 374	return ret;
 375}
 376EXPORT_SYMBOL_GPL(__remove_pages);
 377
 378int set_online_page_callback(online_page_callback_t callback)
 379{
 380	int rc = -EINVAL;
 381
 382	lock_memory_hotplug();
 383
 384	if (online_page_callback == generic_online_page) {
 385		online_page_callback = callback;
 386		rc = 0;
 387	}
 388
 389	unlock_memory_hotplug();
 390
 391	return rc;
 392}
 393EXPORT_SYMBOL_GPL(set_online_page_callback);
 394
 395int restore_online_page_callback(online_page_callback_t callback)
 396{
 397	int rc = -EINVAL;
 398
 399	lock_memory_hotplug();
 400
 401	if (online_page_callback == callback) {
 402		online_page_callback = generic_online_page;
 403		rc = 0;
 404	}
 405
 406	unlock_memory_hotplug();
 407
 408	return rc;
 409}
 410EXPORT_SYMBOL_GPL(restore_online_page_callback);
 411
 412void __online_page_set_limits(struct page *page)
 413{
 414	unsigned long pfn = page_to_pfn(page);
 415
 416	if (pfn >= num_physpages)
 417		num_physpages = pfn + 1;
 418}
 419EXPORT_SYMBOL_GPL(__online_page_set_limits);
 420
 421void __online_page_increment_counters(struct page *page)
 422{
 423	totalram_pages++;
 424
 425#ifdef CONFIG_HIGHMEM
 426	if (PageHighMem(page))
 427		totalhigh_pages++;
 428#endif
 429}
 430EXPORT_SYMBOL_GPL(__online_page_increment_counters);
 431
 432void __online_page_free(struct page *page)
 433{
 434	ClearPageReserved(page);
 435	init_page_count(page);
 436	__free_page(page);
 437}
 438EXPORT_SYMBOL_GPL(__online_page_free);
 439
 440static void generic_online_page(struct page *page)
 441{
 442	__online_page_set_limits(page);
 443	__online_page_increment_counters(page);
 444	__online_page_free(page);
 445}
 446
 447static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 448			void *arg)
 449{
 450	unsigned long i;
 451	unsigned long onlined_pages = *(unsigned long *)arg;
 452	struct page *page;
 453	if (PageReserved(pfn_to_page(start_pfn)))
 454		for (i = 0; i < nr_pages; i++) {
 455			page = pfn_to_page(start_pfn + i);
 456			(*online_page_callback)(page);
 457			onlined_pages++;
 458		}
 459	*(unsigned long *)arg = onlined_pages;
 460	return 0;
 461}
 462
 463
 464int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
 465{
 466	unsigned long onlined_pages = 0;
 467	struct zone *zone;
 468	int need_zonelists_rebuild = 0;
 469	int nid;
 470	int ret;
 471	struct memory_notify arg;
 472
 473	lock_memory_hotplug();
 474	arg.start_pfn = pfn;
 475	arg.nr_pages = nr_pages;
 476	arg.status_change_nid = -1;
 477
 478	nid = page_to_nid(pfn_to_page(pfn));
 479	if (node_present_pages(nid) == 0)
 480		arg.status_change_nid = nid;
 481
 482	ret = memory_notify(MEM_GOING_ONLINE, &arg);
 483	ret = notifier_to_errno(ret);
 484	if (ret) {
 485		memory_notify(MEM_CANCEL_ONLINE, &arg);
 486		unlock_memory_hotplug();
 487		return ret;
 488	}
 489	/*
 490	 * This doesn't need a lock to do pfn_to_page().
 491	 * The section can't be removed here because of the
 492	 * memory_block->state_mutex.
 493	 */
 494	zone = page_zone(pfn_to_page(pfn));
 495	/*
  496	 * If this zone is not populated, then it is not in the zonelist,
  497	 * which means the page allocator ignores it. So the zonelist
  498	 * must be updated after the pages are onlined.
 499	 */
 500	mutex_lock(&zonelists_mutex);
 501	if (!populated_zone(zone))
 502		need_zonelists_rebuild = 1;
 503
 504	ret = walk_system_ram_range(pfn, nr_pages, &onlined_pages,
 505		online_pages_range);
 506	if (ret) {
 507		mutex_unlock(&zonelists_mutex);
 508		printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
 509		       (unsigned long long) pfn << PAGE_SHIFT,
 510		       (((unsigned long long) pfn + nr_pages)
 511			    << PAGE_SHIFT) - 1);
 512		memory_notify(MEM_CANCEL_ONLINE, &arg);
 513		unlock_memory_hotplug();
 514		return ret;
 515	}
 516
 517	zone->present_pages += onlined_pages;
 518	zone->zone_pgdat->node_present_pages += onlined_pages;
 519	if (need_zonelists_rebuild)
 520		build_all_zonelists(zone);
 521	else
 522		zone_pcp_update(zone);
 523
 524	mutex_unlock(&zonelists_mutex);
 525
 526	init_per_zone_wmark_min();
 527
 528	if (onlined_pages) {
 529		kswapd_run(zone_to_nid(zone));
 530		node_set_state(zone_to_nid(zone), N_HIGH_MEMORY);
 531	}
 532
 533	vm_total_pages = nr_free_pagecache_pages();
 534
 535	writeback_set_ratelimit();
 536
 537	if (onlined_pages)
 538		memory_notify(MEM_ONLINE, &arg);
 539	unlock_memory_hotplug();
 540
 541	return 0;
 542}
 543#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 544
 545/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 546static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 547{
 548	struct pglist_data *pgdat;
 549	unsigned long zones_size[MAX_NR_ZONES] = {0};
 550	unsigned long zholes_size[MAX_NR_ZONES] = {0};
 551	unsigned long start_pfn = start >> PAGE_SHIFT;
 552
 553	pgdat = arch_alloc_nodedata(nid);
 554	if (!pgdat)
 555		return NULL;
 556
 557	arch_refresh_nodedata(nid, pgdat);
 558
 559	/* we can use NODE_DATA(nid) from here */
 560
  561	/* init the node's zones as empty zones; we don't have any present pages */
  562	free_area_init_node(nid, zones_size, start_pfn, zholes_size);
  563
  564	/*
  565	 * The node we allocated has no zone fallback lists. To avoid
  566	 * accessing an uninitialized zonelist, build it here.
 567	 */
 568	mutex_lock(&zonelists_mutex);
 569	build_all_zonelists(NULL);
 570	mutex_unlock(&zonelists_mutex);
 571
 572	return pgdat;
 573}
 574
 575static void rollback_node_hotadd(int nid, pg_data_t *pgdat)
 576{
 577	arch_refresh_nodedata(nid, NULL);
 578	arch_free_nodedata(pgdat);
 579	return;
 580}
 581
 582
 583/*
 584 * called by cpu_up() to online a node without onlined memory.
 585 */
 586int mem_online_node(int nid)
 587{
 588	pg_data_t	*pgdat;
 589	int	ret;
 590
 591	lock_memory_hotplug();
 592	pgdat = hotadd_new_pgdat(nid, 0);
 593	if (!pgdat) {
 594		ret = -ENOMEM;
 595		goto out;
 596	}
 597	node_set_online(nid);
 598	ret = register_one_node(nid);
 599	BUG_ON(ret);
 600
 601out:
 602	unlock_memory_hotplug();
 603	return ret;
 604}
 605
 606/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
 607int __ref add_memory(int nid, u64 start, u64 size)
 608{
 609	pg_data_t *pgdat = NULL;
 610	int new_pgdat = 0;
 611	struct resource *res;
 612	int ret;
 613
 614	lock_memory_hotplug();
 615
 616	res = register_memory_resource(start, size);
 617	ret = -EEXIST;
 618	if (!res)
 619		goto out;
 620
 621	if (!node_online(nid)) {
 622		pgdat = hotadd_new_pgdat(nid, start);
 623		ret = -ENOMEM;
 624		if (!pgdat)
 625			goto error;
 626		new_pgdat = 1;
 627	}
 628
 629	/* call arch's memory hotadd */
 630	ret = arch_add_memory(nid, start, size);
 631
 632	if (ret < 0)
 633		goto error;
 634
 635	/* we online node here. we can't roll back from here. */
 636	node_set_online(nid);
 637
 638	if (new_pgdat) {
 639		ret = register_one_node(nid);
 640		/*
  641		 * If the sysfs file of the new node can't be created, CPUs
  642		 * on the node can't be hot-added. There is no way to roll
  643		 * back now, so catch it with BUG_ON(), reluctantly.
 644		 */
 645		BUG_ON(ret);
 646	}
 647
 648	/* create new memmap entry */
 649	firmware_map_add_hotplug(start, start + size, "System RAM");
 650
 651	goto out;
 652
 653error:
 654	/* rollback pgdat allocation and others */
 655	if (new_pgdat)
 656		rollback_node_hotadd(nid, pgdat);
 657	if (res)
 658		release_memory_resource(res);
 659
 660out:
 661	unlock_memory_hotplug();
 662	return ret;
 663}
 664EXPORT_SYMBOL_GPL(add_memory);
 665
 666#ifdef CONFIG_MEMORY_HOTREMOVE
 667/*
 668 * A free page on the buddy free lists (not the per-cpu lists) has PageBuddy
 669 * set and the size of the free page is given by page_order(). Using this,
 670 * the function determines if the pageblock contains only free pages.
  671 * Due to buddy constraints, a free page at least the size of a pageblock
  672 * will be located at the start of the pageblock.
 673 */
 674static inline int pageblock_free(struct page *page)
 675{
 676	return PageBuddy(page) && page_order(page) >= pageblock_order;
 677}
 678
 679/* Return the start of the next active pageblock after a given page */
 680static struct page *next_active_pageblock(struct page *page)
 681{
 682	/* Ensure the starting page is pageblock-aligned */
 683	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
 684
  685	/* If the entire pageblock is free, move to the end of the free page */
 686	if (pageblock_free(page)) {
 687		int order;
  688		/* be careful: we don't hold any locks, so page_order() can change */
 689		order = page_order(page);
 690		if ((order < MAX_ORDER) && (order >= pageblock_order))
 691			return page + (1 << order);
 692	}
 693
 694	return page + pageblock_nr_pages;
 695}
 696
 697/* Checks if this range of memory is likely to be hot-removable. */
 698int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 699{
 700	struct page *page = pfn_to_page(start_pfn);
 701	struct page *end_page = page + nr_pages;
 702
 703	/* Check the starting page of each pageblock within the range */
 704	for (; page < end_page; page = next_active_pageblock(page)) {
 705		if (!is_pageblock_removable_nolock(page))
 706			return 0;
 707		cond_resched();
 708	}
 709
 710	/* All pageblocks in the memory block are likely to be hot-removable */
 711	return 1;
 712}
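/*
 * Editor's note: this check backs the per-block "removable" attribute
 * exposed by the memory sysfs code
 * (/sys/devices/system/memory/memoryN/removable).  The result is only a
 * hint; nothing prevents the situation from changing right after the
 * check returns.
 */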
 713
 714/*
  715 * Confirm that all pages in the range [start, end) belong to the same zone.
 716 */
 717static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn)
 718{
 719	unsigned long pfn;
 720	struct zone *zone = NULL;
 721	struct page *page;
 722	int i;
 723	for (pfn = start_pfn;
 724	     pfn < end_pfn;
 725	     pfn += MAX_ORDER_NR_PAGES) {
 726		i = 0;
 727		/* This is just a CONFIG_HOLES_IN_ZONE check.*/
 728		while ((i < MAX_ORDER_NR_PAGES) && !pfn_valid_within(pfn + i))
 729			i++;
 730		if (i == MAX_ORDER_NR_PAGES)
 731			continue;
 732		page = pfn_to_page(pfn + i);
 733		if (zone && page_zone(page) != zone)
 734			return 0;
 735		zone = page_zone(page);
 736	}
 737	return 1;
 738}
 739
 740/*
  741 * Scanning pfns is much easier than scanning the LRU list.
  742 * Scan pfns from start to end and return the pfn of the first LRU page found.
 743 */
 744static unsigned long scan_lru_pages(unsigned long start, unsigned long end)
 745{
 746	unsigned long pfn;
 747	struct page *page;
 748	for (pfn = start; pfn < end; pfn++) {
 749		if (pfn_valid(pfn)) {
 750			page = pfn_to_page(pfn);
 751			if (PageLRU(page))
 752				return pfn;
 753		}
 754	}
 755	return 0;
 756}
 757
 758static struct page *
 759hotremove_migrate_alloc(struct page *page, unsigned long private, int **x)
 760{
 761	/* This should be improooooved!! */
 762	return alloc_page(GFP_HIGHUSER_MOVABLE);
 763}
 764
 765#define NR_OFFLINE_AT_ONCE_PAGES	(256)
 766static int
 767do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 768{
 769	unsigned long pfn;
 770	struct page *page;
 771	int move_pages = NR_OFFLINE_AT_ONCE_PAGES;
 772	int not_managed = 0;
 773	int ret = 0;
 774	LIST_HEAD(source);
 775
 776	for (pfn = start_pfn; pfn < end_pfn && move_pages > 0; pfn++) {
 777		if (!pfn_valid(pfn))
 778			continue;
 779		page = pfn_to_page(pfn);
 780		if (!get_page_unless_zero(page))
 781			continue;
 782		/*
 783		 * We can skip free pages. And we can only deal with pages on
 784		 * LRU.
 785		 */
 786		ret = isolate_lru_page(page);
 787		if (!ret) { /* Success */
 788			put_page(page);
 789			list_add_tail(&page->lru, &source);
 790			move_pages--;
 791			inc_zone_page_state(page, NR_ISOLATED_ANON +
 792					    page_is_file_cache(page));
 793
 794		} else {
 795#ifdef CONFIG_DEBUG_VM
 796			printk(KERN_ALERT "removing pfn %lx from LRU failed\n",
 797			       pfn);
 798			dump_page(page);
 799#endif
 800			put_page(page);
  801			/* Because we don't hold the big zone->lock, we should
  802			   check this again here. */
 803			if (page_count(page)) {
 804				not_managed++;
 805				ret = -EBUSY;
 806				break;
 807			}
 808		}
 809	}
 810	if (!list_empty(&source)) {
 811		if (not_managed) {
 812			putback_lru_pages(&source);
 813			goto out;
 814		}
 815		/* this function returns # of failed pages */
 816		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
 817							true, MIGRATE_SYNC);
 818		if (ret)
 819			putback_lru_pages(&source);
 820	}
 821out:
 822	return ret;
 823}
 824
 825/*
 826 * remove from free_area[] and mark all as Reserved.
 827 */
 828static int
 829offline_isolated_pages_cb(unsigned long start, unsigned long nr_pages,
 830			void *data)
 831{
 832	__offline_isolated_pages(start, start + nr_pages);
 833	return 0;
 834}
 835
 836static void
 837offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 838{
 839	walk_system_ram_range(start_pfn, end_pfn - start_pfn, NULL,
 840				offline_isolated_pages_cb);
 841}
 842
 843/*
  844 * Check that all pages in the range, recorded as a memory resource, are isolated.
 845 */
 846static int
 847check_pages_isolated_cb(unsigned long start_pfn, unsigned long nr_pages,
 848			void *data)
 849{
 850	int ret;
 851	long offlined = *(long *)data;
 852	ret = test_pages_isolated(start_pfn, start_pfn + nr_pages);
 853	offlined = nr_pages;
 854	if (!ret)
 855		*(long *)data += offlined;
 856	return ret;
 857}
 858
 859static long
 860check_pages_isolated(unsigned long start_pfn, unsigned long end_pfn)
 861{
 862	long offlined = 0;
 863	int ret;
 864
 865	ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn, &offlined,
 866			check_pages_isolated_cb);
 867	if (ret < 0)
 868		offlined = (long)ret;
 869	return offlined;
 870}
 871
 872static int __ref offline_pages(unsigned long start_pfn,
 873		  unsigned long end_pfn, unsigned long timeout)
 874{
 875	unsigned long pfn, nr_pages, expire;
 876	long offlined_pages;
 877	int ret, drain, retry_max, node;
 878	struct zone *zone;
 879	struct memory_notify arg;
 880
 881	BUG_ON(start_pfn >= end_pfn);
 882	/* at least, alignment against pageblock is necessary */
 883	if (!IS_ALIGNED(start_pfn, pageblock_nr_pages))
 884		return -EINVAL;
 885	if (!IS_ALIGNED(end_pfn, pageblock_nr_pages))
 886		return -EINVAL;
  887	/* This makes hotplug much easier... and readable.
  888	   We assume this for now. */
 889	if (!test_pages_in_a_zone(start_pfn, end_pfn))
 890		return -EINVAL;
 891
 892	lock_memory_hotplug();
 893
 894	zone = page_zone(pfn_to_page(start_pfn));
 895	node = zone_to_nid(zone);
 896	nr_pages = end_pfn - start_pfn;
 897
 898	/* set above range as isolated */
 899	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 900	if (ret)
 901		goto out;
 902
 903	arg.start_pfn = start_pfn;
 904	arg.nr_pages = nr_pages;
 905	arg.status_change_nid = -1;
 906	if (nr_pages >= node_present_pages(node))
 907		arg.status_change_nid = node;
 908
 909	ret = memory_notify(MEM_GOING_OFFLINE, &arg);
 910	ret = notifier_to_errno(ret);
 911	if (ret)
 912		goto failed_removal;
 913
 914	pfn = start_pfn;
 915	expire = jiffies + timeout;
 916	drain = 0;
 917	retry_max = 5;
 918repeat:
 919	/* start memory hot removal */
 920	ret = -EAGAIN;
 921	if (time_after(jiffies, expire))
 922		goto failed_removal;
 923	ret = -EINTR;
 924	if (signal_pending(current))
 925		goto failed_removal;
 926	ret = 0;
 927	if (drain) {
 928		lru_add_drain_all();
 929		cond_resched();
 930		drain_all_pages();
 931	}
 932
 933	pfn = scan_lru_pages(start_pfn, end_pfn);
 934	if (pfn) { /* We have page on LRU */
 935		ret = do_migrate_range(pfn, end_pfn);
 936		if (!ret) {
 937			drain = 1;
 938			goto repeat;
 939		} else {
 940			if (ret < 0)
 941				if (--retry_max == 0)
 942					goto failed_removal;
 943			yield();
 944			drain = 1;
 945			goto repeat;
 946		}
 947	}
  948	/* drain all zones' lru pagevecs; this is asynchronous... */
  949	lru_add_drain_all();
  950	yield();
  951	/* drain pcp pages; this is synchronous */
 952	drain_all_pages();
 953	/* check again */
 954	offlined_pages = check_pages_isolated(start_pfn, end_pfn);
 955	if (offlined_pages < 0) {
 956		ret = -EBUSY;
 957		goto failed_removal;
 958	}
 959	printk(KERN_INFO "Offlined Pages %ld\n", offlined_pages);
  960	/* OK, all of our target pages are isolated.
  961	   We cannot roll back at this point. */
 962	offline_isolated_pages(start_pfn, end_pfn);
  963	/* reset pageblock flags, making the migrate type MOVABLE again */
 964	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 965	/* removal success */
 966	zone->present_pages -= offlined_pages;
 967	zone->zone_pgdat->node_present_pages -= offlined_pages;
 968	totalram_pages -= offlined_pages;
 969
 970	init_per_zone_wmark_min();
 971
 972	if (!node_present_pages(node)) {
 973		node_clear_state(node, N_HIGH_MEMORY);
 974		kswapd_stop(node);
 975	}
 976
 977	vm_total_pages = nr_free_pagecache_pages();
 978	writeback_set_ratelimit();
 979
 980	memory_notify(MEM_OFFLINE, &arg);
 981	unlock_memory_hotplug();
 982	return 0;
 983
 984failed_removal:
 985	printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
 986	       (unsigned long long) start_pfn << PAGE_SHIFT,
 987	       ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
 988	memory_notify(MEM_CANCEL_OFFLINE, &arg);
 989	/* pushback to free area */
 990	undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
 991
 992out:
 993	unlock_memory_hotplug();
 994	return ret;
 995}
 996
 997int remove_memory(u64 start, u64 size)
 998{
 999	unsigned long start_pfn, end_pfn;
1000
1001	start_pfn = PFN_DOWN(start);
1002	end_pfn = start_pfn + PFN_DOWN(size);
1003	return offline_pages(start_pfn, end_pfn, 120 * HZ);
1004}
1005#else
1006int remove_memory(u64 start, u64 size)
1007{
1008	return -EINVAL;
1009}
1010#endif /* CONFIG_MEMORY_HOTREMOVE */
1011EXPORT_SYMBOL_GPL(remove_memory);