// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME "memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

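/*
 * Map a sysfs string such as "online_kernel" to its MMOP_* online type.
 * Returns the MMOP_* value on success, -EINVAL if the string is unknown.
 */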
int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

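/*
 * A memory block groups sections_per_block contiguous sections. As an
 * illustration (the concrete numbers depend on the architecture and on
 * memory_block_size_bytes()): with 128 MiB sections and a 2 GiB block
 * size, sections_per_block is 16 and section 35 belongs to block
 * 35 / 16 = 2.
 */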
static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local xarray to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

static BLOCKING_NOTIFIER_HEAD(memory_chain);

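/*
 * Notifiers on this chain are invoked via memory_notify() around block
 * state transitions (MEM_GOING_ONLINE/MEM_ONLINE, MEM_GOING_OFFLINE/
 * MEM_OFFLINE, ...); a callback may veto a MEM_GOING_* transition by
 * returning NOTIFY_BAD.
 */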
int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/*
 * Show the first physical section index (number) of this memory block.
 */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long phys_index;

	phys_index = mem->start_section_nr / sections_per_block;

	return sysfs_emit(buf, "%08lx\n", phys_index);
}

/*
 * Legacy interface that we cannot remove: always indicate "removable"
 * when CONFIG_MEMORY_HOTREMOVE is enabled - a bad heuristic, kept only
 * for compatibility.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

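/*
 * Online the pages of a memory block: pick the target zone, initialize and
 * account the vmemmap pages (if the memmap of the block lives on the block
 * itself) and hand the remaining pages to online_pages().
 */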
static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	struct zone *zone;
	int ret;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, start_pfn, nr_pages);

	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at memory onlining/offlining
	 * stage helps to keep accounting easier to follow - e.g., vmemmap
	 * pages belong to the same zone as the memory they describe.
	 */
	if (nr_vmemmap_pages) {
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages, zone);
		if (ret)
			return ret;
	}

	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		return ret;
	}

	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(zone, nr_vmemmap_pages);

	return ret;
}

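/*
 * Offline the pages of a memory block: unaccount the vmemmap pages first,
 * offline the remaining pages via offline_pages(), and finally tear down
 * the vmemmap if the memmap of the block lives on the block itself.
 */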
static int memory_block_offline(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = mem->nr_vmemmap_pages;
	struct zone *zone;
	int ret;

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (nr_vmemmap_pages) {
		zone = page_zone(pfn_to_page(start_pfn));
		adjust_present_page_count(zone, -nr_vmemmap_pages);
	}

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
			adjust_present_page_count(zone, nr_vmemmap_pages);
		return ret;
	}

	if (nr_vmemmap_pages)
		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

	return ret;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;

	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: %ld\n",
		     __func__, mem->start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

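/*
 * Transition a block from from_state_req to to_state, passing through
 * MEM_GOING_OFFLINE when offlining so that concurrent readers of
 * mem->state (e.g., via the "state" attribute) see the intermediate state.
 */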
static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem, to_state);
	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

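/*
 * Handle writes to /sys/devices/system/memory/memoryX/state, e.g.:
 *
 *	echo online_movable > /sys/devices/system/memory/memory32/state
 *
 * ("memory32" is just an example). Accepted values are the strings in
 * online_type_to_str[].
 */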
static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}

/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * architectures never exposed anything other than 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

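/*
 * Show which zone the block's pages belong to (if online) or which zones
 * they could be onlined to (if offline). Reading an offline block's
 * "valid_zones" attribute yields, for example, "Normal Movable".
 */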
static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *default_zone;
	int len = 0;
	int nid;

	/*
	 * Check the existing zone. Make sure that we do that only on
	 * online nodes, otherwise page_zone() is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * A block that contains more than one zone cannot be
		 * offlined. This can happen e.g. for ZONE_DMA and ZONE_DMA32.
		 */
		default_zone = test_pages_in_a_zone(start_pfn,
						    start_pfn + nr_pages);
		if (!default_zone)
			return sysfs_emit(buf, "%s\n", "none");
		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
		goto out;
	}

	nid = mem->nid;
	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, start_pfn,
					  nr_pages);

	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
out:
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(valid_zones);
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
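/*
 * Writing a block-aligned physical start address hot-adds one memory
 * block, e.g.:
 *
 *	echo 0x100000000 > /sys/devices/system/memory/probe
 *
 * (the address here is only an example; it must be aligned to the memory
 * block size).
 */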
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}

static DEVICE_ATTR_WO(probe);
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

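/*
 * Both attributes below take a physical address (not a PFN), e.g.:
 *
 *	echo 0x200000 > /sys/devices/system/memory/soft_offline_page
 *
 * (the address here is only an example). The value is shifted right by
 * PAGE_SHIFT to obtain the PFN to act on.
 */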
/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, 0);
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
#endif

/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(struct mem_section *section)
{
	unsigned long block_id = memory_block_id(__section_nr(section));

	return find_memory_block_by_id(block_id);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static const struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

/*
 * register_memory - Setup a sysfs device for a memory block
 */
static
int register_memory(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret) {
		put_device(&memory->dev);
		device_unregister(&memory->dev);
	}
	return ret;
}

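/*
 * Allocate and register the memory block device for the given block id,
 * unless one already exists (-EEXIST). The NUMA node starts out as
 * NUMA_NO_NODE and is set once the block is linked under its node.
 */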
static int init_memory_block(unsigned long block_id, unsigned long state,
			     unsigned long nr_vmemmap_pages)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;
	mem->nr_vmemmap_pages = nr_vmemmap_pages;

	ret = register_memory(mem);

	return ret;
}

static int add_memory_block(unsigned long base_section_nr)
{
	int section_count = 0;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	return init_memory_block(memory_block_id(base_section_nr),
				 MEM_ONLINE, 0);
}

static void unregister_memory(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size,
				unsigned long vmemmap_pages)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = init_memory_block(block_id, MEM_OFFLINE, vmemmap_pages);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			unregister_memory(mem);
		}
	}
	return ret;
}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		unregister_memory_block_under_nodes(mem);
		unregister_memory(mem);
	}
}

/* Return true if the memory block is offline, otherwise false. */
bool is_memblock_offlined(struct memory_block *mem)
{
	return mem->state == MEM_OFFLINE;
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}

struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}