// SPDX-License-Identifier: GPL-2.0
/*
 * Memory subsystem support
 *
 * Written by Matt Tolentino <matthew.e.tolentino@intel.com>
 *            Dave Hansen <haveblue@us.ibm.com>
 *
 * This file provides the necessary infrastructure to represent
 * a SPARSEMEM-memory-model system's physical memory in /sysfs.
 * All arch-independent code that assumes MEMORY_HOTPLUG requires
 * SPARSEMEM should be contained here, or in mm/memory_hotplug.c.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/topology.h>
#include <linux/capability.h>
#include <linux/device.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include <linux/atomic.h>
#include <linux/uaccess.h>

#define MEMORY_CLASS_NAME	"memory"

static const char *const online_type_to_str[] = {
	[MMOP_OFFLINE] = "offline",
	[MMOP_ONLINE] = "online",
	[MMOP_ONLINE_KERNEL] = "online_kernel",
	[MMOP_ONLINE_MOVABLE] = "online_movable",
};

int mhp_online_type_from_str(const char *str)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(online_type_to_str); i++) {
		if (sysfs_streq(str, online_type_to_str[i]))
			return i;
	}
	return -EINVAL;
}
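
/*
 * For example, mhp_online_type_from_str("online_kernel") returns
 * MMOP_ONLINE_KERNEL, while an unrecognized string such as "standby"
 * yields -EINVAL. sysfs_streq() tolerates the trailing newline that
 * writes from userspace typically carry.
 */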

#define to_memory_block(dev) container_of(dev, struct memory_block, dev)

static int sections_per_block;

static inline unsigned long memory_block_id(unsigned long section_nr)
{
	return section_nr / sections_per_block;
}

static inline unsigned long pfn_to_block_id(unsigned long pfn)
{
	return memory_block_id(pfn_to_section_nr(pfn));
}

static inline unsigned long phys_to_block_id(unsigned long phys)
{
	return pfn_to_block_id(PFN_DOWN(phys));
}
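
/*
 * Worked example (illustrative, assuming x86-64 defaults of 4 KiB pages,
 * 128 MiB sections and a 128 MiB memory block size): physical address
 * 0x100000000 (4 GiB) has PFN 0x100000; pfn_to_section_nr() shifts by
 * 27 - 12 = 15 bits, giving section 32; with sections_per_block == 1
 * that is memory block id 32, i.e. /sys/devices/system/memory/memory32.
 * Large x86-64 machines may use a 2 GiB block size instead, in which
 * case sections_per_block == 16.
 */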

static int memory_subsys_online(struct device *dev);
static int memory_subsys_offline(struct device *dev);

static const struct bus_type memory_subsys = {
	.name = MEMORY_CLASS_NAME,
	.dev_name = MEMORY_CLASS_NAME,
	.online = memory_subsys_online,
	.offline = memory_subsys_offline,
};

/*
 * Memory blocks are cached in a local xarray to avoid
 * a costly linear search for the corresponding device on
 * the subsystem bus.
 */
static DEFINE_XARRAY(memory_blocks);

/*
 * Memory groups, indexed by memory group id (mgid).
 */
static DEFINE_XARRAY_FLAGS(memory_groups, XA_FLAGS_ALLOC);
#define MEMORY_GROUP_MARK_DYNAMIC	XA_MARK_1

static BLOCKING_NOTIFIER_HEAD(memory_chain);

int register_memory_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&memory_chain, nb);
}
EXPORT_SYMBOL(register_memory_notifier);

void unregister_memory_notifier(struct notifier_block *nb)
{
	blocking_notifier_chain_unregister(&memory_chain, nb);
}
EXPORT_SYMBOL(unregister_memory_notifier);
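
/*
 * Example (sketch only; the "foo" names are hypothetical): a driver that
 * wants to react to memory hotplug events can register a callback on
 * this chain. The callback receives a struct memory_notify describing
 * the affected PFN range:
 *
 *	static int foo_memory_callback(struct notifier_block *nb,
 *				       unsigned long action, void *data)
 *	{
 *		struct memory_notify *mn = data;
 *
 *		switch (action) {
 *		case MEM_GOING_OFFLINE:
 *			pr_info("foo: pfns [%lx, %lx) going offline\n",
 *				mn->start_pfn, mn->start_pfn + mn->nr_pages);
 *			break;
 *		case MEM_ONLINE:
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_memory_nb = {
 *		.notifier_call = foo_memory_callback,
 *	};
 *
 *	register_memory_notifier(&foo_memory_nb);
 *
 * Returning NOTIFY_BAD from MEM_GOING_OFFLINE vetoes the offline
 * operation.
 */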

static void memory_block_release(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	/* Verify that the altmap is freed */
	WARN_ON(mem->altmap);
	kfree(mem);
}

unsigned long __weak memory_block_size_bytes(void)
{
	return MIN_MEMORY_BLOCK_SIZE;
}
EXPORT_SYMBOL_GPL(memory_block_size_bytes);

/* Show the memory block ID, relative to the memory block size */
static ssize_t phys_index_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);

	return sysfs_emit(buf, "%08lx\n", memory_block_id(mem->start_section_nr));
}

/*
 * Legacy interface that we cannot remove. Always indicate "removable"
 * with CONFIG_MEMORY_HOTREMOVE - bad heuristic.
 */
static ssize_t removable_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	return sysfs_emit(buf, "%d\n", (int)IS_ENABLED(CONFIG_MEMORY_HOTREMOVE));
}

/*
 * online, offline, going offline, etc.
 */
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	const char *output;

	/*
	 * We can probably put these states in a nice little array
	 * so that they're not open-coded
	 */
	switch (mem->state) {
	case MEM_ONLINE:
		output = "online";
		break;
	case MEM_OFFLINE:
		output = "offline";
		break;
	case MEM_GOING_OFFLINE:
		output = "going-offline";
		break;
	default:
		WARN_ON(1);
		return sysfs_emit(buf, "ERROR-UNKNOWN-%ld\n", mem->state);
	}

	return sysfs_emit(buf, "%s\n", output);
}

int memory_notify(unsigned long val, void *v)
{
	return blocking_notifier_call_chain(&memory_chain, val, v);
}

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
static unsigned long memblk_nr_poison(struct memory_block *mem);
#else
static inline unsigned long memblk_nr_poison(struct memory_block *mem)
{
	return 0;
}
#endif

/*
 * Must acquire mem_hotplug_lock in write mode.
 */
static int memory_block_online(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = 0;
	struct memory_notify arg;
	struct zone *zone;
	int ret;

	if (memblk_nr_poison(mem))
		return -EHWPOISON;

	zone = zone_for_pfn_range(mem->online_type, mem->nid, mem->group,
				  start_pfn, nr_pages);

	/*
	 * Although vmemmap pages have a different lifecycle than the pages
	 * they describe (they remain until the memory is unplugged), doing
	 * their initialization and accounting at memory onlining/offlining
	 * stage helps to keep accounting easier to follow - e.g., vmemmap
	 * pages belong to the same zone as the memory they describe.
	 */
	if (mem->altmap)
		nr_vmemmap_pages = mem->altmap->free;

	arg.altmap_start_pfn = start_pfn;
	arg.altmap_nr_pages = nr_vmemmap_pages;
	arg.start_pfn = start_pfn + nr_vmemmap_pages;
	arg.nr_pages = nr_pages - nr_vmemmap_pages;
	mem_hotplug_begin();
	ret = memory_notify(MEM_PREPARE_ONLINE, &arg);
	ret = notifier_to_errno(ret);
	if (ret)
		goto out_notifier;

	if (nr_vmemmap_pages) {
		ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,
						zone, mem->altmap->inaccessible);
		if (ret)
			goto out;
	}

	ret = online_pages(start_pfn + nr_vmemmap_pages,
			   nr_pages - nr_vmemmap_pages, zone, mem->group);
	if (ret) {
		if (nr_vmemmap_pages)
			mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);
		goto out;
	}

	/*
	 * Account once onlining succeeded. If the zone was unpopulated, it is
	 * now already properly populated.
	 */
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  nr_vmemmap_pages);

	mem->zone = zone;
	mem_hotplug_done();
	return ret;
out:
	memory_notify(MEM_FINISH_OFFLINE, &arg);
out_notifier:
	mem_hotplug_done();
	return ret;
}
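
/*
 * Worked example (illustrative numbers, assuming 4 KiB pages and a
 * 64-byte struct page): onlining a 2 GiB memory block that was added
 * with memmap_on_memory covers 524288 pages, whose memmap needs 32 MiB,
 * i.e. 8192 pages taken from the block itself via the altmap.
 * online_pages() therefore only onlines the remaining 516096 pages, and
 * adjust_present_page_count() afterwards accounts the 8192 vmemmap
 * pages to the same zone.
 */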

/*
 * Must acquire mem_hotplug_lock in write mode.
 */
static int memory_block_offline(struct memory_block *mem)
{
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	unsigned long nr_vmemmap_pages = 0;
	struct memory_notify arg;
	int ret;

	if (!mem->zone)
		return -EINVAL;

	/*
	 * Unaccount before offlining, such that unpopulated zone and kthreads
	 * can properly be torn down in offline_pages().
	 */
	if (mem->altmap)
		nr_vmemmap_pages = mem->altmap->free;

	mem_hotplug_begin();
	if (nr_vmemmap_pages)
		adjust_present_page_count(pfn_to_page(start_pfn), mem->group,
					  -nr_vmemmap_pages);

	ret = offline_pages(start_pfn + nr_vmemmap_pages,
			    nr_pages - nr_vmemmap_pages, mem->zone, mem->group);
	if (ret) {
		/* offline_pages() failed. Account back. */
		if (nr_vmemmap_pages)
			adjust_present_page_count(pfn_to_page(start_pfn),
						  mem->group, nr_vmemmap_pages);
		goto out;
	}

	if (nr_vmemmap_pages)
		mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);

	mem->zone = NULL;
	arg.altmap_start_pfn = start_pfn;
	arg.altmap_nr_pages = nr_vmemmap_pages;
	arg.start_pfn = start_pfn + nr_vmemmap_pages;
	arg.nr_pages = nr_pages - nr_vmemmap_pages;
	memory_notify(MEM_FINISH_OFFLINE, &arg);
out:
	mem_hotplug_done();
	return ret;
}

/*
 * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
 * OK to have direct references to sparsemem variables in here.
 */
static int
memory_block_action(struct memory_block *mem, unsigned long action)
{
	int ret;

	switch (action) {
	case MEM_ONLINE:
		ret = memory_block_online(mem);
		break;
	case MEM_OFFLINE:
		ret = memory_block_offline(mem);
		break;
	default:
		WARN(1, KERN_WARNING "%s(%ld, %ld) unknown action: "
		     "%ld\n", __func__, mem->start_section_nr, action, action);
		ret = -EINVAL;
	}

	return ret;
}

static int memory_block_change_state(struct memory_block *mem,
		unsigned long to_state, unsigned long from_state_req)
{
	int ret = 0;

	if (mem->state != from_state_req)
		return -EINVAL;

	if (to_state == MEM_OFFLINE)
		mem->state = MEM_GOING_OFFLINE;

	ret = memory_block_action(mem, to_state);
	mem->state = ret ? from_state_req : to_state;

	return ret;
}

/* The device lock serializes operations on memory_subsys_[online|offline] */
static int memory_subsys_online(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (mem->state == MEM_ONLINE)
		return 0;

	/*
	 * When called via device_online() without configuring the online_type,
	 * we want to default to MMOP_ONLINE.
	 */
	if (mem->online_type == MMOP_OFFLINE)
		mem->online_type = MMOP_ONLINE;

	ret = memory_block_change_state(mem, MEM_ONLINE, MEM_OFFLINE);
	mem->online_type = MMOP_OFFLINE;

	return ret;
}

static int memory_subsys_offline(struct device *dev)
{
	struct memory_block *mem = to_memory_block(dev);

	if (mem->state == MEM_OFFLINE)
		return 0;

	return memory_block_change_state(mem, MEM_OFFLINE, MEM_ONLINE);
}

static ssize_t state_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);
	struct memory_block *mem = to_memory_block(dev);
	int ret;

	if (online_type < 0)
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	switch (online_type) {
	case MMOP_ONLINE_KERNEL:
	case MMOP_ONLINE_MOVABLE:
	case MMOP_ONLINE:
		/* mem->online_type is protected by device_hotplug_lock */
		mem->online_type = online_type;
		ret = device_online(&mem->dev);
		break;
	case MMOP_OFFLINE:
		ret = device_offline(&mem->dev);
		break;
	default:
		ret = -EINVAL; /* should never happen */
	}

	unlock_device_hotplug();

	if (ret < 0)
		return ret;
	if (ret)
		return -EINVAL;

	return count;
}
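
/*
 * Usage example (administrator interface; block id 32 is illustrative):
 *
 *	# echo online_movable > /sys/devices/system/memory/memory32/state
 *	# echo offline > /sys/devices/system/memory/memory32/state
 *
 * Writing "online" lets the kernel pick the zone via
 * zone_for_pfn_range(), while "online_kernel"/"online_movable" request
 * a kernel zone/ZONE_MOVABLE respectively, provided the block can be
 * onlined there.
 */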

/*
 * Legacy interface that we cannot remove: s390x exposes the storage increment
 * covered by a memory block, allowing for identifying which memory blocks
 * comprise a storage increment. Since a memory block spans complete
 * storage increments nowadays, this interface is basically unused. Other
 * archs never exposed != 0.
 */
static ssize_t phys_device_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);

	return sysfs_emit(buf, "%d\n",
			  arch_get_memory_phys_device(start_pfn));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int print_allowed_zone(char *buf, int len, int nid,
			      struct memory_group *group,
			      unsigned long start_pfn, unsigned long nr_pages,
			      int online_type, struct zone *default_zone)
{
	struct zone *zone;

	zone = zone_for_pfn_range(online_type, nid, group, start_pfn, nr_pages);
	if (zone == default_zone)
		return 0;

	return sysfs_emit_at(buf, len, " %s", zone->name);
}

static ssize_t valid_zones_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct memory_block *mem = to_memory_block(dev);
	unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct memory_group *group = mem->group;
	struct zone *default_zone;
	int nid = mem->nid;
	int len = 0;

	/*
	 * Check the existing zone. Make sure that we do that only on the
	 * online nodes, otherwise the page_zone is not reliable.
	 */
	if (mem->state == MEM_ONLINE) {
		/*
		 * If !mem->zone, the memory block spans multiple zones and
		 * cannot get offlined.
		 */
		default_zone = mem->zone;
		if (!default_zone)
			return sysfs_emit(buf, "%s\n", "none");
		len += sysfs_emit_at(buf, len, "%s", default_zone->name);
		goto out;
	}

	default_zone = zone_for_pfn_range(MMOP_ONLINE, nid, group,
					  start_pfn, nr_pages);

	len += sysfs_emit_at(buf, len, "%s", default_zone->name);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_KERNEL, default_zone);
	len += print_allowed_zone(buf, len, nid, group, start_pfn, nr_pages,
				  MMOP_ONLINE_MOVABLE, default_zone);
out:
	len += sysfs_emit_at(buf, len, "\n");
	return len;
}
static DEVICE_ATTR_RO(valid_zones);
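
/*
 * For instance, reading valid_zones for an offline block typically
 * prints the default zone first followed by the other candidates, e.g.
 * "Normal Movable", while an online block prints only the zone it is
 * in, or "none" if it spans multiple zones and thus cannot be offlined.
 */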
#endif

static DEVICE_ATTR_RO(phys_index);
static DEVICE_ATTR_RW(state);
static DEVICE_ATTR_RO(phys_device);
static DEVICE_ATTR_RO(removable);

/*
 * Show the memory block size (shared by all memory blocks).
 */
static ssize_t block_size_bytes_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%lx\n", memory_block_size_bytes());
}

static DEVICE_ATTR_RO(block_size_bytes);

/*
 * Memory auto online policy.
 */

static ssize_t auto_online_blocks_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  online_type_to_str[mhp_default_online_type]);
}

static ssize_t auto_online_blocks_store(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t count)
{
	const int online_type = mhp_online_type_from_str(buf);

	if (online_type < 0)
		return -EINVAL;

	mhp_default_online_type = online_type;
	return count;
}

static DEVICE_ATTR_RW(auto_online_blocks);

#ifdef CONFIG_CRASH_HOTPLUG
#include <linux/kexec.h>
static ssize_t crash_hotplug_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%d\n", crash_check_hotplug_support());
}
static DEVICE_ATTR_RO(crash_hotplug);
#endif

/*
 * Some architectures will have custom drivers to do this, and
 * will not need to do it from userspace. The fake hot-add code
 * as well as ppc64 will do all of their discovery in userspace
 * and will require this interface.
 */
#ifdef CONFIG_ARCH_MEMORY_PROBE
static ssize_t probe_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	u64 phys_addr;
	int nid, ret;
	unsigned long pages_per_block = PAGES_PER_SECTION * sections_per_block;

	ret = kstrtoull(buf, 0, &phys_addr);
	if (ret)
		return ret;

	if (phys_addr & ((pages_per_block << PAGE_SHIFT) - 1))
		return -EINVAL;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	nid = memory_add_physaddr_to_nid(phys_addr);
	ret = __add_memory(nid, phys_addr,
			   MIN_MEMORY_BLOCK_SIZE * sections_per_block,
			   MHP_NONE);

	if (ret)
		goto out;

	ret = count;
out:
	unlock_device_hotplug();
	return ret;
}
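
/*
 * Usage example (the address must be aligned to the memory block size;
 * 0x140000000 is illustrative):
 *
 *	# echo 0x140000000 > /sys/devices/system/memory/probe
 *
 * This hot-adds one memory block starting at the given physical
 * address.
 */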

static DEVICE_ATTR_WO(probe);
#endif

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Support for offlining pages of memory
 */

/* Soft offline a page */
static ssize_t soft_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

/* Forcibly offline a page, including killing processes. */
static ssize_t hard_offline_page_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	int ret;
	u64 pfn;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	ret = memory_failure(pfn, MF_SW_SIMULATED);
	if (ret == -EOPNOTSUPP)
		ret = 0;
	return ret ? ret : count;
}

static DEVICE_ATTR_WO(soft_offline_page);
static DEVICE_ATTR_WO(hard_offline_page);
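
/*
 * Note for users of the two stores above: despite the variable name,
 * the value written is a physical address, which is converted to a PFN
 * by the PAGE_SHIFT right shift. E.g. (illustrative address):
 *
 *	# echo 0x1234000 > /sys/devices/system/memory/hard_offline_page
 *
 * poisons the page containing physical address 0x1234000.
 */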
#endif

/* See phys_device_show(). */
int __weak arch_get_memory_phys_device(unsigned long start_pfn)
{
	return 0;
}

/*
 * A reference for the returned memory block device is acquired.
 *
 * Called under device_hotplug_lock.
 */
static struct memory_block *find_memory_block_by_id(unsigned long block_id)
{
	struct memory_block *mem;

	mem = xa_load(&memory_blocks, block_id);
	if (mem)
		get_device(&mem->dev);
	return mem;
}

/*
 * Called under device_hotplug_lock.
 */
struct memory_block *find_memory_block(unsigned long section_nr)
{
	unsigned long block_id = memory_block_id(section_nr);

	return find_memory_block_by_id(block_id);
}

static struct attribute *memory_memblk_attrs[] = {
	&dev_attr_phys_index.attr,
	&dev_attr_state.attr,
	&dev_attr_phys_device.attr,
	&dev_attr_removable.attr,
#ifdef CONFIG_MEMORY_HOTREMOVE
	&dev_attr_valid_zones.attr,
#endif
	NULL
};

static const struct attribute_group memory_memblk_attr_group = {
	.attrs = memory_memblk_attrs,
};

static const struct attribute_group *memory_memblk_attr_groups[] = {
	&memory_memblk_attr_group,
	NULL,
};

static int __add_memory_block(struct memory_block *memory)
{
	int ret;

	memory->dev.bus = &memory_subsys;
	memory->dev.id = memory->start_section_nr / sections_per_block;
	memory->dev.release = memory_block_release;
	memory->dev.groups = memory_memblk_attr_groups;
	memory->dev.offline = memory->state == MEM_OFFLINE;

	ret = device_register(&memory->dev);
	if (ret) {
		put_device(&memory->dev);
		return ret;
	}
	ret = xa_err(xa_store(&memory_blocks, memory->dev.id, memory,
			      GFP_KERNEL));
	if (ret)
		device_unregister(&memory->dev);

	return ret;
}

static struct zone *early_node_zone_for_memory_block(struct memory_block *mem,
						     int nid)
{
	const unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);
	const unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
	struct zone *zone, *matching_zone = NULL;
	pg_data_t *pgdat = NODE_DATA(nid);
	int i;

	/*
	 * This logic only works for early memory, when the applicable zones
	 * already span the memory block. We don't expect overlapping zones on
	 * a single node for early memory. So if we're told that some PFNs
	 * of a node fall into this memory block, we can assume that all node
	 * zones that intersect with the memory block are actually applicable.
	 * No need to look at the memmap.
	 */
	for (i = 0; i < MAX_NR_ZONES; i++) {
		zone = pgdat->node_zones + i;
		if (!populated_zone(zone))
			continue;
		if (!zone_intersects(zone, start_pfn, nr_pages))
			continue;
		if (!matching_zone) {
			matching_zone = zone;
			continue;
		}
		/* Spans multiple zones ... */
		matching_zone = NULL;
		break;
	}
	return matching_zone;
}

#ifdef CONFIG_NUMA
/**
 * memory_block_add_nid() - Indicate that system RAM falling into this memory
 *			    block device (partially) belongs to the given node.
 * @mem: The memory block device.
 * @nid: The node id.
 * @context: The memory initialization context.
 *
 * Indicate that system RAM falling into this memory block (partially) belongs
 * to the given node. If the context indicates ("early") that we are adding the
 * node during node device subsystem initialization, this will also properly
 * set/adjust mem->zone based on the zone ranges of the given node.
 */
void memory_block_add_nid(struct memory_block *mem, int nid,
			  enum meminit_context context)
{
	if (context == MEMINIT_EARLY && mem->nid != nid) {
		/*
		 * For early memory we have to determine the zone when setting
		 * the node id and handle multiple nodes spanning a single
		 * memory block by indicating via zone == NULL that we're not
		 * dealing with a single zone. So if we're setting the node id
		 * the first time, determine if there is a single zone. If we're
		 * setting the node id a second time to a different node,
		 * invalidate the single detected zone.
		 */
		if (mem->nid == NUMA_NO_NODE)
			mem->zone = early_node_zone_for_memory_block(mem, nid);
		else
			mem->zone = NULL;
	}

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node. If we span multiple nodes (not applicable
	 * to hotplugged memory), zone == NULL will prohibit memory offlining
	 * and consequently unplug.
	 */
	mem->nid = nid;
}
#endif

static int add_memory_block(unsigned long block_id, unsigned long state,
			    struct vmem_altmap *altmap,
			    struct memory_group *group)
{
	struct memory_block *mem;
	int ret = 0;

	mem = find_memory_block_by_id(block_id);
	if (mem) {
		put_device(&mem->dev);
		return -EEXIST;
	}
	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->start_section_nr = block_id * sections_per_block;
	mem->state = state;
	mem->nid = NUMA_NO_NODE;
	mem->altmap = altmap;
	INIT_LIST_HEAD(&mem->group_next);

#ifndef CONFIG_NUMA
	if (state == MEM_ONLINE)
		/*
		 * MEM_ONLINE at this point implies early memory. With NUMA,
		 * we'll determine the zone when setting the node id via
		 * memory_block_add_nid(). Memory hotplug updates the zone
		 * manually when memory onlining/offlining succeeds.
		 */
		mem->zone = early_node_zone_for_memory_block(mem, NUMA_NO_NODE);
#endif /* CONFIG_NUMA */

	ret = __add_memory_block(mem);
	if (ret)
		return ret;

	if (group) {
		mem->group = group;
		list_add(&mem->group_next, &group->memory_blocks);
	}

	return 0;
}

static int __init add_boot_memory_block(unsigned long base_section_nr)
{
	int section_count = 0;
	unsigned long nr;

	for (nr = base_section_nr; nr < base_section_nr + sections_per_block;
	     nr++)
		if (present_section_nr(nr))
			section_count++;

	if (section_count == 0)
		return 0;
	return add_memory_block(memory_block_id(base_section_nr),
				MEM_ONLINE, NULL, NULL);
}

static int add_hotplug_memory_block(unsigned long block_id,
				    struct vmem_altmap *altmap,
				    struct memory_group *group)
{
	return add_memory_block(block_id, MEM_OFFLINE, altmap, group);
}

static void remove_memory_block(struct memory_block *memory)
{
	if (WARN_ON_ONCE(memory->dev.bus != &memory_subsys))
		return;

	WARN_ON(xa_erase(&memory_blocks, memory->dev.id) == NULL);

	if (memory->group) {
		list_del(&memory->group_next);
		memory->group = NULL;
	}

	/* drop the ref. we got via find_memory_block() */
	put_device(&memory->dev);
	device_unregister(&memory->dev);
}

/*
 * Create memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * will be initialized as offline.
 *
 * Called under device_hotplug_lock.
 */
int create_memory_block_devices(unsigned long start, unsigned long size,
				struct vmem_altmap *altmap,
				struct memory_group *group)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return -EINVAL;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		ret = add_hotplug_memory_block(block_id, altmap, group);
		if (ret)
			break;
	}
	if (ret) {
		end_block_id = block_id;
		for (block_id = start_block_id; block_id != end_block_id;
		     block_id++) {
			mem = find_memory_block_by_id(block_id);
			if (WARN_ON_ONCE(!mem))
				continue;
			remove_memory_block(mem);
		}
	}
	return ret;
}

/*
 * Remove memory block devices for the given memory area. Start and size
 * have to be aligned to memory block granularity. Memory block devices
 * have to be offline.
 *
 * Called under device_hotplug_lock.
 */
void remove_memory_block_devices(unsigned long start, unsigned long size)
{
	const unsigned long start_block_id = pfn_to_block_id(PFN_DOWN(start));
	const unsigned long end_block_id = pfn_to_block_id(PFN_DOWN(start + size));
	struct memory_block *mem;
	unsigned long block_id;

	if (WARN_ON_ONCE(!IS_ALIGNED(start, memory_block_size_bytes()) ||
			 !IS_ALIGNED(size, memory_block_size_bytes())))
		return;

	for (block_id = start_block_id; block_id != end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (WARN_ON_ONCE(!mem))
			continue;
		num_poisoned_pages_sub(-1UL, memblk_nr_poison(mem));
		unregister_memory_block_under_nodes(mem);
		remove_memory_block(mem);
	}
}

static struct attribute *memory_root_attrs[] = {
#ifdef CONFIG_ARCH_MEMORY_PROBE
	&dev_attr_probe.attr,
#endif

#ifdef CONFIG_MEMORY_FAILURE
	&dev_attr_soft_offline_page.attr,
	&dev_attr_hard_offline_page.attr,
#endif

	&dev_attr_block_size_bytes.attr,
	&dev_attr_auto_online_blocks.attr,
#ifdef CONFIG_CRASH_HOTPLUG
	&dev_attr_crash_hotplug.attr,
#endif
	NULL
};

static const struct attribute_group memory_root_attr_group = {
	.attrs = memory_root_attrs,
};

static const struct attribute_group *memory_root_attr_groups[] = {
	&memory_root_attr_group,
	NULL,
};

/*
 * Initialize the sysfs support for memory devices. At the time this function
 * is called, we cannot have concurrent creation/deletion of memory block
 * devices, so the device_hotplug_lock is not needed.
 */
void __init memory_dev_init(void)
{
	int ret;
	unsigned long block_sz, nr;

	/* Validate the configured memory block size */
	block_sz = memory_block_size_bytes();
	if (!is_power_of_2(block_sz) || block_sz < MIN_MEMORY_BLOCK_SIZE)
		panic("Memory block size not suitable: 0x%lx\n", block_sz);
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;

	ret = subsys_system_register(&memory_subsys, memory_root_attr_groups);
	if (ret)
		panic("%s() failed to register subsystem: %d\n", __func__, ret);

	/*
	 * Create entries for memory sections that were found
	 * during boot and have been initialized.
	 */
	for (nr = 0; nr <= __highest_present_section_nr;
	     nr += sections_per_block) {
		ret = add_boot_memory_block(nr);
		if (ret)
			panic("%s() failed to add memory block: %d\n", __func__,
			      ret);
	}
}

/**
 * walk_memory_blocks - walk through all present memory blocks overlapped
 *			by the range [start, start + size)
 *
 * @start: start address of the memory range
 * @size: size of the memory range
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks overlapped by the
 * range [start, start + size), calling func on each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 *
 * Called under device_hotplug_lock.
 */
int walk_memory_blocks(unsigned long start, unsigned long size,
		       void *arg, walk_memory_blocks_func_t func)
{
	const unsigned long start_block_id = phys_to_block_id(start);
	const unsigned long end_block_id = phys_to_block_id(start + size - 1);
	struct memory_block *mem;
	unsigned long block_id;
	int ret = 0;

	if (!size)
		return 0;

	for (block_id = start_block_id; block_id <= end_block_id; block_id++) {
		mem = find_memory_block_by_id(block_id);
		if (!mem)
			continue;

		ret = func(mem, arg);
		put_device(&mem->dev);
		if (ret)
			break;
	}
	return ret;
}
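
/*
 * Example callback (sketch only; the name and the caller's "start",
 * "size" and "count" are hypothetical):
 *
 *	static int count_online_blocks_cb(struct memory_block *mem, void *arg)
 *	{
 *		unsigned long *count = arg;
 *
 *		if (mem->state == MEM_ONLINE)
 *			(*count)++;
 *		return 0;
 *	}
 *
 *	unsigned long count = 0;
 *
 *	walk_memory_blocks(start, size, &count, count_online_blocks_cb);
 */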

struct for_each_memory_block_cb_data {
	walk_memory_blocks_func_t func;
	void *arg;
};

static int for_each_memory_block_cb(struct device *dev, void *data)
{
	struct memory_block *mem = to_memory_block(dev);
	struct for_each_memory_block_cb_data *cb_data = data;

	return cb_data->func(mem, cb_data->arg);
}

/**
 * for_each_memory_block - walk through all present memory blocks
 *
 * @arg: argument passed to func
 * @func: callback for each memory block walked
 *
 * This function walks through all present memory blocks, calling func on
 * each memory block.
 *
 * In case func() returns an error, walking is aborted and the error is
 * returned.
 */
int for_each_memory_block(void *arg, walk_memory_blocks_func_t func)
{
	struct for_each_memory_block_cb_data cb_data = {
		.func = func,
		.arg = arg,
	};

	return bus_for_each_dev(&memory_subsys, NULL, &cb_data,
				for_each_memory_block_cb);
}

/*
 * This is an internal helper to unify allocation and initialization of
 * memory groups. Note that the passed memory group will be copied to a
 * dynamically allocated memory group. After this call, the passed
 * memory group should no longer be used.
 */
static int memory_group_register(struct memory_group group)
{
	struct memory_group *new_group;
	uint32_t mgid;
	int ret;

	if (!node_possible(group.nid))
		return -EINVAL;

	new_group = kzalloc(sizeof(group), GFP_KERNEL);
	if (!new_group)
		return -ENOMEM;
	*new_group = group;
	INIT_LIST_HEAD(&new_group->memory_blocks);

	ret = xa_alloc(&memory_groups, &mgid, new_group, xa_limit_31b,
		       GFP_KERNEL);
	if (ret) {
		kfree(new_group);
		return ret;
	} else if (group.is_dynamic) {
		xa_set_mark(&memory_groups, mgid, MEMORY_GROUP_MARK_DYNAMIC);
	}
	return mgid;
}

/**
 * memory_group_register_static() - Register a static memory group.
 * @nid: The node id.
 * @max_pages: The maximum number of pages we'll have in this static memory
 *	       group.
 *
 * Register a new static memory group and return the memory group id.
 * All memory in the group belongs to a single unit, such as a DIMM. All
 * memory belonging to a static memory group is added in one go to be removed
 * in one go -- it's static.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if max_pages is invalid (0). Otherwise,
 * returns the new memory group id.
 */
int memory_group_register_static(int nid, unsigned long max_pages)
{
	struct memory_group group = {
		.nid = nid,
		.s = {
			.max_pages = max_pages,
		},
	};

	if (!max_pages)
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_static);
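
/*
 * Usage sketch (loosely modeled on drivers/dax/kmem.c; names and error
 * handling elided):
 *
 *	int mgid = memory_group_register_static(nid, PFN_UP(total_len));
 *
 *	if (mgid < 0)
 *		return mgid;
 *	add_memory_driver_managed(mgid, range.start, range_len(&range),
 *				  kmem_name,
 *				  MHP_NID_IS_MGID | MHP_MERGE_RESOURCE);
 *
 * MHP_NID_IS_MGID makes the first argument be interpreted as a memory
 * group id rather than a node id.
 */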

/**
 * memory_group_register_dynamic() - Register a dynamic memory group.
 * @nid: The node id.
 * @unit_pages: Unit in pages in which memory is added/removed in this
 *		dynamic memory group.
 *
 * Register a new dynamic memory group and return the memory group id.
 * Memory within a dynamic memory group is added/removed dynamically
 * in unit_pages.
 *
 * Returns an error if out of memory, if the node id is invalid, if no new
 * memory groups can be registered, or if unit_pages is invalid (0, not a
 * power of two, smaller than a single memory block). Otherwise, returns the
 * new memory group id.
 */
int memory_group_register_dynamic(int nid, unsigned long unit_pages)
{
	struct memory_group group = {
		.nid = nid,
		.is_dynamic = true,
		.d = {
			.unit_pages = unit_pages,
		},
	};

	if (!unit_pages || !is_power_of_2(unit_pages) ||
	    unit_pages < PHYS_PFN(memory_block_size_bytes()))
		return -EINVAL;
	return memory_group_register(group);
}
EXPORT_SYMBOL_GPL(memory_group_register_dynamic);

/**
 * memory_group_unregister() - Unregister a memory group.
 * @mgid: the memory group id
 *
 * Unregister a memory group. If any memory block still belongs to this
 * memory group, unregistering will fail.
 *
 * Returns -EINVAL if the memory group id is invalid, returns -EBUSY if some
 * memory blocks still belong to this memory group and returns 0 if
 * unregistering succeeded.
 */
int memory_group_unregister(int mgid)
{
	struct memory_group *group;

	if (mgid < 0)
		return -EINVAL;

	group = xa_load(&memory_groups, mgid);
	if (!group)
		return -EINVAL;
	if (!list_empty(&group->memory_blocks))
		return -EBUSY;
	xa_erase(&memory_groups, mgid);
	kfree(group);
	return 0;
}
EXPORT_SYMBOL_GPL(memory_group_unregister);

/*
 * This is an internal helper only to be used in core memory hotplug code to
 * lookup a memory group. We don't care about locking, as we don't expect a
 * memory group to get unregistered while adding memory to it -- because
 * the group and the memory are managed by the same driver.
 */
struct memory_group *memory_group_find_by_id(int mgid)
{
	return xa_load(&memory_groups, mgid);
}

/*
 * This is an internal helper only to be used in core memory hotplug code to
 * walk all dynamic memory groups excluding a given memory group, either
 * belonging to a specific node, or belonging to any node.
 */
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
			       struct memory_group *excluded, void *arg)
{
	struct memory_group *group;
	unsigned long index;
	int ret = 0;

	xa_for_each_marked(&memory_groups, index, group,
			   MEMORY_GROUP_MARK_DYNAMIC) {
		if (group == excluded)
			continue;
#ifdef CONFIG_NUMA
		if (nid != NUMA_NO_NODE && group->nid != nid)
			continue;
#endif /* CONFIG_NUMA */
		ret = func(group, arg);
		if (ret)
			break;
	}
	return ret;
}

#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
void memblk_nr_poison_inc(unsigned long pfn)
{
	const unsigned long block_id = pfn_to_block_id(pfn);
	struct memory_block *mem = find_memory_block_by_id(block_id);

	if (mem)
		atomic_long_inc(&mem->nr_hwpoison);
}

void memblk_nr_poison_sub(unsigned long pfn, long i)
{
	const unsigned long block_id = pfn_to_block_id(pfn);
	struct memory_block *mem = find_memory_block_by_id(block_id);

	if (mem)
		atomic_long_sub(i, &mem->nr_hwpoison);
}

static unsigned long memblk_nr_poison(struct memory_block *mem)
{
	return atomic_long_read(&mem->nr_hwpoison);
}
#endif