v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Basic Node interface support
  4 */
  5
  6#include <linux/module.h>
  7#include <linux/init.h>
  8#include <linux/mm.h>
  9#include <linux/memory.h>
 10#include <linux/vmstat.h>
 11#include <linux/notifier.h>
 12#include <linux/node.h>
 13#include <linux/hugetlb.h>
 14#include <linux/compaction.h>
 15#include <linux/cpumask.h>
 16#include <linux/topology.h>
 17#include <linux/nodemask.h>
 18#include <linux/cpu.h>
 19#include <linux/device.h>
 20#include <linux/pm_runtime.h>
 21#include <linux/swap.h>
 22#include <linux/slab.h>
 23
 24static const struct bus_type node_subsys = {
 25	.name = "node",
 26	.dev_name = "node",
 27};
 28
 29static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
 30				  struct bin_attribute *attr, char *buf,
 31				  loff_t off, size_t count)
 32{
 33	struct device *dev = kobj_to_dev(kobj);
 34	struct node *node_dev = to_node(dev);
 35	cpumask_var_t mask;
 36	ssize_t n;
 37
 38	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 39		return 0;
 40
 41	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
 42	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
 43	free_cpumask_var(mask);
 44
 45	return n;
 46}
 47
 48static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
 49
 50static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
 51				   struct bin_attribute *attr, char *buf,
 52				   loff_t off, size_t count)
 53{
 54	struct device *dev = kobj_to_dev(kobj);
 55	struct node *node_dev = to_node(dev);
 56	cpumask_var_t mask;
 57	ssize_t n;
 58
 59	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 60		return 0;
 61
 62	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
 63	n = cpumap_print_list_to_buf(buf, mask, off, count);
 64	free_cpumask_var(mask);
 65
 66	return n;
 67}
 68
 69static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
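/*
 * The two binary attributes above are exposed for every node device
 * registered on the "node" bus below, typically visible as
 *
 *	/sys/devices/system/node/nodeN/cpumap	- hex bitmask of the node's CPUs
 *	/sys/devices/system/node/nodeN/cpulist	- the same set in list form
 *
 * Only CPUs that are currently online are reported, because both read
 * handlers AND cpumask_of_node() with cpu_online_mask before formatting.
 */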
 70
 71/**
 72 * struct node_access_nodes - Access class device to hold user visible
 73 * 			      relationships to other nodes.
 74 * @dev:	Device for this memory access class
 75 * @list_node:	List element in the node's access list
 76 * @access:	The access class rank
 77 * @coord:	Heterogeneous memory performance coordinates
 78 */
 79struct node_access_nodes {
 80	struct device		dev;
 81	struct list_head	list_node;
 82	unsigned int		access;
 83#ifdef CONFIG_HMEM_REPORTING
 84	struct access_coordinate	coord;
 85#endif
 86};
 87#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
 88
 89static struct attribute *node_init_access_node_attrs[] = {
 90	NULL,
 91};
 92
 93static struct attribute *node_targ_access_node_attrs[] = {
 94	NULL,
 95};
 96
 97static const struct attribute_group initiators = {
 98	.name	= "initiators",
 99	.attrs	= node_init_access_node_attrs,
100};
101
102static const struct attribute_group targets = {
103	.name	= "targets",
104	.attrs	= node_targ_access_node_attrs,
105};
106
107static const struct attribute_group *node_access_node_groups[] = {
108	&initiators,
109	&targets,
110	NULL,
111};
112
113static void node_remove_accesses(struct node *node)
114{
115	struct node_access_nodes *c, *cnext;
116
117	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
118		list_del(&c->list_node);
119		device_unregister(&c->dev);
120	}
121}
122
123static void node_access_release(struct device *dev)
124{
125	kfree(to_access_nodes(dev));
126}
127
128static struct node_access_nodes *node_init_node_access(struct node *node,
129						       unsigned int access)
130{
131	struct node_access_nodes *access_node;
132	struct device *dev;
133
134	list_for_each_entry(access_node, &node->access_list, list_node)
135		if (access_node->access == access)
136			return access_node;
137
138	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
139	if (!access_node)
140		return NULL;
141
142	access_node->access = access;
143	dev = &access_node->dev;
144	dev->parent = &node->dev;
145	dev->release = node_access_release;
146	dev->groups = node_access_node_groups;
147	if (dev_set_name(dev, "access%u", access))
148		goto free;
149
150	if (device_register(dev))
151		goto free_name;
152
153	pm_runtime_no_callbacks(dev);
154	list_add_tail(&access_node->list_node, &node->access_list);
155	return access_node;
156free_name:
157	kfree_const(dev->kobj.name);
158free:
159	kfree(access_node);
160	return NULL;
161}
162
163#ifdef CONFIG_HMEM_REPORTING
164#define ACCESS_ATTR(property)						\
165static ssize_t property##_show(struct device *dev,			\
166			   struct device_attribute *attr,		\
167			   char *buf)					\
168{									\
169	return sysfs_emit(buf, "%u\n",					\
170			  to_access_nodes(dev)->coord.property);	\
171}									\
172static DEVICE_ATTR_RO(property)
173
174ACCESS_ATTR(read_bandwidth);
175ACCESS_ATTR(read_latency);
176ACCESS_ATTR(write_bandwidth);
177ACCESS_ATTR(write_latency);
178
179static struct attribute *access_attrs[] = {
180	&dev_attr_read_bandwidth.attr,
181	&dev_attr_read_latency.attr,
182	&dev_attr_write_bandwidth.attr,
183	&dev_attr_write_latency.attr,
184	NULL,
185};
186
187/**
188 * node_set_perf_attrs - Set the performance values for given access class
189 * @nid: Node identifier to be set
190 * @coord: Heterogeneous memory performance coordinates
 191 * @access: The access class for the given attributes
192 */
193void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
194			 unsigned int access)
195{
196	struct node_access_nodes *c;
197	struct node *node;
198	int i;
199
200	if (WARN_ON_ONCE(!node_online(nid)))
201		return;
202
203	node = node_devices[nid];
204	c = node_init_node_access(node, access);
205	if (!c)
206		return;
207
208	c->coord = *coord;
209	for (i = 0; access_attrs[i] != NULL; i++) {
210		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
211					    "initiators")) {
212			pr_info("failed to add performance attribute to node %d\n",
213				nid);
214			break;
215		}
216	}
217}
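/*
 * Minimal usage sketch (hypothetical caller, placeholder values):
 *
 *	struct access_coordinate coord = {
 *		.read_bandwidth  = 12000,
 *		.write_bandwidth = 12000,
 *		.read_latency    = 200,
 *		.write_latency   = 200,
 *	};
 *
 *	node_set_perf_attrs(nid, &coord, 0);
 *
 * This populates read_bandwidth, read_latency, write_bandwidth and
 * write_latency under the node's access0/initiators/ directory, backed by
 * the ACCESS_ATTR() attributes defined above.
 */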
218
219/**
220 * struct node_cache_info - Internal tracking for memory node caches
 221 * @dev:	Device representing the cache level
222 * @node:	List element for tracking in the node
223 * @cache_attrs:Attributes for this cache level
224 */
225struct node_cache_info {
226	struct device dev;
227	struct list_head node;
228	struct node_cache_attrs cache_attrs;
229};
230#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
231
232#define CACHE_ATTR(name, fmt) 						\
233static ssize_t name##_show(struct device *dev,				\
234			   struct device_attribute *attr,		\
235			   char *buf)					\
236{									\
237	return sysfs_emit(buf, fmt "\n",				\
238			  to_cache_info(dev)->cache_attrs.name);	\
239}									\
240static DEVICE_ATTR_RO(name);
241
242CACHE_ATTR(size, "%llu")
243CACHE_ATTR(line_size, "%u")
244CACHE_ATTR(indexing, "%u")
245CACHE_ATTR(write_policy, "%u")
246
247static struct attribute *cache_attrs[] = {
248	&dev_attr_indexing.attr,
249	&dev_attr_size.attr,
250	&dev_attr_line_size.attr,
251	&dev_attr_write_policy.attr,
252	NULL,
253};
254ATTRIBUTE_GROUPS(cache);
255
256static void node_cache_release(struct device *dev)
257{
258	kfree(dev);
259}
260
261static void node_cacheinfo_release(struct device *dev)
262{
263	struct node_cache_info *info = to_cache_info(dev);
264	kfree(info);
265}
266
267static void node_init_cache_dev(struct node *node)
268{
269	struct device *dev;
270
271	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
272	if (!dev)
273		return;
274
275	device_initialize(dev);
276	dev->parent = &node->dev;
277	dev->release = node_cache_release;
278	if (dev_set_name(dev, "memory_side_cache"))
279		goto put_device;
280
281	if (device_add(dev))
282		goto put_device;
283
284	pm_runtime_no_callbacks(dev);
285	node->cache_dev = dev;
286	return;
287put_device:
288	put_device(dev);
289}
290
291/**
292 * node_add_cache() - add cache attribute to a memory node
293 * @nid: Node identifier that has new cache attributes
294 * @cache_attrs: Attributes for the cache being added
295 */
296void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
297{
298	struct node_cache_info *info;
299	struct device *dev;
300	struct node *node;
301
302	if (!node_online(nid) || !node_devices[nid])
303		return;
304
305	node = node_devices[nid];
306	list_for_each_entry(info, &node->cache_attrs, node) {
307		if (info->cache_attrs.level == cache_attrs->level) {
308			dev_warn(&node->dev,
309				"attempt to add duplicate cache level:%d\n",
310				cache_attrs->level);
311			return;
312		}
313	}
314
315	if (!node->cache_dev)
316		node_init_cache_dev(node);
317	if (!node->cache_dev)
318		return;
319
320	info = kzalloc(sizeof(*info), GFP_KERNEL);
321	if (!info)
322		return;
323
324	dev = &info->dev;
325	device_initialize(dev);
326	dev->parent = node->cache_dev;
327	dev->release = node_cacheinfo_release;
328	dev->groups = cache_groups;
329	if (dev_set_name(dev, "index%d", cache_attrs->level))
330		goto put_device;
331
332	info->cache_attrs = *cache_attrs;
333	if (device_add(dev)) {
334		dev_warn(&node->dev, "failed to add cache level:%d\n",
335			 cache_attrs->level);
336		goto put_device;
337	}
338	pm_runtime_no_callbacks(dev);
339	list_add_tail(&info->node, &node->cache_attrs);
340	return;
341put_device:
342	put_device(dev);
343}
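/*
 * Minimal usage sketch (hypothetical caller, placeholder values; indexing
 * and write_policy take the cache indexing/write-policy enum values, see
 * <linux/node.h>):
 *
 *	struct node_cache_attrs cache = {
 *		.size         = 1ULL << 30,
 *		.line_size    = 64,
 *		.indexing     = 0,
 *		.write_policy = 0,
 *		.level        = 1,
 *	};
 *
 *	node_add_cache(nid, &cache);
 *
 * The result is /sys/devices/system/node/nodeN/memory_side_cache/index1/
 * with size, line_size, indexing and write_policy files, as wired up by
 * the CACHE_ATTR() definitions above.
 */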
344
345static void node_remove_caches(struct node *node)
346{
347	struct node_cache_info *info, *next;
348
349	if (!node->cache_dev)
350		return;
351
352	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
353		list_del(&info->node);
354		device_unregister(&info->dev);
355	}
356	device_unregister(node->cache_dev);
357}
358
359static void node_init_caches(unsigned int nid)
360{
361	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
362}
363#else
364static void node_init_caches(unsigned int nid) { }
365static void node_remove_caches(struct node *node) { }
366#endif
367
368#define K(x) ((x) << (PAGE_SHIFT - 10))
369static ssize_t node_read_meminfo(struct device *dev,
370			struct device_attribute *attr, char *buf)
371{
372	int len = 0;
373	int nid = dev->id;
374	struct pglist_data *pgdat = NODE_DATA(nid);
375	struct sysinfo i;
376	unsigned long sreclaimable, sunreclaimable;
377	unsigned long swapcached = 0;
378
379	si_meminfo_node(&i, nid);
380	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
381	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
382#ifdef CONFIG_SWAP
383	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
384#endif
385	len = sysfs_emit_at(buf, len,
386			    "Node %d MemTotal:       %8lu kB\n"
387			    "Node %d MemFree:        %8lu kB\n"
388			    "Node %d MemUsed:        %8lu kB\n"
389			    "Node %d SwapCached:     %8lu kB\n"
390			    "Node %d Active:         %8lu kB\n"
391			    "Node %d Inactive:       %8lu kB\n"
392			    "Node %d Active(anon):   %8lu kB\n"
393			    "Node %d Inactive(anon): %8lu kB\n"
394			    "Node %d Active(file):   %8lu kB\n"
395			    "Node %d Inactive(file): %8lu kB\n"
396			    "Node %d Unevictable:    %8lu kB\n"
397			    "Node %d Mlocked:        %8lu kB\n",
398			    nid, K(i.totalram),
399			    nid, K(i.freeram),
400			    nid, K(i.totalram - i.freeram),
401			    nid, K(swapcached),
402			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
403				   node_page_state(pgdat, NR_ACTIVE_FILE)),
404			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
405				   node_page_state(pgdat, NR_INACTIVE_FILE)),
406			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
407			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
408			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
409			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
410			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
411			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
412
413#ifdef CONFIG_HIGHMEM
414	len += sysfs_emit_at(buf, len,
415			     "Node %d HighTotal:      %8lu kB\n"
416			     "Node %d HighFree:       %8lu kB\n"
417			     "Node %d LowTotal:       %8lu kB\n"
418			     "Node %d LowFree:        %8lu kB\n",
419			     nid, K(i.totalhigh),
420			     nid, K(i.freehigh),
421			     nid, K(i.totalram - i.totalhigh),
422			     nid, K(i.freeram - i.freehigh));
423#endif
424	len += sysfs_emit_at(buf, len,
425			     "Node %d Dirty:          %8lu kB\n"
426			     "Node %d Writeback:      %8lu kB\n"
427			     "Node %d FilePages:      %8lu kB\n"
428			     "Node %d Mapped:         %8lu kB\n"
429			     "Node %d AnonPages:      %8lu kB\n"
430			     "Node %d Shmem:          %8lu kB\n"
431			     "Node %d KernelStack:    %8lu kB\n"
432#ifdef CONFIG_SHADOW_CALL_STACK
433			     "Node %d ShadowCallStack:%8lu kB\n"
434#endif
435			     "Node %d PageTables:     %8lu kB\n"
436			     "Node %d SecPageTables:  %8lu kB\n"
437			     "Node %d NFS_Unstable:   %8lu kB\n"
438			     "Node %d Bounce:         %8lu kB\n"
439			     "Node %d WritebackTmp:   %8lu kB\n"
440			     "Node %d KReclaimable:   %8lu kB\n"
441			     "Node %d Slab:           %8lu kB\n"
442			     "Node %d SReclaimable:   %8lu kB\n"
443			     "Node %d SUnreclaim:     %8lu kB\n"
444#ifdef CONFIG_TRANSPARENT_HUGEPAGE
445			     "Node %d AnonHugePages:  %8lu kB\n"
446			     "Node %d ShmemHugePages: %8lu kB\n"
447			     "Node %d ShmemPmdMapped: %8lu kB\n"
448			     "Node %d FileHugePages:  %8lu kB\n"
449			     "Node %d FilePmdMapped:  %8lu kB\n"
450#endif
451#ifdef CONFIG_UNACCEPTED_MEMORY
452			     "Node %d Unaccepted:     %8lu kB\n"
453#endif
454			     ,
455			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
456			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
457			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
458			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
459			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
460			     nid, K(i.sharedram),
461			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
462#ifdef CONFIG_SHADOW_CALL_STACK
463			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
464#endif
465			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
466			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
467			     nid, 0UL,
468			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
469			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
470			     nid, K(sreclaimable +
471				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
472			     nid, K(sreclaimable + sunreclaimable),
473			     nid, K(sreclaimable),
474			     nid, K(sunreclaimable)
475#ifdef CONFIG_TRANSPARENT_HUGEPAGE
476			     ,
477			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
478			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
479			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
480			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
481			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
482#endif
483#ifdef CONFIG_UNACCEPTED_MEMORY
484			     ,
485			     nid, K(sum_zone_node_page_state(nid, NR_UNACCEPTED))
486#endif
487			    );
488	len += hugetlb_report_node_meminfo(buf, len, nid);
489	return len;
490}
491
492#undef K
493static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);
494
495static ssize_t node_read_numastat(struct device *dev,
496				  struct device_attribute *attr, char *buf)
497{
498	fold_vm_numa_events();
499	return sysfs_emit(buf,
500			  "numa_hit %lu\n"
501			  "numa_miss %lu\n"
502			  "numa_foreign %lu\n"
503			  "interleave_hit %lu\n"
504			  "local_node %lu\n"
505			  "other_node %lu\n",
506			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
507			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
508			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
509			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
510			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
511			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
512}
513static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);
514
515static ssize_t node_read_vmstat(struct device *dev,
516				struct device_attribute *attr, char *buf)
517{
518	int nid = dev->id;
519	struct pglist_data *pgdat = NODE_DATA(nid);
520	int i;
521	int len = 0;
522
523	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
524		len += sysfs_emit_at(buf, len, "%s %lu\n",
525				     zone_stat_name(i),
526				     sum_zone_node_page_state(nid, i));
527
528#ifdef CONFIG_NUMA
529	fold_vm_numa_events();
530	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
531		len += sysfs_emit_at(buf, len, "%s %lu\n",
532				     numa_stat_name(i),
533				     sum_zone_numa_event_state(nid, i));
534
535#endif
536	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
537		unsigned long pages = node_page_state_pages(pgdat, i);
538
539		if (vmstat_item_print_in_thp(i))
540			pages /= HPAGE_PMD_NR;
541		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
542				     pages);
543	}
544
545	return len;
546}
547static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);
548
549static ssize_t node_read_distance(struct device *dev,
550				  struct device_attribute *attr, char *buf)
551{
552	int nid = dev->id;
553	int len = 0;
554	int i;
555
556	/*
557	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
558	 * at the most (distance + space or newline).
559	 */
560	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
561
562	for_each_online_node(i) {
563		len += sysfs_emit_at(buf, len, "%s%d",
564				     i ? " " : "", node_distance(nid, i));
565	}
566
567	len += sysfs_emit_at(buf, len, "\n");
568	return len;
569}
570static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
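/*
 * Reading /sys/devices/system/node/nodeN/distance yields one line with a
 * space-separated distance from node N to every online node; for example,
 * a hypothetical two-node machine might report "10 21" for node0 and
 * "21 10" for node1.
 */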
571
572static struct attribute *node_dev_attrs[] = {
573	&dev_attr_meminfo.attr,
574	&dev_attr_numastat.attr,
575	&dev_attr_distance.attr,
576	&dev_attr_vmstat.attr,
577	NULL
578};
579
580static struct bin_attribute *node_dev_bin_attrs[] = {
581	&bin_attr_cpumap,
582	&bin_attr_cpulist,
583	NULL
584};
585
586static const struct attribute_group node_dev_group = {
587	.attrs = node_dev_attrs,
588	.bin_attrs = node_dev_bin_attrs
589};
590
591static const struct attribute_group *node_dev_groups[] = {
592	&node_dev_group,
593#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
594	&arch_node_dev_group,
595#endif
596#ifdef CONFIG_MEMORY_FAILURE
597	&memory_failure_attr_group,
598#endif
599	NULL
600};
601
602static void node_device_release(struct device *dev)
603{
604	kfree(to_node(dev));
605}
606
607/*
608 * register_node - Setup a sysfs device for a node.
609 * @num - Node number to use when creating the device.
610 *
611 * Initialize and register the node device.
612 */
613static int register_node(struct node *node, int num)
614{
615	int error;
616
617	node->dev.id = num;
618	node->dev.bus = &node_subsys;
619	node->dev.release = node_device_release;
620	node->dev.groups = node_dev_groups;
621	error = device_register(&node->dev);
622
623	if (error) {
624		put_device(&node->dev);
625	} else {
626		hugetlb_register_node(node);
627		compaction_register_node(node);
628	}
629
630	return error;
631}
632
633/**
634 * unregister_node - unregister a node device
635 * @node: node going away
636 *
637 * Unregisters a node device @node.  All the devices on the node must be
638 * unregistered before calling this function.
639 */
640void unregister_node(struct node *node)
641{
642	hugetlb_unregister_node(node);
643	compaction_unregister_node(node);
644	node_remove_accesses(node);
645	node_remove_caches(node);
646	device_unregister(&node->dev);
647}
648
649struct node *node_devices[MAX_NUMNODES];
650
651/*
652 * register cpu under node
653 */
654int register_cpu_under_node(unsigned int cpu, unsigned int nid)
655{
656	int ret;
657	struct device *obj;
658
659	if (!node_online(nid))
660		return 0;
661
662	obj = get_cpu_device(cpu);
663	if (!obj)
664		return 0;
665
666	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
667				&obj->kobj,
668				kobject_name(&obj->kobj));
669	if (ret)
670		return ret;
671
672	return sysfs_create_link(&obj->kobj,
673				 &node_devices[nid]->dev.kobj,
674				 kobject_name(&node_devices[nid]->dev.kobj));
675}
676
677/**
678 * register_memory_node_under_compute_node - link memory node to its compute
679 *					     node for a given access class.
680 * @mem_nid:	Memory node number
681 * @cpu_nid:	Cpu  node number
682 * @access:	Access class to register
683 *
684 * Description:
685 * 	For use with platforms that may have separate memory and compute nodes.
686 * 	This function will export node relationships linking which memory
687 * 	initiator nodes can access memory targets at a given ranked access
688 * 	class.
689 */
690int register_memory_node_under_compute_node(unsigned int mem_nid,
691					    unsigned int cpu_nid,
692					    unsigned int access)
693{
694	struct node *init_node, *targ_node;
695	struct node_access_nodes *initiator, *target;
696	int ret;
697
698	if (!node_online(cpu_nid) || !node_online(mem_nid))
699		return -ENODEV;
700
701	init_node = node_devices[cpu_nid];
702	targ_node = node_devices[mem_nid];
703	initiator = node_init_node_access(init_node, access);
704	target = node_init_node_access(targ_node, access);
705	if (!initiator || !target)
706		return -ENOMEM;
707
708	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
709				      &targ_node->dev.kobj,
710				      dev_name(&targ_node->dev));
711	if (ret)
712		return ret;
713
714	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
715				      &init_node->dev.kobj,
716				      dev_name(&init_node->dev));
717	if (ret)
718		goto err;
719
720	return 0;
721 err:
722	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
723				     dev_name(&targ_node->dev));
724	return ret;
725}
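/*
 * Illustrative effect (hypothetical node numbers): a call such as
 *
 *	register_memory_node_under_compute_node(2, 0, 1);
 *
 * records node2 as a target of node0 and node0 as an initiator of node2
 * for access class 1, i.e. it adds the symlinks
 *
 *	/sys/devices/system/node/node0/access1/targets/node2
 *	/sys/devices/system/node/node2/access1/initiators/node0
 */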
726
727int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
728{
729	struct device *obj;
730
731	if (!node_online(nid))
732		return 0;
733
734	obj = get_cpu_device(cpu);
735	if (!obj)
736		return 0;
737
738	sysfs_remove_link(&node_devices[nid]->dev.kobj,
739			  kobject_name(&obj->kobj));
740	sysfs_remove_link(&obj->kobj,
741			  kobject_name(&node_devices[nid]->dev.kobj));
742
743	return 0;
744}
745
746#ifdef CONFIG_MEMORY_HOTPLUG
747static int __ref get_nid_for_pfn(unsigned long pfn)
748{
749#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
750	if (system_state < SYSTEM_RUNNING)
751		return early_pfn_to_nid(pfn);
752#endif
753	return pfn_to_nid(pfn);
754}
755
756static void do_register_memory_block_under_node(int nid,
757						struct memory_block *mem_blk,
758						enum meminit_context context)
759{
760	int ret;
761
762	memory_block_add_nid(mem_blk, nid, context);
763
764	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
765				       &mem_blk->dev.kobj,
766				       kobject_name(&mem_blk->dev.kobj));
767	if (ret && ret != -EEXIST)
768		dev_err_ratelimited(&node_devices[nid]->dev,
769				    "can't create link to %s in sysfs (%d)\n",
770				    kobject_name(&mem_blk->dev.kobj), ret);
771
772	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
773				&node_devices[nid]->dev.kobj,
774				kobject_name(&node_devices[nid]->dev.kobj));
775	if (ret && ret != -EEXIST)
776		dev_err_ratelimited(&mem_blk->dev,
777				    "can't create link to %s in sysfs (%d)\n",
778				    kobject_name(&node_devices[nid]->dev.kobj),
779				    ret);
780}
781
782/* register memory section under specified node if it spans that node */
783static int register_mem_block_under_node_early(struct memory_block *mem_blk,
784					       void *arg)
785{
786	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
787	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
788	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
789	int nid = *(int *)arg;
790	unsigned long pfn;
791
792	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
793		int page_nid;
794
795		/*
796		 * memory block could have several absent sections from start.
797		 * skip pfn range from absent section
798		 */
799		if (!pfn_in_present_section(pfn)) {
800			pfn = round_down(pfn + PAGES_PER_SECTION,
801					 PAGES_PER_SECTION) - 1;
802			continue;
803		}
804
805		/*
 806		 * We need to check if page belongs to nid only in the boot
 807		 * case because node's ranges can be interleaved.
808		 */
809		page_nid = get_nid_for_pfn(pfn);
810		if (page_nid < 0)
811			continue;
812		if (page_nid != nid)
813			continue;
814
815		do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
816		return 0;
817	}
818	/* mem section does not span the specified node */
819	return 0;
820}
821
822/*
823 * During hotplug we know that all pages in the memory block belong to the same
824 * node.
825 */
826static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
827						 void *arg)
828{
829	int nid = *(int *)arg;
830
831	do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
832	return 0;
833}
834
835/*
836 * Unregister a memory block device under the node it spans. Memory blocks
837 * with multiple nodes cannot be offlined and therefore also never be removed.
838 */
839void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
840{
841	if (mem_blk->nid == NUMA_NO_NODE)
842		return;
843
844	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
845			  kobject_name(&mem_blk->dev.kobj));
846	sysfs_remove_link(&mem_blk->dev.kobj,
847			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
848}
849
850void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
851				       unsigned long end_pfn,
852				       enum meminit_context context)
853{
854	walk_memory_blocks_func_t func;
855
856	if (context == MEMINIT_HOTPLUG)
857		func = register_mem_block_under_node_hotplug;
858	else
859		func = register_mem_block_under_node_early;
860
861	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
862			   (void *)&nid, func);
863	return;
864}
865#endif /* CONFIG_MEMORY_HOTPLUG */
866
867int __register_one_node(int nid)
868{
869	int error;
870	int cpu;
871	struct node *node;
872
873	node = kzalloc(sizeof(struct node), GFP_KERNEL);
874	if (!node)
875		return -ENOMEM;
876
877	INIT_LIST_HEAD(&node->access_list);
878	node_devices[nid] = node;
879
880	error = register_node(node_devices[nid], nid);
881
882	/* link cpu under this node */
883	for_each_present_cpu(cpu) {
884		if (cpu_to_node(cpu) == nid)
885			register_cpu_under_node(cpu, nid);
886	}
887
888	node_init_caches(nid);
889
890	return error;
891}
892
893void unregister_one_node(int nid)
894{
895	if (!node_devices[nid])
896		return;
897
898	unregister_node(node_devices[nid]);
899	node_devices[nid] = NULL;
900}
901
902/*
903 * node states attributes
904 */
905
906struct node_attr {
907	struct device_attribute attr;
908	enum node_states state;
909};
910
911static ssize_t show_node_state(struct device *dev,
912			       struct device_attribute *attr, char *buf)
913{
914	struct node_attr *na = container_of(attr, struct node_attr, attr);
915
916	return sysfs_emit(buf, "%*pbl\n",
917			  nodemask_pr_args(&node_states[na->state]));
918}
919
920#define _NODE_ATTR(name, state) \
921	{ __ATTR(name, 0444, show_node_state, NULL), state }
922
923static struct node_attr node_state_attr[] = {
924	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
925	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
926	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
927#ifdef CONFIG_HIGHMEM
928	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
929#endif
930	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
931	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
932	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
933					   N_GENERIC_INITIATOR),
934};
935
936static struct attribute *node_state_attrs[] = {
937	&node_state_attr[N_POSSIBLE].attr.attr,
938	&node_state_attr[N_ONLINE].attr.attr,
939	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
940#ifdef CONFIG_HIGHMEM
941	&node_state_attr[N_HIGH_MEMORY].attr.attr,
942#endif
943	&node_state_attr[N_MEMORY].attr.attr,
944	&node_state_attr[N_CPU].attr.attr,
945	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
946	NULL
947};
948
949static const struct attribute_group memory_root_attr_group = {
950	.attrs = node_state_attrs,
951};
952
953static const struct attribute_group *cpu_root_attr_groups[] = {
954	&memory_root_attr_group,
955	NULL,
956};
957
958void __init node_dev_init(void)
959{
960	int ret, i;
961
962 	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
963 	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
964
965	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
966	if (ret)
967		panic("%s() failed to register subsystem: %d\n", __func__, ret);
968
969	/*
970	 * Create all node devices, which will properly link the node
971	 * to applicable memory block devices and already created cpu devices.
972	 */
973	for_each_online_node(i) {
974		ret = register_one_node(i);
975		if (ret)
976			panic("%s() failed to add node: %d\n", __func__, ret);
977	}
978}
v5.4
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Basic Node interface support
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/init.h>
   8#include <linux/mm.h>
   9#include <linux/memory.h>
  10#include <linux/vmstat.h>
  11#include <linux/notifier.h>
  12#include <linux/node.h>
  13#include <linux/hugetlb.h>
  14#include <linux/compaction.h>
  15#include <linux/cpumask.h>
  16#include <linux/topology.h>
  17#include <linux/nodemask.h>
  18#include <linux/cpu.h>
  19#include <linux/device.h>
  20#include <linux/pm_runtime.h>
  21#include <linux/swap.h>
  22#include <linux/slab.h>
  23
  24static struct bus_type node_subsys = {
  25	.name = "node",
  26	.dev_name = "node",
  27};
  28
  29
  30static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
  31{
  32	ssize_t n;
  33	cpumask_var_t mask;
  34	struct node *node_dev = to_node(dev);
  35
  36	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
  37	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
  38
  39	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
  40		return 0;
  41
  42	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
  43	n = cpumap_print_to_pagebuf(list, buf, mask);
  44	free_cpumask_var(mask);
  45
  46	return n;
  47}
  48
  49static inline ssize_t node_read_cpumask(struct device *dev,
  50				struct device_attribute *attr, char *buf)
  51{
  52	return node_read_cpumap(dev, false, buf);
  53}
  54static inline ssize_t node_read_cpulist(struct device *dev,
  55				struct device_attribute *attr, char *buf)
  56{
  57	return node_read_cpumap(dev, true, buf);
  58}
  59
  60static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
  61static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
  62
  63/**
  64 * struct node_access_nodes - Access class device to hold user visible
  65 * 			      relationships to other nodes.
  66 * @dev:	Device for this memory access class
  67 * @list_node:	List element in the node's access list
  68 * @access:	The access class rank
  69 * @hmem_attrs: Heterogeneous memory performance attributes
  70 */
  71struct node_access_nodes {
  72	struct device		dev;
  73	struct list_head	list_node;
  74	unsigned		access;
  75#ifdef CONFIG_HMEM_REPORTING
  76	struct node_hmem_attrs	hmem_attrs;
  77#endif
  78};
  79#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
  80
  81static struct attribute *node_init_access_node_attrs[] = {
  82	NULL,
  83};
  84
  85static struct attribute *node_targ_access_node_attrs[] = {
  86	NULL,
  87};
  88
  89static const struct attribute_group initiators = {
  90	.name	= "initiators",
  91	.attrs	= node_init_access_node_attrs,
  92};
  93
  94static const struct attribute_group targets = {
  95	.name	= "targets",
  96	.attrs	= node_targ_access_node_attrs,
  97};
  98
  99static const struct attribute_group *node_access_node_groups[] = {
 100	&initiators,
 101	&targets,
 102	NULL,
 103};
 104
 105static void node_remove_accesses(struct node *node)
 106{
 107	struct node_access_nodes *c, *cnext;
 108
 109	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
 110		list_del(&c->list_node);
 111		device_unregister(&c->dev);
 112	}
 113}
 114
 115static void node_access_release(struct device *dev)
 116{
 117	kfree(to_access_nodes(dev));
 118}
 119
 120static struct node_access_nodes *node_init_node_access(struct node *node,
 121						       unsigned access)
 122{
 123	struct node_access_nodes *access_node;
 124	struct device *dev;
 125
 126	list_for_each_entry(access_node, &node->access_list, list_node)
 127		if (access_node->access == access)
 128			return access_node;
 129
 130	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
 131	if (!access_node)
 132		return NULL;
 133
 134	access_node->access = access;
 135	dev = &access_node->dev;
 136	dev->parent = &node->dev;
 137	dev->release = node_access_release;
 138	dev->groups = node_access_node_groups;
 139	if (dev_set_name(dev, "access%u", access))
 140		goto free;
 141
 142	if (device_register(dev))
 143		goto free_name;
 144
 145	pm_runtime_no_callbacks(dev);
 146	list_add_tail(&access_node->list_node, &node->access_list);
 147	return access_node;
 148free_name:
 149	kfree_const(dev->kobj.name);
 150free:
 151	kfree(access_node);
 152	return NULL;
 153}
 154
 155#ifdef CONFIG_HMEM_REPORTING
 156#define ACCESS_ATTR(name) 						   \
 157static ssize_t name##_show(struct device *dev,				   \
 158			   struct device_attribute *attr,		   \
 159			   char *buf)					   \
 160{									   \
 161	return sprintf(buf, "%u\n", to_access_nodes(dev)->hmem_attrs.name); \
 162}									   \
 163static DEVICE_ATTR_RO(name);
 164
 165ACCESS_ATTR(read_bandwidth)
 166ACCESS_ATTR(read_latency)
 167ACCESS_ATTR(write_bandwidth)
 168ACCESS_ATTR(write_latency)
 169
 170static struct attribute *access_attrs[] = {
 171	&dev_attr_read_bandwidth.attr,
 172	&dev_attr_read_latency.attr,
 173	&dev_attr_write_bandwidth.attr,
 174	&dev_attr_write_latency.attr,
 175	NULL,
 176};
 177
 178/**
 179 * node_set_perf_attrs - Set the performance values for given access class
 180 * @nid: Node identifier to be set
 181 * @hmem_attrs: Heterogeneous memory performance attributes
  182 * @access: The access class for the given attributes
 183 */
 184void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
 185			 unsigned access)
 186{
 187	struct node_access_nodes *c;
 188	struct node *node;
 189	int i;
 190
 191	if (WARN_ON_ONCE(!node_online(nid)))
 192		return;
 193
 194	node = node_devices[nid];
 195	c = node_init_node_access(node, access);
 196	if (!c)
 197		return;
 198
 199	c->hmem_attrs = *hmem_attrs;
 200	for (i = 0; access_attrs[i] != NULL; i++) {
 201		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
 202					    "initiators")) {
 203			pr_info("failed to add performance attribute to node %d\n",
 204				nid);
 205			break;
 206		}
 207	}
 208}
 209
 210/**
 211 * struct node_cache_info - Internal tracking for memory node caches
  212 * @dev:	Device representing the cache level
 213 * @node:	List element for tracking in the node
 214 * @cache_attrs:Attributes for this cache level
 215 */
 216struct node_cache_info {
 217	struct device dev;
 218	struct list_head node;
 219	struct node_cache_attrs cache_attrs;
 220};
 221#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
 222
 223#define CACHE_ATTR(name, fmt) 						\
 224static ssize_t name##_show(struct device *dev,				\
 225			   struct device_attribute *attr,		\
 226			   char *buf)					\
 227{									\
 228	return sprintf(buf, fmt "\n", to_cache_info(dev)->cache_attrs.name);\
 229}									\
 230DEVICE_ATTR_RO(name);
 231
 232CACHE_ATTR(size, "%llu")
 233CACHE_ATTR(line_size, "%u")
 234CACHE_ATTR(indexing, "%u")
 235CACHE_ATTR(write_policy, "%u")
 236
 237static struct attribute *cache_attrs[] = {
 238	&dev_attr_indexing.attr,
 239	&dev_attr_size.attr,
 240	&dev_attr_line_size.attr,
 241	&dev_attr_write_policy.attr,
 242	NULL,
 243};
 244ATTRIBUTE_GROUPS(cache);
 245
 246static void node_cache_release(struct device *dev)
 247{
 248	kfree(dev);
 249}
 250
 251static void node_cacheinfo_release(struct device *dev)
 252{
 253	struct node_cache_info *info = to_cache_info(dev);
 254	kfree(info);
 255}
 256
 257static void node_init_cache_dev(struct node *node)
 258{
 259	struct device *dev;
 260
 261	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 262	if (!dev)
 263		return;
 264
 265	dev->parent = &node->dev;
 266	dev->release = node_cache_release;
 267	if (dev_set_name(dev, "memory_side_cache"))
 268		goto free_dev;
 269
 270	if (device_register(dev))
 271		goto free_name;
 272
 273	pm_runtime_no_callbacks(dev);
 274	node->cache_dev = dev;
 275	return;
 276free_name:
 277	kfree_const(dev->kobj.name);
 278free_dev:
 279	kfree(dev);
 280}
 281
 282/**
 283 * node_add_cache() - add cache attribute to a memory node
 284 * @nid: Node identifier that has new cache attributes
 285 * @cache_attrs: Attributes for the cache being added
 286 */
 287void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
 288{
 289	struct node_cache_info *info;
 290	struct device *dev;
 291	struct node *node;
 292
 293	if (!node_online(nid) || !node_devices[nid])
 294		return;
 295
 296	node = node_devices[nid];
 297	list_for_each_entry(info, &node->cache_attrs, node) {
 298		if (info->cache_attrs.level == cache_attrs->level) {
 299			dev_warn(&node->dev,
 300				"attempt to add duplicate cache level:%d\n",
 301				cache_attrs->level);
 302			return;
 303		}
 304	}
 305
 306	if (!node->cache_dev)
 307		node_init_cache_dev(node);
 308	if (!node->cache_dev)
 309		return;
 310
 311	info = kzalloc(sizeof(*info), GFP_KERNEL);
 312	if (!info)
 313		return;
 314
 315	dev = &info->dev;
 316	dev->parent = node->cache_dev;
 317	dev->release = node_cacheinfo_release;
 318	dev->groups = cache_groups;
 319	if (dev_set_name(dev, "index%d", cache_attrs->level))
 320		goto free_cache;
 321
 322	info->cache_attrs = *cache_attrs;
 323	if (device_register(dev)) {
 324		dev_warn(&node->dev, "failed to add cache level:%d\n",
 325			 cache_attrs->level);
 326		goto free_name;
 327	}
 328	pm_runtime_no_callbacks(dev);
 329	list_add_tail(&info->node, &node->cache_attrs);
 330	return;
 331free_name:
 332	kfree_const(dev->kobj.name);
 333free_cache:
 334	kfree(info);
 335}
 336
 337static void node_remove_caches(struct node *node)
 338{
 339	struct node_cache_info *info, *next;
 340
 341	if (!node->cache_dev)
 342		return;
 343
 344	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
 345		list_del(&info->node);
 346		device_unregister(&info->dev);
 347	}
 348	device_unregister(node->cache_dev);
 349}
 350
 351static void node_init_caches(unsigned int nid)
 352{
 353	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
 354}
 355#else
 356static void node_init_caches(unsigned int nid) { }
 357static void node_remove_caches(struct node *node) { }
 358#endif
 359
 360#define K(x) ((x) << (PAGE_SHIFT - 10))
 361static ssize_t node_read_meminfo(struct device *dev,
 362			struct device_attribute *attr, char *buf)
 363{
 364	int n;
 365	int nid = dev->id;
 366	struct pglist_data *pgdat = NODE_DATA(nid);
 367	struct sysinfo i;
 368	unsigned long sreclaimable, sunreclaimable;
 369
 370	si_meminfo_node(&i, nid);
 371	sreclaimable = node_page_state(pgdat, NR_SLAB_RECLAIMABLE);
 372	sunreclaimable = node_page_state(pgdat, NR_SLAB_UNRECLAIMABLE);
 373	n = sprintf(buf,
 374		       "Node %d MemTotal:       %8lu kB\n"
 375		       "Node %d MemFree:        %8lu kB\n"
 376		       "Node %d MemUsed:        %8lu kB\n"
 377		       "Node %d Active:         %8lu kB\n"
 378		       "Node %d Inactive:       %8lu kB\n"
 379		       "Node %d Active(anon):   %8lu kB\n"
 380		       "Node %d Inactive(anon): %8lu kB\n"
 381		       "Node %d Active(file):   %8lu kB\n"
 382		       "Node %d Inactive(file): %8lu kB\n"
 383		       "Node %d Unevictable:    %8lu kB\n"
 384		       "Node %d Mlocked:        %8lu kB\n",
 385		       nid, K(i.totalram),
 386		       nid, K(i.freeram),
 387		       nid, K(i.totalram - i.freeram),
 388		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
 389				node_page_state(pgdat, NR_ACTIVE_FILE)),
 390		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
 391				node_page_state(pgdat, NR_INACTIVE_FILE)),
 392		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
 393		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
 394		       nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
 395		       nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
 396		       nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
 397		       nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
 398
 399#ifdef CONFIG_HIGHMEM
 400	n += sprintf(buf + n,
 401		       "Node %d HighTotal:      %8lu kB\n"
 402		       "Node %d HighFree:       %8lu kB\n"
 403		       "Node %d LowTotal:       %8lu kB\n"
 404		       "Node %d LowFree:        %8lu kB\n",
 405		       nid, K(i.totalhigh),
 406		       nid, K(i.freehigh),
 407		       nid, K(i.totalram - i.totalhigh),
 408		       nid, K(i.freeram - i.freehigh));
 409#endif
 410	n += sprintf(buf + n,
 411		       "Node %d Dirty:          %8lu kB\n"
 412		       "Node %d Writeback:      %8lu kB\n"
 413		       "Node %d FilePages:      %8lu kB\n"
 414		       "Node %d Mapped:         %8lu kB\n"
 415		       "Node %d AnonPages:      %8lu kB\n"
 416		       "Node %d Shmem:          %8lu kB\n"
 417		       "Node %d KernelStack:    %8lu kB\n"
 418		       "Node %d PageTables:     %8lu kB\n"
 419		       "Node %d NFS_Unstable:   %8lu kB\n"
 420		       "Node %d Bounce:         %8lu kB\n"
 421		       "Node %d WritebackTmp:   %8lu kB\n"
 422		       "Node %d KReclaimable:   %8lu kB\n"
 423		       "Node %d Slab:           %8lu kB\n"
 424		       "Node %d SReclaimable:   %8lu kB\n"
 425		       "Node %d SUnreclaim:     %8lu kB\n"
 426#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 427		       "Node %d AnonHugePages:  %8lu kB\n"
 428		       "Node %d ShmemHugePages: %8lu kB\n"
 429		       "Node %d ShmemPmdMapped: %8lu kB\n"
 430		       "Node %d FileHugePages: %8lu kB\n"
 431		       "Node %d FilePmdMapped: %8lu kB\n"
 432#endif
 433			,
 434		       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
 435		       nid, K(node_page_state(pgdat, NR_WRITEBACK)),
 436		       nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
 437		       nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
 438		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
 439		       nid, K(i.sharedram),
 440		       nid, sum_zone_node_page_state(nid, NR_KERNEL_STACK_KB),
 441		       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
 442		       nid, K(node_page_state(pgdat, NR_UNSTABLE_NFS)),
 443		       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
 444		       nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 445		       nid, K(sreclaimable +
 446			      node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
 447		       nid, K(sreclaimable + sunreclaimable),
 448		       nid, K(sreclaimable),
 449		       nid, K(sunreclaimable)
 450#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 451		       ,
 452		       nid, K(node_page_state(pgdat, NR_ANON_THPS) *
 453				       HPAGE_PMD_NR),
 454		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
 455				       HPAGE_PMD_NR),
 456		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
 457				       HPAGE_PMD_NR),
 458		       nid, K(node_page_state(pgdat, NR_FILE_THPS) *
 459				       HPAGE_PMD_NR),
 460		       nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
 461				       HPAGE_PMD_NR)
 462#endif
 463		       );
 464	n += hugetlb_report_node_meminfo(nid, buf + n);
 465	return n;
 466}
 467
 468#undef K
 469static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
 470
 471static ssize_t node_read_numastat(struct device *dev,
 472				struct device_attribute *attr, char *buf)
 473{
 474	return sprintf(buf,
 475		       "numa_hit %lu\n"
 476		       "numa_miss %lu\n"
 477		       "numa_foreign %lu\n"
 478		       "interleave_hit %lu\n"
 479		       "local_node %lu\n"
 480		       "other_node %lu\n",
 481		       sum_zone_numa_state(dev->id, NUMA_HIT),
 482		       sum_zone_numa_state(dev->id, NUMA_MISS),
 483		       sum_zone_numa_state(dev->id, NUMA_FOREIGN),
 484		       sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
 485		       sum_zone_numa_state(dev->id, NUMA_LOCAL),
 486		       sum_zone_numa_state(dev->id, NUMA_OTHER));
 487}
 488static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
 489
 490static ssize_t node_read_vmstat(struct device *dev,
 491				struct device_attribute *attr, char *buf)
 492{
 493	int nid = dev->id;
 494	struct pglist_data *pgdat = NODE_DATA(nid);
 495	int i;
 496	int n = 0;
 497
 498	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 499		n += sprintf(buf+n, "%s %lu\n", vmstat_text[i],
 500			     sum_zone_node_page_state(nid, i));
 501
 502#ifdef CONFIG_NUMA
 503	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 504		n += sprintf(buf+n, "%s %lu\n",
 505			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS],
 506			     sum_zone_numa_state(nid, i));
 507#endif
 508
 509	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 510		n += sprintf(buf+n, "%s %lu\n",
 511			     vmstat_text[i + NR_VM_ZONE_STAT_ITEMS +
 512			     NR_VM_NUMA_STAT_ITEMS],
 513			     node_page_state(pgdat, i));
 514
 515	return n;
 516}
 517static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
 518
 519static ssize_t node_read_distance(struct device *dev,
 520			struct device_attribute *attr, char *buf)
 521{
 522	int nid = dev->id;
 523	int len = 0;
 524	int i;
 525
 526	/*
 527	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
 528	 * at the most (distance + space or newline).
 529	 */
 530	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
 531
 532	for_each_online_node(i)
 533		len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
 534
 535	len += sprintf(buf + len, "\n");
 536	return len;
 537}
 538static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
 539
 540static struct attribute *node_dev_attrs[] = {
 541	&dev_attr_cpumap.attr,
 542	&dev_attr_cpulist.attr,
 543	&dev_attr_meminfo.attr,
 544	&dev_attr_numastat.attr,
 545	&dev_attr_distance.attr,
 546	&dev_attr_vmstat.attr,
 547	NULL
 548};
 549ATTRIBUTE_GROUPS(node_dev);
 550
 551#ifdef CONFIG_HUGETLBFS
 552/*
 553 * hugetlbfs per node attributes registration interface:
 554 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 555 * it will register its per node attributes for all online nodes with
 556 * memory.  It will also call register_hugetlbfs_with_node(), below, to
 557 * register its attribute registration functions with this node driver.
 558 * Once these hooks have been initialized, the node driver will call into
 559 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 560 */
 561static node_registration_func_t __hugetlb_register_node;
 562static node_registration_func_t __hugetlb_unregister_node;
 563
 564static inline bool hugetlb_register_node(struct node *node)
 565{
 566	if (__hugetlb_register_node &&
 567			node_state(node->dev.id, N_MEMORY)) {
 568		__hugetlb_register_node(node);
 569		return true;
 570	}
 571	return false;
 572}
 573
 574static inline void hugetlb_unregister_node(struct node *node)
 575{
 576	if (__hugetlb_unregister_node)
 577		__hugetlb_unregister_node(node);
 578}
 579
 580void register_hugetlbfs_with_node(node_registration_func_t doregister,
 581				  node_registration_func_t unregister)
 582{
 583	__hugetlb_register_node   = doregister;
 584	__hugetlb_unregister_node = unregister;
 585}
 586#else
 587static inline void hugetlb_register_node(struct node *node) {}
 588
 589static inline void hugetlb_unregister_node(struct node *node) {}
 590#endif
 591
 592static void node_device_release(struct device *dev)
 593{
 594	struct node *node = to_node(dev);
 595
 596#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
 597	/*
 598	 * We schedule the work only when a memory section is
 599	 * onlined/offlined on this node. When we come here,
 600	 * all the memory on this node has been offlined,
 601	 * so we won't enqueue new work to this work.
 602	 *
 603	 * The work is using node->node_work, so we should
 604	 * flush work before freeing the memory.
 605	 */
 606	flush_work(&node->node_work);
 607#endif
 608	kfree(node);
 609}
 610
 611/*
 612 * register_node - Setup a sysfs device for a node.
 613 * @num - Node number to use when creating the device.
 614 *
 615 * Initialize and register the node device.
 616 */
 617static int register_node(struct node *node, int num)
 618{
 619	int error;
 620
 621	node->dev.id = num;
 622	node->dev.bus = &node_subsys;
 623	node->dev.release = node_device_release;
 624	node->dev.groups = node_dev_groups;
 625	error = device_register(&node->dev);
 626
 627	if (error)
 628		put_device(&node->dev);
 629	else {
 630		hugetlb_register_node(node);
 631
 632		compaction_register_node(node);
 633	}
 634	return error;
 635}
 636
 637/**
 638 * unregister_node - unregister a node device
 639 * @node: node going away
 640 *
 641 * Unregisters a node device @node.  All the devices on the node must be
 642 * unregistered before calling this function.
 643 */
 644void unregister_node(struct node *node)
 645{
 646	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
 647	node_remove_accesses(node);
 648	node_remove_caches(node);
 649	device_unregister(&node->dev);
 650}
 651
 652struct node *node_devices[MAX_NUMNODES];
 653
 654/*
 655 * register cpu under node
 656 */
 657int register_cpu_under_node(unsigned int cpu, unsigned int nid)
 658{
 659	int ret;
 660	struct device *obj;
 661
 662	if (!node_online(nid))
 663		return 0;
 664
 665	obj = get_cpu_device(cpu);
 666	if (!obj)
 667		return 0;
 668
 669	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
 670				&obj->kobj,
 671				kobject_name(&obj->kobj));
 672	if (ret)
 673		return ret;
 674
 675	return sysfs_create_link(&obj->kobj,
 676				 &node_devices[nid]->dev.kobj,
 677				 kobject_name(&node_devices[nid]->dev.kobj));
 678}
 679
 680/**
 681 * register_memory_node_under_compute_node - link memory node to its compute
 682 *					     node for a given access class.
 683 * @mem_nid:	Memory node number
 684 * @cpu_nid:	Cpu  node number
 685 * @access:	Access class to register
 686 *
 687 * Description:
 688 * 	For use with platforms that may have separate memory and compute nodes.
 689 * 	This function will export node relationships linking which memory
 690 * 	initiator nodes can access memory targets at a given ranked access
 691 * 	class.
 692 */
 693int register_memory_node_under_compute_node(unsigned int mem_nid,
 694					    unsigned int cpu_nid,
 695					    unsigned access)
 696{
 697	struct node *init_node, *targ_node;
 698	struct node_access_nodes *initiator, *target;
 699	int ret;
 700
 701	if (!node_online(cpu_nid) || !node_online(mem_nid))
 702		return -ENODEV;
 703
 704	init_node = node_devices[cpu_nid];
 705	targ_node = node_devices[mem_nid];
 706	initiator = node_init_node_access(init_node, access);
 707	target = node_init_node_access(targ_node, access);
 708	if (!initiator || !target)
 709		return -ENOMEM;
 710
 711	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
 712				      &targ_node->dev.kobj,
 713				      dev_name(&targ_node->dev));
 714	if (ret)
 715		return ret;
 716
 717	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
 718				      &init_node->dev.kobj,
 719				      dev_name(&init_node->dev));
 720	if (ret)
 721		goto err;
 722
 723	return 0;
 724 err:
 725	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
 726				     dev_name(&targ_node->dev));
 727	return ret;
 728}
 729
 730int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 731{
 732	struct device *obj;
 733
 734	if (!node_online(nid))
 735		return 0;
 736
 737	obj = get_cpu_device(cpu);
 738	if (!obj)
 739		return 0;
 740
 741	sysfs_remove_link(&node_devices[nid]->dev.kobj,
 742			  kobject_name(&obj->kobj));
 743	sysfs_remove_link(&obj->kobj,
 744			  kobject_name(&node_devices[nid]->dev.kobj));
 745
 746	return 0;
 747}
 748
 749#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 750static int __ref get_nid_for_pfn(unsigned long pfn)
 751{
 752	if (!pfn_valid_within(pfn))
 753		return -1;
 754#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 755	if (system_state < SYSTEM_RUNNING)
 756		return early_pfn_to_nid(pfn);
 757#endif
 758	return pfn_to_nid(pfn);
 759}
 760
 761/* register memory section under specified node if it spans that node */
 762static int register_mem_sect_under_node(struct memory_block *mem_blk,
 763					 void *arg)
 764{
 765	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
 766	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
 767	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
 768	int ret, nid = *(int *)arg;
 769	unsigned long pfn;
 770
 771	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
 772		int page_nid;
 773
 774		/*
 775		 * memory block could have several absent sections from start.
 776		 * skip pfn range from absent section
 777		 */
 778		if (!pfn_present(pfn)) {
 779			pfn = round_down(pfn + PAGES_PER_SECTION,
 780					 PAGES_PER_SECTION) - 1;
 781			continue;
 782		}
 783
 784		/*
 785		 * We need to check if page belongs to nid only for the boot
 786		 * case, during hotplug we know that all pages in the memory
 787		 * block belong to the same node.
 788		 */
 789		if (system_state == SYSTEM_BOOTING) {
 790			page_nid = get_nid_for_pfn(pfn);
 791			if (page_nid < 0)
 792				continue;
 793			if (page_nid != nid)
 794				continue;
 795		}
 796
 797		/*
 798		 * If this memory block spans multiple nodes, we only indicate
 799		 * the last processed node.
 800		 */
 801		mem_blk->nid = nid;
 802
 803		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
 804					&mem_blk->dev.kobj,
 805					kobject_name(&mem_blk->dev.kobj));
 806		if (ret)
 807			return ret;
 808
 809		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
 810				&node_devices[nid]->dev.kobj,
 811				kobject_name(&node_devices[nid]->dev.kobj));
 812	}
 813	/* mem section does not span the specified node */
 814	return 0;
 815}
 816
 817/*
 818 * Unregister a memory block device under the node it spans. Memory blocks
 819 * with multiple nodes cannot be offlined and therefore also never be removed.
 820 */
 821void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 822{
 823	if (mem_blk->nid == NUMA_NO_NODE)
 824		return;
 825
 826	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
 827			  kobject_name(&mem_blk->dev.kobj));
 828	sysfs_remove_link(&mem_blk->dev.kobj,
 829			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 830}
 831
 832int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
 833{
 834	return walk_memory_blocks(PFN_PHYS(start_pfn),
 835				  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
 836				  register_mem_sect_under_node);
 837}
 838
 839#ifdef CONFIG_HUGETLBFS
 840/*
  841 * Handle per node hstate attribute [un]registration on transitions
 842 * to/from memoryless state.
 843 */
 844static void node_hugetlb_work(struct work_struct *work)
 845{
 846	struct node *node = container_of(work, struct node, node_work);
 847
 848	/*
 849	 * We only get here when a node transitions to/from memoryless state.
 850	 * We can detect which transition occurred by examining whether the
  851	 * node has memory now.  hugetlb_register_node() already checks this
 852	 * so we try to register the attributes.  If that fails, then the
 853	 * node has transitioned to memoryless, try to unregister the
 854	 * attributes.
 855	 */
 856	if (!hugetlb_register_node(node))
 857		hugetlb_unregister_node(node);
 858}
 859
 860static void init_node_hugetlb_work(int nid)
 861{
 862	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
 863}
 864
 865static int node_memory_callback(struct notifier_block *self,
 866				unsigned long action, void *arg)
 867{
 868	struct memory_notify *mnb = arg;
 869	int nid = mnb->status_change_nid;
 870
 871	switch (action) {
 872	case MEM_ONLINE:
 873	case MEM_OFFLINE:
 874		/*
 875		 * offload per node hstate [un]registration to a work thread
 876		 * when transitioning to/from memoryless state.
 877		 */
 878		if (nid != NUMA_NO_NODE)
 879			schedule_work(&node_devices[nid]->node_work);
 880		break;
 881
 882	case MEM_GOING_ONLINE:
 883	case MEM_GOING_OFFLINE:
 884	case MEM_CANCEL_ONLINE:
 885	case MEM_CANCEL_OFFLINE:
 886	default:
 887		break;
 888	}
 889
 890	return NOTIFY_OK;
 891}
 892#endif	/* CONFIG_HUGETLBFS */
 893#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 894
 895#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
 896    !defined(CONFIG_HUGETLBFS)
 897static inline int node_memory_callback(struct notifier_block *self,
 898				unsigned long action, void *arg)
 899{
 900	return NOTIFY_OK;
 901}
 902
 903static void init_node_hugetlb_work(int nid) { }
 904
 905#endif
 906
 907int __register_one_node(int nid)
 908{
 909	int error;
 910	int cpu;
 911
 912	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
 913	if (!node_devices[nid])
 914		return -ENOMEM;
 915
 916	error = register_node(node_devices[nid], nid);
 917
 918	/* link cpu under this node */
 919	for_each_present_cpu(cpu) {
 920		if (cpu_to_node(cpu) == nid)
 921			register_cpu_under_node(cpu, nid);
 922	}
 923
 924	INIT_LIST_HEAD(&node_devices[nid]->access_list);
 925	/* initialize work queue for memory hot plug */
 926	init_node_hugetlb_work(nid);
 927	node_init_caches(nid);
 928
 929	return error;
 930}
 931
 932void unregister_one_node(int nid)
 933{
 934	if (!node_devices[nid])
 935		return;
 936
 937	unregister_node(node_devices[nid]);
 938	node_devices[nid] = NULL;
 939}
 940
 941/*
 942 * node states attributes
 943 */
 944
 945static ssize_t print_nodes_state(enum node_states state, char *buf)
 946{
 947	int n;
 948
 949	n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
 950		      nodemask_pr_args(&node_states[state]));
 951	buf[n++] = '\n';
 952	buf[n] = '\0';
 953	return n;
 954}
 955
 956struct node_attr {
 957	struct device_attribute attr;
 958	enum node_states state;
 959};
 960
 961static ssize_t show_node_state(struct device *dev,
 962			       struct device_attribute *attr, char *buf)
 963{
 964	struct node_attr *na = container_of(attr, struct node_attr, attr);
 965	return print_nodes_state(na->state, buf);
 966}
 967
 968#define _NODE_ATTR(name, state) \
 969	{ __ATTR(name, 0444, show_node_state, NULL), state }
 970
 971static struct node_attr node_state_attr[] = {
 972	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
 973	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
 974	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
 975#ifdef CONFIG_HIGHMEM
 976	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
 977#endif
 978	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
 979	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
 980};
 981
 982static struct attribute *node_state_attrs[] = {
 983	&node_state_attr[N_POSSIBLE].attr.attr,
 984	&node_state_attr[N_ONLINE].attr.attr,
 985	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
 986#ifdef CONFIG_HIGHMEM
 987	&node_state_attr[N_HIGH_MEMORY].attr.attr,
 988#endif
 989	&node_state_attr[N_MEMORY].attr.attr,
 990	&node_state_attr[N_CPU].attr.attr,
 991	NULL
 992};
 993
 994static struct attribute_group memory_root_attr_group = {
 995	.attrs = node_state_attrs,
 996};
 997
 998static const struct attribute_group *cpu_root_attr_groups[] = {
 999	&memory_root_attr_group,
1000	NULL,
1001};
1002
1003#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
1004static int __init register_node_type(void)
1005{
1006	int ret;
1007
1008 	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
1009 	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
1010
1011	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
1012	if (!ret) {
1013		static struct notifier_block node_memory_callback_nb = {
1014			.notifier_call = node_memory_callback,
1015			.priority = NODE_CALLBACK_PRI,
1016		};
1017		register_hotmemory_notifier(&node_memory_callback_nb);
1018	}
1019
1020	/*
1021	 * Note:  we're not going to unregister the node class if we fail
1022	 * to register the node state class attribute files.
1023	 */
1024	return ret;
1025}
1026postcore_initcall(register_node_type);