v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Basic Node interface support
   4 */
   5
   6#include <linux/module.h>
   7#include <linux/init.h>
   8#include <linux/mm.h>
   9#include <linux/memory.h>
  10#include <linux/vmstat.h>
  11#include <linux/notifier.h>
  12#include <linux/node.h>
  13#include <linux/hugetlb.h>
  14#include <linux/compaction.h>
  15#include <linux/cpumask.h>
  16#include <linux/topology.h>
  17#include <linux/nodemask.h>
  18#include <linux/cpu.h>
  19#include <linux/device.h>
  20#include <linux/pm_runtime.h>
  21#include <linux/swap.h>
  22#include <linux/slab.h>
  23
  24static struct bus_type node_subsys = {
  25	.name = "node",
  26	.dev_name = "node",
  27};
  28
  29
  30static ssize_t node_read_cpumap(struct device *dev, bool list, char *buf)
  31{
  32	ssize_t n;
  33	cpumask_var_t mask;
  34	struct node *node_dev = to_node(dev);
  35
  36	/* 2008/04/07: buf currently PAGE_SIZE, need 9 chars per 32 bits. */
  37	BUILD_BUG_ON((NR_CPUS/32 * 9) > (PAGE_SIZE-1));
  38
  39	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
  40		return 0;
  41
  42	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
  43	n = cpumap_print_to_pagebuf(list, buf, mask);
  44	free_cpumask_var(mask);
  45
  46	return n;
  47}
  48
  49static inline ssize_t node_read_cpumask(struct device *dev,
  50				struct device_attribute *attr, char *buf)
  51{
  52	return node_read_cpumap(dev, false, buf);
  53}
  54static inline ssize_t node_read_cpulist(struct device *dev,
  55				struct device_attribute *attr, char *buf)
  56{
  57	return node_read_cpumap(dev, true, buf);
  58}
  59
  60static DEVICE_ATTR(cpumap,  S_IRUGO, node_read_cpumask, NULL);
  61static DEVICE_ATTR(cpulist, S_IRUGO, node_read_cpulist, NULL);
  62
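/*
 * Illustrative example (not part of the kernel source): for a node whose
 * online CPUs are 0-3 and 8-11 on a 16-CPU system, the two attributes
 * defined above read back as a hex bitmask and a range list:
 *
 *	$ cat /sys/devices/system/node/node0/cpumap
 *	0f0f
 *	$ cat /sys/devices/system/node/node0/cpulist
 *	0-3,8-11
 */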
  63/**
  64 * struct node_access_nodes - Access class device to hold user visible
  65 * 			      relationships to other nodes.
  66 * @dev:	Device for this memory access class
  67 * @list_node:	List element in the node's access list
  68 * @access:	The access class rank
  69 * @hmem_attrs: Heterogeneous memory performance attributes
  70 */
  71struct node_access_nodes {
  72	struct device		dev;
  73	struct list_head	list_node;
  74	unsigned		access;
  75#ifdef CONFIG_HMEM_REPORTING
  76	struct node_hmem_attrs	hmem_attrs;
  77#endif
  78};
  79#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
  80
  81static struct attribute *node_init_access_node_attrs[] = {
  82	NULL,
  83};
  84
  85static struct attribute *node_targ_access_node_attrs[] = {
  86	NULL,
  87};
  88
  89static const struct attribute_group initiators = {
  90	.name	= "initiators",
  91	.attrs	= node_init_access_node_attrs,
  92};
  93
  94static const struct attribute_group targets = {
  95	.name	= "targets",
  96	.attrs	= node_targ_access_node_attrs,
  97};
  98
  99static const struct attribute_group *node_access_node_groups[] = {
 100	&initiators,
 101	&targets,
 102	NULL,
 103};
 104
 105static void node_remove_accesses(struct node *node)
 106{
 107	struct node_access_nodes *c, *cnext;
 108
 109	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
 110		list_del(&c->list_node);
 111		device_unregister(&c->dev);
 112	}
 113}
 114
 115static void node_access_release(struct device *dev)
 116{
 117	kfree(to_access_nodes(dev));
 118}
 119
 120static struct node_access_nodes *node_init_node_access(struct node *node,
 121						       unsigned access)
 122{
 123	struct node_access_nodes *access_node;
 124	struct device *dev;
 125
 126	list_for_each_entry(access_node, &node->access_list, list_node)
 127		if (access_node->access == access)
 128			return access_node;
 129
 130	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
 131	if (!access_node)
 132		return NULL;
 133
 134	access_node->access = access;
 135	dev = &access_node->dev;
 136	dev->parent = &node->dev;
 137	dev->release = node_access_release;
 138	dev->groups = node_access_node_groups;
 139	if (dev_set_name(dev, "access%u", access))
 140		goto free;
 141
 142	if (device_register(dev))
 143		goto free_name;
 144
 145	pm_runtime_no_callbacks(dev);
 146	list_add_tail(&access_node->list_node, &node->access_list);
 147	return access_node;
 148free_name:
 149	kfree_const(dev->kobj.name);
 150free:
 151	kfree(access_node);
 152	return NULL;
 153}
 154
 155#ifdef CONFIG_HMEM_REPORTING
 156#define ACCESS_ATTR(name) 						   \
 157static ssize_t name##_show(struct device *dev,				   \
 158			   struct device_attribute *attr,		   \
 159			   char *buf)					   \
 160{									   \
 161	return sprintf(buf, "%u\n", to_access_nodes(dev)->hmem_attrs.name); \
 162}									   \
 163static DEVICE_ATTR_RO(name);
 164
 165ACCESS_ATTR(read_bandwidth)
 166ACCESS_ATTR(read_latency)
 167ACCESS_ATTR(write_bandwidth)
 168ACCESS_ATTR(write_latency)
 169
 170static struct attribute *access_attrs[] = {
 171	&dev_attr_read_bandwidth.attr,
 172	&dev_attr_read_latency.attr,
 173	&dev_attr_write_bandwidth.attr,
 174	&dev_attr_write_latency.attr,
 175	NULL,
 176};
 177
 178/**
 179 * node_set_perf_attrs - Set the performance values for given access class
 180 * @nid: Node identifier to be set
 181 * @hmem_attrs: Heterogeneous memory performance attributes
 182 * @access: The access class the for the given attributes
 183 */
 184void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
 185			 unsigned access)
 186{
 187	struct node_access_nodes *c;
 188	struct node *node;
 189	int i;
 190
 191	if (WARN_ON_ONCE(!node_online(nid)))
 192		return;
 193
 194	node = node_devices[nid];
 195	c = node_init_node_access(node, access);
 196	if (!c)
 197		return;
 198
 199	c->hmem_attrs = *hmem_attrs;
 200	for (i = 0; access_attrs[i] != NULL; i++) {
 201		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
 202					    "initiators")) {
 203			pr_info("failed to add performance attribute to node %d\n",
 204				nid);
 205			break;
 206		}
 207	}
 208}
 209
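/*
 * Illustrative sketch (not part of the kernel source): a firmware parser
 * such as the ACPI HMAT driver publishes first-class ("access0")
 * performance data for an online target node roughly like this; the
 * numbers below are made up:
 *
 *	struct node_hmem_attrs attrs = {
 *		.read_bandwidth  = 19200,	/- read MB/s  -/
 *		.write_bandwidth = 19200,	/- write MB/s -/
 *		.read_latency    = 150,		/- read ns    -/
 *		.write_latency   = 150,		/- write ns   -/
 *	};
 *	node_set_perf_attrs(nid, &attrs, 0);
 *
 * The values then appear under
 * /sys/devices/system/node/nodeN/access0/initiators/.
 */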
 210/**
 211 * struct node_cache_info - Internal tracking for memory node caches
 212 * @dev:	Device representing the cache level
 213 * @node:	List element for tracking in the node
 214 * @cache_attrs: Attributes for this cache level
 215 */
 216struct node_cache_info {
 217	struct device dev;
 218	struct list_head node;
 219	struct node_cache_attrs cache_attrs;
 220};
 221#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
 222
 223#define CACHE_ATTR(name, fmt) 						\
 224static ssize_t name##_show(struct device *dev,				\
 225			   struct device_attribute *attr,		\
 226			   char *buf)					\
 227{									\
 228	return sprintf(buf, fmt "\n", to_cache_info(dev)->cache_attrs.name);\
 229}									\
 230DEVICE_ATTR_RO(name);
 231
 232CACHE_ATTR(size, "%llu")
 233CACHE_ATTR(line_size, "%u")
 234CACHE_ATTR(indexing, "%u")
 235CACHE_ATTR(write_policy, "%u")
 236
 237static struct attribute *cache_attrs[] = {
 238	&dev_attr_indexing.attr,
 239	&dev_attr_size.attr,
 240	&dev_attr_line_size.attr,
 241	&dev_attr_write_policy.attr,
 242	NULL,
 243};
 244ATTRIBUTE_GROUPS(cache);
 245
 246static void node_cache_release(struct device *dev)
 247{
 248	kfree(dev);
 249}
 250
 251static void node_cacheinfo_release(struct device *dev)
 252{
 253	struct node_cache_info *info = to_cache_info(dev);
 254	kfree(info);
 255}
 256
 257static void node_init_cache_dev(struct node *node)
 258{
 259	struct device *dev;
 260
 261	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 262	if (!dev)
 263		return;
 264
 265	dev->parent = &node->dev;
 266	dev->release = node_cache_release;
 267	if (dev_set_name(dev, "memory_side_cache"))
 268		goto free_dev;
 269
 270	if (device_register(dev))
 271		goto free_name;
 272
 273	pm_runtime_no_callbacks(dev);
 274	node->cache_dev = dev;
 275	return;
 276free_name:
 277	kfree_const(dev->kobj.name);
 278free_dev:
 279	kfree(dev);
 280}
 281
 282/**
 283 * node_add_cache() - add cache attribute to a memory node
 284 * @nid: Node identifier that has new cache attributes
 285 * @cache_attrs: Attributes for the cache being added
 286 */
 287void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
 288{
 289	struct node_cache_info *info;
 290	struct device *dev;
 291	struct node *node;
 292
 293	if (!node_online(nid) || !node_devices[nid])
 294		return;
 295
 296	node = node_devices[nid];
 297	list_for_each_entry(info, &node->cache_attrs, node) {
 298		if (info->cache_attrs.level == cache_attrs->level) {
 299			dev_warn(&node->dev,
 300				"attempt to add duplicate cache level:%d\n",
 301				cache_attrs->level);
 302			return;
 303		}
 304	}
 305
 306	if (!node->cache_dev)
 307		node_init_cache_dev(node);
 308	if (!node->cache_dev)
 309		return;
 310
 311	info = kzalloc(sizeof(*info), GFP_KERNEL);
 312	if (!info)
 313		return;
 314
 315	dev = &info->dev;
 316	dev->parent = node->cache_dev;
 317	dev->release = node_cacheinfo_release;
 318	dev->groups = cache_groups;
 319	if (dev_set_name(dev, "index%d", cache_attrs->level))
 320		goto free_cache;
 321
 322	info->cache_attrs = *cache_attrs;
 323	if (device_register(dev)) {
 324		dev_warn(&node->dev, "failed to add cache level:%d\n",
 325			 cache_attrs->level);
 326		goto free_name;
 327	}
 328	pm_runtime_no_callbacks(dev);
 329	list_add_tail(&info->node, &node->cache_attrs);
 330	return;
 331free_name:
 332	kfree_const(dev->kobj.name);
 333free_cache:
 334	kfree(info);
 335}
 336
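/*
 * Illustrative sketch (not part of the kernel source): for a node fronted
 * by a 1 GiB direct-mapped, write-back memory-side cache with 64-byte
 * lines, a caller (the ACPI HMAT parser in practice) would do roughly:
 *
 *	struct node_cache_attrs cache = {
 *		.size         = SZ_1G,
 *		.line_size    = 64,
 *		.indexing     = NODE_CACHE_DIRECT_MAP,
 *		.write_policy = NODE_CACHE_WRITE_BACK,
 *		.level        = 1,
 *	};
 *	node_add_cache(nid, &cache);
 *
 * which creates /sys/devices/system/node/nodeN/memory_side_cache/index1/
 * with the size/line_size/indexing/write_policy attributes defined above.
 */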
 337static void node_remove_caches(struct node *node)
 338{
 339	struct node_cache_info *info, *next;
 340
 341	if (!node->cache_dev)
 342		return;
 343
 344	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
 345		list_del(&info->node);
 346		device_unregister(&info->dev);
 347	}
 348	device_unregister(node->cache_dev);
 349}
 350
 351static void node_init_caches(unsigned int nid)
 352{
 353	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
 354}
 355#else
 356static void node_init_caches(unsigned int nid) { }
 357static void node_remove_caches(struct node *node) { }
 358#endif
 359
 360#define K(x) ((x) << (PAGE_SHIFT - 10))
 361static ssize_t node_read_meminfo(struct device *dev,
 362			struct device_attribute *attr, char *buf)
 363{
 364	int n;
 365	int nid = dev->id;
 366	struct pglist_data *pgdat = NODE_DATA(nid);
 367	struct sysinfo i;
 368	unsigned long sreclaimable, sunreclaimable;
 369
 370	si_meminfo_node(&i, nid);
 371	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
 372	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
 373	n = sprintf(buf,
 374		       "Node %d MemTotal:       %8lu kB\n"
 375		       "Node %d MemFree:        %8lu kB\n"
 376		       "Node %d MemUsed:        %8lu kB\n"
 377		       "Node %d Active:         %8lu kB\n"
 378		       "Node %d Inactive:       %8lu kB\n"
 379		       "Node %d Active(anon):   %8lu kB\n"
 380		       "Node %d Inactive(anon): %8lu kB\n"
 381		       "Node %d Active(file):   %8lu kB\n"
 382		       "Node %d Inactive(file): %8lu kB\n"
 383		       "Node %d Unevictable:    %8lu kB\n"
 384		       "Node %d Mlocked:        %8lu kB\n",
 385		       nid, K(i.totalram),
 386		       nid, K(i.freeram),
 387		       nid, K(i.totalram - i.freeram),
 388		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
 389				node_page_state(pgdat, NR_ACTIVE_FILE)),
 390		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
 391				node_page_state(pgdat, NR_INACTIVE_FILE)),
 392		       nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
 393		       nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
 394		       nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
 395		       nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
 396		       nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
 397		       nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
 398
 399#ifdef CONFIG_HIGHMEM
 400	n += sprintf(buf + n,
 401		       "Node %d HighTotal:      %8lu kB\n"
 402		       "Node %d HighFree:       %8lu kB\n"
 403		       "Node %d LowTotal:       %8lu kB\n"
 404		       "Node %d LowFree:        %8lu kB\n",
 405		       nid, K(i.totalhigh),
 406		       nid, K(i.freehigh),
 407		       nid, K(i.totalram - i.totalhigh),
 408		       nid, K(i.freeram - i.freehigh));
 409#endif
 410	n += sprintf(buf + n,
 411		       "Node %d Dirty:          %8lu kB\n"
 412		       "Node %d Writeback:      %8lu kB\n"
 413		       "Node %d FilePages:      %8lu kB\n"
 414		       "Node %d Mapped:         %8lu kB\n"
 415		       "Node %d AnonPages:      %8lu kB\n"
 416		       "Node %d Shmem:          %8lu kB\n"
 417		       "Node %d KernelStack:    %8lu kB\n"
 418#ifdef CONFIG_SHADOW_CALL_STACK
 419		       "Node %d ShadowCallStack:%8lu kB\n"
 420#endif
 421		       "Node %d PageTables:     %8lu kB\n"
 422		       "Node %d NFS_Unstable:   %8lu kB\n"
 423		       "Node %d Bounce:         %8lu kB\n"
 424		       "Node %d WritebackTmp:   %8lu kB\n"
 425		       "Node %d KReclaimable:   %8lu kB\n"
 426		       "Node %d Slab:           %8lu kB\n"
 427		       "Node %d SReclaimable:   %8lu kB\n"
 428		       "Node %d SUnreclaim:     %8lu kB\n"
 429#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 430		       "Node %d AnonHugePages:  %8lu kB\n"
 431		       "Node %d ShmemHugePages: %8lu kB\n"
 432		       "Node %d ShmemPmdMapped: %8lu kB\n"
 433		       "Node %d FileHugePages: %8lu kB\n"
 434		       "Node %d FilePmdMapped: %8lu kB\n"
 435#endif
 436			,
 437		       nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
 438		       nid, K(node_page_state(pgdat, NR_WRITEBACK)),
 439		       nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
 440		       nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
 441		       nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
 442		       nid, K(i.sharedram),
 443		       nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
 444#ifdef CONFIG_SHADOW_CALL_STACK
 445		       nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
 446#endif
 447		       nid, K(sum_zone_node_page_state(nid, NR_PAGETABLE)),
 448		       nid, 0UL,
 449		       nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
 450		       nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
 451		       nid, K(sreclaimable +
 452			      node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
 453		       nid, K(sreclaimable + sunreclaimable),
 454		       nid, K(sreclaimable),
 455		       nid, K(sunreclaimable)
 456#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 457		       ,
 458		       nid, K(node_page_state(pgdat, NR_ANON_THPS) *
 459				       HPAGE_PMD_NR),
 460		       nid, K(node_page_state(pgdat, NR_SHMEM_THPS) *
 461				       HPAGE_PMD_NR),
 462		       nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED) *
 463				       HPAGE_PMD_NR),
 464		       nid, K(node_page_state(pgdat, NR_FILE_THPS) *
 465				       HPAGE_PMD_NR),
 466		       nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED) *
 467				       HPAGE_PMD_NR)
 468#endif
 469		       );
 470	n += hugetlb_report_node_meminfo(nid, buf + n);
 471	return n;
 472}
 473
 474#undef K
 475static DEVICE_ATTR(meminfo, S_IRUGO, node_read_meminfo, NULL);
 476
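/*
 * Example of the output format (values are illustrative):
 *
 *	$ head -3 /sys/devices/system/node/node0/meminfo
 *	Node 0 MemTotal:       16326656 kB
 *	Node 0 MemFree:         8123456 kB
 *	Node 0 MemUsed:         8203200 kB
 */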
 477static ssize_t node_read_numastat(struct device *dev,
 478				struct device_attribute *attr, char *buf)
 479{
 480	return sprintf(buf,
 481		       "numa_hit %lu\n"
 482		       "numa_miss %lu\n"
 483		       "numa_foreign %lu\n"
 484		       "interleave_hit %lu\n"
 485		       "local_node %lu\n"
 486		       "other_node %lu\n",
 487		       sum_zone_numa_state(dev->id, NUMA_HIT),
 488		       sum_zone_numa_state(dev->id, NUMA_MISS),
 489		       sum_zone_numa_state(dev->id, NUMA_FOREIGN),
 490		       sum_zone_numa_state(dev->id, NUMA_INTERLEAVE_HIT),
 491		       sum_zone_numa_state(dev->id, NUMA_LOCAL),
 492		       sum_zone_numa_state(dev->id, NUMA_OTHER));
 493}
 494static DEVICE_ATTR(numastat, S_IRUGO, node_read_numastat, NULL);
 495
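/*
 * Example output (illustrative counts; all six values are lifetime event
 * counters, not kB):
 *
 *	$ cat /sys/devices/system/node/node0/numastat
 *	numa_hit 512000
 *	numa_miss 0
 *	numa_foreign 0
 *	interleave_hit 1024
 *	local_node 511488
 *	other_node 512
 */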
 496static ssize_t node_read_vmstat(struct device *dev,
 497				struct device_attribute *attr, char *buf)
 498{
 499	int nid = dev->id;
 500	struct pglist_data *pgdat = NODE_DATA(nid);
 501	int i;
 502	int n = 0;
 503
 504	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
 505		n += sprintf(buf+n, "%s %lu\n", zone_stat_name(i),
 506			     sum_zone_node_page_state(nid, i));
 507
 508#ifdef CONFIG_NUMA
 509	for (i = 0; i < NR_VM_NUMA_STAT_ITEMS; i++)
 510		n += sprintf(buf+n, "%s %lu\n", numa_stat_name(i),
 511			     sum_zone_numa_state(nid, i));
 512#endif
 513
 514	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
 515		n += sprintf(buf+n, "%s %lu\n", node_stat_name(i),
 516			     node_page_state_pages(pgdat, i));
 517
 518	return n;
 519}
 520static DEVICE_ATTR(vmstat, S_IRUGO, node_read_vmstat, NULL);
 521
 522static ssize_t node_read_distance(struct device *dev,
 523			struct device_attribute *attr, char *buf)
 524{
 525	int nid = dev->id;
 526	int len = 0;
 527	int i;
 528
 529	/*
 530	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
 531	 * at the most (distance + space or newline).
 532	 */
 533	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
 534
 535	for_each_online_node(i)
 536		len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
 537
 538	len += sprintf(buf + len, "\n");
 539	return len;
 540}
 541static DEVICE_ATTR(distance, S_IRUGO, node_read_distance, NULL);
 542
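/*
 * Illustrative output for a two-socket machine read from node0; by ACPI
 * SLIT convention the local distance is 10 and one remote hop is
 * typically 20:
 *
 *	$ cat /sys/devices/system/node/node0/distance
 *	10 20
 */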
 543static struct attribute *node_dev_attrs[] = {
 544	&dev_attr_cpumap.attr,
 545	&dev_attr_cpulist.attr,
 546	&dev_attr_meminfo.attr,
 547	&dev_attr_numastat.attr,
 548	&dev_attr_distance.attr,
 549	&dev_attr_vmstat.attr,
 550	NULL
 551};
 552ATTRIBUTE_GROUPS(node_dev);
 553
 554#ifdef CONFIG_HUGETLBFS
 555/*
 556 * hugetlbfs per node attributes registration interface:
 557 * When/if hugetlb[fs] subsystem initializes [sometime after this module],
 558 * it will register its per node attributes for all online nodes with
 559 * memory.  It will also call register_hugetlbfs_with_node(), below, to
 560 * register its attribute registration functions with this node driver.
 561 * Once these hooks have been initialized, the node driver will call into
 562 * the hugetlb module to [un]register attributes for hot-plugged nodes.
 563 */
 564static node_registration_func_t __hugetlb_register_node;
 565static node_registration_func_t __hugetlb_unregister_node;
 566
 567static inline bool hugetlb_register_node(struct node *node)
 568{
 569	if (__hugetlb_register_node &&
 570			node_state(node->dev.id, N_MEMORY)) {
 571		__hugetlb_register_node(node);
 572		return true;
 573	}
 574	return false;
 575}
 576
 577static inline void hugetlb_unregister_node(struct node *node)
 578{
 579	if (__hugetlb_unregister_node)
 580		__hugetlb_unregister_node(node);
 581}
 582
 583void register_hugetlbfs_with_node(node_registration_func_t doregister,
 584				  node_registration_func_t unregister)
 585{
 586	__hugetlb_register_node   = doregister;
 587	__hugetlb_unregister_node = unregister;
 588}
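/*
 * Illustrative registration from the hugetlb side (simplified from
 * mm/hugetlb.c in this kernel version): at init time the hugetlb module
 * hands its per-node attribute handlers to this driver:
 *
 *	register_hugetlbfs_with_node(hugetlb_register_node,
 *				     hugetlb_unregister_node);
 */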
 589#else
 590static inline void hugetlb_register_node(struct node *node) {}
 591
 592static inline void hugetlb_unregister_node(struct node *node) {}
 593#endif
 594
 595static void node_device_release(struct device *dev)
 596{
 597	struct node *node = to_node(dev);
 598
 599#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HUGETLBFS)
 600	/*
 601	 * We schedule the work only when a memory section is
 602	 * onlined/offlined on this node. When we come here,
 603	 * all the memory on this node has been offlined,
 604	 * so we won't enqueue new work to this work.
 605	 *
 606	 * The work is using node->node_work, so we should
 607	 * flush work before freeing the memory.
 608	 */
 609	flush_work(&node->node_work);
 610#endif
 611	kfree(node);
 612}
 613
 614/*
 615 * register_node - Setup a sysfs device for a node.
 616 * @num - Node number to use when creating the device.
 617 *
 618 * Initialize and register the node device.
 619 */
 620static int register_node(struct node *node, int num)
 621{
 622	int error;
 623
 624	node->dev.id = num;
 625	node->dev.bus = &node_subsys;
 626	node->dev.release = node_device_release;
 627	node->dev.groups = node_dev_groups;
 628	error = device_register(&node->dev);
 629
 630	if (error)
 631		put_device(&node->dev);
 632	else {
 633		hugetlb_register_node(node);
 634
 635		compaction_register_node(node);
 636	}
 637	return error;
 638}
 639
 640/**
 641 * unregister_node - unregister a node device
 642 * @node: node going away
 643 *
 644 * Unregisters a node device @node.  All the devices on the node must be
 645 * unregistered before calling this function.
 646 */
 647void unregister_node(struct node *node)
 648{
 649	hugetlb_unregister_node(node);		/* no-op, if memoryless node */
 650	node_remove_accesses(node);
 651	node_remove_caches(node);
 652	device_unregister(&node->dev);
 653}
 654
 655struct node *node_devices[MAX_NUMNODES];
 656
 657/*
 658 * register cpu under node
 659 */
 660int register_cpu_under_node(unsigned int cpu, unsigned int nid)
 661{
 662	int ret;
 663	struct device *obj;
 664
 665	if (!node_online(nid))
 666		return 0;
 667
 668	obj = get_cpu_device(cpu);
 669	if (!obj)
 670		return 0;
 671
 672	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
 673				&obj->kobj,
 674				kobject_name(&obj->kobj));
 675	if (ret)
 676		return ret;
 677
 678	return sysfs_create_link(&obj->kobj,
 679				 &node_devices[nid]->dev.kobj,
 680				 kobject_name(&node_devices[nid]->dev.kobj));
 681}
 682
 683/**
 684 * register_memory_node_under_compute_node - link memory node to its compute
 685 *					     node for a given access class.
 686 * @mem_nid:	Memory node number
 687 * @cpu_nid:	CPU node number
 688 * @access:	Access class to register
 689 *
 690 * Description:
 691 * 	For use with platforms that may have separate memory and compute nodes.
 692 * 	This function will export node relationships linking which memory
 693 * 	initiator nodes can access memory targets at a given ranked access
 694 * 	class.
 695 */
 696int register_memory_node_under_compute_node(unsigned int mem_nid,
 697					    unsigned int cpu_nid,
 698					    unsigned access)
 699{
 700	struct node *init_node, *targ_node;
 701	struct node_access_nodes *initiator, *target;
 702	int ret;
 703
 704	if (!node_online(cpu_nid) || !node_online(mem_nid))
 705		return -ENODEV;
 706
 707	init_node = node_devices[cpu_nid];
 708	targ_node = node_devices[mem_nid];
 709	initiator = node_init_node_access(init_node, access);
 710	target = node_init_node_access(targ_node, access);
 711	if (!initiator || !target)
 712		return -ENOMEM;
 713
 714	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
 715				      &targ_node->dev.kobj,
 716				      dev_name(&targ_node->dev));
 717	if (ret)
 718		return ret;
 719
 720	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
 721				      &init_node->dev.kobj,
 722				      dev_name(&init_node->dev));
 723	if (ret)
 724		goto err;
 725
 726	return 0;
 727 err:
 728	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
 729				     dev_name(&targ_node->dev));
 730	return ret;
 731}
 732
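/*
 * Illustrative result (not part of the kernel source): linking memory
 * node 1 to compute node 0 for access class 0,
 *
 *	register_memory_node_under_compute_node(1, 0, 0);
 *
 * creates the symmetric pair of symlinks
 *
 *	/sys/devices/system/node/node0/access0/targets/node1
 *	/sys/devices/system/node/node1/access0/initiators/node0
 */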
 733int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
 734{
 735	struct device *obj;
 736
 737	if (!node_online(nid))
 738		return 0;
 739
 740	obj = get_cpu_device(cpu);
 741	if (!obj)
 742		return 0;
 743
 744	sysfs_remove_link(&node_devices[nid]->dev.kobj,
 745			  kobject_name(&obj->kobj));
 746	sysfs_remove_link(&obj->kobj,
 747			  kobject_name(&node_devices[nid]->dev.kobj));
 748
 749	return 0;
 750}
 751
 752#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
 753static int __ref get_nid_for_pfn(unsigned long pfn)
 754{
 755	if (!pfn_valid_within(pfn))
 756		return -1;
 757#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
 758	if (system_state < SYSTEM_RUNNING)
 759		return early_pfn_to_nid(pfn);
 760#endif
 761	return pfn_to_nid(pfn);
 762}
 763
 764static int do_register_memory_block_under_node(int nid,
 765					       struct memory_block *mem_blk)
 766{
 767	int ret;
 768
 769	/*
 770	 * If this memory block spans multiple nodes, we only indicate
 771	 * the last processed node.
 772	 */
 773	mem_blk->nid = nid;
 774
 775	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
 776				       &mem_blk->dev.kobj,
 777				       kobject_name(&mem_blk->dev.kobj));
 778	if (ret)
 779		return ret;
 780
 781	return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
 782				&node_devices[nid]->dev.kobj,
 783				kobject_name(&node_devices[nid]->dev.kobj));
 784}
 785
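/*
 * Illustrative result: for a memory block device named "memory42" that
 * spans node 1, the two links created above are
 *
 *	/sys/devices/system/node/node1/memory42
 *	/sys/devices/system/memory/memory42/node1
 */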
 786/* register memory section under specified node if it spans that node */
 787static int register_mem_block_under_node_early(struct memory_block *mem_blk,
 788					       void *arg)
 789{
 790	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
 791	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
 792	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
 793	int nid = *(int *)arg;
 794	unsigned long pfn;
 795
 796	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
 797		int page_nid;
 798
 799		/*
 800		 * memory block could have several absent sections from start.
 801		 * skip pfn range from absent section
 802		 */
 803		if (!pfn_in_present_section(pfn)) {
 804			pfn = round_down(pfn + PAGES_PER_SECTION,
 805					 PAGES_PER_SECTION) - 1;
 806			continue;
 807		}
 808
 809		/*
 810		 * We need to check if page belongs to nid only at the boot
 811		 * case because node's ranges can be interleaved.
 812		 */
 813		page_nid = get_nid_for_pfn(pfn);
 814		if (page_nid < 0)
 815			continue;
 816		if (page_nid != nid)
 817			continue;
 818
 819		return do_register_memory_block_under_node(nid, mem_blk);
 820	}
 821	/* mem section does not span the specified node */
 822	return 0;
 823}
 824
 825/*
 826 * During hotplug we know that all pages in the memory block belong to the same
 827 * node.
 828 */
 829static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
 830						 void *arg)
 831{
 832	int nid = *(int *)arg;
 833
 834	return do_register_memory_block_under_node(nid, mem_blk);
 835}
 836
 837/*
 838 * Unregister a memory block device under the node it spans. Memory blocks
 839 * with multiple nodes cannot be offlined and therefore also never be removed.
 840 */
 841void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
 842{
 843	if (mem_blk->nid == NUMA_NO_NODE)
 844		return;
 845
 846	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
 847			  kobject_name(&mem_blk->dev.kobj));
 848	sysfs_remove_link(&mem_blk->dev.kobj,
 849			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
 850}
 851
 852int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
 853		      enum meminit_context context)
 854{
 855	walk_memory_blocks_func_t func;
 856
 857	if (context == MEMINIT_HOTPLUG)
 858		func = register_mem_block_under_node_hotplug;
 859	else
 860		func = register_mem_block_under_node_early;
 861
 862	return walk_memory_blocks(PFN_PHYS(start_pfn),
 863				  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
 864				  func);
 865}
 866
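/*
 * Illustrative caller sketch (simplified; the real call sites are in
 * mm/memory_hotplug.c and in the register_one_node() wrapper): after
 * adding a new range, the hotplug path links its memory blocks under the
 * node:
 *
 *	ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size),
 *				MEMINIT_HOTPLUG);
 */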
 867#ifdef CONFIG_HUGETLBFS
 868/*
 869 * Handle per node hstate attribute [un]registration on transitions
 870 * to/from memoryless state.
 871 */
 872static void node_hugetlb_work(struct work_struct *work)
 873{
 874	struct node *node = container_of(work, struct node, node_work);
 875
 876	/*
 877	 * We only get here when a node transitions to/from memoryless state.
 878	 * We can detect which transition occurred by examining whether the
 879 * node has memory now.  hugetlb_register_node() already checks this
 880	 * so we try to register the attributes.  If that fails, then the
 881	 * node has transitioned to memoryless, try to unregister the
 882	 * attributes.
 883	 */
 884	if (!hugetlb_register_node(node))
 885		hugetlb_unregister_node(node);
 886}
 887
 888static void init_node_hugetlb_work(int nid)
 889{
 890	INIT_WORK(&node_devices[nid]->node_work, node_hugetlb_work);
 891}
 892
 893static int node_memory_callback(struct notifier_block *self,
 894				unsigned long action, void *arg)
 895{
 896	struct memory_notify *mnb = arg;
 897	int nid = mnb->status_change_nid;
 898
 899	switch (action) {
 900	case MEM_ONLINE:
 901	case MEM_OFFLINE:
 902		/*
 903		 * offload per node hstate [un]registration to a work thread
 904		 * when transitioning to/from memoryless state.
 905		 */
 906		if (nid != NUMA_NO_NODE)
 907			schedule_work(&node_devices[nid]->node_work);
 908		break;
 909
 910	case MEM_GOING_ONLINE:
 911	case MEM_GOING_OFFLINE:
 912	case MEM_CANCEL_ONLINE:
 913	case MEM_CANCEL_OFFLINE:
 914	default:
 915		break;
 916	}
 917
 918	return NOTIFY_OK;
 919}
 920#endif	/* CONFIG_HUGETLBFS */
 921#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 922
 923#if !defined(CONFIG_MEMORY_HOTPLUG_SPARSE) || \
 924    !defined(CONFIG_HUGETLBFS)
 925static inline int node_memory_callback(struct notifier_block *self,
 926				unsigned long action, void *arg)
 927{
 928	return NOTIFY_OK;
 929}
 930
 931static void init_node_hugetlb_work(int nid) { }
 932
 933#endif
 934
 935int __register_one_node(int nid)
 936{
 937	int error;
 938	int cpu;
 939
 940	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
 941	if (!node_devices[nid])
 942		return -ENOMEM;
 943
 944	error = register_node(node_devices[nid], nid);
 945
 946	/* link cpu under this node */
 947	for_each_present_cpu(cpu) {
 948		if (cpu_to_node(cpu) == nid)
 949			register_cpu_under_node(cpu, nid);
 950	}
 951
 952	INIT_LIST_HEAD(&node_devices[nid]->access_list);
 953	/* initialize work queue for memory hot plug */
 954	init_node_hugetlb_work(nid);
 955	node_init_caches(nid);
 956
 957	return error;
 958}
 959
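/*
 * Usage note: callers normally go through the register_one_node() wrapper
 * (include/linux/node.h), which for an online node also links the node's
 * memory sections via link_mem_sections(..., MEMINIT_EARLY).
 */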
 960void unregister_one_node(int nid)
 961{
 962	if (!node_devices[nid])
 963		return;
 964
 965	unregister_node(node_devices[nid]);
 966	node_devices[nid] = NULL;
 967}
 968
 969/*
 970 * node states attributes
 971 */
 972
 973static ssize_t print_nodes_state(enum node_states state, char *buf)
 974{
 975	int n;
 976
 977	n = scnprintf(buf, PAGE_SIZE - 1, "%*pbl",
 978		      nodemask_pr_args(&node_states[state]));
 979	buf[n++] = '\n';
 980	buf[n] = '\0';
 981	return n;
 982}
 983
 984struct node_attr {
 985	struct device_attribute attr;
 986	enum node_states state;
 987};
 988
 989static ssize_t show_node_state(struct device *dev,
 990			       struct device_attribute *attr, char *buf)
 991{
 992	struct node_attr *na = container_of(attr, struct node_attr, attr);
 993	return print_nodes_state(na->state, buf);
 994}
 995
 996#define _NODE_ATTR(name, state) \
 997	{ __ATTR(name, 0444, show_node_state, NULL), state }
 998
 999static struct node_attr node_state_attr[] = {
1000	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
1001	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
1002	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
1003#ifdef CONFIG_HIGHMEM
1004	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
1005#endif
1006	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
1007	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
1008};
1009
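/*
 * Example (illustrative): each entry above surfaces one node_states[]
 * mask as a file at the subsystem root, printed as a node range list:
 *
 *	$ cat /sys/devices/system/node/possible
 *	0-1
 *	$ cat /sys/devices/system/node/has_cpu
 *	0-1
 */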
1010static struct attribute *node_state_attrs[] = {
1011	&node_state_attr[N_POSSIBLE].attr.attr,
1012	&node_state_attr[N_ONLINE].attr.attr,
1013	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
1014#ifdef CONFIG_HIGHMEM
1015	&node_state_attr[N_HIGH_MEMORY].attr.attr,
1016#endif
1017	&node_state_attr[N_MEMORY].attr.attr,
1018	&node_state_attr[N_CPU].attr.attr,
1019	NULL
1020};
1021
1022static struct attribute_group memory_root_attr_group = {
1023	.attrs = node_state_attrs,
1024};
1025
1026static const struct attribute_group *cpu_root_attr_groups[] = {
1027	&memory_root_attr_group,
1028	NULL,
1029};
1030
1031#define NODE_CALLBACK_PRI	2	/* lower than SLAB */
1032static int __init register_node_type(void)
1033{
1034	int ret;
1035
1036	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
1037	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
1038
1039	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
1040	if (!ret) {
1041		static struct notifier_block node_memory_callback_nb = {
1042			.notifier_call = node_memory_callback,
1043			.priority = NODE_CALLBACK_PRI,
1044		};
1045		register_hotmemory_notifier(&node_memory_callback_nb);
1046	}
1047
1048	/*
1049	 * Note:  we're not going to unregister the node class if we fail
1050	 * to register the node state class attribute files.
1051	 */
1052	return ret;
1053}
1054postcore_initcall(register_node_type);
v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Basic Node interface support
  4 */
  5
  6#include <linux/module.h>
  7#include <linux/init.h>
  8#include <linux/mm.h>
  9#include <linux/memory.h>
 10#include <linux/vmstat.h>
 11#include <linux/notifier.h>
 12#include <linux/node.h>
 13#include <linux/hugetlb.h>
 14#include <linux/compaction.h>
 15#include <linux/cpumask.h>
 16#include <linux/topology.h>
 17#include <linux/nodemask.h>
 18#include <linux/cpu.h>
 19#include <linux/device.h>
 20#include <linux/pm_runtime.h>
 21#include <linux/swap.h>
 22#include <linux/slab.h>
 23#include <linux/hugetlb.h>
 24
 25static struct bus_type node_subsys = {
 26	.name = "node",
 27	.dev_name = "node",
 28};
 29
 30static inline ssize_t cpumap_read(struct file *file, struct kobject *kobj,
 31				  struct bin_attribute *attr, char *buf,
 32				  loff_t off, size_t count)
 33{
 34	struct device *dev = kobj_to_dev(kobj);
 35	struct node *node_dev = to_node(dev);
 36	cpumask_var_t mask;
 37	ssize_t n;
 38
 39	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 40		return 0;
 41
 42	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
 43	n = cpumap_print_bitmask_to_buf(buf, mask, off, count);
 44	free_cpumask_var(mask);
 45
 46	return n;
 47}
 48
 49static BIN_ATTR_RO(cpumap, CPUMAP_FILE_MAX_BYTES);
 50
 51static inline ssize_t cpulist_read(struct file *file, struct kobject *kobj,
 52				   struct bin_attribute *attr, char *buf,
 53				   loff_t off, size_t count)
 54{
 55	struct device *dev = kobj_to_dev(kobj);
 56	struct node *node_dev = to_node(dev);
 57	cpumask_var_t mask;
 58	ssize_t n;
 59
 60	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 61		return 0;
 62
 63	cpumask_and(mask, cpumask_of_node(node_dev->dev.id), cpu_online_mask);
 64	n = cpumap_print_list_to_buf(buf, mask, off, count);
 65	free_cpumask_var(mask);
 66
 67	return n;
 68}
 69
 70static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
 71
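/*
 * Note on the binary-attribute form used here (vs. the v5.9 text
 * attributes above): a large cpumask can exceed the PAGE_SIZE limit of a
 * plain sysfs show() buffer, so cpumap_print_bitmask_to_buf() and
 * cpumap_print_list_to_buf() honour the (off, count) window and userspace
 * may read the file in chunks, e.g. (illustrative fragment):
 *
 *	char chunk[256];
 *	off_t off = 0;
 *	ssize_t n = pread(fd, chunk, sizeof(chunk), off);
 */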
 72/**
 73 * struct node_access_nodes - Access class device to hold user visible
 74 * 			      relationships to other nodes.
 75 * @dev:	Device for this memory access class
 76 * @list_node:	List element in the node's access list
 77 * @access:	The access class rank
 78 * @hmem_attrs: Heterogeneous memory performance attributes
 79 */
 80struct node_access_nodes {
 81	struct device		dev;
 82	struct list_head	list_node;
 83	unsigned int		access;
 84#ifdef CONFIG_HMEM_REPORTING
 85	struct node_hmem_attrs	hmem_attrs;
 86#endif
 87};
 88#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
 89
 90static struct attribute *node_init_access_node_attrs[] = {
 91	NULL,
 92};
 93
 94static struct attribute *node_targ_access_node_attrs[] = {
 95	NULL,
 96};
 97
 98static const struct attribute_group initiators = {
 99	.name	= "initiators",
100	.attrs	= node_init_access_node_attrs,
101};
102
103static const struct attribute_group targets = {
104	.name	= "targets",
105	.attrs	= node_targ_access_node_attrs,
106};
107
108static const struct attribute_group *node_access_node_groups[] = {
109	&initiators,
110	&targets,
111	NULL,
112};
113
114static void node_remove_accesses(struct node *node)
115{
116	struct node_access_nodes *c, *cnext;
117
118	list_for_each_entry_safe(c, cnext, &node->access_list, list_node) {
119		list_del(&c->list_node);
120		device_unregister(&c->dev);
121	}
122}
123
124static void node_access_release(struct device *dev)
125{
126	kfree(to_access_nodes(dev));
127}
128
129static struct node_access_nodes *node_init_node_access(struct node *node,
130						       unsigned int access)
131{
132	struct node_access_nodes *access_node;
133	struct device *dev;
134
135	list_for_each_entry(access_node, &node->access_list, list_node)
136		if (access_node->access == access)
137			return access_node;
138
139	access_node = kzalloc(sizeof(*access_node), GFP_KERNEL);
140	if (!access_node)
141		return NULL;
142
143	access_node->access = access;
144	dev = &access_node->dev;
145	dev->parent = &node->dev;
146	dev->release = node_access_release;
147	dev->groups = node_access_node_groups;
148	if (dev_set_name(dev, "access%u", access))
149		goto free;
150
151	if (device_register(dev))
152		goto free_name;
153
154	pm_runtime_no_callbacks(dev);
155	list_add_tail(&access_node->list_node, &node->access_list);
156	return access_node;
157free_name:
158	kfree_const(dev->kobj.name);
159free:
160	kfree(access_node);
161	return NULL;
162}
163
164#ifdef CONFIG_HMEM_REPORTING
165#define ACCESS_ATTR(name)						\
166static ssize_t name##_show(struct device *dev,				\
167			   struct device_attribute *attr,		\
168			   char *buf)					\
169{									\
170	return sysfs_emit(buf, "%u\n",					\
171			  to_access_nodes(dev)->hmem_attrs.name);	\
172}									\
173static DEVICE_ATTR_RO(name)
174
175ACCESS_ATTR(read_bandwidth);
176ACCESS_ATTR(read_latency);
177ACCESS_ATTR(write_bandwidth);
178ACCESS_ATTR(write_latency);
179
180static struct attribute *access_attrs[] = {
181	&dev_attr_read_bandwidth.attr,
182	&dev_attr_read_latency.attr,
183	&dev_attr_write_bandwidth.attr,
184	&dev_attr_write_latency.attr,
185	NULL,
186};
187
188/**
189 * node_set_perf_attrs - Set the performance values for given access class
190 * @nid: Node identifier to be set
191 * @hmem_attrs: Heterogeneous memory performance attributes
192 * @access: The access class for the given attributes
193 */
194void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
195			 unsigned int access)
196{
197	struct node_access_nodes *c;
198	struct node *node;
199	int i;
200
201	if (WARN_ON_ONCE(!node_online(nid)))
202		return;
203
204	node = node_devices[nid];
205	c = node_init_node_access(node, access);
206	if (!c)
207		return;
208
209	c->hmem_attrs = *hmem_attrs;
210	for (i = 0; access_attrs[i] != NULL; i++) {
211		if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
212					    "initiators")) {
213			pr_info("failed to add performance attribute to node %d\n",
214				nid);
215			break;
216		}
217	}
218}
219
220/**
221 * struct node_cache_info - Internal tracking for memory node caches
222 * @dev:	Device representing the cache level
223 * @node:	List element for tracking in the node
224 * @cache_attrs: Attributes for this cache level
225 */
226struct node_cache_info {
227	struct device dev;
228	struct list_head node;
229	struct node_cache_attrs cache_attrs;
230};
231#define to_cache_info(device) container_of(device, struct node_cache_info, dev)
232
233#define CACHE_ATTR(name, fmt) 						\
234static ssize_t name##_show(struct device *dev,				\
235			   struct device_attribute *attr,		\
236			   char *buf)					\
237{									\
238	return sysfs_emit(buf, fmt "\n",				\
239			  to_cache_info(dev)->cache_attrs.name);	\
240}									\
241static DEVICE_ATTR_RO(name);
242
243CACHE_ATTR(size, "%llu")
244CACHE_ATTR(line_size, "%u")
245CACHE_ATTR(indexing, "%u")
246CACHE_ATTR(write_policy, "%u")
247
248static struct attribute *cache_attrs[] = {
249	&dev_attr_indexing.attr,
250	&dev_attr_size.attr,
251	&dev_attr_line_size.attr,
252	&dev_attr_write_policy.attr,
253	NULL,
254};
255ATTRIBUTE_GROUPS(cache);
256
257static void node_cache_release(struct device *dev)
258{
259	kfree(dev);
260}
261
262static void node_cacheinfo_release(struct device *dev)
263{
264	struct node_cache_info *info = to_cache_info(dev);
265	kfree(info);
266}
267
268static void node_init_cache_dev(struct node *node)
269{
270	struct device *dev;
271
272	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
273	if (!dev)
274		return;
275
276	device_initialize(dev);
277	dev->parent = &node->dev;
278	dev->release = node_cache_release;
279	if (dev_set_name(dev, "memory_side_cache"))
280		goto put_device;
281
282	if (device_add(dev))
283		goto put_device;
284
285	pm_runtime_no_callbacks(dev);
286	node->cache_dev = dev;
287	return;
288put_device:
289	put_device(dev);
290}
291
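/*
 * Note (illustrative of the change from v5.9): once device_initialize()
 * has run, the struct device must be disposed of with put_device() so its
 * release callback does the freeing; this replaces the v5.9 pattern of
 * calling kfree()/kfree_const() directly on the error paths.
 */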
292/**
293 * node_add_cache() - add cache attribute to a memory node
294 * @nid: Node identifier that has new cache attributes
295 * @cache_attrs: Attributes for the cache being added
296 */
297void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs)
298{
299	struct node_cache_info *info;
300	struct device *dev;
301	struct node *node;
302
303	if (!node_online(nid) || !node_devices[nid])
304		return;
305
306	node = node_devices[nid];
307	list_for_each_entry(info, &node->cache_attrs, node) {
308		if (info->cache_attrs.level == cache_attrs->level) {
309			dev_warn(&node->dev,
310				"attempt to add duplicate cache level:%d\n",
311				cache_attrs->level);
312			return;
313		}
314	}
315
316	if (!node->cache_dev)
317		node_init_cache_dev(node);
318	if (!node->cache_dev)
319		return;
320
321	info = kzalloc(sizeof(*info), GFP_KERNEL);
322	if (!info)
323		return;
324
325	dev = &info->dev;
326	device_initialize(dev);
327	dev->parent = node->cache_dev;
328	dev->release = node_cacheinfo_release;
329	dev->groups = cache_groups;
330	if (dev_set_name(dev, "index%d", cache_attrs->level))
331		goto put_device;
332
333	info->cache_attrs = *cache_attrs;
334	if (device_add(dev)) {
335		dev_warn(&node->dev, "failed to add cache level:%d\n",
336			 cache_attrs->level);
337		goto put_device;
338	}
339	pm_runtime_no_callbacks(dev);
340	list_add_tail(&info->node, &node->cache_attrs);
341	return;
342put_device:
343	put_device(dev);
344}
345
346static void node_remove_caches(struct node *node)
347{
348	struct node_cache_info *info, *next;
349
350	if (!node->cache_dev)
351		return;
352
353	list_for_each_entry_safe(info, next, &node->cache_attrs, node) {
354		list_del(&info->node);
355		device_unregister(&info->dev);
356	}
357	device_unregister(node->cache_dev);
358}
359
360static void node_init_caches(unsigned int nid)
361{
362	INIT_LIST_HEAD(&node_devices[nid]->cache_attrs);
363}
364#else
365static void node_init_caches(unsigned int nid) { }
366static void node_remove_caches(struct node *node) { }
367#endif
368
369#define K(x) ((x) << (PAGE_SHIFT - 10))
370static ssize_t node_read_meminfo(struct device *dev,
371			struct device_attribute *attr, char *buf)
372{
373	int len = 0;
374	int nid = dev->id;
375	struct pglist_data *pgdat = NODE_DATA(nid);
376	struct sysinfo i;
377	unsigned long sreclaimable, sunreclaimable;
378	unsigned long swapcached = 0;
379
380	si_meminfo_node(&i, nid);
381	sreclaimable = node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B);
382	sunreclaimable = node_page_state_pages(pgdat, NR_SLAB_UNRECLAIMABLE_B);
383#ifdef CONFIG_SWAP
384	swapcached = node_page_state_pages(pgdat, NR_SWAPCACHE);
385#endif
386	len = sysfs_emit_at(buf, len,
387			    "Node %d MemTotal:       %8lu kB\n"
388			    "Node %d MemFree:        %8lu kB\n"
389			    "Node %d MemUsed:        %8lu kB\n"
390			    "Node %d SwapCached:     %8lu kB\n"
391			    "Node %d Active:         %8lu kB\n"
392			    "Node %d Inactive:       %8lu kB\n"
393			    "Node %d Active(anon):   %8lu kB\n"
394			    "Node %d Inactive(anon): %8lu kB\n"
395			    "Node %d Active(file):   %8lu kB\n"
396			    "Node %d Inactive(file): %8lu kB\n"
397			    "Node %d Unevictable:    %8lu kB\n"
398			    "Node %d Mlocked:        %8lu kB\n",
399			    nid, K(i.totalram),
400			    nid, K(i.freeram),
401			    nid, K(i.totalram - i.freeram),
402			    nid, K(swapcached),
403			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON) +
404				   node_page_state(pgdat, NR_ACTIVE_FILE)),
405			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON) +
406				   node_page_state(pgdat, NR_INACTIVE_FILE)),
407			    nid, K(node_page_state(pgdat, NR_ACTIVE_ANON)),
408			    nid, K(node_page_state(pgdat, NR_INACTIVE_ANON)),
409			    nid, K(node_page_state(pgdat, NR_ACTIVE_FILE)),
410			    nid, K(node_page_state(pgdat, NR_INACTIVE_FILE)),
411			    nid, K(node_page_state(pgdat, NR_UNEVICTABLE)),
412			    nid, K(sum_zone_node_page_state(nid, NR_MLOCK)));
413
414#ifdef CONFIG_HIGHMEM
415	len += sysfs_emit_at(buf, len,
416			     "Node %d HighTotal:      %8lu kB\n"
417			     "Node %d HighFree:       %8lu kB\n"
418			     "Node %d LowTotal:       %8lu kB\n"
419			     "Node %d LowFree:        %8lu kB\n",
420			     nid, K(i.totalhigh),
421			     nid, K(i.freehigh),
422			     nid, K(i.totalram - i.totalhigh),
423			     nid, K(i.freeram - i.freehigh));
424#endif
425	len += sysfs_emit_at(buf, len,
426			     "Node %d Dirty:          %8lu kB\n"
427			     "Node %d Writeback:      %8lu kB\n"
428			     "Node %d FilePages:      %8lu kB\n"
429			     "Node %d Mapped:         %8lu kB\n"
430			     "Node %d AnonPages:      %8lu kB\n"
431			     "Node %d Shmem:          %8lu kB\n"
432			     "Node %d KernelStack:    %8lu kB\n"
433#ifdef CONFIG_SHADOW_CALL_STACK
434			     "Node %d ShadowCallStack:%8lu kB\n"
435#endif
436			     "Node %d PageTables:     %8lu kB\n"
437			     "Node %d SecPageTables:  %8lu kB\n"
438			     "Node %d NFS_Unstable:   %8lu kB\n"
439			     "Node %d Bounce:         %8lu kB\n"
440			     "Node %d WritebackTmp:   %8lu kB\n"
441			     "Node %d KReclaimable:   %8lu kB\n"
442			     "Node %d Slab:           %8lu kB\n"
443			     "Node %d SReclaimable:   %8lu kB\n"
444			     "Node %d SUnreclaim:     %8lu kB\n"
445#ifdef CONFIG_TRANSPARENT_HUGEPAGE
446			     "Node %d AnonHugePages:  %8lu kB\n"
447			     "Node %d ShmemHugePages: %8lu kB\n"
448			     "Node %d ShmemPmdMapped: %8lu kB\n"
449			     "Node %d FileHugePages: %8lu kB\n"
450			     "Node %d FilePmdMapped: %8lu kB\n"
451#endif
452			     ,
453			     nid, K(node_page_state(pgdat, NR_FILE_DIRTY)),
454			     nid, K(node_page_state(pgdat, NR_WRITEBACK)),
455			     nid, K(node_page_state(pgdat, NR_FILE_PAGES)),
456			     nid, K(node_page_state(pgdat, NR_FILE_MAPPED)),
457			     nid, K(node_page_state(pgdat, NR_ANON_MAPPED)),
458			     nid, K(i.sharedram),
459			     nid, node_page_state(pgdat, NR_KERNEL_STACK_KB),
460#ifdef CONFIG_SHADOW_CALL_STACK
461			     nid, node_page_state(pgdat, NR_KERNEL_SCS_KB),
462#endif
463			     nid, K(node_page_state(pgdat, NR_PAGETABLE)),
464			     nid, K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)),
465			     nid, 0UL,
466			     nid, K(sum_zone_node_page_state(nid, NR_BOUNCE)),
467			     nid, K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
468			     nid, K(sreclaimable +
469				    node_page_state(pgdat, NR_KERNEL_MISC_RECLAIMABLE)),
470			     nid, K(sreclaimable + sunreclaimable),
471			     nid, K(sreclaimable),
472			     nid, K(sunreclaimable)
473#ifdef CONFIG_TRANSPARENT_HUGEPAGE
474			     ,
475			     nid, K(node_page_state(pgdat, NR_ANON_THPS)),
476			     nid, K(node_page_state(pgdat, NR_SHMEM_THPS)),
477			     nid, K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)),
478			     nid, K(node_page_state(pgdat, NR_FILE_THPS)),
479			     nid, K(node_page_state(pgdat, NR_FILE_PMDMAPPED))
480#endif
481			    );
482	len += hugetlb_report_node_meminfo(buf, len, nid);
483	return len;
484}
485
486#undef K
487static DEVICE_ATTR(meminfo, 0444, node_read_meminfo, NULL);
488
489static ssize_t node_read_numastat(struct device *dev,
490				  struct device_attribute *attr, char *buf)
491{
492	fold_vm_numa_events();
493	return sysfs_emit(buf,
494			  "numa_hit %lu\n"
495			  "numa_miss %lu\n"
496			  "numa_foreign %lu\n"
497			  "interleave_hit %lu\n"
498			  "local_node %lu\n"
499			  "other_node %lu\n",
500			  sum_zone_numa_event_state(dev->id, NUMA_HIT),
501			  sum_zone_numa_event_state(dev->id, NUMA_MISS),
502			  sum_zone_numa_event_state(dev->id, NUMA_FOREIGN),
503			  sum_zone_numa_event_state(dev->id, NUMA_INTERLEAVE_HIT),
504			  sum_zone_numa_event_state(dev->id, NUMA_LOCAL),
505			  sum_zone_numa_event_state(dev->id, NUMA_OTHER));
506}
507static DEVICE_ATTR(numastat, 0444, node_read_numastat, NULL);
508
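/*
 * Note: unlike the v5.9 version, this one folds the per-CPU NUMA event
 * counters (fold_vm_numa_events()) before summing, since NUMA stats are
 * now cheap per-CPU events that are only made globally visible on demand.
 */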
509static ssize_t node_read_vmstat(struct device *dev,
510				struct device_attribute *attr, char *buf)
511{
512	int nid = dev->id;
513	struct pglist_data *pgdat = NODE_DATA(nid);
514	int i;
515	int len = 0;
516
517	for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
518		len += sysfs_emit_at(buf, len, "%s %lu\n",
519				     zone_stat_name(i),
520				     sum_zone_node_page_state(nid, i));
521
522#ifdef CONFIG_NUMA
523	fold_vm_numa_events();
524	for (i = 0; i < NR_VM_NUMA_EVENT_ITEMS; i++)
525		len += sysfs_emit_at(buf, len, "%s %lu\n",
526				     numa_stat_name(i),
527				     sum_zone_numa_event_state(nid, i));
528
529#endif
530	for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++) {
531		unsigned long pages = node_page_state_pages(pgdat, i);
532
533		if (vmstat_item_print_in_thp(i))
534			pages /= HPAGE_PMD_NR;
535		len += sysfs_emit_at(buf, len, "%s %lu\n", node_stat_name(i),
536				     pages);
537	}
538
539	return len;
540}
541static DEVICE_ATTR(vmstat, 0444, node_read_vmstat, NULL);
542
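/*
 * Illustrative output fragment: node stats for which
 * vmstat_item_print_in_thp() is true are kept in base pages internally
 * but printed in huge-page units, e.g. with 2 MiB PMDs:
 *
 *	$ grep nr_anon /sys/devices/system/node/node0/vmstat
 *	nr_anon_pages 123456
 *	nr_anon_transparent_hugepages 12
 */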
543static ssize_t node_read_distance(struct device *dev,
544				  struct device_attribute *attr, char *buf)
545{
546	int nid = dev->id;
547	int len = 0;
548	int i;
549
550	/*
551	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
552	 * at the most (distance + space or newline).
553	 */
554	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
555
556	for_each_online_node(i) {
557		len += sysfs_emit_at(buf, len, "%s%d",
558				     i ? " " : "", node_distance(nid, i));
559	}
560
561	len += sysfs_emit_at(buf, len, "\n");
562	return len;
563}
564static DEVICE_ATTR(distance, 0444, node_read_distance, NULL);
565
566static struct attribute *node_dev_attrs[] = {
567	&dev_attr_meminfo.attr,
568	&dev_attr_numastat.attr,
569	&dev_attr_distance.attr,
570	&dev_attr_vmstat.attr,
571	NULL
572};
573
574static struct bin_attribute *node_dev_bin_attrs[] = {
575	&bin_attr_cpumap,
576	&bin_attr_cpulist,
577	NULL
578};
579
580static const struct attribute_group node_dev_group = {
581	.attrs = node_dev_attrs,
582	.bin_attrs = node_dev_bin_attrs
583};
584
585static const struct attribute_group *node_dev_groups[] = {
586	&node_dev_group,
587#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
588	&arch_node_dev_group,
589#endif
590	NULL
591};
592
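/*
 * Note (illustrative of the hook above): CONFIG_HAVE_ARCH_NODE_DEV_GROUP
 * lets an architecture append its own per-node attributes; in v6.2 the
 * user is x86, whose arch_node_dev_group (arch/x86/kernel/cpu/sgx/main.c)
 * exposes the node's SGX EPC size.
 */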
593static void node_device_release(struct device *dev)
594{
595	kfree(to_node(dev));
596}
597
598/*
599 * register_node - Setup a sysfs device for a node.
600 * @num - Node number to use when creating the device.
601 *
602 * Initialize and register the node device.
603 */
604static int register_node(struct node *node, int num)
605{
606	int error;
607
608	node->dev.id = num;
609	node->dev.bus = &node_subsys;
610	node->dev.release = node_device_release;
611	node->dev.groups = node_dev_groups;
612	error = device_register(&node->dev);
613
614	if (error) {
615		put_device(&node->dev);
616	} else {
617		hugetlb_register_node(node);
618		compaction_register_node(node);
619	}
620
621	return error;
622}
623
624/**
625 * unregister_node - unregister a node device
626 * @node: node going away
627 *
628 * Unregisters a node device @node.  All the devices on the node must be
629 * unregistered before calling this function.
630 */
631void unregister_node(struct node *node)
632{
633	hugetlb_unregister_node(node);
634	compaction_unregister_node(node);
635	node_remove_accesses(node);
636	node_remove_caches(node);
637	device_unregister(&node->dev);
638}
639
640struct node *node_devices[MAX_NUMNODES];
641
642/*
643 * register cpu under node
644 */
645int register_cpu_under_node(unsigned int cpu, unsigned int nid)
646{
647	int ret;
648	struct device *obj;
649
650	if (!node_online(nid))
651		return 0;
652
653	obj = get_cpu_device(cpu);
654	if (!obj)
655		return 0;
656
657	ret = sysfs_create_link(&node_devices[nid]->dev.kobj,
658				&obj->kobj,
659				kobject_name(&obj->kobj));
660	if (ret)
661		return ret;
662
663	return sysfs_create_link(&obj->kobj,
664				 &node_devices[nid]->dev.kobj,
665				 kobject_name(&node_devices[nid]->dev.kobj));
666}
667
668/**
669 * register_memory_node_under_compute_node - link memory node to its compute
670 *					     node for a given access class.
671 * @mem_nid:	Memory node number
672 * @cpu_nid:	CPU node number
673 * @access:	Access class to register
674 *
675 * Description:
676 * 	For use with platforms that may have separate memory and compute nodes.
677 * 	This function will export node relationships linking which memory
678 * 	initiator nodes can access memory targets at a given ranked access
679 * 	class.
680 */
681int register_memory_node_under_compute_node(unsigned int mem_nid,
682					    unsigned int cpu_nid,
683					    unsigned int access)
684{
685	struct node *init_node, *targ_node;
686	struct node_access_nodes *initiator, *target;
687	int ret;
688
689	if (!node_online(cpu_nid) || !node_online(mem_nid))
690		return -ENODEV;
691
692	init_node = node_devices[cpu_nid];
693	targ_node = node_devices[mem_nid];
694	initiator = node_init_node_access(init_node, access);
695	target = node_init_node_access(targ_node, access);
696	if (!initiator || !target)
697		return -ENOMEM;
698
699	ret = sysfs_add_link_to_group(&initiator->dev.kobj, "targets",
700				      &targ_node->dev.kobj,
701				      dev_name(&targ_node->dev));
702	if (ret)
703		return ret;
704
705	ret = sysfs_add_link_to_group(&target->dev.kobj, "initiators",
706				      &init_node->dev.kobj,
707				      dev_name(&init_node->dev));
708	if (ret)
709		goto err;
710
711	return 0;
712 err:
713	sysfs_remove_link_from_group(&initiator->dev.kobj, "targets",
714				     dev_name(&targ_node->dev));
715	return ret;
716}
717
718int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
719{
720	struct device *obj;
721
722	if (!node_online(nid))
723		return 0;
724
725	obj = get_cpu_device(cpu);
726	if (!obj)
727		return 0;
728
729	sysfs_remove_link(&node_devices[nid]->dev.kobj,
730			  kobject_name(&obj->kobj));
731	sysfs_remove_link(&obj->kobj,
732			  kobject_name(&node_devices[nid]->dev.kobj));
733
734	return 0;
735}
736
737#ifdef CONFIG_MEMORY_HOTPLUG
738static int __ref get_nid_for_pfn(unsigned long pfn)
739{
740#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
741	if (system_state < SYSTEM_RUNNING)
742		return early_pfn_to_nid(pfn);
743#endif
744	return pfn_to_nid(pfn);
745}
746
747static void do_register_memory_block_under_node(int nid,
748						struct memory_block *mem_blk,
749						enum meminit_context context)
750{
751	int ret;
752
753	memory_block_add_nid(mem_blk, nid, context);
754
755	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
756				       &mem_blk->dev.kobj,
757				       kobject_name(&mem_blk->dev.kobj));
758	if (ret && ret != -EEXIST)
759		dev_err_ratelimited(&node_devices[nid]->dev,
760				    "can't create link to %s in sysfs (%d)\n",
761				    kobject_name(&mem_blk->dev.kobj), ret);
762
763	ret = sysfs_create_link_nowarn(&mem_blk->dev.kobj,
764				&node_devices[nid]->dev.kobj,
765				kobject_name(&node_devices[nid]->dev.kobj));
766	if (ret && ret != -EEXIST)
767		dev_err_ratelimited(&mem_blk->dev,
768				    "can't create link to %s in sysfs (%d)\n",
769				    kobject_name(&node_devices[nid]->dev.kobj),
770				    ret);
771}
772
773/* register memory section under specified node if it spans that node */
774static int register_mem_block_under_node_early(struct memory_block *mem_blk,
775					       void *arg)
776{
777	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
778	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
779	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
780	int nid = *(int *)arg;
781	unsigned long pfn;
782
783	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
784		int page_nid;
785
786		/*
787		 * memory block could have several absent sections from start.
788		 * skip pfn range from absent section
789		 */
790		if (!pfn_in_present_section(pfn)) {
791			pfn = round_down(pfn + PAGES_PER_SECTION,
792					 PAGES_PER_SECTION) - 1;
793			continue;
794		}
795
796		/*
797		 * We need to check if page belongs to nid only at the boot
798		 * case because node's ranges can be interleaved.
799		 */
800		page_nid = get_nid_for_pfn(pfn);
801		if (page_nid < 0)
802			continue;
803		if (page_nid != nid)
804			continue;
805
806		do_register_memory_block_under_node(nid, mem_blk, MEMINIT_EARLY);
807		return 0;
808	}
809	/* mem section does not span the specified node */
810	return 0;
811}
812
813/*
814 * During hotplug we know that all pages in the memory block belong to the same
815 * node.
816 */
817static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
818						 void *arg)
819{
820	int nid = *(int *)arg;
821
822	do_register_memory_block_under_node(nid, mem_blk, MEMINIT_HOTPLUG);
823	return 0;
824}
825
826/*
827 * Unregister a memory block device under the node it spans. Memory blocks
828 * with multiple nodes cannot be offlined and therefore also never be removed.
829 */
830void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
831{
832	if (mem_blk->nid == NUMA_NO_NODE)
833		return;
834
835	sysfs_remove_link(&node_devices[mem_blk->nid]->dev.kobj,
836			  kobject_name(&mem_blk->dev.kobj));
837	sysfs_remove_link(&mem_blk->dev.kobj,
838			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
839}
840
841void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
842				       unsigned long end_pfn,
843				       enum meminit_context context)
844{
845	walk_memory_blocks_func_t func;
846
847	if (context == MEMINIT_HOTPLUG)
848		func = register_mem_block_under_node_hotplug;
849	else
850		func = register_mem_block_under_node_early;
851
852	walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn),
853			   (void *)&nid, func);
854	return;
855}
856#endif /* CONFIG_MEMORY_HOTPLUG */
857
858int __register_one_node(int nid)
859{
860	int error;
861	int cpu;
862
863	node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL);
864	if (!node_devices[nid])
865		return -ENOMEM;
866
867	error = register_node(node_devices[nid], nid);
868
869	/* link cpu under this node */
870	for_each_present_cpu(cpu) {
871		if (cpu_to_node(cpu) == nid)
872			register_cpu_under_node(cpu, nid);
873	}
874
875	INIT_LIST_HEAD(&node_devices[nid]->access_list);
876	node_init_caches(nid);
877
878	return error;
879}
880
881void unregister_one_node(int nid)
882{
883	if (!node_devices[nid])
884		return;
885
886	unregister_node(node_devices[nid]);
887	node_devices[nid] = NULL;
888}
889
890/*
891 * node states attributes
892 */
893
894struct node_attr {
895	struct device_attribute attr;
896	enum node_states state;
897};
898
899static ssize_t show_node_state(struct device *dev,
900			       struct device_attribute *attr, char *buf)
901{
902	struct node_attr *na = container_of(attr, struct node_attr, attr);
903
904	return sysfs_emit(buf, "%*pbl\n",
905			  nodemask_pr_args(&node_states[na->state]));
906}
907
908#define _NODE_ATTR(name, state) \
909	{ __ATTR(name, 0444, show_node_state, NULL), state }
910
911static struct node_attr node_state_attr[] = {
912	[N_POSSIBLE] = _NODE_ATTR(possible, N_POSSIBLE),
913	[N_ONLINE] = _NODE_ATTR(online, N_ONLINE),
914	[N_NORMAL_MEMORY] = _NODE_ATTR(has_normal_memory, N_NORMAL_MEMORY),
915#ifdef CONFIG_HIGHMEM
916	[N_HIGH_MEMORY] = _NODE_ATTR(has_high_memory, N_HIGH_MEMORY),
917#endif
918	[N_MEMORY] = _NODE_ATTR(has_memory, N_MEMORY),
919	[N_CPU] = _NODE_ATTR(has_cpu, N_CPU),
920	[N_GENERIC_INITIATOR] = _NODE_ATTR(has_generic_initiator,
921					   N_GENERIC_INITIATOR),
922};
923
924static struct attribute *node_state_attrs[] = {
925	&node_state_attr[N_POSSIBLE].attr.attr,
926	&node_state_attr[N_ONLINE].attr.attr,
927	&node_state_attr[N_NORMAL_MEMORY].attr.attr,
928#ifdef CONFIG_HIGHMEM
929	&node_state_attr[N_HIGH_MEMORY].attr.attr,
930#endif
931	&node_state_attr[N_MEMORY].attr.attr,
932	&node_state_attr[N_CPU].attr.attr,
933	&node_state_attr[N_GENERIC_INITIATOR].attr.attr,
934	NULL
935};
936
937static const struct attribute_group memory_root_attr_group = {
938	.attrs = node_state_attrs,
939};
940
941static const struct attribute_group *cpu_root_attr_groups[] = {
942	&memory_root_attr_group,
943	NULL,
944};
945
946void __init node_dev_init(void)
947{
948	int ret, i;
949
950	BUILD_BUG_ON(ARRAY_SIZE(node_state_attr) != NR_NODE_STATES);
951	BUILD_BUG_ON(ARRAY_SIZE(node_state_attrs)-1 != NR_NODE_STATES);
952
953	ret = subsys_system_register(&node_subsys, cpu_root_attr_groups);
954	if (ret)
955		panic("%s() failed to register subsystem: %d\n", __func__, ret);
956
957	/*
958	 * Create all node devices, which will properly link the node
959	 * to applicable memory block devices and already created cpu devices.
960	 */
961	for_each_online_node(i) {
962		ret = register_one_node(i);
963		if (ret)
964			panic("%s() failed to add node: %d\n", __func__, ret);
965	}
966}
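/*
 * Context note: node_dev_init() is called once from driver_init()
 * (drivers/base/init.c) during early boot, after the cpu and memory
 * subsystems have been set up, so the node devices created here can link
 * against already-registered cpu and memory block devices.
 */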