// SPDX-License-Identifier: GPL-2.0
/*
 * NUMA support for s390
 *
 * NUMA emulation (aka fake NUMA) distributes the available memory to nodes
 * without using real topology information about the physical memory of the
 * machine.
 *
 * It distributes the available CPUs to nodes while respecting the original
 * machine topology information. This is done by trying to avoid separating
 * CPUs which reside on the same book or even on the same MC.
 *
 * Because the current Linux scheduler code requires a stable CPU to node
 * mapping, cores are pinned to nodes when the first CPU thread is set online.
 *
 * Copyright IBM Corp. 2015
 */
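
/*
 * Illustrative example: with the kernel parameters "emu_nodes=4" and
 * "emu_size=512M" (both parsed at the end of this file), four emulated
 * nodes are created. Memory is striped across them in 512 MB chunks via
 * emu_pfn_to_nid(), while cores are spread over the nodes such that
 * cores sharing an MC, book or drawer stay together where possible.
 */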

#define KMSG_COMPONENT "numa_emu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/cpumask.h>
#include <linux/memblock.h>
#include <linux/node.h>
#include <linux/memory.h>
#include <linux/slab.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include "numa_mode.h"
#include "toptree.h"

/* Distances between the different system components */
#define DIST_EMPTY	0
#define DIST_CORE	1
#define DIST_MC		2
#define DIST_BOOK	3
#define DIST_DRAWER	4
#define DIST_MAX	5

/* Node distance reported to common code */
#define EMU_NODE_DIST	10

/* Node ID for free (not yet pinned) cores */
#define NODE_ID_FREE	-1

/* Different levels of toptree */
enum toptree_level {CORE, MC, BOOK, DRAWER, NODE, TOPOLOGY};

/* The two toptree IDs */
enum {TOPTREE_ID_PHYS, TOPTREE_ID_NUMA};

/* Number of NUMA nodes */
static int emu_nodes = 1;
/* NUMA stripe size */
static unsigned long emu_size;

/*
 * Node to core pinning information updates are protected by
 * "sched_domains_mutex".
 */
static struct {
	s32 to_node_id[CONFIG_NR_CPUS];	/* Pinned core to node mapping */
	int total;			/* Total number of pinned cores */
	int per_node_target;		/* Cores per node without extra cores */
	int per_node[MAX_NUMNODES];	/* Number of cores pinned to node */
} *emu_cores;

/*
 * Pin a core to a node
 */
static void pin_core_to_node(int core_id, int node_id)
{
	if (emu_cores->to_node_id[core_id] == NODE_ID_FREE) {
		emu_cores->per_node[node_id]++;
		emu_cores->to_node_id[core_id] = node_id;
		emu_cores->total++;
	} else {
		WARN_ON(emu_cores->to_node_id[core_id] != node_id);
	}
}

/*
 * Number of pinned cores of a node
 */
static int cores_pinned(struct toptree *node)
{
	return emu_cores->per_node[node->id];
}

/*
 * ID of the node where the core is pinned (or NODE_ID_FREE)
 */
static int core_pinned_to_node_id(struct toptree *core)
{
	return emu_cores->to_node_id[core->id];
}

/*
 * Number of cores in the tree that are not yet pinned
 */
static int cores_free(struct toptree *tree)
{
	struct toptree *core;
	int count = 0;

	toptree_for_each(core, tree, CORE) {
		if (core_pinned_to_node_id(core) == NODE_ID_FREE)
			count++;
	}
	return count;
}

/*
 * Return node of core. The physical toptree built in toptree_from_topology()
 * has the fixed hierarchy CORE -> MC -> BOOK -> DRAWER -> NODE, so the node
 * is reached by walking up four parent pointers.
 */
static struct toptree *core_node(struct toptree *core)
{
	return core->parent->parent->parent->parent;
}

/*
 * Return drawer of core
 */
static struct toptree *core_drawer(struct toptree *core)
{
	return core->parent->parent->parent;
}

/*
 * Return book of core
 */
static struct toptree *core_book(struct toptree *core)
{
	return core->parent->parent;
}

/*
 * Return mc of core
 */
static struct toptree *core_mc(struct toptree *core)
{
	return core->parent;
}

/*
 * Distance between two cores
 */
static int dist_core_to_core(struct toptree *core1, struct toptree *core2)
{
	if (core_drawer(core1)->id != core_drawer(core2)->id)
		return DIST_DRAWER;
	if (core_book(core1)->id != core_book(core2)->id)
		return DIST_BOOK;
	if (core_mc(core1)->id != core_mc(core2)->id)
		return DIST_MC;
	/* Same core or sibling on same MC */
	return DIST_CORE;
}
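
/*
 * For example, two cores that sit in the same drawer but on different
 * books have distance DIST_BOOK; only cores on the same MC (or the same
 * core) reach the minimum distance DIST_CORE.
 */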

/*
 * Distance of a node to a core
 */
static int dist_node_to_core(struct toptree *node, struct toptree *core)
{
	struct toptree *core_node;
	int dist_min = DIST_MAX;

	toptree_for_each(core_node, node, CORE)
		dist_min = min(dist_min, dist_core_to_core(core_node, core));
	return dist_min == DIST_MAX ? DIST_EMPTY : dist_min;
}

/*
 * toptree_unify() deletes empty nodes, therefore recreate them afterwards
 */
static void toptree_unify_tree(struct toptree *tree)
{
	int nid;

	toptree_unify(tree);
	for (nid = 0; nid < emu_nodes; nid++)
		toptree_get_child(tree, nid);
}

/*
 * Find the best/nearest node for a given core and ensure that no node
 * gets more than "emu_cores->per_node_target + extra" cores.
 */
static struct toptree *node_for_core(struct toptree *numa, struct toptree *core,
				     int extra)
{
	struct toptree *node, *node_best = NULL;
	int dist_cur, dist_best, cores_target;

	cores_target = emu_cores->per_node_target + extra;
	dist_best = DIST_MAX;
	node_best = NULL;
	toptree_for_each(node, numa, NODE) {
		/* Already pinned cores must use their nodes */
		if (core_pinned_to_node_id(core) == node->id) {
			node_best = node;
			break;
		}
		/* Skip nodes that already have enough cores */
		if (cores_pinned(node) >= cores_target)
			continue;
		dist_cur = dist_node_to_core(node, core);
		if (dist_cur < dist_best) {
			dist_best = dist_cur;
			node_best = node;
		}
	}
	return node_best;
}

/*
 * Find the best node for each core with respect to "extra" core count
 */
static void toptree_to_numa_single(struct toptree *numa, struct toptree *phys,
				   int extra)
{
	struct toptree *node, *core, *tmp;

	toptree_for_each_safe(core, tmp, phys, CORE) {
		node = node_for_core(numa, core, extra);
		if (!node)
			return;
		toptree_move(core, node);
		pin_core_to_node(core->id, node->id);
	}
}

/*
 * Move structures of given level to specified NUMA node
 */
static void move_level_to_numa_node(struct toptree *node, struct toptree *phys,
				    enum toptree_level level, bool perfect)
{
	int cores_free, cores_target = emu_cores->per_node_target;
	struct toptree *cur, *tmp;

	toptree_for_each_safe(cur, tmp, phys, level) {
		cores_free = cores_target - toptree_count(node, CORE);
		if (perfect) {
			if (cores_free == toptree_count(cur, CORE))
				toptree_move(cur, node);
		} else {
			if (cores_free >= toptree_count(cur, CORE))
				toptree_move(cur, node);
		}
	}
}

/*
 * Move structures of a given level to NUMA nodes. If "perfect" is specified,
 * move only perfectly fitting structures. Otherwise also move structures
 * that are smaller than needed.
 */
static void move_level_to_numa(struct toptree *numa, struct toptree *phys,
			       enum toptree_level level, bool perfect)
{
	struct toptree *node;

	toptree_for_each(node, numa, NODE)
		move_level_to_numa_node(node, phys, level, perfect);
}

/*
 * For the first run try to move the big structures
 */
static void toptree_to_numa_first(struct toptree *numa, struct toptree *phys)
{
	struct toptree *core;

	/* Always try to move perfectly fitting structures first */
	move_level_to_numa(numa, phys, DRAWER, true);
	move_level_to_numa(numa, phys, DRAWER, false);
	move_level_to_numa(numa, phys, BOOK, true);
	move_level_to_numa(numa, phys, BOOK, false);
	move_level_to_numa(numa, phys, MC, true);
	move_level_to_numa(numa, phys, MC, false);
	/* Now pin all the moved cores */
	toptree_for_each(core, numa, CORE)
		pin_core_to_node(core->id, core_node(core)->id);
}
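
/*
 * Example: with per_node_target = 4, a drawer containing exactly four
 * cores is a perfect fit and is moved to a node as a whole in the first
 * pass; a drawer with two cores is only moved in the second (non-perfect)
 * pass, provided the node still has room for it.
 */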

/*
 * Allocate new topology and create required nodes
 */
static struct toptree *toptree_new(int id, int nodes)
{
	struct toptree *tree;
	int nid;

	tree = toptree_alloc(TOPOLOGY, id);
	if (!tree)
		goto fail;
	for (nid = 0; nid < nodes; nid++) {
		if (!toptree_get_child(tree, nid))
			goto fail;
	}
	return tree;
fail:
	panic("NUMA emulation could not allocate topology");
}

/*
 * Allocate and initialize core to node mapping
 */
static void __ref create_core_to_node_map(void)
{
	int i;

	emu_cores = memblock_alloc(sizeof(*emu_cores), 8);
	if (!emu_cores)
		panic("%s: Failed to allocate %zu bytes align=0x%x\n",
		      __func__, sizeof(*emu_cores), 8);
	for (i = 0; i < ARRAY_SIZE(emu_cores->to_node_id); i++)
		emu_cores->to_node_id[i] = NODE_ID_FREE;
}

/*
 * Move cores from physical topology into NUMA target topology
 * and try to keep as much of the physical topology as possible.
 */
static struct toptree *toptree_to_numa(struct toptree *phys)
{
	static int first = 1;
	struct toptree *numa;
	int cores_total;

	cores_total = emu_cores->total + cores_free(phys);
	emu_cores->per_node_target = cores_total / emu_nodes;
	numa = toptree_new(TOPTREE_ID_NUMA, emu_nodes);
	if (first) {
		toptree_to_numa_first(numa, phys);
		first = 0;
	}
	toptree_to_numa_single(numa, phys, 0);
	toptree_to_numa_single(numa, phys, 1);
	toptree_unify_tree(numa);

	WARN_ON(cpumask_weight(&phys->mask));
	return numa;
}
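
/*
 * Example: with 10 cores and emu_nodes = 4, per_node_target is 2. The
 * first toptree_to_numa_single() pass (extra = 0) pins at most two cores
 * per node; the second pass (extra = 1) then places the two remaining
 * cores on their nearest nodes, allowing up to three cores there.
 */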

/*
 * Create a toptree out of the physical topology that we got from the hypervisor
 */
static struct toptree *toptree_from_topology(void)
{
	struct toptree *phys, *node, *drawer, *book, *mc, *core;
	struct cpu_topology_s390 *top;
	int cpu;

	phys = toptree_new(TOPTREE_ID_PHYS, 1);

	for_each_cpu(cpu, &cpus_with_topology) {
		top = &cpu_topology[cpu];
		node = toptree_get_child(phys, 0);
		drawer = toptree_get_child(node, top->drawer_id);
		book = toptree_get_child(drawer, top->book_id);
		mc = toptree_get_child(book, top->socket_id);
		core = toptree_get_child(mc, smp_get_base_cpu(cpu));
		if (!drawer || !book || !mc || !core)
			panic("NUMA emulation could not allocate memory");
		cpumask_set_cpu(cpu, &core->mask);
		toptree_update_mask(mc);
	}
	return phys;
}

/*
 * Add toptree core to topology and create correct CPU masks
 */
static void topology_add_core(struct toptree *core)
{
	struct cpu_topology_s390 *top;
	int cpu;

	for_each_cpu(cpu, &core->mask) {
		top = &cpu_topology[cpu];
		cpumask_copy(&top->thread_mask, &core->mask);
		cpumask_copy(&top->core_mask, &core_mc(core)->mask);
		cpumask_copy(&top->book_mask, &core_book(core)->mask);
		cpumask_copy(&top->drawer_mask, &core_drawer(core)->mask);
		cpumask_set_cpu(cpu, &node_to_cpumask_map[core_node(core)->id]);
		top->node_id = core_node(core)->id;
	}
}

/*
 * Apply toptree to topology and create CPU masks
 */
static void toptree_to_topology(struct toptree *numa)
{
	struct toptree *core;
	int i;

	/* Clear all node masks */
	for (i = 0; i < MAX_NUMNODES; i++)
		cpumask_clear(&node_to_cpumask_map[i]);

	/* Rebuild all masks */
	toptree_for_each(core, numa, CORE)
		topology_add_core(core);
}

/*
 * Show the node to core mapping
 */
static void print_node_to_core_map(void)
{
	int nid, cid;

	if (!numa_debug_enabled)
		return;
	printk(KERN_DEBUG "NUMA node to core mapping\n");
	for (nid = 0; nid < emu_nodes; nid++) {
		printk(KERN_DEBUG "  node %3d: ", nid);
		for (cid = 0; cid < ARRAY_SIZE(emu_cores->to_node_id); cid++) {
			if (emu_cores->to_node_id[cid] == nid)
				printk(KERN_CONT "%d ", cid);
		}
		printk(KERN_CONT "\n");
	}
}

/*
 * Pin all still free cores round-robin to the nodes (done only once)
 */
static void pin_all_possible_cpus(void)
{
	int core_id, node_id, cpu;
	static int initialized;

	if (initialized)
		return;
	print_node_to_core_map();
	node_id = 0;
	for_each_possible_cpu(cpu) {
		core_id = smp_get_base_cpu(cpu);
		if (emu_cores->to_node_id[core_id] != NODE_ID_FREE)
			continue;
		pin_core_to_node(core_id, node_id);
		cpu_topology[cpu].node_id = node_id;
		node_id = (node_id + 1) % emu_nodes;
	}
	print_node_to_core_map();
	initialized = 1;
}
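
/*
 * Example: cores of CPUs that are possible but were not found in
 * cpus_with_topology are still NODE_ID_FREE at this point; with
 * emu_nodes = 4 they are pinned to nodes 0, 1, 2, 3, 0, ... in turn.
 */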

/*
 * Transfer physical topology into a NUMA topology and modify CPU masks
 * according to the NUMA topology.
 *
 * Must be called with "sched_domains_mutex" lock held.
 */
static void emu_update_cpu_topology(void)
{
	struct toptree *phys, *numa;

	if (!emu_cores)
		create_core_to_node_map();
	phys = toptree_from_topology();
	numa = toptree_to_numa(phys);
	toptree_free(phys);
	toptree_to_topology(numa);
	toptree_free(numa);
	pin_all_possible_cpus();
}

/*
 * If emu_size is not set, use CONFIG_EMU_SIZE. Then round to minimum
 * alignment (needed for memory hotplug).
 */
static unsigned long emu_setup_size_adjust(unsigned long size)
{
	unsigned long size_new;

	size = size ? : CONFIG_EMU_SIZE;
	size_new = roundup(size, memory_block_size_bytes());
	if (size_new == size)
		return size;
	pr_warn("Increasing memory stripe size from %ld MB to %ld MB\n",
		size >> 20, size_new >> 20);
	return size_new;
}
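
/*
 * Example (the memory block size is machine dependent): assuming
 * memory_block_size_bytes() returns 256 MB, a requested emu_size of
 * 100 MB would be rounded up to 256 MB and a warning printed.
 */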

/*
 * If there is not enough memory for the specified nodes, reduce the node count.
 */
static int emu_setup_nodes_adjust(int nodes)
{
	int nodes_max;

	nodes_max = memblock.memory.total_size / emu_size;
	nodes_max = max(nodes_max, 1);
	if (nodes_max >= nodes)
		return nodes;
	pr_warn("Not enough memory for %d nodes, reducing node count\n", nodes);
	return nodes_max;
}
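
/*
 * Example: with 1 GB of memory and emu_size = 512 MB, at most two memory
 * stripes fit, so a request for four nodes is reduced to two.
 */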

/*
 * Early emu setup
 */
static void emu_setup(void)
{
	int nid;

	emu_size = emu_setup_size_adjust(emu_size);
	emu_nodes = emu_setup_nodes_adjust(emu_nodes);
	for (nid = 0; nid < emu_nodes; nid++)
		node_set(nid, node_possible_map);
	pr_info("Creating %d nodes with memory stripe size %ld MB\n",
		emu_nodes, emu_size >> 20);
}

/*
 * Return node id for given page number
 */
static int emu_pfn_to_nid(unsigned long pfn)
{
	return (pfn / (emu_size >> PAGE_SHIFT)) % emu_nodes;
}
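
/*
 * Worked example (assuming 4 KB pages, i.e. PAGE_SHIFT = 12): with
 * emu_size = 512 MB and emu_nodes = 4, one stripe spans 0x20000 pages.
 * The page at physical address 1 GB has pfn 0x40000, so it belongs to
 * stripe 2 and thus node 2: (0x40000 / 0x20000) % 4 = 2.
 */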

/*
 * Return stripe size
 */
static unsigned long emu_align(void)
{
	return emu_size;
}

/*
 * Return distance between two nodes
 */
static int emu_distance(int node1, int node2)
{
	return (node1 != node2) * EMU_NODE_DIST;
}
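
/*
 * Example: emu_distance(1, 1) is 0, emu_distance(0, 3) is EMU_NODE_DIST
 * (10); all distinct emulated nodes are equidistant.
 */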

/*
 * Define callbacks for generic s390 NUMA infrastructure
 */
const struct numa_mode numa_mode_emu = {
	.name = "emu",
	.setup = emu_setup,
	.update_cpu_topology = emu_update_cpu_topology,
	.__pfn_to_nid = emu_pfn_to_nid,
	.align = emu_align,
	.distance = emu_distance,
};

/*
 * Kernel parameter: emu_nodes=<n>
 */
static int __init early_parse_emu_nodes(char *p)
{
	int count;

	if (!p || kstrtoint(p, 0, &count) != 0 || count <= 0)
		return 0;
	emu_nodes = min(count, MAX_NUMNODES);
	return 0;
}
early_param("emu_nodes", early_parse_emu_nodes);
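
/*
 * Example: "emu_nodes=4" on the kernel command line requests four nodes;
 * values above MAX_NUMNODES are capped, and invalid or non-positive
 * values keep the default of one node.
 */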

/*
 * Kernel parameter: emu_size=[<n>[k|M|G|T]]
 */
static int __init early_parse_emu_size(char *p)
{
	if (p)
		emu_size = memparse(p, NULL);
	return 0;
}
early_param("emu_size", early_parse_emu_size);
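
/*
 * Example: "emu_size=512M" selects a 512 MB memory stripe; without this
 * parameter, CONFIG_EMU_SIZE is used (see emu_setup_size_adjust()).
 */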