v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2011
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

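/*
 * Return in *dst the socket/book/drawer mask that contains @cpu.
 * In HW mode the mask_info list built from the machine topology is
 * searched; in PACKAGE mode all present CPUs form one group, and in
 * SINGLE mode each CPU stands alone. The static cpumask keeps the
 * potentially large mask off the stack.
 */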
static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
{
	static cpumask_t mask;

	cpumask_clear(&mask);
	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
		goto out;
	cpumask_set_cpu(cpu, &mask);
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				cpumask_copy(&mask, &info->mask);
				break;
			}
			info = info->next;
		}
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		fallthrough;
	case TOPOLOGY_MODE_SINGLE:
		break;
	}
	cpumask_and(&mask, &mask, &cpu_setup_mask);
out:
	cpumask_copy(dst, &mask);
}

static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
{
	static cpumask_t mask;
	int i;

	cpumask_clear(&mask);
	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
		goto out;
	cpumask_set_cpu(cpu, &mask);
	if (topology_mode != TOPOLOGY_MODE_HW)
		goto out;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++) {
		if (cpumask_test_cpu(cpu + i, &cpu_setup_mask))
			cpumask_set_cpu(cpu + i, &mask);
	}
out:
	cpumask_copy(dst, &mask);
}

#define TOPOLOGY_CORE_BITS	64

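/*
 * A topology core entry describes cores as a bitmask relative to
 * tl_core->origin, with bit 0 denoting the highest core number, so
 * rcore recovers the absolute core id. The core id shifted by
 * smp_cpu_mt_shift yields the first logical CPU (thread) of the core.
 */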
static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

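/*
 * Walk the SYSIB 15.1.x topology list: nesting levels 3, 2 and 1
 * introduce drawer, book and socket containers, while level 0 entries
 * describe the cores within the current containers.
 */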
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

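/*
 * Issue the Perform Topology Function (PTF) instruction (opcode
 * 0xb9a2) with function code @fc and return its condition code.
 */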
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc)  : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

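/*
 * Rebuild all per-CPU topology masks, then count the booted cores of
 * each package once per core (via the first thread of each core).
 */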
void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo, *topo_package, *topo_sibling;
	int cpu, sibling, pkg_first, smt_first, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		cpu_thread_map(&topo->thread_mask, cpu);
		cpu_group_map(&topo->core_mask, &socket_info, cpu);
		cpu_group_map(&topo->book_mask, &book_info, cpu);
		cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
		topo->booted_cores = 0;
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
		}
	}
	for_each_online_cpu(cpu) {
		topo = &cpu_topology[cpu];
		pkg_first = cpumask_first(&topo->core_mask);
		topo_package = &cpu_topology[pkg_first];
		if (cpu == pkg_first) {
			for_each_cpu(sibling, &topo->core_mask) {
				topo_sibling = &cpu_topology[sibling];
				smt_first = cpumask_first(&topo_sibling->thread_mask);
				if (sibling == smt_first)
					topo_package->booted_cores++;
			}
		} else {
			topo->booted_cores = topo_package->booted_cores;
		}
	}
}

void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}

int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
	else
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

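/*
 * Writing 1 to the "dispatching" attribute requests vertical CPU
 * polarization, 0 horizontal; -EBUSY is returned if the PTF request
 * is rejected.
 */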
static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}


const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

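/*
 * Allocate the mask_info list for one nesting level; the number of
 * masks needed is the product of the SYSIB 15.1.x magnitude values
 * above that level.
 */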
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_alloc(sizeof(*mask->next), 8);
		if (!mask->next)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!tl_info)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	cpumask_set_cpu(0, &cpu_setup_mask);
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);

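/*
 * /proc/sys/s390/topology: reads report whether topology is enabled,
 * writes of 0 or 1 switch the topology mode, schedule a sched domain
 * rebuild and wait for it to complete.
 */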
static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int enabled = topology_is_enabled();
	int new_mode;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &enabled,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;

	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(enabled);
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();

	return rc;
}

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{ },
};

static struct ctl_table topology_dir_table[] = {
	{
		.procname	= "s390",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= topology_ctl_table,
	},
	{ },
};

static int __init topology_init(void)
{
	timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl_table(topology_dir_table);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

cpumask_t cpus_with_topology;

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				mask = info->mask;
				break;
			}
			info = info->next;
		}
		if (cpumask_empty(&mask))
			cpumask_copy(&mask, cpumask_of(cpu));
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		/* fallthrough */
	case TOPOLOGY_MODE_SINGLE:
		cpumask_copy(&mask, cpumask_of(cpu));
		break;
	}
	return mask;
}

static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (topology_mode != TOPOLOGY_MODE_HW)
		return mask;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}

#define TOPOLOGY_CORE_BITS	64

static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc)  : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
			if (cpu_present(cpu))
				cpumask_set_cpu(cpu, &cpus_with_topology);
		}
	}
	numa_update_cpu_topology();
}

void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	cpumask_clear(&cpus_with_topology);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}

int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}


const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_virt_alloc(sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);

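/*
 * Older /proc/sys/s390/topology handler: unlike the v6.2 version
 * above, which uses proc_douintvec_minmax(), this one copies the
 * "0"/"1" byte from the user buffer and parses it by hand.
 */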
static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int len;
	int new_mode;
	char buf[2];

	if (!*lenp || *ppos) {
		*lenp = 0;
		return 0;
	}
	if (!write) {
		strncpy(buf, topology_is_enabled() ? "1\n" : "0\n",
			ARRAY_SIZE(buf));
		len = strnlen(buf, ARRAY_SIZE(buf));
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	if (buf[0] != '0' && buf[0] != '1')
		return -EINVAL;
	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(buf[0] == '1');
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();
out:
	*lenp = len;
	*ppos += len;
	return 0;
}

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{ },
};

static struct ctl_table topology_dir_table[] = {
	{
		.procname	= "s390",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= topology_ctl_table,
	},
	{ },
};

static int __init topology_init(void)
{
	timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl_table(topology_dir_table);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);