v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 *    Copyright IBM Corp. 2007, 2011
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/hiperdispatch.h>
#include <asm/sysinfo.h>
#include <asm/asm.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static int cpu_management;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
{
	static cpumask_t mask;

	cpumask_clear(&mask);
	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
		goto out;
	cpumask_set_cpu(cpu, &mask);
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				cpumask_copy(&mask, &info->mask);
				break;
			}
			info = info->next;
		}
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		fallthrough;
	case TOPOLOGY_MODE_SINGLE:
		break;
	}
	cpumask_and(&mask, &mask, &cpu_setup_mask);
out:
	cpumask_copy(dst, &mask);
}

static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
{
	static cpumask_t mask;
	unsigned int max_cpu;

	cpumask_clear(&mask);
	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
		goto out;
	cpumask_set_cpu(cpu, &mask);
	if (topology_mode != TOPOLOGY_MODE_HW)
		goto out;
	cpu -= cpu % (smp_cpu_mtid + 1);
	max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
	for (; cpu <= max_cpu; cpu++) {
		if (cpumask_test_cpu(cpu, &cpu_setup_mask))
			cpumask_set_cpu(cpu, &mask);
	}
out:
	cpumask_copy(dst, &mask);
}
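
The first-sibling rounding in cpu_thread_map() above (cpu -= cpu % (smp_cpu_mtid + 1)) just rounds a CPU number down to the first thread of its core, since a core's threads occupy a contiguous block of smp_cpu_mtid + 1 CPU numbers. A minimal standalone sketch of the same arithmetic, with a made-up constant standing in for smp_cpu_mtid:

#include <stdio.h>

/* Illustration only: pretend smp_cpu_mtid is 1, i.e. two threads per core. */
#define SMP_CPU_MTID	1

int main(void)
{
	unsigned int cpu;

	for (cpu = 0; cpu < 6; cpu++) {
		/* same rounding as cpu_thread_map(): first thread of the core */
		unsigned int first = cpu - cpu % (SMP_CPU_MTID + 1);

		/* cpus 0,1 -> 0..1; 2,3 -> 2..3; 4,5 -> 4..5 */
		printf("cpu %u: siblings %u..%u\n", cpu, first, first + SMP_CPU_MTID);
	}
	return 0;
}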
118
#define TOPOLOGY_CORE_BITS	64

static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int max_cpu, rcore;
		int cpu;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (cpu < 0)
			continue;
		max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
		for (; cpu <= max_cpu; cpu++) {
			topo = &cpu_topology[cpu];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = cpu;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(cpu, &drawer->mask);
			cpumask_set_cpu(cpu, &book->mask);
			cpumask_set_cpu(cpu, &socket->mask);
			smp_cpu_set_polarization(cpu, tl_core->pp);
			smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

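/*
 * Walk the SYSIB 15.1.x entry list: nesting level 3 entries describe
 * drawers, level 2 books, level 1 sockets, and level 0 entries carry
 * the core masks consumed by add_cpus_to_mask().
 */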
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

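/*
 * Perform the PTF (Perform Topology Function) instruction, opcode
 * 0xb9a20000, with function code fc (PTF_HORIZONTAL, PTF_VERTICAL or
 * PTF_CHECK) and return its condition code.
 */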
static int ptf(unsigned long fc)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%[fc],%[fc]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [fc] "d" (fc)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo, *topo_package, *topo_sibling;
	int cpu, sibling, pkg_first, smt_first, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		cpu_thread_map(&topo->thread_mask, cpu);
		cpu_group_map(&topo->core_mask, &socket_info, cpu);
		cpu_group_map(&topo->book_mask, &book_info, cpu);
		cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
		topo->booted_cores = 0;
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
		}
	}
	hd_reset_state();
	for_each_online_cpu(cpu) {
		topo = &cpu_topology[cpu];
		pkg_first = cpumask_first(&topo->core_mask);
		topo_package = &cpu_topology[pkg_first];
		if (cpu == pkg_first) {
			for_each_cpu(sibling, &topo->core_mask) {
				topo_sibling = &cpu_topology[sibling];
				smt_first = cpumask_first(&topo_sibling->thread_mask);
				if (sibling == smt_first) {
					topo_package->booted_cores++;
					hd_add_core(sibling);
				}
			}
		} else {
			topo->booted_cores = topo_package->booted_cores;
		}
	}
}

void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc, hd_status;

	hd_status = 0;
	rc = 0;
	mutex_lock(&smp_cpu_state_mutex);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	if (cpu_management == 1)
		hd_status = hd_enable_hiperdispatch();
	mutex_unlock(&smp_cpu_state_mutex);
	if (hd_status == 0)
		hd_disable_hiperdispatch();
	return rc;
}

int arch_update_cpu_topology(void)
{
	int rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
	else
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
}

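/*
 * Polling cadence: topology_expect_change() below tops the budget in
 * topology_poll up to 60, and set_topology_timer() consumes it one
 * fast (100ms) poll at a time, i.e. roughly six seconds of fast
 * polling before falling back to the slow 60 second interval.
 */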
void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int set_polarization(int polarization)
{
	int rc = 0;

	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == polarization)
		goto out;
	rc = topology_set_cpu_management(polarization);
	if (rc)
		goto out;
	cpu_management = polarization;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc;
}

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sysfs_emit(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = set_polarization(val);
	return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);
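
Since dev_attr_dispatching is attached to the cpu subsystem root (see topology_init below), the attribute should appear as /sys/devices/system/cpu/dispatching; writing 1 requests vertical polarization, 0 horizontal. A minimal userspace sketch, assuming that path on a machine with topology support:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Assumed sysfs path: dev_attr_dispatching hangs off the cpu subsystem root. */
	int fd = open("/sys/devices/system/cpu/dispatching", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "1" selects vertical polarization; expect EBUSY if the PTF call fails. */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}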

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sysfs_emit(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sysfs_emit(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sysfs_emit(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sysfs_emit(buf, "vertical:high\n");
		break;
	default:
		count = sysfs_emit(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sysfs_emit(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}


const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

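/*
 * Scheduling domain hierarchy, innermost level first: SMT threads,
 * the MC core group (socket), BOOK, DRAWER, and the machine-wide PKG
 * level built from cpu_cpu_mask.
 */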
static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
	{ NULL, },
};

static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_alloc(sizeof(*mask->next), 8);
		if (!mask->next)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!tl_info)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	cpumask_set_cpu(0, &cpu_setup_mask);
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);
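/*
 * Note: kstrtobool() accepts the usual boolean spellings, so e.g.
 * "topology=off" or "topology=0" on the kernel command line selects
 * TOPOLOGY_MODE_SINGLE via topology_get_mode(0).
 */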

static int topology_ctl_handler(const struct ctl_table *ctl, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int enabled = topology_is_enabled();
	int new_mode;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &enabled,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;

	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(enabled);
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();

	return rc;
}

static int polarization_ctl_handler(const struct ctl_table *ctl, int write,
				    void *buffer, size_t *lenp, loff_t *ppos)
{
	int polarization;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &polarization,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	polarization = cpu_management;
	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;
	return set_polarization(polarization);
}

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{
		.procname	= "polarization",
		.mode		= 0644,
		.proc_handler	= polarization_ctl_handler,
	},
};

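topology_init below registers this table with register_sysctl("s390", ...), so the handlers should be reachable as /proc/sys/s390/topology and /proc/sys/s390/polarization. A minimal userspace sketch, assuming those paths:

#include <stdio.h>

int main(void)
{
	/* Assumed procfs path created by register_sysctl("s390", ...). */
	FILE *f = fopen("/proc/sys/s390/polarization", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Writing 1 calls set_polarization(1), like the dispatching attribute. */
	fputs("1\n", f);
	fclose(f);
	return 0;
}
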
static int __init topology_init(void)
{
	struct device *dev_root;
	int rc = 0;

	timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY_VERTICAL))
		set_polarization(1);
	register_sysctl("s390", topology_ctl_table);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		rc = device_create_file(dev_root, &dev_attr_dispatching);
		put_device(dev_root);
	}
	return rc;
}
device_initcall(topology_init);
v4.6
 
/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and per_cpu(cpu_topology) updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;

DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	for (; info; info = info->next) {
		if (cpumask_test_cpu(cpu, &info->mask))
			return info->mask;
	}
	return mask;
}

static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}

static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
					  struct mask_info *book,
					  struct mask_info *socket,
					  int one_socket_per_cpu)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &per_cpu(cpu_topology, lcpu + i);
			topo->book_id = book->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			if (one_socket_per_cpu)
				topo->socket_id = rcore;
			else
				topo->socket_id = socket->id;
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
		if (one_socket_per_cpu)
			socket = socket->next;
	}
	return socket;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, socket, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

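/*
 * Machine types 0x2097 and 0x2098 (System z10) report one socket per
 * core in SYSIB 15.1.x, hence the dedicated parser above and the
 * one_socket_per_cpu path in add_cpus_to_mask().
 */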
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_masks_z10(info);
		break;
	default:
		__tl_to_masks_generic(info);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

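/*
 * Same PTF instruction as in the v6.13.7 version above; this older
 * variant extracts the condition code by hand with ipm/srl instead of
 * the CC_* helper macros.
 */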
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc)  : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu;

	for_each_possible_cpu(cpu) {
		topo = &per_cpu(cpu_topology, cpu);
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		if (!MACHINE_HAS_TOPOLOGY) {
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = cpu;
			topo->book_id = cpu;
		}
	}
	numa_update_cpu_topology();
}

void store_topology(struct sysinfo_15_1_x *info)
{
	if (topology_max_mnest >= 3)
		stsi(info, 15, 1, 3);
	else
		stsi(info, 15, 1, 2);
}

int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu, rc = 0;

	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
			 dispatching_store);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).thread_mask;
}


const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).book_mask;
}

static int __init early_parse_topology(char *p)
{
	return kstrtobool(p, &topology_enabled);
}
early_param("topology", early_parse_topology);

static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
		mask = mask->next;
	}
}

static int __init s390_topology_init(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return 0;
	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	set_sched_topology(s390_topology);
	return 0;
}
early_initcall(s390_topology_init);

static int __init topology_init(void)
{
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);