// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2011
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/hiperdispatch.h>
#include <asm/sysinfo.h>
#include <asm/asm.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static int cpu_management;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

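/*
 * Compute the mask of CPUs that share the topology container (the
 * socket, book or drawer list passed in @info) of @cpu. In HW mode the
 * container lists built by tl_to_masks() are searched; in PACKAGE mode
 * all present CPUs form a single group; in SINGLE mode each CPU is a
 * group of its own. The result is always restricted to CPUs in
 * cpu_setup_mask. The static scratch mask is safe because updates are
 * serialized (see the locking comment above).
 */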
static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
{
	static cpumask_t mask;

	cpumask_clear(&mask);
	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
		goto out;
	cpumask_set_cpu(cpu, &mask);
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				cpumask_copy(&mask, &info->mask);
				break;
			}
			info = info->next;
		}
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		fallthrough;
	case TOPOLOGY_MODE_SINGLE:
		break;
	}
	cpumask_and(&mask, &mask, &cpu_setup_mask);
out:
	cpumask_copy(dst, &mask);
}

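/*
 * Compute the SMT sibling mask of @cpu: each core occupies a block of
 * smp_cpu_mtid + 1 consecutive logical CPU numbers, so round @cpu down
 * to the first thread of its core and add all configured threads of
 * that core.
 */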
static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
{
	static cpumask_t mask;
	unsigned int max_cpu;

	cpumask_clear(&mask);
	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
		goto out;
	cpumask_set_cpu(cpu, &mask);
	if (topology_mode != TOPOLOGY_MODE_HW)
		goto out;
	cpu -= cpu % (smp_cpu_mtid + 1);
	max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
	for (; cpu <= max_cpu; cpu++) {
		if (cpumask_test_cpu(cpu, &cpu_setup_mask))
			cpumask_set_cpu(cpu, &mask);
	}
out:
	cpumask_copy(dst, &mask);
}

#define TOPOLOGY_CORE_BITS	64

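/*
 * Add all CPUs of the cores described by one topology-list core entry
 * to the current drawer/book/socket masks and fill in their per-CPU
 * topology IDs. The bits in the entry's core mask are numbered from
 * the most significant bit down, so the bit position is mirrored and
 * offset by the entry's origin to get the real core number (rcore);
 * each core then covers smp_cpu_mtid + 1 logical CPUs starting at
 * rcore << smp_cpu_mt_shift.
 */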
static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int max_cpu, rcore;
		int cpu;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (cpu < 0)
			continue;
		max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
		for (; cpu <= max_cpu; cpu++) {
			topo = &cpu_topology[cpu];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = cpu;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(cpu, &drawer->mask);
			cpumask_set_cpu(cpu, &book->mask);
			cpumask_set_cpu(cpu, &socket->mask);
			smp_cpu_set_polarization(cpu, tl_core->pp);
			smp_cpu_set_capacity(cpu, CPU_CAPACITY_HIGH);
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

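/*
 * Topology-list entries have different sizes depending on their type:
 * advance by the size of a core entry for nesting level 0 and by the
 * size of a container entry otherwise.
 */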
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

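/*
 * Walk the topology list returned by STSI 15.1.x. Container entries at
 * nesting levels 3/2/1 advance to the next preallocated drawer/book/
 * socket mask_info and record the container ID; level-0 entries
 * describe the cores contained in the current containers. An
 * unexpected nesting level aborts the walk with cleared masks.
 */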
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

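/*
 * Issue the Perform Topology Function (PTF) instruction with function
 * code @fc and return its condition code. Callers treat a nonzero
 * condition code as "request rejected" for PTF_HORIZONTAL/PTF_VERTICAL
 * and as "topology change report pending" for PTF_CHECK.
 */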
static int ptf(unsigned long fc)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%[fc],%[fc]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc)
		: [fc] "d" (fc)
		: CC_CLOBBER);
	return CC_TRANSFORM(cc);
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

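/*
 * Rebuild the topology masks and IDs of all possible CPUs, then
 * recount booted_cores: on the first CPU of each package one core is
 * counted per first online SMT sibling (and registered with the
 * hiperdispatch code); the remaining package members inherit the
 * count.
 */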
void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo, *topo_package, *topo_sibling;
	int cpu, sibling, pkg_first, smt_first, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		cpu_thread_map(&topo->thread_mask, cpu);
		cpu_group_map(&topo->core_mask, &socket_info, cpu);
		cpu_group_map(&topo->book_mask, &book_info, cpu);
		cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
		topo->booted_cores = 0;
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
		}
	}
	hd_reset_state();
	for_each_online_cpu(cpu) {
		topo = &cpu_topology[cpu];
		pkg_first = cpumask_first(&topo->core_mask);
		topo_package = &cpu_topology[pkg_first];
		if (cpu == pkg_first) {
			for_each_cpu(sibling, &topo->core_mask) {
				topo_sibling = &cpu_topology[sibling];
				smt_first = cpumask_first(&topo_sibling->thread_mask);
				if (sibling == smt_first) {
					topo_package->booted_cores++;
					hd_add_core(sibling);
				}
			}
		} else {
			topo->booted_cores = topo_package->booted_cores;
		}
	}
}

void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc, hd_status;

	hd_status = 0;
	rc = 0;
	mutex_lock(&smp_cpu_state_mutex);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	if (cpu_management == 1)
		hd_status = hd_enable_hiperdispatch();
	mutex_unlock(&smp_cpu_state_mutex);
	if (hd_status == 0)
		hd_disable_hiperdispatch();
	return rc;
}

int arch_update_cpu_topology(void)
{
	int rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

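/*
 * While topology_expect_change() has left polling credits in
 * topology_poll, check for a pending topology change every 100 ms;
 * otherwise fall back to polling once per minute.
 */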
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
	else
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int set_polarization(int polarization)
{
	int rc = 0;

	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == polarization)
		goto out;
	rc = topology_set_cpu_management(polarization);
	if (rc)
		goto out;
	cpu_management = polarization;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc;
}

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sysfs_emit(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = set_polarization(val);
	return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sysfs_emit(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sysfs_emit(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sysfs_emit(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sysfs_emit(buf, "vertical:high\n");
		break;
	default:
		count = sysfs_emit(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sysfs_emit(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

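/*
 * The scheduling domain hierarchy, from smallest to largest span:
 * SMT siblings, cores within a socket (MC), sockets within a book,
 * books within a drawer, and finally all CPUs of the machine (PKG).
 */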
static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
	{ NULL, },
};

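/*
 * Preallocate the mask_info list for one container level (offset 1 =
 * socket, 2 = book, 3 = drawer). The upper bound on the number of
 * containers at a level is derived from the SYSIB 15.1.x magnitude
 * array; the structures come from memblock since this runs before the
 * slab allocator is available.
 */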
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_alloc(sizeof(*mask->next), 8);
		if (!mask->next)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!tl_info)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	cpumask_set_cpu(0, &cpu_setup_mask);
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);

static int topology_ctl_handler(const struct ctl_table *ctl, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int enabled = topology_is_enabled();
	int new_mode;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &enabled,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;

	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(enabled);
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();

	return rc;
}

static int polarization_ctl_handler(const struct ctl_table *ctl, int write,
				    void *buffer, size_t *lenp, loff_t *ppos)
{
	int polarization;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &polarization,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	polarization = cpu_management;
	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;
	return set_polarization(polarization);
}

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{
		.procname	= "polarization",
		.mode		= 0644,
		.proc_handler	= polarization_ctl_handler,
	},
};

static int __init topology_init(void)
{
	struct device *dev_root;
	int rc = 0;

	timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	if (IS_ENABLED(CONFIG_SCHED_TOPOLOGY_VERTICAL))
		set_polarization(1);
	register_sysctl("s390", topology_ctl_table);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		rc = device_create_file(dev_root, &dev_attr_dispatching);
		put_device(dev_root);
	}
	return rc;
}
device_initcall(topology_init);