// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>

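/*
 * Function codes for the PTF (Perform Topology Function) instruction,
 * issued by ptf() below: request horizontal or vertical CPU polarization,
 * or check whether a topology-change report is pending.
 */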
#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)
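
/*
 * The topology code runs in one of these modes: HW reflects the
 * machine-reported topology, SINGLE treats every CPU as its own package,
 * and PACKAGE groups all present CPUs into one package (used when the
 * machine provides no topology information but topology is enabled).
 */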
34
35enum {
36 TOPOLOGY_MODE_HW,
37 TOPOLOGY_MODE_SINGLE,
38 TOPOLOGY_MODE_PACKAGE,
39 TOPOLOGY_MODE_UNINITIALIZED
40};

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

cpumask_t cpus_with_topology;

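/*
 * Walk the linked list of mask_info entries for one topology level and
 * return the cpumask of the group that contains @cpu, honoring the
 * current topology mode. Falls back to a single-CPU mask if the CPU is
 * not found in any group.
 */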
static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				mask = info->mask;
				break;
			}
			info = info->next;
		}
		if (cpumask_empty(&mask))
			cpumask_copy(&mask, cpumask_of(cpu));
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		/* fallthrough */
	case TOPOLOGY_MODE_SINGLE:
		cpumask_copy(&mask, cpumask_of(cpu));
		break;
	}
	return mask;
}

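/*
 * Return the mask of hardware threads (SMT siblings) that share a core
 * with @cpu. Logical CPU numbers of siblings are consecutive: the core
 * base is @cpu rounded down to a multiple of smp_cpu_mtid + 1.
 */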
static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (topology_mode != TOPOLOGY_MODE_HW)
		return mask;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}

#define TOPOLOGY_CORE_BITS	64

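/*
 * Add all CPUs of one core topology-list entry to the drawer, book and
 * socket masks. The hardware core mask is big-endian (bit 0 is the most
 * significant bit), while for_each_set_bit() counts from the least
 * significant bit, hence the "TOPOLOGY_CORE_BITS - 1 - core" conversion
 * when computing the real core number rcore.
 */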
static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &cpu_topology[lcpu + i];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(lcpu + i, &drawer->mask);
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			cpumask_set_cpu(lcpu + i, &cpus_with_topology);
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

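/*
 * Advance to the next topology-list entry. Core entries (nesting level 0)
 * and container entries have different sizes, so the step width depends
 * on the type of the current entry.
 */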
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

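/*
 * Convert the SYSIB 15.1.x topology list into the socket/book/drawer
 * mask_info lists. Container entries at nesting levels 3/2/1 select the
 * next drawer/book/socket, and level-0 entries describe the cores within
 * the innermost container. An unknown nesting level aborts the walk.
 */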
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

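/*
 * Issue the PTF instruction (opcode 0xb9a20000) with function code @fc
 * and return the resulting condition code: "ipm" inserts the program
 * mask into the result register and "srl ...,28" shifts the condition
 * code down to the low bits.
 */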
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

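/*
 * Recompute the per-CPU thread/core/book/drawer masks from the current
 * mask_info lists. In the non-HW modes the IDs are synthesized instead:
 * PACKAGE puts every CPU into package 0, SINGLE gives each CPU its own
 * IDs.
 */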
static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		topo->drawer_mask = cpu_group_map(&drawer_info, cpu);
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
			if (cpu_present(cpu))
				cpumask_set_cpu(cpu, &cpus_with_topology);
		}
	}
	numa_update_cpu_topology();
}

void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	cpumask_clear(&cpus_with_topology);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}

int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

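/*
 * Poll the topology every 100ms while topology_poll holds "credits"
 * (each timer run consumes one), and fall back to a slow 60-second poll
 * once they are used up. topology_expect_change() refills the credits
 * when a change is likely.
 */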
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/*
	 * This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

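/*
 * Sysfs interface for the global "dispatching" attribute: 0 selects
 * horizontal CPU polarization, 1 selects vertical. Writes switch the
 * machine's polarization via topology_set_cpu_management().
 */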
static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

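/*
 * Scheduler domain hierarchy, from the smallest to the largest grouping:
 * SMT threads, cores in a socket (MC), books, drawers, and finally all
 * CPUs (DIE).
 */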
static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

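/*
 * Preallocate one mask_info per possible container at the given nesting
 * level. The number of containers is the product of the SYSIB magnitude
 * values above that level; @offset selects the level (1 = socket,
 * 2 = book, 3 = drawer).
 */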
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_alloc(sizeof(*mask->next), 8);
		if (!mask->next)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!tl_info)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);

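/*
 * Handler for /proc/sys/s390/topology: reading reports whether topology
 * is enabled, writing 0 or 1 switches between TOPOLOGY_MODE_SINGLE and
 * the HW/PACKAGE mode and triggers a scheduler-domain rebuild.
 */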
static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int enabled = topology_is_enabled();
	int new_mode;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &enabled,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;

	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(enabled);
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();

	return rc;
}

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
	{ },
};

static struct ctl_table topology_dir_table[] = {
	{
		.procname	= "s390",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= topology_ctl_table,
	},
	{ },
};

static int __init topology_init(void)
{
	timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl_table(topology_dir_table);
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);