// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2011
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/memblock.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

enum {
	TOPOLOGY_MODE_HW,
	TOPOLOGY_MODE_SINGLE,
	TOPOLOGY_MODE_PACKAGE,
	TOPOLOGY_MODE_UNINITIALIZED
};

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static int topology_mode = TOPOLOGY_MODE_UNINITIALIZED;
static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and cpu_topology updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;
static struct mask_info drawer_info;

struct cpu_topology_s390 cpu_topology[NR_CPUS];
EXPORT_SYMBOL_GPL(cpu_topology);

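/*
 * Return in @dst the mask of cpus that share the topology container
 * (socket, book or drawer, depending on which mask_info list is passed
 * in @info) with @cpu. For the non-hardware topology modes this falls
 * back to a single-cpu or package-wide mask.
 */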
static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int cpu)
{
	static cpumask_t mask;

	cpumask_clear(&mask);
	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
		goto out;
	cpumask_set_cpu(cpu, &mask);
	switch (topology_mode) {
	case TOPOLOGY_MODE_HW:
		while (info) {
			if (cpumask_test_cpu(cpu, &info->mask)) {
				cpumask_copy(&mask, &info->mask);
				break;
			}
			info = info->next;
		}
		break;
	case TOPOLOGY_MODE_PACKAGE:
		cpumask_copy(&mask, cpu_present_mask);
		break;
	default:
		fallthrough;
	case TOPOLOGY_MODE_SINGLE:
		break;
	}
	cpumask_and(&mask, &mask, &cpu_setup_mask);
out:
	cpumask_copy(dst, &mask);
}

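/*
 * Return in @dst the mask of hardware threads that belong to the same
 * core as @cpu. Logical cpu numbers of the threads of one core are
 * contiguous, with a stride of smp_cpu_mtid + 1 between cores.
 */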
static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
{
	static cpumask_t mask;
	unsigned int max_cpu;

	cpumask_clear(&mask);
	if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
		goto out;
	cpumask_set_cpu(cpu, &mask);
	if (topology_mode != TOPOLOGY_MODE_HW)
		goto out;
	cpu -= cpu % (smp_cpu_mtid + 1);
	max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
	for (; cpu <= max_cpu; cpu++) {
		if (cpumask_test_cpu(cpu, &cpu_setup_mask))
			cpumask_set_cpu(cpu, &mask);
	}
out:
	cpumask_copy(dst, &mask);
}

#define TOPOLOGY_CORE_BITS	64

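/*
 * Add all cpus described by the topology core entry @tl_core to the
 * masks of the current @drawer, @book and @socket container, and record
 * their ids and polarization. The core mask is in machine bit order, so
 * bit positions are mirrored before the origin offset is applied to
 * obtain the real core number (rcore).
 */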
static void add_cpus_to_mask(struct topology_core *tl_core,
			     struct mask_info *drawer,
			     struct mask_info *book,
			     struct mask_info *socket)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask, TOPOLOGY_CORE_BITS) {
		unsigned int max_cpu, rcore;
		int cpu;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		cpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (cpu < 0)
			continue;
		max_cpu = min(cpu + smp_cpu_mtid, nr_cpu_ids - 1);
		for (; cpu <= max_cpu; cpu++) {
			topo = &cpu_topology[cpu];
			topo->drawer_id = drawer->id;
			topo->book_id = book->id;
			topo->socket_id = socket->id;
			topo->core_id = rcore;
			topo->thread_id = cpu;
			topo->dedicated = tl_core->d;
			cpumask_set_cpu(cpu, &drawer->mask);
			cpumask_set_cpu(cpu, &book->mask);
			cpumask_set_cpu(cpu, &socket->mask);
			smp_cpu_set_polarization(cpu, tl_core->pp);
		}
	}
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &drawer_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

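/*
 * Advance to the next entry of a topology list. CPU type entries
 * (nl == 0) and container entries have different sizes.
 */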
static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

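/*
 * Walk the SYSIB 15.1.x topology list and rebuild the container masks.
 * Nesting levels 3, 2 and 1 open a new drawer, book and socket
 * container respectively; level 0 entries describe the cores within
 * the innermost container.
 */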
static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	struct mask_info *drawer = &drawer_info;
	union topology_entry *tle, *end;

	clear_masks();
	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 3:
			drawer = drawer->next;
			drawer->id = tle->container.id;
			break;
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, drawer, book, socket);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
}

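/*
 * Issue the PTF (perform topology function) instruction with function
 * code @fc and return its condition code. PTF_CHECK reports whether a
 * topology change is pending; PTF_HORIZONTAL/PTF_VERTICAL request a
 * polarization change.
 */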
static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

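/*
 * Recompute the thread/core/book/drawer masks and ids for all possible
 * cpus, and derive the number of booted cores per package from the
 * online cpus.
 */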
void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo, *topo_package, *topo_sibling;
	int cpu, sibling, pkg_first, smt_first, id;

	for_each_possible_cpu(cpu) {
		topo = &cpu_topology[cpu];
		cpu_thread_map(&topo->thread_mask, cpu);
		cpu_group_map(&topo->core_mask, &socket_info, cpu);
		cpu_group_map(&topo->book_mask, &book_info, cpu);
		cpu_group_map(&topo->drawer_mask, &drawer_info, cpu);
		topo->booted_cores = 0;
		if (topology_mode != TOPOLOGY_MODE_HW) {
			id = topology_mode == TOPOLOGY_MODE_PACKAGE ? 0 : cpu;
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = id;
			topo->book_id = id;
			topo->drawer_id = id;
		}
	}
	for_each_online_cpu(cpu) {
		topo = &cpu_topology[cpu];
		pkg_first = cpumask_first(&topo->core_mask);
		topo_package = &cpu_topology[pkg_first];
		if (cpu == pkg_first) {
			for_each_cpu(sibling, &topo->core_mask) {
				topo_sibling = &cpu_topology[sibling];
				smt_first = cpumask_first(&topo_sibling->thread_mask);
				if (sibling == smt_first)
					topo_package->booted_cores++;
			}
		} else {
			topo->booted_cores = topo_package->booted_cores;
		}
	}
}

void store_topology(struct sysinfo_15_1_x *info)
{
	stsi(info, 15, 1, topology_mnest_limit());
}

static void __arch_update_dedicated_flag(void *arg)
{
	if (topology_cpu_dedicated(smp_processor_id()))
		set_cpu_flag(CIF_DEDICATED_CPU);
	else
		clear_cpu_flag(CIF_DEDICATED_CPU);
}

static int __arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	int rc = 0;

	mutex_lock(&smp_cpu_state_mutex);
	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	mutex_unlock(&smp_cpu_state_mutex);
	return rc;
}

int arch_update_cpu_topology(void)
{
	struct device *dev;
	int cpu, rc;

	rc = __arch_update_cpu_topology();
	on_each_cpu(__arch_update_dedicated_flag, NULL, 0);
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_flush_work(void)
{
	flush_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer;

static atomic_t topology_poll = ATOMIC_INIT(0);

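/*
 * (Re)arm the topology timer: poll every 100 ms while a topology change
 * is expected (topology_poll counts down towards zero), otherwise once
 * per minute.
 */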
static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
	else
		mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	cpus_read_lock();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	cpus_read_unlock();
	return rc ? rc : count;
}
static DEVICE_ATTR_RW(dispatching);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

static ssize_t cpu_dedicated_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", topology_cpu_dedicated(cpu));
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(dedicated, 0444, cpu_dedicated_show, NULL);

static struct attribute *topology_extra_cpu_attrs[] = {
	&dev_attr_dedicated.attr,
	NULL,
};

static struct attribute_group topology_extra_cpu_attr_group = {
	.attrs = topology_extra_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	int rc;

	rc = sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	if (rc || !MACHINE_HAS_TOPOLOGY)
		return rc;
	rc = sysfs_create_group(&cpu->dev.kobj, &topology_extra_cpu_attr_group);
	if (rc)
		sysfs_remove_group(&cpu->dev.kobj, &topology_cpu_attr_group);
	return rc;
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &cpu_topology[cpu].thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &cpu_topology[cpu].core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &cpu_topology[cpu].book_mask;
}

static const struct cpumask *cpu_drawer_mask(int cpu)
{
	return &cpu_topology[cpu].drawer_mask;
}

static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_drawer_mask, SD_INIT_NAME(DRAWER) },
	{ cpu_cpu_mask, SD_INIT_NAME(PKG) },
	{ NULL, },
};

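/*
 * Allocate one mask_info per container that can exist at the given
 * nesting level; the count is the product of the relevant entries of
 * the SYSIB 15.1.x magnitude (mag[]) array.
 */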
static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = memblock_alloc(sizeof(*mask->next), 8);
		if (!mask->next)
			panic("%s: Failed to allocate %zu bytes align=0x%x\n",
			      __func__, sizeof(*mask->next), 8);
		mask = mask->next;
	}
}

void __init topology_init_early(void)
{
	struct sysinfo_15_1_x *info;

	set_sched_topology(s390_topology);
	if (topology_mode == TOPOLOGY_MODE_UNINITIALIZED) {
		if (MACHINE_HAS_TOPOLOGY)
			topology_mode = TOPOLOGY_MODE_HW;
		else
			topology_mode = TOPOLOGY_MODE_SINGLE;
	}
	if (!MACHINE_HAS_TOPOLOGY)
		goto out;
	tl_info = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!tl_info)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
		info->mag[4], info->mag[5], info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	alloc_masks(info, &drawer_info, 3);
out:
	cpumask_set_cpu(0, &cpu_setup_mask);
	__arch_update_cpu_topology();
	__arch_update_dedicated_flag(NULL);
}

static inline int topology_get_mode(int enabled)
{
	if (!enabled)
		return TOPOLOGY_MODE_SINGLE;
	return MACHINE_HAS_TOPOLOGY ? TOPOLOGY_MODE_HW : TOPOLOGY_MODE_PACKAGE;
}

static inline int topology_is_enabled(void)
{
	return topology_mode != TOPOLOGY_MODE_SINGLE;
}

static int __init topology_setup(char *str)
{
	bool enabled;
	int rc;

	rc = kstrtobool(str, &enabled);
	if (rc)
		return rc;
	topology_mode = topology_get_mode(enabled);
	return 0;
}
early_param("topology", topology_setup);

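/*
 * Handler for the s390.topology sysctl. Writing 0 or 1 disables or
 * enables topology awareness and schedules a rebuild of the scheduling
 * domains if the mode changes.
 */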
static int topology_ctl_handler(struct ctl_table *ctl, int write,
				void *buffer, size_t *lenp, loff_t *ppos)
{
	int enabled = topology_is_enabled();
	int new_mode;
	int rc;
	struct ctl_table ctl_entry = {
		.procname	= ctl->procname,
		.data		= &enabled,
		.maxlen		= sizeof(int),
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	};

	rc = proc_douintvec_minmax(&ctl_entry, write, buffer, lenp, ppos);
	if (rc < 0 || !write)
		return rc;

	mutex_lock(&smp_cpu_state_mutex);
	new_mode = topology_get_mode(enabled);
	if (topology_mode != new_mode) {
		topology_mode = new_mode;
		topology_schedule_update();
	}
	mutex_unlock(&smp_cpu_state_mutex);
	topology_flush_work();

	return rc;
}

static struct ctl_table topology_ctl_table[] = {
	{
		.procname	= "topology",
		.mode		= 0644,
		.proc_handler	= topology_ctl_handler,
	},
};

static int __init topology_init(void)
{
	struct device *dev_root;
	int rc = 0;

	timer_setup(&topology_timer, topology_timer_fn, TIMER_DEFERRABLE);
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	register_sysctl("s390", topology_ctl_table);

	dev_root = bus_get_dev_root(&cpu_subsys);
	if (dev_root) {
		rc = device_create_file(dev_root, &dev_attr_dispatching);
		put_device(dev_root);
	}
	return rc;
}
device_initcall(topology_init);

/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/cpuset.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/node.h>
#include <asm/sysinfo.h>
#include <asm/numa.h>

#define PTF_HORIZONTAL	(0UL)
#define PTF_VERTICAL	(1UL)
#define PTF_CHECK	(2UL)

struct mask_info {
	struct mask_info *next;
	unsigned char id;
	cpumask_t mask;
};

static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;

static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);

/*
 * Socket/Book linked lists and per_cpu(cpu_topology) updates are
 * protected by "sched_domains_mutex".
 */
static struct mask_info socket_info;
static struct mask_info book_info;

DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);

static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
{
	cpumask_t mask;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	for (; info; info = info->next) {
		if (cpumask_test_cpu(cpu, &info->mask))
			return info->mask;
	}
	return mask;
}

static cpumask_t cpu_thread_map(unsigned int cpu)
{
	cpumask_t mask;
	int i;

	cpumask_copy(&mask, cpumask_of(cpu));
	if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
		return mask;
	cpu -= cpu % (smp_cpu_mtid + 1);
	for (i = 0; i <= smp_cpu_mtid; i++)
		if (cpu_present(cpu + i))
			cpumask_set_cpu(cpu + i, &mask);
	return mask;
}

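/*
 * Add all cpus described by the topology core entry @tl_core to the
 * masks of the current @book and @socket container. With
 * @one_socket_per_cpu (z10 layout) every core gets its own socket id
 * and the updated socket pointer is returned to the caller.
 */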
static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
					  struct mask_info *book,
					  struct mask_info *socket,
					  int one_socket_per_cpu)
{
	struct cpu_topology_s390 *topo;
	unsigned int core;

	for_each_set_bit(core, &tl_core->mask[0], TOPOLOGY_CORE_BITS) {
		unsigned int rcore;
		int lcpu, i;

		rcore = TOPOLOGY_CORE_BITS - 1 - core + tl_core->origin;
		lcpu = smp_find_processor_id(rcore << smp_cpu_mt_shift);
		if (lcpu < 0)
			continue;
		for (i = 0; i <= smp_cpu_mtid; i++) {
			topo = &per_cpu(cpu_topology, lcpu + i);
			topo->book_id = book->id;
			topo->core_id = rcore;
			topo->thread_id = lcpu + i;
			cpumask_set_cpu(lcpu + i, &book->mask);
			cpumask_set_cpu(lcpu + i, &socket->mask);
			if (one_socket_per_cpu)
				topo->socket_id = rcore;
			else
				topo->socket_id = socket->id;
			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
		}
		if (one_socket_per_cpu)
			socket = socket->next;
	}
	return socket;
}

static void clear_masks(void)
{
	struct mask_info *info;

	info = &socket_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
	info = &book_info;
	while (info) {
		cpumask_clear(&info->mask);
		info = info->next;
	}
}

static union topology_entry *next_tle(union topology_entry *tle)
{
	if (!tle->nl)
		return (union topology_entry *)((struct topology_core *)tle + 1);
	return (union topology_entry *)((struct topology_container *)tle + 1);
}

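/*
 * Walk a topology list with the generic layout: level 2 and 1 entries
 * open a new book and socket container, level 0 entries describe cores.
 */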
static void __tl_to_masks_generic(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 2:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 1:
			socket = socket->next;
			socket->id = tle->container.id;
			break;
		case 0:
			add_cpus_to_mask(&tle->cpu, book, socket, 0);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

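/*
 * Walk a topology list in the z10 layout (machine types 0x2097 and
 * 0x2098), where nesting level 1 containers are books and each core
 * entry implies its own socket.
 */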
static void __tl_to_masks_z10(struct sysinfo_15_1_x *info)
{
	struct mask_info *socket = &socket_info;
	struct mask_info *book = &book_info;
	union topology_entry *tle, *end;

	tle = info->tle;
	end = (union topology_entry *)((unsigned long)info + info->length);
	while (tle < end) {
		switch (tle->nl) {
		case 1:
			book = book->next;
			book->id = tle->container.id;
			break;
		case 0:
			socket = add_cpus_to_mask(&tle->cpu, book, socket, 1);
			break;
		default:
			clear_masks();
			return;
		}
		tle = next_tle(tle);
	}
}

static void tl_to_masks(struct sysinfo_15_1_x *info)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	clear_masks();
	switch (cpu_id.machine) {
	case 0x2097:
	case 0x2098:
		__tl_to_masks_z10(info);
		break;
	default:
		__tl_to_masks_generic(info);
	}
}

static void topology_update_polarization_simple(void)
{
	int cpu;

	mutex_lock(&smp_cpu_state_mutex);
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_HRZ);
	mutex_unlock(&smp_cpu_state_mutex);
}

static int ptf(unsigned long fc)
{
	int rc;

	asm volatile(
		"	.insn	rre,0xb9a20000,%1,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (rc)
		: "d" (fc) : "cc");
	return rc;
}

int topology_set_cpu_management(int fc)
{
	int cpu, rc;

	if (!MACHINE_HAS_TOPOLOGY)
		return -EOPNOTSUPP;
	if (fc)
		rc = ptf(PTF_VERTICAL);
	else
		rc = ptf(PTF_HORIZONTAL);
	if (rc)
		return -EBUSY;
	for_each_possible_cpu(cpu)
		smp_cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
	return rc;
}

static void update_cpu_masks(void)
{
	struct cpu_topology_s390 *topo;
	int cpu;

	for_each_possible_cpu(cpu) {
		topo = &per_cpu(cpu_topology, cpu);
		topo->thread_mask = cpu_thread_map(cpu);
		topo->core_mask = cpu_group_map(&socket_info, cpu);
		topo->book_mask = cpu_group_map(&book_info, cpu);
		if (!MACHINE_HAS_TOPOLOGY) {
			topo->thread_id = cpu;
			topo->core_id = cpu;
			topo->socket_id = cpu;
			topo->book_id = cpu;
		}
	}
	numa_update_cpu_topology();
}

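/*
 * Fetch the topology information block with the deepest supported
 * nesting level: 3 if the machine provides it, otherwise 2.
 */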
void store_topology(struct sysinfo_15_1_x *info)
{
	if (topology_max_mnest >= 3)
		stsi(info, 15, 1, 3);
	else
		stsi(info, 15, 1, 2);
}

int arch_update_cpu_topology(void)
{
	struct sysinfo_15_1_x *info = tl_info;
	struct device *dev;
	int cpu, rc = 0;

	if (MACHINE_HAS_TOPOLOGY) {
		rc = 1;
		store_topology(info);
		tl_to_masks(info);
	}
	update_cpu_masks();
	if (!MACHINE_HAS_TOPOLOGY)
		topology_update_polarization_simple();
	for_each_online_cpu(cpu) {
		dev = get_cpu_device(cpu);
		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
	}
	return rc;
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}

void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(unsigned long ignored)
{
	if (ptf(PTF_CHECK))
		topology_schedule_update();
	set_topology_timer();
}

static struct timer_list topology_timer =
	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);

static atomic_t topology_poll = ATOMIC_INIT(0);

static void set_topology_timer(void)
{
	if (atomic_add_unless(&topology_poll, -1, 0))
		mod_timer(&topology_timer, jiffies + HZ / 10);
	else
		mod_timer(&topology_timer, jiffies + HZ * 60);
}

void topology_expect_change(void)
{
	if (!MACHINE_HAS_TOPOLOGY)
		return;
	/* This is racy, but it doesn't matter since it is just a heuristic.
	 * Worst case is that we poll in a higher frequency for a bit longer.
	 */
	if (atomic_read(&topology_poll) > 60)
		return;
	atomic_add(60, &topology_poll);
	set_topology_timer();
}

static int cpu_management;

static ssize_t dispatching_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (rc)
		goto out;
	cpu_management = val;
	topology_expect_change();
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(dispatching, 0644, dispatching_show,
		   dispatching_store);

static ssize_t cpu_polarization_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_get_polarization(cpu)) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static struct attribute *topology_cpu_attrs[] = {
	&dev_attr_polarization.attr,
	NULL,
};

static struct attribute_group topology_cpu_attr_group = {
	.attrs = topology_cpu_attrs,
};

int topology_cpu_init(struct cpu *cpu)
{
	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
}

static const struct cpumask *cpu_thread_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).thread_mask;
}

const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).core_mask;
}

static const struct cpumask *cpu_book_mask(int cpu)
{
	return &per_cpu(cpu_topology, cpu).book_mask;
}

static int __init early_parse_topology(char *p)
{
	return kstrtobool(p, &topology_enabled);
}
early_param("topology", early_parse_topology);

static struct sched_domain_topology_level s390_topology[] = {
	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
	{ cpu_book_mask, SD_INIT_NAME(BOOK) },
	{ cpu_cpu_mask, SD_INIT_NAME(DIE) },
	{ NULL, },
};

static void __init alloc_masks(struct sysinfo_15_1_x *info,
			       struct mask_info *mask, int offset)
{
	int i, nr_masks;

	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
	for (i = 0; i < info->mnest - offset; i++)
		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
	nr_masks = max(nr_masks, 1);
	for (i = 0; i < nr_masks; i++) {
		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
		mask = mask->next;
	}
}

static int __init s390_topology_init(void)
{
	struct sysinfo_15_1_x *info;
	int i;

	if (!MACHINE_HAS_TOPOLOGY)
		return 0;
	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
	info = tl_info;
	store_topology(info);
	pr_info("The CPU configuration topology of the machine is:");
	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
		printk(KERN_CONT " %d", info->mag[i]);
	printk(KERN_CONT " / %d\n", info->mnest);
	alloc_masks(info, &socket_info, 1);
	alloc_masks(info, &book_info, 2);
	set_sched_topology(s390_topology);
	return 0;
}
early_initcall(s390_topology_init);

static int __init topology_init(void)
{
	if (MACHINE_HAS_TOPOLOGY)
		set_topology_timer();
	else
		topology_update_polarization_simple();
	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
}
device_initcall(topology_init);