// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/stop_machine.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/mm_types.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/sclp.h>
#include <asm/smp.h>

unsigned long __read_mostly elf_hwcap;
char elf_platform[ELF_PLATFORM_SIZE];

struct cpu_info {
        unsigned int cpu_mhz_dynamic;
        unsigned int cpu_mhz_static;
        struct cpuid cpu_id;
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
static DEFINE_PER_CPU(int, cpu_relax_retry);

static bool machine_has_cpu_mhz;

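/*
 * Detect at boot whether CPU-MHz information can be obtained: the required
 * facility must be installed and the ECAG query must not fail.
 */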
void __init cpu_detect_mhz_feature(void)
{
        if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
                machine_has_cpu_mhz = true;
}

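/*
 * Refresh the CPU-MHz values of the current CPU: the ECAG query returns the
 * dynamic MHz value in the upper 32 bits and the static value in the lower
 * 32 bits.
 */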
static void update_cpu_mhz(void *arg)
{
        unsigned long mhz;
        struct cpu_info *c;

        mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
        c = this_cpu_ptr(&cpu_info);
        c->cpu_mhz_dynamic = mhz >> 32;
        c->cpu_mhz_static = mhz & 0xffffffff;
}

void s390_update_cpu_mhz(void)
{
        s390_adjust_jiffies();
        if (machine_has_cpu_mhz)
                on_each_cpu(update_cpu_mhz, NULL, 0);
}

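/*
 * Called while spinning in stop_machine(): after spin_retry unsuccessful
 * retries, pick the next CPU in the mask and yield to it if its backing
 * vCPU is currently preempted by the hypervisor.
 */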
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
        int cpu, this_cpu;

        this_cpu = smp_processor_id();
        if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
                __this_cpu_write(cpu_relax_retry, 0);
                cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
                if (cpu >= nr_cpu_ids)
                        return;
                if (arch_vcpu_is_preempted(cpu))
                        smp_yield_cpu(cpu);
        }
}

/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
        struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

        get_cpu_id(id);
        if (machine_has_cpu_mhz)
                update_cpu_mhz(NULL);
        mmgrab(&init_mm);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);
}

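/* Print the bit numbers of all installed facilities from the stored list. */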
static void show_facilities(struct seq_file *m)
{
        unsigned int bit;

        seq_puts(m, "facilities      :");
        for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
                seq_printf(m, " %d", bit);
        seq_putc(m, '\n');
}

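/*
 * Print the lines shown once at the top of /proc/cpuinfo: vendor, number of
 * processors, bogomips, ELF hardware capabilities, facility bits, cache
 * information and one id line per online CPU.
 */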
static void show_cpu_summary(struct seq_file *m, void *v)
{
        static const char *hwcap_str[] = {
                [HWCAP_NR_ESAN3] = "esan3",
                [HWCAP_NR_ZARCH] = "zarch",
                [HWCAP_NR_STFLE] = "stfle",
                [HWCAP_NR_MSA] = "msa",
                [HWCAP_NR_LDISP] = "ldisp",
                [HWCAP_NR_EIMM] = "eimm",
                [HWCAP_NR_DFP] = "dfp",
                [HWCAP_NR_HPAGE] = "edat",
                [HWCAP_NR_ETF3EH] = "etf3eh",
                [HWCAP_NR_HIGH_GPRS] = "highgprs",
                [HWCAP_NR_TE] = "te",
                [HWCAP_NR_VXRS] = "vx",
                [HWCAP_NR_VXRS_BCD] = "vxd",
                [HWCAP_NR_VXRS_EXT] = "vxe",
                [HWCAP_NR_GS] = "gs",
                [HWCAP_NR_VXRS_EXT2] = "vxe2",
                [HWCAP_NR_VXRS_PDE] = "vxp",
                [HWCAP_NR_SORT] = "sort",
                [HWCAP_NR_DFLT] = "dflt",
                [HWCAP_NR_VXRS_PDE2] = "vxp2",
                [HWCAP_NR_NNPA] = "nnpa",
                [HWCAP_NR_PCI_MIO] = "pcimio",
                [HWCAP_NR_SIE] = "sie",
        };
        int i, cpu;

        BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
        seq_printf(m, "vendor_id       : IBM/S390\n"
                   "# processors    : %i\n"
                   "bogomips per cpu: %lu.%02lu\n",
                   num_online_cpus(), loops_per_jiffy/(500000/HZ),
                   (loops_per_jiffy/(5000/HZ))%100);
        seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
        seq_puts(m, "features\t: ");
        for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
                if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
                        seq_printf(m, "%s ", hwcap_str[i]);
        seq_puts(m, "\n");
        show_facilities(m);
        show_cacheinfo(m);
        for_each_online_cpu(cpu) {
                struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

                seq_printf(m, "processor %d: "
                           "version = %02X, "
                           "identification = %06X, "
                           "machine = %04X\n",
                           cpu, id->version, id->ident, id->machine);
        }
}

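/*
 * Determine the ELF hardware capability bits (AT_HWCAP) advertised to user
 * space, based on the installed facilities and machine flags.
 */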
static int __init setup_hwcaps(void)
{
        /* instructions named N3, "backported" to esa-mode */
        elf_hwcap |= HWCAP_ESAN3;

        /* z/Architecture mode active */
        elf_hwcap |= HWCAP_ZARCH;

        /* store-facility-list-extended */
        if (test_facility(7))
                elf_hwcap |= HWCAP_STFLE;

        /* message-security assist */
        if (test_facility(17))
                elf_hwcap |= HWCAP_MSA;

        /* long-displacement */
        if (test_facility(19))
                elf_hwcap |= HWCAP_LDISP;

        /* extended-immediate */
        elf_hwcap |= HWCAP_EIMM;

        /* extended-translation facility 3 enhancement */
        if (test_facility(22) && test_facility(30))
                elf_hwcap |= HWCAP_ETF3EH;

        /* decimal floating point & perform floating point operation */
        if (test_facility(42) && test_facility(44))
                elf_hwcap |= HWCAP_DFP;

        /* huge page support */
        if (MACHINE_HAS_EDAT1)
                elf_hwcap |= HWCAP_HPAGE;

        /* 64-bit register support for 31-bit processes */
        elf_hwcap |= HWCAP_HIGH_GPRS;

        /* transactional execution */
        if (MACHINE_HAS_TE)
                elf_hwcap |= HWCAP_TE;

        /*
         * Vector extension can be disabled with the "novx" parameter.
         * Use MACHINE_HAS_VX instead of facility bit 129.
         */
        if (MACHINE_HAS_VX) {
                elf_hwcap |= HWCAP_VXRS;
                if (test_facility(134))
                        elf_hwcap |= HWCAP_VXRS_BCD;
                if (test_facility(135))
                        elf_hwcap |= HWCAP_VXRS_EXT;
                if (test_facility(148))
                        elf_hwcap |= HWCAP_VXRS_EXT2;
                if (test_facility(152))
                        elf_hwcap |= HWCAP_VXRS_PDE;
                if (test_facility(192))
                        elf_hwcap |= HWCAP_VXRS_PDE2;
        }

        if (test_facility(150))
                elf_hwcap |= HWCAP_SORT;

        if (test_facility(151))
                elf_hwcap |= HWCAP_DFLT;

        if (test_facility(165))
                elf_hwcap |= HWCAP_NNPA;

        /* guarded storage */
        if (MACHINE_HAS_GS)
                elf_hwcap |= HWCAP_GS;

        if (MACHINE_HAS_PCI_MIO)
                elf_hwcap |= HWCAP_PCI_MIO;

        /* virtualization support */
        if (sclp.has_sief2)
                elf_hwcap |= HWCAP_SIE;

        return 0;
}
arch_initcall(setup_hwcaps);

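/*
 * Derive the ELF platform string (AT_PLATFORM) from the machine type of the
 * CPU id; the CPU id is also mixed into the random pool.
 */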
static int __init setup_elf_platform(void)
{
        struct cpuid cpu_id;

        get_cpu_id(&cpu_id);
        add_device_randomness(&cpu_id, sizeof(cpu_id));
        switch (cpu_id.machine) {
        default:        /* Use "z10" as default. */
                strcpy(elf_platform, "z10");
                break;
        case 0x2817:
        case 0x2818:
                strcpy(elf_platform, "z196");
                break;
        case 0x2827:
        case 0x2828:
                strcpy(elf_platform, "zEC12");
                break;
        case 0x2964:
        case 0x2965:
                strcpy(elf_platform, "z13");
                break;
        case 0x3906:
        case 0x3907:
                strcpy(elf_platform, "z14");
                break;
        case 0x8561:
        case 0x8562:
                strcpy(elf_platform, "z15");
                break;
        case 0x3931:
        case 0x3932:
                strcpy(elf_platform, "z16");
                break;
        }
        return 0;
}
arch_initcall(setup_elf_platform);

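/* Per-CPU topology lines, emitted only if topology support is built in. */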
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
        seq_printf(m, "physical id     : %d\n", topology_physical_package_id(n));
        seq_printf(m, "core id         : %d\n", topology_core_id(n));
        seq_printf(m, "book id         : %d\n", topology_book_id(n));
        seq_printf(m, "drawer id       : %d\n", topology_drawer_id(n));
        seq_printf(m, "dedicated       : %d\n", topology_cpu_dedicated(n));
        seq_printf(m, "address         : %d\n", smp_cpu_get_cpu_address(n));
        seq_printf(m, "siblings        : %d\n", cpumask_weight(topology_core_cpumask(n)));
        seq_printf(m, "cpu cores       : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}

static void show_cpu_ids(struct seq_file *m, unsigned long n)
{
        struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);

        seq_printf(m, "version         : %02X\n", id->version);
        seq_printf(m, "identification  : %06X\n", id->ident);
        seq_printf(m, "machine         : %04X\n", id->machine);
}

static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
        struct cpu_info *c = per_cpu_ptr(&cpu_info, n);

        if (!machine_has_cpu_mhz)
                return;
        seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
        seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
}

/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
        unsigned long n = (unsigned long) v - 1;
        unsigned long first = cpumask_first(cpu_online_mask);

        if (n == first)
                show_cpu_summary(m, v);
        seq_printf(m, "\ncpu number      : %ld\n", n);
        show_cpu_topology(m, n);
        show_cpu_ids(m, n);
        show_cpu_mhz(m, n);
        return 0;
}

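/*
 * seq_file iteration helpers: the position is mapped to the next online CPU
 * and encoded as cpu + 1, so that NULL can signal the end of the sequence.
 */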
static inline void *c_update(loff_t *pos)
{
        if (*pos)
                *pos = cpumask_next(*pos - 1, cpu_online_mask);
        else
                *pos = cpumask_first(cpu_online_mask);
        return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        cpus_read_lock();
        return c_update(pos);
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_update(pos);
}

static void c_stop(struct seq_file *m, void *v)
{
        cpus_read_unlock();
}

const struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};

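/*
 * Enable branch-prediction isolation for the current task (or, with the
 * _guest variant, for guest code run by the task). Requires facility 82.
 */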
int s390_isolate_bp(void)
{
        if (!test_facility(82))
                return -EOPNOTSUPP;
        set_thread_flag(TIF_ISOLATE_BP);
        return 0;
}
EXPORT_SYMBOL(s390_isolate_bp);

int s390_isolate_bp_guest(void)
{
        if (!test_facility(82))
                return -EOPNOTSUPP;
        set_thread_flag(TIF_ISOLATE_BP_GUEST);
        return 0;
}
EXPORT_SYMBOL(s390_isolate_bp_guest);