v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/stop_machine.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/mm_types.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <asm/text-patching.h>
#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/sclp.h>
#include <asm/smp.h>

unsigned long __read_mostly elf_hwcap;
char elf_platform[ELF_PLATFORM_SIZE];

struct cpu_info {
	unsigned int cpu_mhz_dynamic;
	unsigned int cpu_mhz_static;
	struct cpuid cpu_id;
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
static DEFINE_PER_CPU(int, cpu_relax_retry);

static bool machine_has_cpu_mhz;

void __init cpu_detect_mhz_feature(void)
{
	if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
		machine_has_cpu_mhz = true;
}

static void update_cpu_mhz(void *arg)
{
	unsigned long mhz;
	struct cpu_info *c;

	mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
	c = this_cpu_ptr(&cpu_info);
	c->cpu_mhz_dynamic = mhz >> 32;
	c->cpu_mhz_static = mhz & 0xffffffff;
}

void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (machine_has_cpu_mhz)
		on_each_cpu(update_cpu_mhz, NULL, 0);
}

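/*
 * Note: stop_machine_yield() backs the busy-wait loop of stop_machine().
 * Once this CPU has spun spin_retry times without progress, it picks the
 * next CPU in the mask and, if that virtual CPU is currently preempted by
 * the hypervisor, donates its time slice via smp_yield_cpu() so the
 * stalled CPU can catch up.
 */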
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu;

	this_cpu = smp_processor_id();
	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
		__this_cpu_write(cpu_relax_retry, 0);
		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
		if (cpu >= nr_cpu_ids)
			return;
		if (arch_vcpu_is_preempted(cpu))
			smp_yield_cpu(cpu);
	}
}

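/*
 * Note: text_poke_sync() runs do_sync_core() on every online CPU so each
 * one serializes and refetches instructions after live code patching;
 * text_poke_sync_lock() additionally holds the CPU hotplug read lock so
 * no CPU can appear or disappear around the broadcast.
 */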
static void do_sync_core(void *info)
{
	sync_core();
}

void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

void text_poke_sync_lock(void)
{
	cpus_read_lock();
	text_poke_sync();
	cpus_read_unlock();
}

/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}

static void show_facilities(struct seq_file *m)
{
	unsigned int bit;

	seq_puts(m, "facilities      :");
	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
		seq_printf(m, " %d", bit);
	seq_putc(m, '\n');
}

static void show_cpu_summary(struct seq_file *m, void *v)
{
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3]	= "esan3",
		[HWCAP_NR_ZARCH]	= "zarch",
		[HWCAP_NR_STFLE]	= "stfle",
		[HWCAP_NR_MSA]		= "msa",
		[HWCAP_NR_LDISP]	= "ldisp",
		[HWCAP_NR_EIMM]		= "eimm",
		[HWCAP_NR_DFP]		= "dfp",
		[HWCAP_NR_HPAGE]	= "edat",
		[HWCAP_NR_ETF3EH]	= "etf3eh",
		[HWCAP_NR_HIGH_GPRS]	= "highgprs",
		[HWCAP_NR_TE]		= "te",
		[HWCAP_NR_VXRS]		= "vx",
		[HWCAP_NR_VXRS_BCD]	= "vxd",
		[HWCAP_NR_VXRS_EXT]	= "vxe",
		[HWCAP_NR_GS]		= "gs",
		[HWCAP_NR_VXRS_EXT2]	= "vxe2",
		[HWCAP_NR_VXRS_PDE]	= "vxp",
		[HWCAP_NR_SORT]		= "sort",
		[HWCAP_NR_DFLT]		= "dflt",
		[HWCAP_NR_VXRS_PDE2]	= "vxp2",
		[HWCAP_NR_NNPA]		= "nnpa",
		[HWCAP_NR_PCI_MIO]	= "pcimio",
		[HWCAP_NR_SIE]		= "sie",
	};
	int i, cpu;

	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id       : IBM/S390\n"
		   "# processors    : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X,  "
			   "identification = %06X,  "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}

static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;

	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;

	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;

	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;

	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;

	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;

	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;

	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;

	/* huge page support */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_HPAGE;

	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;

	/* transactional execution */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_TE;

	/* vector */
	if (test_facility(129)) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}

	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;

	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;

	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;

	/* guarded storage */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_GS;

	if (MACHINE_HAS_PCI_MIO)
		elf_hwcap |= HWCAP_PCI_MIO;

	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
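/*
 * Note: elf_hwcap ends up in the ELF auxiliary vector as AT_HWCAP, so
 * user space can test these bits without parsing /proc/cpuinfo.
 * Illustrative sketch, assuming glibc's HWCAP_S390_VX name for the "vx"
 * bit set above:
 *
 *	#include <sys/auxv.h>
 *
 *	if (getauxval(AT_HWCAP) & HWCAP_S390_VX)
 *		use_vector_code();	// hypothetical vx code path
 */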

static int __init setup_elf_platform(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	default:	/* Use "z10" as default. */
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	case 0x3931:
	case 0x3932:
		strcpy(elf_platform, "z16");
		break;
	}
	return 0;
}
arch_initcall(setup_elf_platform);
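/*
 * Note: elf_platform is what the ELF loader reports as AT_PLATFORM in the
 * auxiliary vector (readable via getauxval(AT_PLATFORM)), so user space
 * sees e.g. "z15" on machine types 0x8561/0x8562 per the table above.
 */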

static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id     : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id         : %d\n", topology_core_id(n));
	seq_printf(m, "book id         : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id       : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated       : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address         : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings        : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores       : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}

static void show_cpu_ids(struct seq_file *m, unsigned long n)
{
	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);

	seq_printf(m, "version         : %02X\n", id->version);
	seq_printf(m, "identification  : %06X\n", id->ident);
	seq_printf(m, "machine         : %04X\n", id->machine);
}

static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
	struct cpu_info *c = per_cpu_ptr(&cpu_info, n);

	if (!machine_has_cpu_mhz)
		return;
	seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
	seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
}

/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long n = (unsigned long) v - 1;
	unsigned long first = cpumask_first(cpu_online_mask);

	if (n == first)
		show_cpu_summary(m, v);
	seq_printf(m, "\ncpu number      : %ld\n", n);
	show_cpu_topology(m, n);
	show_cpu_ids(m, n);
	show_cpu_mhz(m, n);
	return 0;
}

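/*
 * Note: the iterator cookie stores "cpu number + 1" so that a valid entry
 * is never NULL (NULL would terminate the seq_file iteration);
 * show_cpuinfo() undoes the offset with "(unsigned long) v - 1".
 */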
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}

static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
v4.17
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/cpufeature.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/sched/mm.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/mm_types.h>
#include <linux/delay.h>
#include <linux/cpu.h>

#include <asm/diag.h>
#include <asm/facility.h>
#include <asm/elf.h>
#include <asm/lowcore.h>
#include <asm/param.h>
#include <asm/smp.h>

struct cpu_info {
	unsigned int cpu_mhz_dynamic;
	unsigned int cpu_mhz_static;
	struct cpuid cpu_id;
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);

static bool machine_has_cpu_mhz;

void __init cpu_detect_mhz_feature(void)
{
	if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
		machine_has_cpu_mhz = true;
}

static void update_cpu_mhz(void *arg)
{
	unsigned long mhz;
	struct cpu_info *c;

	mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
	c = this_cpu_ptr(&cpu_info);
	c->cpu_mhz_dynamic = mhz >> 32;
	c->cpu_mhz_static = mhz & 0xffffffff;
}

void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (machine_has_cpu_mhz)
		on_each_cpu(update_cpu_mhz, NULL, 0);
}

void notrace cpu_relax_yield(void)
{
	if (!smp_cpu_mtid && MACHINE_HAS_DIAG44) {
		diag_stat_inc(DIAG_STAT_X044);
		asm volatile("diag 0,0,0x44");
	}
	barrier();
}
EXPORT_SYMBOL(cpu_relax_yield);

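/*
 * Note: in this version the yield is not directed at a particular CPU:
 * when the machine runs without SMT (smp_cpu_mtid == 0) and supports
 * diagnose 0x44 ("voluntary time slice end"), cpu_relax_yield() simply
 * hands the remainder of the time slice back to the hypervisor. Later
 * kernels replace this with the targeted stop_machine_yield() shown in
 * the v6.13.7 listing above.
 */
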
/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}

/*
 * cpu_have_feature - Test CPU features on module initialization
 */
int cpu_have_feature(unsigned int num)
{
	return elf_hwcap & (1UL << num);
}
EXPORT_SYMBOL(cpu_have_feature);
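/*
 * Note: cpu_have_feature() is what the generic cpu_feature() helpers in
 * <linux/cpufeature.h> call into; assuming the usual pattern, a driver
 * can autoload only on capable machines with something like
 * module_cpu_feature_match(MSA, my_init) instead of open-coding an
 * elf_hwcap test (my_init is a hypothetical module init function).
 */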

static void show_facilities(struct seq_file *m)
{
	unsigned int bit;
	long *facilities;

	facilities = (long *)&S390_lowcore.stfle_fac_list;
	seq_puts(m, "facilities      :");
	for_each_set_bit_inv(bit, facilities, MAX_FACILITY_BIT)
		seq_printf(m, " %d", bit);
	seq_putc(m, '\n');
}

static void show_cpu_summary(struct seq_file *m, void *v)
{
	static const char *hwcap_str[] = {
		"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
		"edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe", "gs"
	};
	static const char * const int_hwcap_str[] = {
		"sie"
	};
	int i, cpu;

	seq_printf(m, "vendor_id       : IBM/S390\n"
		   "# processors    : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id   : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	for (i = 0; i < ARRAY_SIZE(int_hwcap_str); i++)
		if (int_hwcap_str[i] && (int_hwcap & (1UL << i)))
			seq_printf(m, "%s ", int_hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X,  "
			   "identification = %06X,  "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}

static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
	struct cpu_info *c = per_cpu_ptr(&cpu_info, n);

	seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
	seq_printf(m, "cpu MHz static  : %d\n", c->cpu_mhz_static);
}

/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	unsigned long n = (unsigned long) v - 1;

	if (!n)
		show_cpu_summary(m, v);
	if (!machine_has_cpu_mhz)
		return 0;
	seq_printf(m, "\ncpu number      : %ld\n", n);
	show_cpu_mhz(m, n);
	return 0;
}

static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	get_online_cpus();
	return c_update(pos);
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}

static void c_stop(struct seq_file *m, void *v)
{
	put_online_cpus();
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};

int s390_isolate_bp(void)
{
	if (!test_facility(82))
		return -EOPNOTSUPP;
	set_thread_flag(TIF_ISOLATE_BP);
	return 0;
}
EXPORT_SYMBOL(s390_isolate_bp);

int s390_isolate_bp_guest(void)
{
	if (!test_facility(82))
		return -EOPNOTSUPP;
	set_thread_flag(TIF_ISOLATE_BP_GUEST);
	return 0;
}
EXPORT_SYMBOL(s390_isolate_bp_guest);
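/*
 * Note: s390_isolate_bp() and s390_isolate_bp_guest() appear to belong to
 * the s390 branch-prediction (Spectre) mitigation work: setting the
 * TIF_ISOLATE_BP* flags makes the task, or the KVM guest it runs, execute
 * with isolated branch prediction, provided facility 82 is available
 * (otherwise both helpers return -EOPNOTSUPP).
 */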