1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright IBM Corp. 2008
4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
5 */
6
7#define KMSG_COMPONENT "cpu"
8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9
10#include <linux/stop_machine.h>
11#include <linux/bitops.h>
12#include <linux/kernel.h>
13#include <linux/random.h>
14#include <linux/sched/mm.h>
15#include <linux/init.h>
16#include <linux/seq_file.h>
17#include <linux/mm_types.h>
18#include <linux/delay.h>
19#include <linux/cpu.h>
20#include <linux/smp.h>
21#include <asm/text-patching.h>
22#include <asm/diag.h>
23#include <asm/facility.h>
24#include <asm/elf.h>
25#include <asm/lowcore.h>
26#include <asm/param.h>
27#include <asm/sclp.h>
28#include <asm/smp.h>
29
/* Hardware capability bits exported to user space via AT_HWCAP. */
unsigned long __read_mostly elf_hwcap;
/* Platform string exported via AT_PLATFORM (e.g. "z15"). */
char elf_platform[ELF_PLATFORM_SIZE];

/* Per-CPU cached frequency values and CPU id, shown in /proc/cpuinfo. */
struct cpu_info {
	unsigned int cpu_mhz_dynamic;
	unsigned int cpu_mhz_static;
	struct cpuid cpu_id;
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
/* per-CPU count of relax rounds, used by stop_machine_yield() below */
static DEFINE_PER_CPU(int, cpu_relax_retry);

/* set once at boot by cpu_detect_mhz_feature() */
static bool machine_has_cpu_mhz;
43
44void __init cpu_detect_mhz_feature(void)
45{
46 if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
47 machine_has_cpu_mhz = true;
48}
49
50static void update_cpu_mhz(void *arg)
51{
52 unsigned long mhz;
53 struct cpu_info *c;
54
55 mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
56 c = this_cpu_ptr(&cpu_info);
57 c->cpu_mhz_dynamic = mhz >> 32;
58 c->cpu_mhz_static = mhz & 0xffffffff;
59}
60
/*
 * Re-adjust the jiffies calibration and, if the machine reports CPU
 * frequencies, refresh the cached MHz values on every online CPU
 * (asynchronously: on_each_cpu() is called with wait == 0).
 */
void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (machine_has_cpu_mhz)
		on_each_cpu(update_cpu_mhz, NULL, 0);
}
67
/*
 * Called from the stop_machine spin loop.  After spin_retry fruitless
 * relax cycles, pick the next CPU in @cpumask (wrapping past the end)
 * and, if its backing virtual CPU is preempted by the hypervisor,
 * yield to it so it can make progress.
 */
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu;

	this_cpu = smp_processor_id();
	/* only act every spin_retry calls to keep the common path cheap */
	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
		__this_cpu_write(cpu_relax_retry, 0);
		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
		if (cpu >= nr_cpu_ids)
			return;
		if (arch_vcpu_is_preempted(cpu))
			smp_yield_cpu(cpu);
	}
}
82
/* IPI callback: serialize the instruction stream on the target CPU. */
static void do_sync_core(void *info)
{
	sync_core();
}

/* Make all online CPUs serialize, e.g. after live code patching. */
void text_poke_sync(void)
{
	on_each_cpu(do_sync_core, NULL, 1);
}

/* As text_poke_sync(), but stable against concurrent CPU hotplug. */
void text_poke_sync_lock(void)
{
	cpus_read_lock();
	text_poke_sync();
	cpus_read_unlock();
}
99
/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	/* cache this CPU's identification for /proc/cpuinfo */
	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	/* run on init_mm in lazy TLB mode; no user mm may be active here */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}
115
/* Print the numbers of all installed facility bits (STFLE), MSB first. */
static void show_facilities(struct seq_file *m)
{
	unsigned int bit;

	seq_puts(m, "facilities :");
	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
		seq_printf(m, " %d", bit);
	seq_putc(m, '\n');
}
125
/* Print the summary block shown once at the top of /proc/cpuinfo. */
static void show_cpu_summary(struct seq_file *m, void *v)
{
	/* one name per HWCAP_NR_* bit; the index is the bit number in elf_hwcap */
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3] = "esan3",
		[HWCAP_NR_ZARCH] = "zarch",
		[HWCAP_NR_STFLE] = "stfle",
		[HWCAP_NR_MSA] = "msa",
		[HWCAP_NR_LDISP] = "ldisp",
		[HWCAP_NR_EIMM] = "eimm",
		[HWCAP_NR_DFP] = "dfp",
		[HWCAP_NR_HPAGE] = "edat",
		[HWCAP_NR_ETF3EH] = "etf3eh",
		[HWCAP_NR_HIGH_GPRS] = "highgprs",
		[HWCAP_NR_TE] = "te",
		[HWCAP_NR_VXRS] = "vx",
		[HWCAP_NR_VXRS_BCD] = "vxd",
		[HWCAP_NR_VXRS_EXT] = "vxe",
		[HWCAP_NR_GS] = "gs",
		[HWCAP_NR_VXRS_EXT2] = "vxe2",
		[HWCAP_NR_VXRS_PDE] = "vxp",
		[HWCAP_NR_SORT] = "sort",
		[HWCAP_NR_DFLT] = "dflt",
		[HWCAP_NR_VXRS_PDE2] = "vxp2",
		[HWCAP_NR_NNPA] = "nnpa",
		[HWCAP_NR_PCI_MIO] = "pcimio",
		[HWCAP_NR_SIE] = "sie",
	};
	int i, cpu;

	/* keep the name table in sync with the HWCAP_NR_* definitions */
	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id : IBM/S390\n"
		   "# processors : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	/* one identification line per online CPU */
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}
179
/*
 * setup_hwcaps - derive the hardware-capability bits (AT_HWCAP) from
 * the installed facilities and machine flags.  Runs once at boot.
 */
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;

	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;

	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;

	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;

	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;

	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;

	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;

	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;

	/* huge page support */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_HPAGE;

	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;

	/* transactional execution */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_TE;

	/*
	 * Vector facilities; the sub-facilities are only reported when
	 * the base vector facility (129) is installed.
	 */
	if (test_facility(129)) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}

	/* sort facility */
	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;

	/* deflate facility */
	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;

	/* neural-network-processing assist */
	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;

	/* guarded storage */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_GS;

	/* PCI memory I/O instructions usable */
	if (MACHINE_HAS_PCI_MIO)
		elf_hwcap |= HWCAP_PCI_MIO;

	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
260
261static int __init setup_elf_platform(void)
262{
263 struct cpuid cpu_id;
264
265 get_cpu_id(&cpu_id);
266 add_device_randomness(&cpu_id, sizeof(cpu_id));
267 switch (cpu_id.machine) {
268 default: /* Use "z10" as default. */
269 strcpy(elf_platform, "z10");
270 break;
271 case 0x2817:
272 case 0x2818:
273 strcpy(elf_platform, "z196");
274 break;
275 case 0x2827:
276 case 0x2828:
277 strcpy(elf_platform, "zEC12");
278 break;
279 case 0x2964:
280 case 0x2965:
281 strcpy(elf_platform, "z13");
282 break;
283 case 0x3906:
284 case 0x3907:
285 strcpy(elf_platform, "z14");
286 break;
287 case 0x8561:
288 case 0x8562:
289 strcpy(elf_platform, "z15");
290 break;
291 case 0x3931:
292 case 0x3932:
293 strcpy(elf_platform, "z16");
294 break;
295 }
296 return 0;
297}
298arch_initcall(setup_elf_platform);
299
/* Per-CPU topology lines; only emitted with CONFIG_SCHED_TOPOLOGY. */
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id : %d\n", topology_core_id(n));
	seq_printf(m, "book id : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}
313
/* Print version/identification/machine of cpu @n from the cached cpuid. */
static void show_cpu_ids(struct seq_file *m, unsigned long n)
{
	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);

	seq_printf(m, "version : %02X\n", id->version);
	seq_printf(m, "identification : %06X\n", id->ident);
	seq_printf(m, "machine : %04X\n", id->machine);
}
322
323static void show_cpu_mhz(struct seq_file *m, unsigned long n)
324{
325 struct cpu_info *c = per_cpu_ptr(&cpu_info, n);
326
327 if (!machine_has_cpu_mhz)
328 return;
329 seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
330 seq_printf(m, "cpu MHz static : %d\n", c->cpu_mhz_static);
331}
332
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* v is the iterator token from c_update(): cpu number + 1 */
	unsigned long n = (unsigned long) v - 1;
	unsigned long first = cpumask_first(cpu_online_mask);

	/* the summary block is printed once, before the first online CPU */
	if (n == first)
		show_cpu_summary(m, v);
	seq_printf(m, "\ncpu number : %ld\n", n);
	show_cpu_topology(m, n);
	show_cpu_ids(m, n);
	show_cpu_mhz(m, n);
	return 0;
}
349
/*
 * Advance *pos to the next online CPU (or the first one when *pos is
 * 0) and return an opaque iterator token.  The token is cpu + 1 so
 * that CPU 0 is distinguishable from the NULL end-of-sequence marker.
 */
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}
358
/* seq_file start: hold the CPU hotplug read lock for the whole walk. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}

/* seq_file next: advance to the following online CPU. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}

/* seq_file stop: drop the hotplug lock taken in c_start(). */
static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}

/* Operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright IBM Corp. 2008
4 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
5 */
6
7#define KMSG_COMPONENT "cpu"
8#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
9
10#include <linux/stop_machine.h>
11#include <linux/bitops.h>
12#include <linux/kernel.h>
13#include <linux/random.h>
14#include <linux/sched/mm.h>
15#include <linux/init.h>
16#include <linux/seq_file.h>
17#include <linux/mm_types.h>
18#include <linux/delay.h>
19#include <linux/cpu.h>
20
21#include <asm/diag.h>
22#include <asm/facility.h>
23#include <asm/elf.h>
24#include <asm/lowcore.h>
25#include <asm/param.h>
26#include <asm/sclp.h>
27#include <asm/smp.h>
28
/* Hardware capability bits exported to user space via AT_HWCAP. */
unsigned long __read_mostly elf_hwcap;
/* Platform string exported via AT_PLATFORM (e.g. "z15"). */
char elf_platform[ELF_PLATFORM_SIZE];

/* Per-CPU cached frequency values and CPU id, shown in /proc/cpuinfo. */
struct cpu_info {
	unsigned int cpu_mhz_dynamic;
	unsigned int cpu_mhz_static;
	struct cpuid cpu_id;
};

static DEFINE_PER_CPU(struct cpu_info, cpu_info);
/* per-CPU count of relax rounds, used by stop_machine_yield() below */
static DEFINE_PER_CPU(int, cpu_relax_retry);

/* set once at boot by cpu_detect_mhz_feature() */
static bool machine_has_cpu_mhz;
42
/*
 * Record whether this machine can report CPU frequencies through the
 * extract-CPU-attribute instruction (requires facility bit 34 and a
 * valid ECAG response).
 */
void __init cpu_detect_mhz_feature(void)
{
	if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
		machine_has_cpu_mhz = true;
}

/*
 * Refresh the cached MHz values for the executing CPU.  The ECAG
 * attribute word carries the dynamic frequency in its upper 32 bits
 * and the static frequency in its lower 32 bits.
 */
static void update_cpu_mhz(void *arg)
{
	unsigned long mhz;
	struct cpu_info *c;

	mhz = __ecag(ECAG_CPU_ATTRIBUTE, 0);
	c = this_cpu_ptr(&cpu_info);
	c->cpu_mhz_dynamic = mhz >> 32;
	c->cpu_mhz_static = mhz & 0xffffffff;
}

/*
 * Re-adjust the jiffies calibration and, if the machine reports CPU
 * frequencies, refresh the cached MHz values on every online CPU
 * (asynchronously: on_each_cpu() is called with wait == 0).
 */
void s390_update_cpu_mhz(void)
{
	s390_adjust_jiffies();
	if (machine_has_cpu_mhz)
		on_each_cpu(update_cpu_mhz, NULL, 0);
}

/*
 * Called from the stop_machine spin loop.  After spin_retry fruitless
 * relax cycles, pick the next CPU in @cpumask (wrapping past the end)
 * and, if its backing virtual CPU is preempted by the hypervisor,
 * yield to it so it can make progress.
 */
void notrace stop_machine_yield(const struct cpumask *cpumask)
{
	int cpu, this_cpu;

	this_cpu = smp_processor_id();
	/* only act every spin_retry calls to keep the common path cheap */
	if (__this_cpu_inc_return(cpu_relax_retry) >= spin_retry) {
		__this_cpu_write(cpu_relax_retry, 0);
		cpu = cpumask_next_wrap(this_cpu, cpumask, this_cpu, false);
		if (cpu >= nr_cpu_ids)
			return;
		if (arch_vcpu_is_preempted(cpu))
			smp_yield_cpu(cpu);
	}
}
81
/*
 * cpu_init - initializes state that is per-CPU.
 */
void cpu_init(void)
{
	struct cpuid *id = this_cpu_ptr(&cpu_info.cpu_id);

	/* cache this CPU's identification for /proc/cpuinfo */
	get_cpu_id(id);
	if (machine_has_cpu_mhz)
		update_cpu_mhz(NULL);
	/* run on init_mm in lazy TLB mode; no user mm may be active here */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;
	BUG_ON(current->mm);
	enter_lazy_tlb(&init_mm, current);
}

/* Print the numbers of all installed facility bits (STFLE), MSB first. */
static void show_facilities(struct seq_file *m)
{
	unsigned int bit;

	seq_puts(m, "facilities :");
	for_each_set_bit_inv(bit, (long *)&stfle_fac_list, MAX_FACILITY_BIT)
		seq_printf(m, " %d", bit);
	seq_putc(m, '\n');
}
107
/* Print the summary block shown once at the top of /proc/cpuinfo. */
static void show_cpu_summary(struct seq_file *m, void *v)
{
	/* one name per HWCAP_NR_* bit; the index is the bit number in elf_hwcap */
	static const char *hwcap_str[] = {
		[HWCAP_NR_ESAN3] = "esan3",
		[HWCAP_NR_ZARCH] = "zarch",
		[HWCAP_NR_STFLE] = "stfle",
		[HWCAP_NR_MSA] = "msa",
		[HWCAP_NR_LDISP] = "ldisp",
		[HWCAP_NR_EIMM] = "eimm",
		[HWCAP_NR_DFP] = "dfp",
		[HWCAP_NR_HPAGE] = "edat",
		[HWCAP_NR_ETF3EH] = "etf3eh",
		[HWCAP_NR_HIGH_GPRS] = "highgprs",
		[HWCAP_NR_TE] = "te",
		[HWCAP_NR_VXRS] = "vx",
		[HWCAP_NR_VXRS_BCD] = "vxd",
		[HWCAP_NR_VXRS_EXT] = "vxe",
		[HWCAP_NR_GS] = "gs",
		[HWCAP_NR_VXRS_EXT2] = "vxe2",
		[HWCAP_NR_VXRS_PDE] = "vxp",
		[HWCAP_NR_SORT] = "sort",
		[HWCAP_NR_DFLT] = "dflt",
		[HWCAP_NR_VXRS_PDE2] = "vxp2",
		[HWCAP_NR_NNPA] = "nnpa",
		[HWCAP_NR_PCI_MIO] = "pcimio",
		[HWCAP_NR_SIE] = "sie",
	};
	int i, cpu;

	/* keep the name table in sync with the HWCAP_NR_* definitions */
	BUILD_BUG_ON(ARRAY_SIZE(hwcap_str) != HWCAP_NR_MAX);
	seq_printf(m, "vendor_id : IBM/S390\n"
		   "# processors : %i\n"
		   "bogomips per cpu: %lu.%02lu\n",
		   num_online_cpus(), loops_per_jiffy/(500000/HZ),
		   (loops_per_jiffy/(5000/HZ))%100);
	seq_printf(m, "max thread id : %d\n", smp_cpu_mtid);
	seq_puts(m, "features\t: ");
	for (i = 0; i < ARRAY_SIZE(hwcap_str); i++)
		if (hwcap_str[i] && (elf_hwcap & (1UL << i)))
			seq_printf(m, "%s ", hwcap_str[i]);
	seq_puts(m, "\n");
	show_facilities(m);
	show_cacheinfo(m);
	/* one identification line per online CPU */
	for_each_online_cpu(cpu) {
		struct cpuid *id = &per_cpu(cpu_info.cpu_id, cpu);

		seq_printf(m, "processor %d: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   cpu, id->version, id->ident, id->machine);
	}
}
161
/*
 * setup_hwcaps - derive the hardware-capability bits (AT_HWCAP) from
 * the installed facilities and machine flags.  Runs once at boot.
 */
static int __init setup_hwcaps(void)
{
	/* instructions named N3, "backported" to esa-mode */
	elf_hwcap |= HWCAP_ESAN3;

	/* z/Architecture mode active */
	elf_hwcap |= HWCAP_ZARCH;

	/* store-facility-list-extended */
	if (test_facility(7))
		elf_hwcap |= HWCAP_STFLE;

	/* message-security assist */
	if (test_facility(17))
		elf_hwcap |= HWCAP_MSA;

	/* long-displacement */
	if (test_facility(19))
		elf_hwcap |= HWCAP_LDISP;

	/* extended-immediate */
	elf_hwcap |= HWCAP_EIMM;

	/* extended-translation facility 3 enhancement */
	if (test_facility(22) && test_facility(30))
		elf_hwcap |= HWCAP_ETF3EH;

	/* decimal floating point & perform floating point operation */
	if (test_facility(42) && test_facility(44))
		elf_hwcap |= HWCAP_DFP;

	/* huge page support */
	if (MACHINE_HAS_EDAT1)
		elf_hwcap |= HWCAP_HPAGE;

	/* 64-bit register support for 31-bit processes */
	elf_hwcap |= HWCAP_HIGH_GPRS;

	/* transactional execution */
	if (MACHINE_HAS_TE)
		elf_hwcap |= HWCAP_TE;

	/*
	 * Vector facilities; the sub-facilities are only reported when
	 * the base vector facility (129) is installed.
	 */
	if (test_facility(129)) {
		elf_hwcap |= HWCAP_VXRS;
		if (test_facility(134))
			elf_hwcap |= HWCAP_VXRS_BCD;
		if (test_facility(135))
			elf_hwcap |= HWCAP_VXRS_EXT;
		if (test_facility(148))
			elf_hwcap |= HWCAP_VXRS_EXT2;
		if (test_facility(152))
			elf_hwcap |= HWCAP_VXRS_PDE;
		if (test_facility(192))
			elf_hwcap |= HWCAP_VXRS_PDE2;
	}

	/* sort facility */
	if (test_facility(150))
		elf_hwcap |= HWCAP_SORT;

	/* deflate facility */
	if (test_facility(151))
		elf_hwcap |= HWCAP_DFLT;

	/* neural-network-processing assist */
	if (test_facility(165))
		elf_hwcap |= HWCAP_NNPA;

	/* guarded storage */
	if (MACHINE_HAS_GS)
		elf_hwcap |= HWCAP_GS;

	/* PCI memory I/O instructions usable */
	if (MACHINE_HAS_PCI_MIO)
		elf_hwcap |= HWCAP_PCI_MIO;

	/* virtualization support */
	if (sclp.has_sief2)
		elf_hwcap |= HWCAP_SIE;

	return 0;
}
arch_initcall(setup_hwcaps);
242
/*
 * Derive the ELF platform string (AT_PLATFORM) from the machine type
 * and mix the CPU id into the entropy pool.
 */
static int __init setup_elf_platform(void)
{
	struct cpuid cpu_id;

	get_cpu_id(&cpu_id);
	add_device_randomness(&cpu_id, sizeof(cpu_id));
	switch (cpu_id.machine) {
	default: /* Use "z10" as default. */
		strcpy(elf_platform, "z10");
		break;
	case 0x2817:
	case 0x2818:
		strcpy(elf_platform, "z196");
		break;
	case 0x2827:
	case 0x2828:
		strcpy(elf_platform, "zEC12");
		break;
	case 0x2964:
	case 0x2965:
		strcpy(elf_platform, "z13");
		break;
	case 0x3906:
	case 0x3907:
		strcpy(elf_platform, "z14");
		break;
	case 0x8561:
	case 0x8562:
		strcpy(elf_platform, "z15");
		break;
	case 0x3931:
	case 0x3932:
		strcpy(elf_platform, "z16");
		break;
	}
	return 0;
}
arch_initcall(setup_elf_platform);
281
/* Per-CPU topology lines; only emitted with CONFIG_SCHED_TOPOLOGY. */
static void show_cpu_topology(struct seq_file *m, unsigned long n)
{
#ifdef CONFIG_SCHED_TOPOLOGY
	seq_printf(m, "physical id : %d\n", topology_physical_package_id(n));
	seq_printf(m, "core id : %d\n", topology_core_id(n));
	seq_printf(m, "book id : %d\n", topology_book_id(n));
	seq_printf(m, "drawer id : %d\n", topology_drawer_id(n));
	seq_printf(m, "dedicated : %d\n", topology_cpu_dedicated(n));
	seq_printf(m, "address : %d\n", smp_cpu_get_cpu_address(n));
	seq_printf(m, "siblings : %d\n", cpumask_weight(topology_core_cpumask(n)));
	seq_printf(m, "cpu cores : %d\n", topology_booted_cores(n));
#endif /* CONFIG_SCHED_TOPOLOGY */
}

/* Print version/identification/machine of cpu @n from the cached cpuid. */
static void show_cpu_ids(struct seq_file *m, unsigned long n)
{
	struct cpuid *id = &per_cpu(cpu_info.cpu_id, n);

	seq_printf(m, "version : %02X\n", id->version);
	seq_printf(m, "identification : %06X\n", id->ident);
	seq_printf(m, "machine : %04X\n", id->machine);
}

/* Print dynamic/static MHz of cpu @n, if the machine reports them. */
static void show_cpu_mhz(struct seq_file *m, unsigned long n)
{
	struct cpu_info *c = per_cpu_ptr(&cpu_info, n);

	if (!machine_has_cpu_mhz)
		return;
	seq_printf(m, "cpu MHz dynamic : %d\n", c->cpu_mhz_dynamic);
	seq_printf(m, "cpu MHz static : %d\n", c->cpu_mhz_static);
}
314
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */
static int show_cpuinfo(struct seq_file *m, void *v)
{
	/* v is the iterator token from c_update(): cpu number + 1 */
	unsigned long n = (unsigned long) v - 1;
	unsigned long first = cpumask_first(cpu_online_mask);

	/* the summary block is printed once, before the first online CPU */
	if (n == first)
		show_cpu_summary(m, v);
	seq_printf(m, "\ncpu number : %ld\n", n);
	show_cpu_topology(m, n);
	show_cpu_ids(m, n);
	show_cpu_mhz(m, n);
	return 0;
}

/*
 * Advance *pos to the next online CPU (or the first one when *pos is
 * 0) and return an opaque iterator token.  The token is cpu + 1 so
 * that CPU 0 is distinguishable from the NULL end-of-sequence marker.
 */
static inline void *c_update(loff_t *pos)
{
	if (*pos)
		*pos = cpumask_next(*pos - 1, cpu_online_mask);
	else
		*pos = cpumask_first(cpu_online_mask);
	return *pos < nr_cpu_ids ? (void *)*pos + 1 : NULL;
}

/* seq_file start: hold the CPU hotplug read lock for the whole walk. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	cpus_read_lock();
	return c_update(pos);
}

/* seq_file next: advance to the following online CPU. */
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_update(pos);
}

/* seq_file stop: drop the hotplug lock taken in c_start(). */
static void c_stop(struct seq_file *m, void *v)
{
	cpus_read_unlock();
}

/* Operations backing /proc/cpuinfo. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = show_cpuinfo,
};