// SPDX-License-Identifier: GPL-2.0
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
#include <linux/delay.h>
#include <linux/sched/isolation.h>

#include "base.h"

static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
        /* ACPI style match is the only one that may succeed. */
        if (acpi_driver_match_device(dev, drv))
                return 1;

        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
                                  unsigned int from_nid, unsigned int to_nid)
{
        int cpuid = cpu->dev.id;
        unregister_cpu_under_node(cpuid, from_nid);
        register_cpu_under_node(cpuid, to_nid);
        cpu->node_id = to_nid;
}

static int cpu_subsys_online(struct device *dev)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        int cpuid = dev->id;
        int from_nid, to_nid;
        int ret;
        int retries = 0;

        from_nid = cpu_to_node(cpuid);
        if (from_nid == NUMA_NO_NODE)
                return -ENODEV;

retry:
        ret = cpu_device_up(dev);

        /*
         * If -EBUSY is returned, hotplug has most likely been temporarily
         * disabled via cpu_hotplug_disable(). That condition is transient,
         * so retry after an exponentially increasing delay, up to a total
         * of at least 620ms, as some PCI device initialization can take
         * quite a while.
         */
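        /*
         * With retries capped at 5 below, the individual waits are
         * 10 * 2^n ms for n = 1..5, i.e. 20 + 40 + 80 + 160 + 320 = 620 ms
         * in total.
         */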
        if (ret == -EBUSY) {
                retries++;
                if (retries > 5)
                        return ret;
                msleep(10 * (1 << retries));
                goto retry;
        }

        /*
         * When hot-adding memory to a memoryless node and then onlining a
         * CPU on that node, the CPU's node number may change internally.
         */
        to_nid = cpu_to_node(cpuid);
        if (from_nid != to_nid)
                change_cpu_under_node(cpu, from_nid, to_nid);

        return ret;
}

static int cpu_subsys_offline(struct device *dev)
{
        return cpu_device_down(dev);
}

void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
        return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        ssize_t cnt;
        int ret;

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        cnt = arch_cpu_probe(buf, count);

        unlock_device_hotplug();
        return cnt;
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        ssize_t cnt;
        int ret;

        ret = lock_device_hotplug_sysfs();
        if (ret)
                return ret;

        cnt = arch_cpu_release(buf, count);

        unlock_device_hotplug();
        return cnt;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
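
/*
 * These attributes appear as /sys/devices/system/cpu/probe and
 * /sys/devices/system/cpu/release on architectures that implement
 * arch_cpu_probe()/arch_cpu_release() (e.g. powerpc pseries). Writing an
 * architecture-specific argument string hot-adds or releases a CPU,
 * roughly:
 *
 *      echo <arch-specific cpu argument> > /sys/devices/system/cpu/probe
 */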
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>

static ssize_t crash_notes_show(struct device *dev,
                                struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * We might be reading another CPU's data, depending on which CPU
         * the reading thread is scheduled on. But per-CPU data (memory) is
         * allocated once during boot and does not change thereafter, so
         * this operation is safe and needs no locking.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));

        return sysfs_emit(buf, "%llx\n", addr);
}
static DEVICE_ATTR_ADMIN_RO(crash_notes);

static ssize_t crash_notes_size_show(struct device *dev,
                                     struct device_attribute *attr,
                                     char *buf)
{
        return sysfs_emit(buf, "%zu\n", sizeof(note_buf_t));
}
static DEVICE_ATTR_ADMIN_RO(crash_notes_size);
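
/*
 * crash_notes and crash_notes_size are consumed by kdump userspace (e.g.
 * kexec-tools), which reads each CPU's crash note buffer address and size
 * to assemble the ELF core headers used by the crash kernel's /proc/vmcore.
 */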

static struct attribute *crash_note_cpu_attrs[] = {
        &dev_attr_crash_notes.attr,
        &dev_attr_crash_notes_size.attr,
        NULL
};

static const struct attribute_group crash_note_cpu_attr_group = {
        .attrs = crash_note_cpu_attrs,
};
#endif

static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC_CORE
        &crash_note_cpu_attr_group,
#endif
        NULL
};

static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC_CORE
        &crash_note_cpu_attr_group,
#endif
        NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);

        return cpumap_print_to_pagebuf(true, buf, ca->map);
}

#define _CPU_ATTR(name, map) \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &__cpu_online_mask),
        _CPU_ATTR(possible, &__cpu_possible_mask),
        _CPU_ATTR(present, &__cpu_present_mask),
};
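
/*
 * These entries back /sys/devices/system/cpu/{online,possible,present};
 * cpumap_print_to_pagebuf(true, ...) emits the masks in list format,
 * e.g. "0-3,6".
 */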

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%d\n", NR_CPUS - 1);
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int len = 0;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        len += sysfs_emit_at(buf, len, "%*pbl", cpumask_pr_args(offline));
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                len += sysfs_emit_at(buf, len, ",");

                if (nr_cpu_ids == total_cpus - 1)
                        len += sysfs_emit_at(buf, len, "%u", nr_cpu_ids);
                else
                        len += sysfs_emit_at(buf, len, "%u-%d",
                                             nr_cpu_ids, total_cpus - 1);
        }

        len += sysfs_emit_at(buf, len, "\n");

        return len;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

static ssize_t print_cpus_isolated(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        int len;
        cpumask_var_t isolated;

        if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
                return -ENOMEM;

        cpumask_andnot(isolated, cpu_possible_mask,
                       housekeeping_cpumask(HK_TYPE_DOMAIN));
        len = sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(isolated));

        free_cpumask_var(isolated);

        return len;
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);

#ifdef CONFIG_NO_HZ_FULL
static ssize_t print_cpus_nohz_full(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif

#ifdef CONFIG_CRASH_HOTPLUG
static ssize_t crash_hotplug_show(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        return sysfs_emit(buf, "%d\n", crash_hotplug_cpu_support());
}
static DEVICE_ATTR_ADMIN_RO(crash_hotplug);
#endif

static void cpu_device_release(struct device *dev)
{
        /*
         * This is an empty function to prevent the driver core from spitting a
         * warning at us. Yes, I know this is directly opposite of what the
         * documentation for the driver core and kobjects says, and the author
         * of this code has already been publicly ridiculed for doing
         * something as foolish as this. However, at this point in time, it is
         * the only way to handle the issue of statically allocated cpu
         * devices. The different architectures will have their cpu device
         * code reworked to properly handle this in the near future, so this
         * function will then be changed to correctly free up the memory held
         * by the cpu device.
         *
         * Never copy this way of doing things, or you too will be made fun of
         * on the linux-kernel list, you have been warned.
         */
}

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static ssize_t print_cpu_modalias(struct device *dev,
                                  struct device_attribute *attr,
                                  char *buf)
{
        int len = 0;
        u32 i;

        len += sysfs_emit_at(buf, len,
                             "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
                             CPU_FEATURE_TYPEVAL);

        for (i = 0; i < MAX_CPU_FEATURES; i++)
                if (cpu_have_feature(i)) {
                        if (len + sizeof(",XXXX\n") >= PAGE_SIZE) {
                                WARN(1, "CPU features overflow page\n");
                                break;
                        }
                        len += sysfs_emit_at(buf, len, ",%04X", i);
                }
        len += sysfs_emit_at(buf, len, "\n");
        return len;
}

static int cpu_uevent(const struct device *dev, struct kobj_uevent_env *env)
{
        char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (buf) {
                print_cpu_modalias(NULL, NULL, buf);
                add_uevent_var(env, "MODALIAS=%s", buf);
                kfree(buf);
        }
        return 0;
}
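
/*
 * The MODALIAS emitted above ("cpu:type:<type>:feature:,XXXX,...") matches
 * the aliases generated by MODULE_DEVICE_TABLE(cpu, ...), so udev can
 * autoload modules (for example optimized crypto drivers) once the CPU
 * advertises the corresponding feature.
 */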
#endif

struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
        .match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
        .online = cpu_subsys_online,
        .offline = cpu_subsys_offline,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
        .uevent = cpu_uevent,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);
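
/*
 * The ->online/->offline callbacks above are invoked by the driver core
 * (device_online()/device_offline()) when userspace toggles a CPU's
 * "online" attribute, e.g.:
 *
 *      echo 0 > /sys/devices/system/cpu/cpu1/online
 */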

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *        sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        memset(&cpu->dev, 0x00, sizeof(struct device));
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
        cpu->dev.release = cpu_device_release;
        cpu->dev.offline_disabled = !cpu->hotpluggable;
        cpu->dev.offline = !cpu_online(num);
        cpu->dev.of_node = of_get_cpu_node(num, NULL);
        cpu->dev.groups = common_cpu_attr_groups;
        if (cpu->hotpluggable)
                cpu->dev.groups = hotplugable_cpu_attr_groups;
        error = device_register(&cpu->dev);
        if (error) {
                put_device(&cpu->dev);
                return error;
        }

        per_cpu(cpu_sys_devices, num) = &cpu->dev;
        register_cpu_under_node(num, cpu_to_node(num));
        dev_pm_qos_expose_latency_limit(&cpu->dev,
                                        PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);

        return 0;
}

struct device *get_cpu_device(unsigned int cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

static void device_create_release(struct device *dev)
{
        kfree(dev);
}

__printf(4, 0)
static struct device *
__cpu_device_create(struct device *parent, void *drvdata,
                    const struct attribute_group **groups,
                    const char *fmt, va_list args)
{
        struct device *dev = NULL;
        int retval = -ENOMEM;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                goto error;

        device_initialize(dev);
        dev->parent = parent;
        dev->groups = groups;
        dev->release = device_create_release;
        device_set_pm_not_required(dev);
        dev_set_drvdata(dev, drvdata);

        retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
        if (retval)
                goto error;

        retval = device_add(dev);
        if (retval)
                goto error;

        return dev;

error:
        put_device(dev);
        return ERR_PTR(retval);
}

struct device *cpu_device_create(struct device *parent, void *drvdata,
                                 const struct attribute_group **groups,
                                 const char *fmt, ...)
{
        va_list vargs;
        struct device *dev;

        va_start(vargs, fmt);
        dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
        va_end(vargs);
        return dev;
}
EXPORT_SYMBOL_GPL(cpu_device_create);
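
/*
 * cpu_device_create() is typically used to hang auxiliary per-CPU devices
 * off the CPU's own device. A rough, illustrative example, similar to what
 * cacheinfo does:
 *
 *      struct device *dev;
 *
 *      dev = cpu_device_create(get_cpu_device(cpu), NULL, NULL, "cache");
 *      if (IS_ERR(dev))
 *              return PTR_ERR(dev);
 */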

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
        &dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
        &dev_attr_nohz_full.attr,
#endif
#ifdef CONFIG_CRASH_HOTPLUG
        &dev_attr_crash_hotplug.attr,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static const struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

bool cpu_is_hotpluggable(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        return dev && container_of(dev, struct cpu, dev)->hotpluggable
                && tick_nohz_cpu_hotpluggable(cpu);
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
DEFINE_PER_CPU(struct cpu, cpu_devices);

bool __weak arch_cpu_is_hotpluggable(int cpu)
{
        return false;
}

int __weak arch_register_cpu(int cpu)
{
        struct cpu *c = &per_cpu(cpu_devices, cpu);

        c->hotpluggable = arch_cpu_is_hotpluggable(cpu);

        return register_cpu(c, cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
void __weak arch_unregister_cpu(int num)
{
        unregister_cpu(&per_cpu(cpu_devices, num));
}
#endif /* CONFIG_HOTPLUG_CPU */
#endif /* CONFIG_GENERIC_CPU_DEVICES */

static void __init cpu_dev_register_generic(void)
{
        int i, ret;

        if (!IS_ENABLED(CONFIG_GENERIC_CPU_DEVICES))
                return;

        for_each_present_cpu(i) {
                ret = arch_register_cpu(i);
                if (ret)
                        pr_warn("register_cpu %d failed (%d)\n", i, ret);
        }
}

#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES
static ssize_t cpu_show_not_affected(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        return sysfs_emit(buf, "Not affected\n");
}

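/*
 * Default handlers: each cpu_show_<vuln>() declared below is a weak alias
 * for cpu_show_not_affected(), so a vulnerability file reads "Not affected"
 * unless the architecture overrides the handler with a strong definition
 * that reports its real mitigation state.
 */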
#define CPU_SHOW_VULN_FALLBACK(func)                                    \
        ssize_t cpu_show_##func(struct device *,                        \
                                struct device_attribute *, char *)      \
        __attribute__((weak, alias("cpu_show_not_affected")))

CPU_SHOW_VULN_FALLBACK(meltdown);
CPU_SHOW_VULN_FALLBACK(spectre_v1);
CPU_SHOW_VULN_FALLBACK(spectre_v2);
CPU_SHOW_VULN_FALLBACK(spec_store_bypass);
CPU_SHOW_VULN_FALLBACK(l1tf);
CPU_SHOW_VULN_FALLBACK(mds);
CPU_SHOW_VULN_FALLBACK(tsx_async_abort);
CPU_SHOW_VULN_FALLBACK(itlb_multihit);
CPU_SHOW_VULN_FALLBACK(srbds);
CPU_SHOW_VULN_FALLBACK(mmio_stale_data);
CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);
static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL);
static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL);
static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
        &dev_attr_meltdown.attr,
        &dev_attr_spectre_v1.attr,
        &dev_attr_spectre_v2.attr,
        &dev_attr_spec_store_bypass.attr,
        &dev_attr_l1tf.attr,
        &dev_attr_mds.attr,
        &dev_attr_tsx_async_abort.attr,
        &dev_attr_itlb_multihit.attr,
        &dev_attr_srbds.attr,
        &dev_attr_mmio_stale_data.attr,
        &dev_attr_retbleed.attr,
        &dev_attr_spec_rstack_overflow.attr,
        &dev_attr_gather_data_sampling.attr,
        NULL
};

static const struct attribute_group cpu_root_vulnerabilities_group = {
        .name  = "vulnerabilities",
        .attrs = cpu_root_vulnerabilities_attrs,
};

static void __init cpu_register_vulnerabilities(void)
{
        struct device *dev = bus_get_dev_root(&cpu_subsys);

        if (dev) {
                if (sysfs_create_group(&dev->kobj, &cpu_root_vulnerabilities_group))
                        pr_err("Unable to register CPU vulnerabilities\n");
                put_device(dev);
        }
}

#else
static inline void cpu_register_vulnerabilities(void) { }
#endif

void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();
        cpu_register_vulnerabilities();
}