v3.1 (drivers/base/cpu.c)
 
/*
 * drivers/base/cpu.c - basic CPU class support
 */

#include <linux/sysdev.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>

#include "base.h"

static struct sysdev_class_attribute *cpu_sysdev_class_attrs[];

struct sysdev_class cpu_sysdev_class = {
	.name = "cpu",
	.attrs = cpu_sysdev_class_attrs,
};
EXPORT_SYMBOL(cpu_sysdev_class);

static DEFINE_PER_CPU(struct sys_device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
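/*
 * Per-CPU "online" control: reading /sys/devices/system/cpu/cpuN/online
 * reports the state; writing "0" calls cpu_down() and "1" calls cpu_up(),
 * emitting a KOBJ_OFFLINE/KOBJ_ONLINE uevent on success.
 */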
static ssize_t show_online(struct sys_device *dev, struct sysdev_attribute *attr,
			   char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);

	return sprintf(buf, "%u\n", !!cpu_online(cpu->sysdev.id));
}

static ssize_t __ref store_online(struct sys_device *dev, struct sysdev_attribute *attr,
				 const char *buf, size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret;

	cpu_hotplug_driver_lock();
	switch (buf[0]) {
	case '0':
		ret = cpu_down(cpu->sysdev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
		break;
	case '1':
		ret = cpu_up(cpu->sysdev.id);
		if (!ret)
			kobject_uevent(&dev->kobj, KOBJ_ONLINE);
		break;
	default:
		ret = -EINVAL;
	}
	cpu_hotplug_driver_unlock();

	if (ret >= 0)
		ret = count;
	return ret;
}
static SYSDEV_ATTR(online, 0644, show_online, store_online);

static void __cpuinit register_cpu_control(struct cpu *cpu)
{
	sysdev_create_file(&cpu->sysdev, &attr_online);
}
void unregister_cpu(struct cpu *cpu)
{
	int logical_cpu = cpu->sysdev.id;

	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

	sysdev_remove_file(&cpu->sysdev, &attr_online);

	sysdev_unregister(&cpu->sysdev);
	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
	return;
}

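/*
 * Optional arch hooks: writes to /sys/devices/system/cpu/probe and
 * .../release are passed straight through to arch_cpu_probe() and
 * arch_cpu_release() (used, e.g., by powerpc to add and remove
 * logical CPUs).
 */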
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct sysdev_class *class,
			       struct sysdev_class_attribute *attr,
			       const char *buf,
			       size_t count)
{
	return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct sysdev_class *class,
				 struct sysdev_class_attribute *attr,
				 const char *buf,
				 size_t count)
{
	return arch_cpu_release(buf, count);
}

static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */

#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

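/*
 * crash_notes exports the physical address of each CPU's ELF note
 * buffer, which kexec/kdump userspace reads to assemble the crash
 * dump's per-CPU register notes.
 */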
static ssize_t show_crash_notes(struct sys_device *dev, struct sysdev_attribute *attr,
				char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t rc;
	unsigned long long addr;
	int cpunum;

	cpunum = cpu->sysdev.id;

	/*
	 * Might be reading another cpu's data depending on which cpu the
	 * reading thread has been scheduled on. But cpu data (memory) is
	 * allocated once at boot and does not change thereafter, so the
	 * operation is safe. No locking required.
	 */
	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
	rc = sprintf(buf, "%Lx\n", addr);
	return rc;
}
static SYSDEV_ATTR(crash_notes, 0400, show_crash_notes, NULL);
#endif

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
	struct sysdev_class_attribute attr;
	const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct sysdev_class *class,
			      struct sysdev_class_attribute *attr,
			      char *buf)
{
	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
	int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

	buf[n++] = '\n';
	buf[n] = '\0';
	return n;
}

#define _CPU_ATTR(name, map)						\
	{ _SYSDEV_CLASS_ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_sysdev_class_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &cpu_online_mask),
	_CPU_ATTR(possible, &cpu_possible_mask),
	_CPU_ATTR(present, &cpu_present_mask),
};
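
/*
 * The entries above surface as /sys/devices/system/cpu/{online,possible,
 * present}, each printed in cpulist format such as "0-3,5".
 */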

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct sysdev_class *class,
				     struct sysdev_class_attribute *attr, char *buf)
{
	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
	return n;
}
static SYSDEV_CLASS_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct sysdev_class *class,
				  struct sysdev_class_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	n = cpulist_scnprintf(buf, len, offline);
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		if (n && n < len)
			buf[n++] = ',';

		if (nr_cpu_ids == total_cpus-1)
			n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
		else
			n += snprintf(&buf[n], len - n, "%d-%d",
						      nr_cpu_ids, total_cpus-1);
	}

	n += snprintf(&buf[n], len - n, "\n");
	return n;
}
static SYSDEV_CLASS_ATTR(offline, 0444, print_cpus_offline, NULL);

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *	  sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
	int error;
	cpu->node_id = cpu_to_node(num);
	cpu->sysdev.id = num;
	cpu->sysdev.cls = &cpu_sysdev_class;

	error = sysdev_register(&cpu->sysdev);

	if (!error && cpu->hotpluggable)
		register_cpu_control(cpu);
	if (!error)
		per_cpu(cpu_sys_devices, num) = &cpu->sysdev;
	if (!error)
		register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
	if (!error)
		error = sysdev_create_file(&cpu->sysdev, &attr_crash_notes);
#endif
	return error;
}

struct sys_device *get_cpu_sysdev(unsigned cpu)
{
	if (cpu < nr_cpu_ids && cpu_possible(cpu))
		return per_cpu(cpu_sys_devices, cpu);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_sysdev);

int __init cpu_dev_init(void)
{
	int err;

	err = sysdev_class_register(&cpu_sysdev_class);
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
	if (!err)
		err = sched_create_sysfs_power_savings_entries(&cpu_sysdev_class);
#endif

	return err;
}

static struct sysdev_class_attribute *cpu_sysdev_class_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&attr_probe,
	&attr_release,
#endif
	&cpu_attrs[0].attr,
	&cpu_attrs[1].attr,
	&cpu_attrs[2].attr,
	&attr_kernel_max,
	&attr_offline,
	NULL
};
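
Usage note: architectures consume the interface above by embedding a struct cpu per processor and registering each one at boot. A minimal sketch of such caller code, loosely modeled on arch topology-init code (the function name is illustrative, not part of this file):

	#include <linux/cpu.h>
	#include <linux/init.h>
	#include <linux/percpu.h>

	static DEFINE_PER_CPU(struct cpu, cpu_devices);

	static int __init example_topology_init(void)
	{
		int i;

		/* Mark CPUs hotpluggable so register_cpu_control() adds
		 * the "online" file, then register a sysdev for each. */
		for_each_present_cpu(i) {
			per_cpu(cpu_devices, i).hotpluggable = 1;
			register_cpu(&per_cpu(cpu_devices, i), i);
		}
		return 0;
	}
	subsys_initcall(example_topology_init);
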
v5.4 (drivers/base/cpu.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU subsystem support
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/acpi.h>
#include <linux/of.h>
#include <linux/cpufeature.h>
#include <linux/tick.h>
#include <linux/pm_qos.h>
#include <linux/sched/isolation.h>

#include "base.h"

static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

static int cpu_subsys_match(struct device *dev, struct device_driver *drv)
{
	/* ACPI style match is the only one that may succeed. */
	if (acpi_driver_match_device(dev, drv))
		return 1;

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void change_cpu_under_node(struct cpu *cpu,
			unsigned int from_nid, unsigned int to_nid)
{
	int cpuid = cpu->dev.id;
	unregister_cpu_under_node(cpuid, from_nid);
	register_cpu_under_node(cpuid, to_nid);
	cpu->node_id = to_nid;
}

static int cpu_subsys_online(struct device *dev)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	int cpuid = dev->id;
	int from_nid, to_nid;
	int ret;

	from_nid = cpu_to_node(cpuid);
	if (from_nid == NUMA_NO_NODE)
		return -ENODEV;

	ret = cpu_up(cpuid);
	/*
	 * When hot adding memory to a memoryless node and enabling a cpu
	 * on the node, the cpu's node number may change internally.
	 */
	to_nid = cpu_to_node(cpuid);
	if (from_nid != to_nid)
		change_cpu_under_node(cpu, from_nid, to_nid);

	return ret;
}

static int cpu_subsys_offline(struct device *dev)
{
	return cpu_down(dev->id);
}

void unregister_cpu(struct cpu *cpu)
{
	int logical_cpu = cpu->dev.id;

	unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

	device_unregister(&cpu->dev);
	per_cpu(cpu_sys_devices, logical_cpu) = NULL;
	return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
static ssize_t cpu_probe_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf,
			       size_t count)
{
	ssize_t cnt;
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	cnt = arch_cpu_probe(buf, count);

	unlock_device_hotplug();
	return cnt;
}

static ssize_t cpu_release_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t count)
{
	ssize_t cnt;
	int ret;

	ret = lock_device_hotplug_sysfs();
	if (ret)
		return ret;

	cnt = arch_cpu_release(buf, count);

	unlock_device_hotplug();
	return cnt;
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#endif /* CONFIG_HOTPLUG_CPU */

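/*
 * With the move from sysdev to a regular bus_type, the per-CPU "online"
 * attribute is handled by the driver core: writes to
 * /sys/devices/system/cpu/cpuN/online go through device_online()/
 * device_offline(), which invoke the ->online/->offline methods below.
 */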
struct bus_type cpu_subsys = {
	.name = "cpu",
	.dev_name = "cpu",
	.match = cpu_subsys_match,
#ifdef CONFIG_HOTPLUG_CPU
	.online = cpu_subsys_online,
	.offline = cpu_subsys_offline,
#endif
};
EXPORT_SYMBOL_GPL(cpu_subsys);

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>

static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
				char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, dev);
	ssize_t rc;
	unsigned long long addr;
	int cpunum;

	cpunum = cpu->dev.id;

	/*
	 * Might be reading another cpu's data depending on which cpu the
	 * reading thread has been scheduled on. But cpu data (memory) is
	 * allocated once at boot and does not change thereafter, so the
	 * operation is safe. No locking required.
	 */
	addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
	rc = sprintf(buf, "%Lx\n", addr);
	return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);

static ssize_t show_crash_notes_size(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	ssize_t rc;

	rc = sprintf(buf, "%zu\n", sizeof(note_buf_t));
	return rc;
}
static DEVICE_ATTR(crash_notes_size, 0400, show_crash_notes_size, NULL);

static struct attribute *crash_note_cpu_attrs[] = {
	&dev_attr_crash_notes.attr,
	&dev_attr_crash_notes_size.attr,
	NULL
};

static struct attribute_group crash_note_cpu_attr_group = {
	.attrs = crash_note_cpu_attrs,
};
#endif

static const struct attribute_group *common_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

static const struct attribute_group *hotplugable_cpu_attr_groups[] = {
#ifdef CONFIG_KEXEC
	&crash_note_cpu_attr_group,
#endif
	NULL
};

/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
	struct device_attribute attr;
	const struct cpumask *const map;
};

static ssize_t show_cpus_attr(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);

	return cpumap_print_to_pagebuf(true, buf, ca->map);
}

#define _CPU_ATTR(name, map) \
	{ __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
	_CPU_ATTR(online, &__cpu_online_mask),
	_CPU_ATTR(possible, &__cpu_possible_mask),
	_CPU_ATTR(present, &__cpu_present_mask),
};
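
/*
 * Note the direct references to the underlying __cpu_*_mask objects:
 * the cpu_*_mask names are now const-casting macros, so the extra
 * pointer indirection the old sysdev version needed is gone.
 */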

/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
	return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t offline;

	/* display offline cpus < nr_cpu_ids */
	if (!alloc_cpumask_var(&offline, GFP_KERNEL))
		return -ENOMEM;
	cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
	n = scnprintf(buf, len, "%*pbl", cpumask_pr_args(offline));
	free_cpumask_var(offline);

	/* display offline cpus >= nr_cpu_ids */
	if (total_cpus && nr_cpu_ids < total_cpus) {
		if (n && n < len)
			buf[n++] = ',';

		if (nr_cpu_ids == total_cpus-1)
			n += snprintf(&buf[n], len - n, "%u", nr_cpu_ids);
		else
			n += snprintf(&buf[n], len - n, "%u-%d",
						      nr_cpu_ids, total_cpus-1);
	}

	n += snprintf(&buf[n], len - n, "\n");
	return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);

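/*
 * "isolated" reports the CPUs excluded from the scheduler's housekeeping
 * (domain) set, and "nohz_full" (below) the adaptive-tick set; both are
 * carved out at boot via parameters such as isolcpus= and nohz_full=.
 */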
static ssize_t print_cpus_isolated(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;
	cpumask_var_t isolated;

	if (!alloc_cpumask_var(&isolated, GFP_KERNEL))
		return -ENOMEM;

	cpumask_andnot(isolated, cpu_possible_mask,
		       housekeeping_cpumask(HK_FLAG_DOMAIN));
	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated));

	free_cpumask_var(isolated);

	return n;
}
static DEVICE_ATTR(isolated, 0444, print_cpus_isolated, NULL);

#ifdef CONFIG_NO_HZ_FULL
static ssize_t print_cpus_nohz_full(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	int n = 0, len = PAGE_SIZE-2;

	n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(tick_nohz_full_mask));

	return n;
}
static DEVICE_ATTR(nohz_full, 0444, print_cpus_nohz_full, NULL);
#endif

static void cpu_device_release(struct device *dev)
{
	/*
	 * This is an empty function to prevent the driver core from spitting a
	 * warning at us.  Yes, I know this is directly opposite of what the
	 * documentation for the driver core and kobjects say, and the author
	 * of this code has already been publicly ridiculed for doing
	 * something as foolish as this.  However, at this point in time, it is
	 * the only way to handle the issue of statically allocated cpu
	 * devices.  The different architectures will have their cpu device
	 * code reworked to properly handle this in the near future, so this
	 * function will then be changed to correctly free up the memory held
	 * by the cpu device.
	 *
	 * Never copy this way of doing things, or you too will be made fun of
	 * on the linux-kernel list, you have been warned.
	 */
}

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
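/*
 * The modalias ("cpu:type:<arch-type>:feature:,XXXX,...") is exported
 * both as a sysfs attribute and in uevents, so udev can autoload
 * modules that declare cpu-feature aliases (e.g. via
 * MODULE_DEVICE_TABLE(cpu, ...)).
 */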
static ssize_t print_cpu_modalias(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	ssize_t n;
	u32 i;

	n = sprintf(buf, "cpu:type:" CPU_FEATURE_TYPEFMT ":feature:",
		    CPU_FEATURE_TYPEVAL);

	for (i = 0; i < MAX_CPU_FEATURES; i++)
		if (cpu_have_feature(i)) {
			if (PAGE_SIZE < n + sizeof(",XXXX\n")) {
				WARN(1, "CPU features overflow page\n");
				break;
			}
			n += sprintf(&buf[n], ",%04X", i);
		}
	buf[n++] = '\n';
	return n;
}

static int cpu_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (buf) {
		print_cpu_modalias(NULL, NULL, buf);
		add_uevent_var(env, "MODALIAS=%s", buf);
		kfree(buf);
	}
	return 0;
}
#endif

/*
 * register_cpu - Setup a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *	  sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int register_cpu(struct cpu *cpu, int num)
{
	int error;

	cpu->node_id = cpu_to_node(num);
	memset(&cpu->dev, 0x00, sizeof(struct device));
	cpu->dev.id = num;
	cpu->dev.bus = &cpu_subsys;
	cpu->dev.release = cpu_device_release;
	cpu->dev.offline_disabled = !cpu->hotpluggable;
	cpu->dev.offline = !cpu_online(num);
	cpu->dev.of_node = of_get_cpu_node(num, NULL);
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	cpu->dev.bus->uevent = cpu_uevent;
#endif
	cpu->dev.groups = common_cpu_attr_groups;
	if (cpu->hotpluggable)
		cpu->dev.groups = hotplugable_cpu_attr_groups;
	error = device_register(&cpu->dev);
	if (error) {
		put_device(&cpu->dev);
		return error;
	}

	per_cpu(cpu_sys_devices, num) = &cpu->dev;
	register_cpu_under_node(num, cpu_to_node(num));
	dev_pm_qos_expose_latency_limit(&cpu->dev,
					PM_QOS_RESUME_LATENCY_NO_CONSTRAINT);

	return 0;
}

struct device *get_cpu_device(unsigned cpu)
{
	if (cpu < nr_cpu_ids && cpu_possible(cpu))
		return per_cpu(cpu_sys_devices, cpu);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

static void device_create_release(struct device *dev)
{
	kfree(dev);
}

__printf(4, 0)
static struct device *
__cpu_device_create(struct device *parent, void *drvdata,
		    const struct attribute_group **groups,
		    const char *fmt, va_list args)
{
	struct device *dev = NULL;
	int retval = -ENODEV;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		retval = -ENOMEM;
		goto error;
	}

	device_initialize(dev);
	dev->parent = parent;
	dev->groups = groups;
	dev->release = device_create_release;
	device_set_pm_not_required(dev);
	dev_set_drvdata(dev, drvdata);

	retval = kobject_set_name_vargs(&dev->kobj, fmt, args);
	if (retval)
		goto error;

	retval = device_add(dev);
	if (retval)
		goto error;

	return dev;

error:
	put_device(dev);
	return ERR_PTR(retval);
}

struct device *cpu_device_create(struct device *parent, void *drvdata,
				 const struct attribute_group **groups,
				 const char *fmt, ...)
{
	va_list vargs;
	struct device *dev;

	va_start(vargs, fmt);
	dev = __cpu_device_create(parent, drvdata, groups, fmt, vargs);
	va_end(vargs);
	return dev;
}
EXPORT_SYMBOL_GPL(cpu_device_create);
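
/*
 * Illustrative use of cpu_device_create() (my_groups and "mychild" are
 * hypothetical): a driver wanting a child device under a CPU, in the
 * style of cacheinfo, might do:
 *
 *	struct device *dev = cpu_device_create(get_cpu_device(cpu), NULL,
 *						my_groups, "mychild%u", cpu);
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 */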

#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, print_cpu_modalias, NULL);
#endif

static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
	&dev_attr_probe.attr,
	&dev_attr_release.attr,
#endif
	&cpu_attrs[0].attr.attr,
	&cpu_attrs[1].attr.attr,
	&cpu_attrs[2].attr.attr,
	&dev_attr_kernel_max.attr,
	&dev_attr_offline.attr,
	&dev_attr_isolated.attr,
#ifdef CONFIG_NO_HZ_FULL
	&dev_attr_nohz_full.attr,
#endif
#ifdef CONFIG_GENERIC_CPU_AUTOPROBE
	&dev_attr_modalias.attr,
#endif
	NULL
};

static struct attribute_group cpu_root_attr_group = {
	.attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
	&cpu_root_attr_group,
	NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
	struct device *dev = get_cpu_device(cpu);
	return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif

static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
	int i;

	for_each_possible_cpu(i) {
		if (register_cpu(&per_cpu(cpu_devices, i), i))
			panic("Failed to register CPU device");
	}
#endif
}

#ifdef CONFIG_GENERIC_CPU_VULNERABILITIES

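/*
 * Generic defaults for /sys/devices/system/cpu/vulnerabilities/: each
 * __weak handler below reports "Not affected" unless the architecture
 * overrides it with its real detection/mitigation status.
 */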
ssize_t __weak cpu_show_meltdown(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v1(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spectre_v2(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_spec_store_bypass(struct device *dev,
					  struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_l1tf(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_mds(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_tsx_async_abort(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return sprintf(buf, "Not affected\n");
}

ssize_t __weak cpu_show_itlb_multihit(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "Not affected\n");
}

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL);
static DEVICE_ATTR(spec_store_bypass, 0444, cpu_show_spec_store_bypass, NULL);
static DEVICE_ATTR(l1tf, 0444, cpu_show_l1tf, NULL);
static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL);
static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL);
static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_meltdown.attr,
	&dev_attr_spectre_v1.attr,
	&dev_attr_spectre_v2.attr,
	&dev_attr_spec_store_bypass.attr,
	&dev_attr_l1tf.attr,
	&dev_attr_mds.attr,
	&dev_attr_tsx_async_abort.attr,
	&dev_attr_itlb_multihit.attr,
	NULL
};

static const struct attribute_group cpu_root_vulnerabilities_group = {
	.name  = "vulnerabilities",
	.attrs = cpu_root_vulnerabilities_attrs,
};

static void __init cpu_register_vulnerabilities(void)
{
	if (sysfs_create_group(&cpu_subsys.dev_root->kobj,
			       &cpu_root_vulnerabilities_group))
		pr_err("Unable to register CPU vulnerabilities\n");
}

#else
static inline void cpu_register_vulnerabilities(void) { }
#endif

void __init cpu_dev_init(void)
{
	if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
		panic("Failed to register CPU subsystem");

	cpu_dev_register_generic();
	cpu_register_vulnerabilities();
}
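
Usage note: the __weak vulnerability handlers above are meant to be overridden. An architecture that knows its mitigation state provides a non-weak definition with the same signature; a minimal sketch, where arch_meltdown_mitigated() is a hypothetical stand-in for the architecture's own detection logic (x86's real handler reports strings such as "Mitigation: PTI"):

	#include <linux/cpu.h>
	#include <linux/device.h>

	ssize_t cpu_show_meltdown(struct device *dev,
				  struct device_attribute *attr, char *buf)
	{
		/* arch_meltdown_mitigated() is a hypothetical helper
		 * standing in for real per-arch detection. */
		if (arch_meltdown_mitigated())
			return sprintf(buf, "Mitigation: PTI\n");
		return sprintf(buf, "Vulnerable\n");
	}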