v5.9
/******************************************************************************
 * pcpu.c
 * Manage physical CPUs in dom0: get pcpu info and provide a sysfs interface
 *
 * Copyright (c) 2012 Intel Corporation
 * Author: Liu, Jinsong <jinsong.liu@intel.com>
 * Author: Jiang, Yunhong <yunhong.jiang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen_cpu: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/stat.h>
#include <linux/capability.h>

#include <xen/xen.h>
#include <xen/acpi.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>


/*
 * @cpu_id: Xen physical cpu logical number
 * @flags: Xen physical cpu status flag
 * - XEN_PCPU_FLAGS_ONLINE: cpu is online
 * - XEN_PCPU_FLAGS_INVALID: cpu is not present
 */
struct pcpu {
	struct list_head list;
	struct device dev;
	uint32_t cpu_id;
	uint32_t flags;
};

static struct bus_type xen_pcpu_subsys = {
	.name = "xen_cpu",
	.dev_name = "xen_cpu",
};

static DEFINE_MUTEX(xen_pcpu_lock);

static LIST_HEAD(xen_pcpus);

static int xen_pcpu_down(uint32_t cpu_id)
{
	struct xen_platform_op op = {
		.cmd			= XENPF_cpu_offline,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.cpu_ol.cpuid		= cpu_id,
	};

	return HYPERVISOR_platform_op(&op);
}

static int xen_pcpu_up(uint32_t cpu_id)
{
	struct xen_platform_op op = {
		.cmd			= XENPF_cpu_online,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.cpu_ol.cpuid		= cpu_id,
	};

	return HYPERVISOR_platform_op(&op);
}

static ssize_t show_online(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct pcpu *cpu = container_of(dev, struct pcpu, dev);

	return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
}

static ssize_t __ref store_online(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pcpu *pcpu = container_of(dev, struct pcpu, dev);
	unsigned long long val;
	ssize_t ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoull(buf, 0, &val) < 0)
		return -EINVAL;

	switch (val) {
	case 0:
		ret = xen_pcpu_down(pcpu->cpu_id);
		break;
	case 1:
		ret = xen_pcpu_up(pcpu->cpu_id);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret >= 0)
		ret = count;
	return ret;
}
static DEVICE_ATTR(online, S_IRUGO | S_IWUSR, show_online, store_online);

static struct attribute *pcpu_dev_attrs[] = {
	&dev_attr_online.attr,
	NULL
};

static umode_t pcpu_dev_is_visible(struct kobject *kobj,
				   struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	/*
	 * Xen never offlines cpu0 due to several restrictions
	 * and assumptions, so simply don't expose the sysfs control
	 * for it; one cannot attempt to offline the BSP.
	 */
	return dev->id ? attr->mode : 0;
}

static const struct attribute_group pcpu_dev_group = {
	.attrs = pcpu_dev_attrs,
	.is_visible = pcpu_dev_is_visible,
};

static const struct attribute_group *pcpu_dev_groups[] = {
	&pcpu_dev_group,
	NULL
};

static bool xen_pcpu_online(uint32_t flags)
{
	return !!(flags & XEN_PCPU_FLAGS_ONLINE);
}

static void pcpu_online_status(struct xenpf_pcpuinfo *info,
			       struct pcpu *pcpu)
{
	if (xen_pcpu_online(info->flags) &&
	   !xen_pcpu_online(pcpu->flags)) {
		/* the pcpu is onlined */
		pcpu->flags |= XEN_PCPU_FLAGS_ONLINE;
		kobject_uevent(&pcpu->dev.kobj, KOBJ_ONLINE);
	} else if (!xen_pcpu_online(info->flags) &&
		    xen_pcpu_online(pcpu->flags)) {
		/* The pcpu is offlined */
		pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE;
		kobject_uevent(&pcpu->dev.kobj, KOBJ_OFFLINE);
	}
}

static struct pcpu *get_pcpu(uint32_t cpu_id)
{
	struct pcpu *pcpu;

	list_for_each_entry(pcpu, &xen_pcpus, list) {
		if (pcpu->cpu_id == cpu_id)
			return pcpu;
	}

	return NULL;
}

static void pcpu_release(struct device *dev)
{
	struct pcpu *pcpu = container_of(dev, struct pcpu, dev);

	list_del(&pcpu->list);
	kfree(pcpu);
}

static void unregister_and_remove_pcpu(struct pcpu *pcpu)
{
	struct device *dev;

	if (!pcpu)
		return;

	dev = &pcpu->dev;
	/* pcpu removal and freeing happen implicitly via pcpu_release() */
	device_unregister(dev);
}

static int register_pcpu(struct pcpu *pcpu)
{
	struct device *dev;
	int err = -EINVAL;

	if (!pcpu)
		return err;

	dev = &pcpu->dev;
	dev->bus = &xen_pcpu_subsys;
	dev->id = pcpu->cpu_id;
	dev->release = pcpu_release;
	dev->groups = pcpu_dev_groups;

	err = device_register(dev);
	if (err) {
		pcpu_release(dev);
		return err;
	}

	return 0;
}

static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)
{
	struct pcpu *pcpu;
	int err;

	if (info->flags & XEN_PCPU_FLAGS_INVALID)
		return ERR_PTR(-ENODEV);

	pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL);
	if (!pcpu)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pcpu->list);
	pcpu->cpu_id = info->xen_cpuid;
	pcpu->flags = info->flags;

	/* Need to hold xen_pcpu_lock before manipulating the pcpu list */
	list_add_tail(&pcpu->list, &xen_pcpus);

	err = register_pcpu(pcpu);
	if (err) {
		pr_warn("Failed to register pcpu%u\n", info->xen_cpuid);
		return ERR_PTR(-ENOENT);
	}

	return pcpu;
}

/*
 * Caller should hold the xen_pcpu_lock
 */
static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
{
	int ret;
	struct pcpu *pcpu = NULL;
	struct xenpf_pcpuinfo *info;
	struct xen_platform_op op = {
		.cmd                   = XENPF_get_cpuinfo,
		.interface_version     = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = cpu,
	};

	ret = HYPERVISOR_platform_op(&op);
	if (ret)
		return ret;

	info = &op.u.pcpu_info;
	if (max_cpu)
		*max_cpu = info->max_present;

	pcpu = get_pcpu(cpu);

	/*
	 * Only CPUs in the present map get a sysfs interface.
	 */
	if (info->flags & XEN_PCPU_FLAGS_INVALID) {
		unregister_and_remove_pcpu(pcpu);
		return 0;
	}

	if (!pcpu) {
		pcpu = create_and_register_pcpu(info);
		if (IS_ERR_OR_NULL(pcpu))
			return -ENODEV;
	} else
		pcpu_online_status(info, pcpu);

	return 0;
}

/*
 * Sync dom0's pcpu information with xen hypervisor's
 */
static int xen_sync_pcpus(void)
{
	/*
	 * The boot cpu always has cpu_id 0 in Xen
	 */
	uint32_t cpu = 0, max_cpu = 0;
	int err = 0;
	struct pcpu *pcpu, *tmp;

	mutex_lock(&xen_pcpu_lock);

	while (!err && (cpu <= max_cpu)) {
		err = sync_pcpu(cpu, &max_cpu);
		cpu++;
	}

	if (err)
		list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, list)
			unregister_and_remove_pcpu(pcpu);

	mutex_unlock(&xen_pcpu_lock);

	return err;
}

static void xen_pcpu_work_fn(struct work_struct *work)
{
	xen_sync_pcpus();
}
static DECLARE_WORK(xen_pcpu_work, xen_pcpu_work_fn);

static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
{
	schedule_work(&xen_pcpu_work);
	return IRQ_HANDLED;
}

/* Sync with the Xen hypervisor after a cpu is hot-added */
void xen_pcpu_hotplug_sync(void)
{
	schedule_work(&xen_pcpu_work);
}
EXPORT_SYMBOL_GPL(xen_pcpu_hotplug_sync);

/*
 * For a cpu presented by the hypervisor, return its logical cpu id;
 * for a cpu not presented by the hypervisor, return -ENODEV.
 */
int xen_pcpu_id(uint32_t acpi_id)
{
	int cpu_id = 0, max_id = 0;
	struct xen_platform_op op;

	op.cmd = XENPF_get_cpuinfo;
	while (cpu_id <= max_id) {
		op.u.pcpu_info.xen_cpuid = cpu_id;
		if (HYPERVISOR_platform_op(&op)) {
			cpu_id++;
			continue;
		}

		if (acpi_id == op.u.pcpu_info.acpi_id)
			return cpu_id;
		if (op.u.pcpu_info.max_present > max_id)
			max_id = op.u.pcpu_info.max_present;
		cpu_id++;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(xen_pcpu_id);

static int __init xen_pcpu_init(void)
{
	int irq, ret;

	if (!xen_initial_domain())
		return -ENODEV;

	irq = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0,
				      xen_pcpu_interrupt, 0,
				      "xen-pcpu", NULL);
	if (irq < 0) {
		pr_warn("Failed to bind pcpu virq\n");
		return irq;
	}

	ret = subsys_system_register(&xen_pcpu_subsys, NULL);
	if (ret) {
		pr_warn("Failed to register pcpu subsys\n");
		goto err1;
	}

	ret = xen_sync_pcpus();
	if (ret) {
		pr_warn("Failed to sync pcpu info\n");
		goto err2;
	}

	return 0;

err2:
	bus_unregister(&xen_pcpu_subsys);
err1:
	unbind_from_irqhandler(irq, NULL);
	return ret;
}
arch_initcall(xen_pcpu_init);
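
Usage note (not part of pcpu.c): the online attribute created above is reached through sysfs at /sys/devices/system/xen_cpu/xen_cpu<N>/online, the path following from subsys_system_register() with dev_name "xen_cpu" and dev->id = cpu_id. Below is a minimal userspace sketch of toggling a physical CPU from dom0; the helper name and the choice of pcpu 1 are illustrative, and the write requires CAP_SYS_ADMIN since store_online() enforces it.

/* Illustrative userspace helper, not part of the kernel sources above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int xen_pcpu_set_online(unsigned int cpu, int online)
{
	char path[64];
	const char *val = online ? "1" : "0";
	int fd;

	/* Attribute created by DEVICE_ATTR(online, ...) in pcpu.c */
	snprintf(path, sizeof(path),
		 "/sys/devices/system/xen_cpu/xen_cpu%u/online", cpu);

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return -1;
	}
	if (write(fd, val, strlen(val)) < 0) {
		perror("write");	/* -EPERM without CAP_SYS_ADMIN */
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}

int main(void)
{
	/* Offline Xen physical cpu 1; cpu0 never gets an online file. */
	return xen_pcpu_set_online(1, 0) ? 1 : 0;
}

Because pcpu_dev_is_visible() returns 0 for dev->id == 0, xen_cpu0 has no online file at all, so the BSP cannot be offlined this way.
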
v6.13.7
/******************************************************************************
 * pcpu.c
 * Manage physical CPUs in dom0: get pcpu info and provide a sysfs interface
 *
 * Copyright (c) 2012 Intel Corporation
 * Author: Liu, Jinsong <jinsong.liu@intel.com>
 * Author: Jiang, Yunhong <yunhong.jiang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen_cpu: " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/cpu.h>
#include <linux/stat.h>
#include <linux/capability.h>

#include <xen/xen.h>
#include <xen/acpi.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/interface/platform.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#ifdef CONFIG_ACPI
#include <acpi/processor.h>
#endif

/*
 * @cpu_id: Xen physical cpu logical number
 * @flags: Xen physical cpu status flag
 * - XEN_PCPU_FLAGS_ONLINE: cpu is online
 * - XEN_PCPU_FLAGS_INVALID: cpu is not present
 */
struct pcpu {
	struct list_head list;
	struct device dev;
	uint32_t cpu_id;
	uint32_t acpi_id;
	uint32_t flags;
};

static const struct bus_type xen_pcpu_subsys = {
	.name = "xen_cpu",
	.dev_name = "xen_cpu",
};

static DEFINE_MUTEX(xen_pcpu_lock);

static LIST_HEAD(xen_pcpus);

static int xen_pcpu_down(uint32_t cpu_id)
{
	struct xen_platform_op op = {
		.cmd			= XENPF_cpu_offline,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.cpu_ol.cpuid		= cpu_id,
	};

	return HYPERVISOR_platform_op(&op);
}

static int xen_pcpu_up(uint32_t cpu_id)
{
	struct xen_platform_op op = {
		.cmd			= XENPF_cpu_online,
		.interface_version	= XENPF_INTERFACE_VERSION,
		.u.cpu_ol.cpuid		= cpu_id,
	};

	return HYPERVISOR_platform_op(&op);
}

static ssize_t online_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct pcpu *cpu = container_of(dev, struct pcpu, dev);

	return sprintf(buf, "%u\n", !!(cpu->flags & XEN_PCPU_FLAGS_ONLINE));
}

static ssize_t __ref online_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t count)
{
	struct pcpu *pcpu = container_of(dev, struct pcpu, dev);
	unsigned long long val;
	ssize_t ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoull(buf, 0, &val) < 0)
		return -EINVAL;

	switch (val) {
	case 0:
		ret = xen_pcpu_down(pcpu->cpu_id);
		break;
	case 1:
		ret = xen_pcpu_up(pcpu->cpu_id);
		break;
	default:
		ret = -EINVAL;
	}

	if (ret >= 0)
		ret = count;
	return ret;
}
static DEVICE_ATTR_RW(online);

static struct attribute *pcpu_dev_attrs[] = {
	&dev_attr_online.attr,
	NULL
};

static umode_t pcpu_dev_is_visible(struct kobject *kobj,
				   struct attribute *attr, int idx)
{
	struct device *dev = kobj_to_dev(kobj);
	/*
	 * Xen never offlines cpu0 due to several restrictions
	 * and assumptions, so simply don't expose the sysfs control
	 * for it; one cannot attempt to offline the BSP.
	 */
	return dev->id ? attr->mode : 0;
}

static const struct attribute_group pcpu_dev_group = {
	.attrs = pcpu_dev_attrs,
	.is_visible = pcpu_dev_is_visible,
};

static const struct attribute_group *pcpu_dev_groups[] = {
	&pcpu_dev_group,
	NULL
};

static bool xen_pcpu_online(uint32_t flags)
{
	return !!(flags & XEN_PCPU_FLAGS_ONLINE);
}

static void pcpu_online_status(struct xenpf_pcpuinfo *info,
			       struct pcpu *pcpu)
{
	if (xen_pcpu_online(info->flags) &&
	   !xen_pcpu_online(pcpu->flags)) {
		/* the pcpu is onlined */
		pcpu->flags |= XEN_PCPU_FLAGS_ONLINE;
		kobject_uevent(&pcpu->dev.kobj, KOBJ_ONLINE);
	} else if (!xen_pcpu_online(info->flags) &&
		    xen_pcpu_online(pcpu->flags)) {
		/* The pcpu is offlined */
		pcpu->flags &= ~XEN_PCPU_FLAGS_ONLINE;
		kobject_uevent(&pcpu->dev.kobj, KOBJ_OFFLINE);
	}
}

static struct pcpu *get_pcpu(uint32_t cpu_id)
{
	struct pcpu *pcpu;

	list_for_each_entry(pcpu, &xen_pcpus, list) {
		if (pcpu->cpu_id == cpu_id)
			return pcpu;
	}

	return NULL;
}

static void pcpu_release(struct device *dev)
{
	struct pcpu *pcpu = container_of(dev, struct pcpu, dev);

	list_del(&pcpu->list);
	kfree(pcpu);
}

static void unregister_and_remove_pcpu(struct pcpu *pcpu)
{
	struct device *dev;

	if (!pcpu)
		return;

	dev = &pcpu->dev;
	/* pcpu removal and freeing happen implicitly via pcpu_release() */
	device_unregister(dev);
}

static int register_pcpu(struct pcpu *pcpu)
{
	struct device *dev;
	int err = -EINVAL;

	if (!pcpu)
		return err;

	dev = &pcpu->dev;
	dev->bus = &xen_pcpu_subsys;
	dev->id = pcpu->cpu_id;
	dev->release = pcpu_release;
	dev->groups = pcpu_dev_groups;

	err = device_register(dev);
	if (err) {
		put_device(dev);
		return err;
	}

	return 0;
}

static struct pcpu *create_and_register_pcpu(struct xenpf_pcpuinfo *info)
{
	struct pcpu *pcpu;
	int err;

	if (info->flags & XEN_PCPU_FLAGS_INVALID)
		return ERR_PTR(-ENODEV);

	pcpu = kzalloc(sizeof(struct pcpu), GFP_KERNEL);
	if (!pcpu)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&pcpu->list);
	pcpu->cpu_id = info->xen_cpuid;
	pcpu->acpi_id = info->acpi_id;
	pcpu->flags = info->flags;

	/* Need to hold xen_pcpu_lock before manipulating the pcpu list */
	list_add_tail(&pcpu->list, &xen_pcpus);

	err = register_pcpu(pcpu);
	if (err) {
		pr_warn("Failed to register pcpu%u\n", info->xen_cpuid);
		return ERR_PTR(-ENOENT);
	}

	return pcpu;
}

/*
 * Caller should hold the xen_pcpu_lock
 */
static int sync_pcpu(uint32_t cpu, uint32_t *max_cpu)
{
	int ret;
	struct pcpu *pcpu = NULL;
	struct xenpf_pcpuinfo *info;
	struct xen_platform_op op = {
		.cmd                   = XENPF_get_cpuinfo,
		.interface_version     = XENPF_INTERFACE_VERSION,
		.u.pcpu_info.xen_cpuid = cpu,
	};

	ret = HYPERVISOR_platform_op(&op);
	if (ret)
		return ret;

	info = &op.u.pcpu_info;
	if (max_cpu)
		*max_cpu = info->max_present;

	pcpu = get_pcpu(cpu);

	/*
	 * Only CPUs in the present map get a sysfs interface.
	 */
	if (info->flags & XEN_PCPU_FLAGS_INVALID) {
		unregister_and_remove_pcpu(pcpu);
		return 0;
	}

	if (!pcpu) {
		pcpu = create_and_register_pcpu(info);
		if (IS_ERR_OR_NULL(pcpu))
			return -ENODEV;
	} else
		pcpu_online_status(info, pcpu);

	return 0;
}

/*
 * Sync dom0's pcpu information with xen hypervisor's
 */
static int xen_sync_pcpus(void)
{
	/*
	 * The boot cpu always has cpu_id 0 in Xen
	 */
	uint32_t cpu = 0, max_cpu = 0;
	int err = 0;
	struct pcpu *pcpu, *tmp;

	mutex_lock(&xen_pcpu_lock);

	while (!err && (cpu <= max_cpu)) {
		err = sync_pcpu(cpu, &max_cpu);
		cpu++;
	}

	if (err)
		list_for_each_entry_safe(pcpu, tmp, &xen_pcpus, list)
			unregister_and_remove_pcpu(pcpu);

	mutex_unlock(&xen_pcpu_lock);

	return err;
}

static void xen_pcpu_work_fn(struct work_struct *work)
{
	xen_sync_pcpus();
}
static DECLARE_WORK(xen_pcpu_work, xen_pcpu_work_fn);

static irqreturn_t xen_pcpu_interrupt(int irq, void *dev_id)
{
	schedule_work(&xen_pcpu_work);
	return IRQ_HANDLED;
}

static int __init xen_pcpu_init(void)
{
	int irq, ret;

	if (!xen_initial_domain())
		return -ENODEV;

	irq = bind_virq_to_irqhandler(VIRQ_PCPU_STATE, 0,
				      xen_pcpu_interrupt, 0,
				      "xen-pcpu", NULL);
	if (irq < 0) {
		pr_warn("Failed to bind pcpu virq\n");
		return irq;
	}

	ret = subsys_system_register(&xen_pcpu_subsys, NULL);
	if (ret) {
		pr_warn("Failed to register pcpu subsys\n");
		goto err1;
	}

	ret = xen_sync_pcpus();
	if (ret) {
		pr_warn("Failed to sync pcpu info\n");
		goto err2;
	}

	return 0;

err2:
	bus_unregister(&xen_pcpu_subsys);
err1:
	unbind_from_irqhandler(irq, NULL);
	return ret;
}
arch_initcall(xen_pcpu_init);

#ifdef CONFIG_ACPI
bool __init xen_processor_present(uint32_t acpi_id)
{
	const struct pcpu *pcpu;
	bool online = false;

	mutex_lock(&xen_pcpu_lock);
	list_for_each_entry(pcpu, &xen_pcpus, list)
		if (pcpu->acpi_id == acpi_id) {
			online = pcpu->flags & XEN_PCPU_FLAGS_ONLINE;
			break;
		}
	mutex_unlock(&xen_pcpu_lock);

	return online;
}

void xen_sanitize_proc_cap_bits(uint32_t *cap)
{
	struct xen_platform_op op = {
		.cmd			= XENPF_set_processor_pminfo,
		.u.set_pminfo.id	= -1,
		.u.set_pminfo.type	= XEN_PM_PDC,
	};
	u32 buf[3] = { ACPI_PDC_REVISION_ID, 1, *cap };
	int ret;

	set_xen_guest_handle(op.u.set_pminfo.pdc, buf);
	ret = HYPERVISOR_platform_op(&op);
	if (ret)
		pr_err("sanitize of _PDC buffer bits from Xen failed: %d\n",
		       ret);
	else
		*cap = buf[2];
}
#endif
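
For context (not part of pcpu.c): xen_sanitize_proc_cap_bits() hands Xen a three-dword _PDC-style buffer — revision, dword count (1), and the capability mask — and, on success, reads the possibly clipped mask back from buf[2]. A minimal sketch of the calling pattern follows; the wrapper name and its use are hypothetical, the declaration is assumed to come from <xen/acpi.h>, and the real in-tree caller sits on the x86 ACPI side.

/* Hypothetical wrapper illustrating the calling pattern only. */
#include <linux/types.h>
#include <xen/xen.h>		/* xen_initial_domain() */
#include <xen/acpi.h>		/* xen_sanitize_proc_cap_bits() (assumed) */

static u32 example_filter_proc_cap(u32 cap)
{
	/*
	 * When running as dom0, let Xen strip any processor capability
	 * bits it does not want the kernel to advertise before the
	 * mask is used.
	 */
	if (xen_initial_domain())
		xen_sanitize_proc_cap_bits(&cap);

	return cap;
}

As the error message in xen_sanitize_proc_cap_bits() suggests, the hypervisor, not dom0, has the final say over which processor power-management capabilities are exposed.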