// SPDX-License-Identifier: GPL-2.0-only
#include <xen/xen.h>
#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/hvm.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/params.h>
#include <xen/features.h>
#include <xen/platform_pci.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <xen/interface/sched.h>
#include <xen/xen-ops.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <asm/system_misc.h>
#include <asm/efi.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/console.h>
#include <linux/pvclock_gtod.h>
#include <linux/reboot.h>
#include <linux/time64.h>
#include <linux/timekeeping.h>
#include <linux/timekeeper_internal.h>
#include <linux/acpi.h>
#include <linux/virtio_anchor.h>

#include <linux/mm.h>

static struct start_info _xen_start_info;
struct start_info *xen_start_info = &_xen_start_info;
EXPORT_SYMBOL(xen_start_info);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
EXPORT_SYMBOL(xen_domain_type);

struct shared_info xen_dummy_shared_info;
struct shared_info *HYPERVISOR_shared_info = (void *)&xen_dummy_shared_info;

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
static struct vcpu_info __percpu *xen_vcpu_info;

/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

/* These are unused until we support booting "pre-ballooned" */
unsigned long xen_released_pages;
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

static __read_mostly unsigned int xen_events_irq;
static __read_mostly phys_addr_t xen_grant_frames;

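/*
 * Note: these index the "reg" entries of the /hypervisor device-tree node.
 * Entry 0 describes the grant table space (used by xen_dt_guest_init()),
 * entries from 1 onwards describe the extended regions consumed by
 * arch_xen_unpopulated_init() below.
 */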
#define GRANT_TABLE_INDEX	0
#define EXT_REGION_INDEX	1

uint32_t xen_start_flags;
EXPORT_SYMBOL(xen_start_flags);

int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	return xen_xlate_unmap_gfn_range(vma, nr, pages);
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);

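/*
 * Note: the shared_info wallclock fields hold the wall-clock time at system
 * boot and are guarded by a version counter (odd while an update is in
 * progress), so the current wall time is that value plus the monotonic time
 * elapsed since boot.
 */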
static void xen_read_wallclock(struct timespec64 *ts)
{
	u32 version;
	struct timespec64 now, ts_monotonic;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);

	/* get wallclock at system boot */
	do {
		version = wall_clock->version;
		rmb();		/* fetch version before time */
		now.tv_sec = ((uint64_t)wall_clock->sec_hi << 32) | wall_clock->sec;
		now.tv_nsec = wall_clock->nsec;
		rmb();		/* fetch time before checking version */
	} while ((wall_clock->version & 1) || (version != wall_clock->version));

	/* time since system boot */
	ktime_get_ts64(&ts_monotonic);
	*ts = timespec64_add(now, ts_monotonic);
}

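/*
 * Note: called from the timekeeping core on every clock update; registered
 * only for the initial domain (see xen_guest_init()), which uses it to push
 * the new wall-clock time to Xen via XENPF_settime64, rate-limited below to
 * clock-set events and the 11-minute resync interval.
 */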
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec64 next_sync;

	struct xen_platform_op op;
	struct timespec64 now, system_time;
	struct timekeeper *tk = priv;

	now.tv_sec = tk->xtime_sec;
	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	system_time = timespec64_add(now, tk->wall_to_monotonic);

	/*
	 * We only make the expensive hypervisor call when the clock was
	 * set or when the 11-minute RTC synchronization period has elapsed.
	 */
	if (!was_set && timespec64_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime64;
	op.u.settime64.mbz = 0;
	op.u.settime64.secs = now.tv_sec;
	op.u.settime64.nsecs = now.tv_nsec;
	op.u.settime64.system_time = timespec64_to_ns(&system_time);
	(void)HYPERVISOR_platform_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}

static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

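/*
 * Note: CPU hotplug "starting" callback. Registers this CPU's vcpu_info
 * page with the hypervisor (at most once per vCPU) and then enables the
 * per-cpu event channel interrupt.
 */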
static int xen_starting_cpu(unsigned int cpu)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;

	/*
	 * VCPUOP_register_vcpu_info cannot be called twice for the same
	 * vcpu, so if vcpu_info is already registered, just get out. This
	 * can happen with cpu-hotplug.
	 */
	if (per_cpu(xen_vcpu, cpu) != NULL)
		goto after_register_vcpu_info;

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = percpu_to_gfn(vcpup);
	info.offset = xen_offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu),
				 &info);
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

after_register_vcpu_info:
	enable_percpu_irq(xen_events_irq, 0);
	return 0;
}

static int xen_dying_cpu(unsigned int cpu)
{
	disable_percpu_irq(xen_events_irq);
	return 0;
}

void xen_reboot(int reason)
{
	struct sched_shutdown r = { .reason = reason };
	int rc;

	rc = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
	BUG_ON(rc);
}

static int xen_restart(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	xen_reboot(SHUTDOWN_reboot);

	return NOTIFY_DONE;
}

static struct notifier_block xen_restart_nb = {
	.notifier_call = xen_restart,
	.priority = 192,
};

static void xen_power_off(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static irqreturn_t xen_arm_callback(int irq, void *arg)
{
	xen_evtchn_do_upcall();
	return IRQ_HANDLED;
}

static __initdata struct {
	const char *compat;
	const char *prefix;
	const char *version;
	bool found;
} hyper_node = {"xen,xen", "xen,xen-", NULL, false};

static int __init fdt_find_hyper_node(unsigned long node, const char *uname,
				      int depth, void *data)
{
	const void *s = NULL;
	int len;

	if (depth != 1 || strcmp(uname, "hypervisor") != 0)
		return 0;

	if (of_flat_dt_is_compatible(node, hyper_node.compat))
		hyper_node.found = true;

	s = of_get_flat_dt_prop(node, "compatible", &len);
	if (strlen(hyper_node.prefix) + 3 < len &&
	    !strncmp(hyper_node.prefix, s, strlen(hyper_node.prefix)))
		hyper_node.version = s + strlen(hyper_node.prefix);

	/*
	 * Check whether Xen supports EFI by checking for a
	 * "/hypervisor/uefi" node in the DT. If it is present, runtime
	 * services are available through proxy functions (e.g. in the Xen
	 * dom0 EFI implementation they issue a hypercall which executes the
	 * relevant EFI functions) and that is why they are always enabled.
	 */
	if (IS_ENABLED(CONFIG_XEN_EFI)) {
		if ((of_get_flat_dt_subnode_by_name(node, "uefi") > 0) &&
		    !efi_runtime_disabled())
			set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
	}

	return 0;
}

/*
 * see Documentation/devicetree/bindings/arm/xen.txt for the
 * documentation of the Xen Device Tree format.
 */
void __init xen_early_init(void)
{
	of_scan_flat_dt(fdt_find_hyper_node, NULL);
	if (!hyper_node.found) {
		pr_debug("No Xen support\n");
		return;
	}

	if (hyper_node.version == NULL) {
		pr_debug("Xen version not found\n");
		return;
	}

	pr_info("Xen %s support found\n", hyper_node.version);

	xen_domain_type = XEN_HVM_DOMAIN;

	xen_setup_features();

	if (xen_feature(XENFEAT_dom0))
		xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;

	if (!console_set_on_cmdline && !xen_initial_domain())
		add_preferred_console("hvc", 0, NULL);
}

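/*
 * Note: ACPI boot path. The event channel upcall interrupt is advertised via
 * the HVM_PARAM_CALLBACK_IRQ parameter: the top byte must indicate a PPI,
 * the low byte carries the interrupt number, and bits 8-9 the trigger and
 * polarity, which are registered as a GSI with ACPI below.
 */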
static void __init xen_acpi_guest_init(void)
{
#ifdef CONFIG_ACPI
	struct xen_hvm_param a;
	int interrupt, trigger, polarity;

	a.domid = DOMID_SELF;
	a.index = HVM_PARAM_CALLBACK_IRQ;

	if (HYPERVISOR_hvm_op(HVMOP_get_param, &a)
	    || (a.value >> 56) != HVM_PARAM_CALLBACK_TYPE_PPI) {
		xen_events_irq = 0;
		return;
	}

	interrupt = a.value & 0xff;
	trigger = ((a.value >> 8) & 0x1) ? ACPI_EDGE_SENSITIVE
					 : ACPI_LEVEL_SENSITIVE;
	polarity = ((a.value >> 8) & 0x2) ? ACPI_ACTIVE_LOW
					  : ACPI_ACTIVE_HIGH;
	xen_events_irq = acpi_register_gsi(NULL, interrupt, trigger, polarity);
#endif
}

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
/*
 * A type-less specific Xen resource which contains extended regions
 * (unused regions of guest physical address space provided by the hypervisor).
 */
static struct resource xen_resource = {
	.name = "Xen unused space",
};

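/*
 * Note: builds one resource spanning all extended regions advertised in the
 * /hypervisor node (skipping entry 0, the grant table space). The gaps
 * between regions are inserted as "Unavailable space" so the
 * unpopulated-alloc code only hands out addresses inside the regions.
 */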
int __init arch_xen_unpopulated_init(struct resource **res)
{
	struct device_node *np;
	struct resource *regs, *tmp_res;
	uint64_t min_gpaddr = -1, max_gpaddr = 0;
	unsigned int i, nr_reg = 0;
	int rc;

	if (!xen_domain())
		return -ENODEV;

	if (!acpi_disabled)
		return -ENODEV;

	np = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (WARN_ON(!np))
		return -ENODEV;

	/* Skip region 0 which is reserved for grant table space */
	while (of_get_address(np, nr_reg + EXT_REGION_INDEX, NULL, NULL))
		nr_reg++;

	if (!nr_reg) {
		pr_err("No extended regions are found\n");
		of_node_put(np);
		return -EINVAL;
	}

	regs = kcalloc(nr_reg, sizeof(*regs), GFP_KERNEL);
	if (!regs) {
		of_node_put(np);
		return -ENOMEM;
	}

	/*
	 * Create resource from extended regions provided by the hypervisor to be
	 * used as unused address space for Xen scratch pages.
	 */
	for (i = 0; i < nr_reg; i++) {
		rc = of_address_to_resource(np, i + EXT_REGION_INDEX, &regs[i]);
		if (rc)
			goto err;

		if (max_gpaddr < regs[i].end)
			max_gpaddr = regs[i].end;
		if (min_gpaddr > regs[i].start)
			min_gpaddr = regs[i].start;
	}

	xen_resource.start = min_gpaddr;
	xen_resource.end = max_gpaddr;

	/*
	 * Mark holes between extended regions as unavailable. The rest of that
	 * address space will be available for the allocation.
	 */
	for (i = 1; i < nr_reg; i++) {
		resource_size_t start, end;

		/* There is an overlap between regions */
		if (regs[i - 1].end + 1 > regs[i].start) {
			rc = -EINVAL;
			goto err;
		}

		/* There is no hole between regions */
		if (regs[i - 1].end + 1 == regs[i].start)
			continue;

		start = regs[i - 1].end + 1;
		end = regs[i].start - 1;

		tmp_res = kzalloc(sizeof(*tmp_res), GFP_KERNEL);
		if (!tmp_res) {
			rc = -ENOMEM;
			goto err;
		}

		tmp_res->name = "Unavailable space";
		tmp_res->start = start;
		tmp_res->end = end;

		rc = insert_resource(&xen_resource, tmp_res);
		if (rc) {
			pr_err("Cannot insert resource %pR (%d)\n", tmp_res, rc);
			kfree(tmp_res);
			goto err;
		}
	}

	*res = &xen_resource;

err:
	of_node_put(np);
	kfree(regs);
	return rc;
}
#endif

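/*
 * Note: DT boot path. The event channel upcall interrupt comes from the
 * "interrupts" property of the xen,xen node, and the grant table space from
 * its first "reg" entry.
 */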
static void __init xen_dt_guest_init(void)
{
	struct device_node *xen_node;
	struct resource res;

	xen_node = of_find_compatible_node(NULL, NULL, "xen,xen");
	if (!xen_node) {
		pr_err("Xen support was detected before, but it has disappeared\n");
		return;
	}

	xen_events_irq = irq_of_parse_and_map(xen_node, 0);

	if (of_address_to_resource(xen_node, GRANT_TABLE_INDEX, &res)) {
		pr_err("Xen grant table region is not found\n");
		of_node_put(xen_node);
		return;
	}
	of_node_put(xen_node);
	xen_grant_frames = res.start;
}

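/*
 * Note: main guest initialisation. Discovers the event channel interrupt
 * (ACPI or DT), maps the shared info page with XENMAPSPACE_shared_info,
 * allocates the per-cpu vcpu_info area, sets up the grant table frames, and
 * finally requests the per-cpu events IRQ and the CPU hotplug callbacks.
 */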
static int __init xen_guest_init(void)
{
	struct xen_add_to_physmap xatp;
	struct shared_info *shared_info_page = NULL;
	int rc, cpu;

	if (!xen_domain())
		return 0;

	if (IS_ENABLED(CONFIG_XEN_VIRTIO))
		virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc);

	if (!acpi_disabled)
		xen_acpi_guest_init();
	else
		xen_dt_guest_init();

	if (!xen_events_irq) {
		pr_err("Xen event channel interrupt not found\n");
		return -ENODEV;
	}

	/*
	 * The fdt parsing code has set EFI_RUNTIME_SERVICES if Xen EFI
	 * parameters were found. Force-enable runtime services.
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		xen_efi_runtime_setup();

	shared_info_page = (struct shared_info *)get_zeroed_page(GFP_KERNEL);

	if (!shared_info_page) {
		pr_err("not enough memory\n");
		return -ENOMEM;
	}
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = virt_to_gfn(shared_info_page);
	if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
		BUG();

	HYPERVISOR_shared_info = (struct shared_info *)shared_info_page;

	/* xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions.
	 * The shared info contains exactly 1 CPU (the boot CPU). The guest
	 * is required to use VCPUOP_register_vcpu_info to place vcpu info
	 * for secondary CPUs as they are brought up.
	 * For uniformity we use VCPUOP_register_vcpu_info even on cpu0.
	 */
	xen_vcpu_info = __alloc_percpu(sizeof(struct vcpu_info),
				       1 << fls(sizeof(struct vcpu_info) - 1));
	if (xen_vcpu_info == NULL)
		return -ENOMEM;

	/* Direct vCPU id mapping for ARM guests. */
	for_each_possible_cpu(cpu)
		per_cpu(xen_vcpu_id, cpu) = cpu;

	if (!xen_grant_frames) {
		xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames();
		rc = xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn,
						   &xen_auto_xlat_grant_frames.vaddr,
						   xen_auto_xlat_grant_frames.count);
	} else
		rc = gnttab_setup_auto_xlat_frames(xen_grant_frames);
	if (rc) {
		free_percpu(xen_vcpu_info);
		return rc;
	}
	gnttab_init();

	/*
	 * Make sure board-specific code will not set up ops for
	 * cpuidle and cpufreq.
	 */
	disable_cpuidle();
	disable_cpufreq();

	xen_init_IRQ();

	if (request_percpu_irq(xen_events_irq, xen_arm_callback,
			       "events", &xen_vcpu)) {
		pr_err("Error requesting IRQ %d\n", xen_events_irq);
		return -EINVAL;
	}

	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);

	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING,
				 "arm/xen:starting", xen_starting_cpu,
				 xen_dying_cpu);
}
early_initcall(xen_guest_init);

static int xen_starting_runstate_cpu(unsigned int cpu)
{
	xen_setup_runstate_info(cpu);
	return 0;
}

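/*
 * Note: late setup. Installs the Xen power-off and restart handlers and,
 * for non-initial domains, initialises the system time from the Xen
 * wallclock. Runstate (steal time) accounting is only set up when the
 * kernel is not unmapped while running in userspace.
 */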
static int __init xen_late_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	pm_power_off = xen_power_off;
	register_restart_handler(&xen_restart_nb);
	if (!xen_initial_domain()) {
		struct timespec64 ts;
		xen_read_wallclock(&ts);
		do_settimeofday64(&ts);
	}

	if (xen_kernel_unmapped_at_usr())
		return 0;

	xen_time_setup_guest();

	return cpuhp_setup_state(CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
				 "arm/xen_runstate:starting",
				 xen_starting_runstate_cpu, NULL);
}
late_initcall(xen_late_init);


/* empty stubs */
void xen_arch_pre_suspend(void) { }
void xen_arch_post_suspend(int suspend_cancelled) { }
void xen_timer_resume(void) { }
void xen_arch_resume(void) { }
void xen_arch_suspend(void) { }


/* In the hypercall.S file. */
EXPORT_SYMBOL_GPL(HYPERVISOR_event_channel_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_grant_table_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_xen_version);
EXPORT_SYMBOL_GPL(HYPERVISOR_console_io);
EXPORT_SYMBOL_GPL(HYPERVISOR_sched_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_hvm_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_memory_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_physdev_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_vcpu_op);
EXPORT_SYMBOL_GPL(HYPERVISOR_platform_op_raw);
EXPORT_SYMBOL_GPL(HYPERVISOR_multicall);
EXPORT_SYMBOL_GPL(HYPERVISOR_vm_assist);
EXPORT_SYMBOL_GPL(HYPERVISOR_dm_op);
EXPORT_SYMBOL_GPL(privcmd_call);