// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/iscsi_ibft.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>
#include <linux/acpi.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* Memory map would allow PCI passthrough. */
bool xen_pv_pci_possible;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/* Number of initially usable memory pages. */
static unsigned long ini_nr_pages __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long next_area_mfn;
	unsigned long target_pfn;
	unsigned long size;
	unsigned long mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

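/*
 * Example (added for illustration): xen_parse_512gb() above gives the
 * following behaviour for the guest command line:
 *
 *   "... xen_512gb_limit ..."      -> limit enabled (bare token, no '=')
 *   "... xen_512gb_limit=0 ..."    -> limit disabled
 *   "... xen_512gb_limit=1 ..."    -> limit enabled
 *   "... xen_512gb_limit=bogus ..."-> value rejected by kstrtobool(),
 *                                     CONFIG_XEN_512GB default kept
 *
 * If the token is absent the compile-time default is kept as well.
 */
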
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Mid of region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

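/*
 * Example (added for illustration, hypothetical E820 layout): with RAM
 * covering pfns [0x1000, 0x8000) and [0x100000, 0x140000) and
 * *min_pfn == 0x2000, the first range already contains *min_pfn, so
 * *min_pfn stays 0x2000 and 0x6000 pfns are returned. With
 * *min_pfn == 0x9000 the first range is skipped, *min_pfn is advanced to
 * 0x100000 and 0x40000 pfns are returned.
 */
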
static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
						      unsigned long end_pfn)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, ini_nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn((void *)buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

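/*
 * Illustrative layout (added for clarity, values hypothetical): after two
 * chunks have been processed by the function above, the remap list lives
 * entirely in the first page of each identity-mapped chunk:
 *
 *   xen_remap_mfn -> page of chunk #2:
 *                      .next_area_mfn -> page of chunk #1
 *                      .target_pfn    = remap target PFN of chunk #2
 *                      .size          = number of valid entries in .mfns[]
 *                      .mfns[]        = original MFNs of chunk #2
 *
 * Only the virtual address of xen_remap_buf is reused; set_pte_mfn() points
 * it at whichever physical page is currently being filled.
 * xen_remap_memory() walks this list later via .next_area_mfn.
 */
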
/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 * 2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = ini_nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= ini_nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > ini_nr_pages)
			size = ini_nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
							   cur_pfn + left);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, their VA mappings need to be
	 * zapped.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			native_make_pte(0), 0);

	return remap_pfn;
}

static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn,
	unsigned long remap_pages)
{
	if (start_pfn >= ini_nr_pages)
		return remap_pages;

	return remap_pages + min(end_pfn, ini_nr_pages) - start_pfn;
}

static unsigned long __init xen_foreach_remap_area(
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping. This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, ret_val);
			start = end;
		}
	}

	return ret_val;
}

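/*
 * Example (added for illustration, hypothetical map): given the entries
 *
 *   [0x00000000, 0x0009fc00) RAM
 *   [0x0009fc00, 0x000a0000) reserved
 *   [0x000f0000, 0x00100000) reserved
 *   [0x00100000, 0x40000000) RAM
 *
 * func() is invoked once, for pfns [0x9f, 0x100): the two reserved entries
 * and the gap between them are combined, and the partial page at 0x9f000
 * is rounded down into the range so it stays reachable via the 1:1 mapping.
 */
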
/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn to remap to which pfn) is contained in
 * the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the chunks to be remapped in arbitrary
 * order; the resulting mapping is independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn((void *)buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);

	xen_do_remap_nonram();
}

static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}

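/*
 * Example (added for illustration, hypothetical values, 4 KiB pages): a RAM
 * region passed in as start = 0x1800, size = 0x4000 has end = 0x5800 and is
 * shrunk inward to [0x2000, 0x5000) before being added, so only whole pages
 * are reported as RAM. Non-RAM regions are added unmodified.
 */
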
static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

static bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory not yet reserved and compliant with
 * E820 map.
 * Used to relocate pre-allocated areas like initrd or p2m list which are in
 * conflict with the E820 map to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Swap a non-RAM E820 map entry with RAM above ini_nr_pages.
 * Note that the E820 map is modified accordingly, but the P2M map isn't yet.
 * The adaptation of the P2M must be deferred until page allocation is
 * possible.
 */
static void __init xen_e820_swap_entry_with_ram(struct e820_entry *swap_entry)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t mem_end = PFN_PHYS(ini_nr_pages);
	phys_addr_t swap_addr, swap_size, entry_end;

	swap_addr = PAGE_ALIGN_DOWN(swap_entry->addr);
	swap_size = PAGE_ALIGN(swap_entry->addr - swap_addr + swap_entry->size);
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		entry_end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM && entry->size >= swap_size &&
		    entry_end - swap_size >= mem_end) {
			/* Reduce RAM entry by needed space (whole pages). */
			entry->size -= swap_size;

			/* Add new entry at the end of E820 map. */
			entry = xen_e820_table.entries +
				xen_e820_table.nr_entries;
			xen_e820_table.nr_entries++;

			/* Fill new entry (keep size and page offset). */
			entry->type = swap_entry->type;
			entry->addr = entry_end - swap_size +
				      swap_entry->addr - swap_addr;
			entry->size = swap_entry->size;

			/* Convert old entry to RAM, align to pages. */
			swap_entry->type = E820_TYPE_RAM;
			swap_entry->addr = swap_addr;
			swap_entry->size = swap_size;

			/* Remember PFN<->MFN relation for P2M update. */
			xen_add_remap_nonram(swap_addr, entry_end - swap_size,
					     swap_size);

			/* Order E820 table and merge entries. */
			e820__update_table(&xen_e820_table);

			return;
		}

		entry++;
	}

	xen_raw_console_write("No suitable area found for required E820 entry remapping action\n");
	BUG();
}
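
/*
 * Worked example (added for illustration, hypothetical values, 4 KiB pages):
 * an ACPI NVS entry with addr = 0x7f801234 and size = 0x1800 gives
 * swap_addr = 0x7f801000 and swap_size = PAGE_ALIGN(0x234 + 0x1800) = 0x2000.
 * With a donor RAM entry ending at entry_end = 0x20000000, the data is
 * remapped to [0x1fffe000, 0x20000000) and the new E820 entry keeps its
 * in-page offset: addr = 0x1fffe000 + 0x234 = 0x1fffe234, size = 0x1800.
 * The old location [0x7f801000, 0x7f803000) is turned into RAM.
 */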

/*
 * Look for non-RAM memory types in a specific guest physical area and move
 * those away if possible (ACPI NVS only for now).
 */
static void __init xen_e820_resolve_conflicts(phys_addr_t start,
					      phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned int mapcnt;
	phys_addr_t end;

	if (!size)
		return;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->addr >= end)
			return;

		if (entry->addr + entry->size > start &&
		    entry->type == E820_TYPE_NVS)
			xen_e820_swap_entry_with_ram(entry);

		entry++;
	}
}

/*
 * Check for an area in physical memory to be usable for non-movable purposes.
 * An area is considered usable if the used E820 map lists it as RAM or some
 * other type which can be moved to higher PFNs while keeping the MFNs.
 * In case the area is not usable, crash the system with an error message.
 */
void __init xen_chk_is_e820_usable(phys_addr_t start, phys_addr_t size,
				   const char *component)
{
	xen_e820_resolve_conflicts(start, size);

	if (!xen_is_e820_reserved(start, size))
		return;

	xen_raw_console_write("Xen hypervisor allocated ");
	xen_raw_console_write(component);
	xen_raw_console_write(" memory conflicts with E820 map\n");
	BUG();
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

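/*
 * Note (added for clarity): each loop iteration above maps at most
 * NR_FIX_BTMAPS pages of source and of destination through the early
 * fixmap, so arbitrarily large copies (such as relocating the initrd in
 * xen_memory_setup()) proceed in bounded chunks without a permanent
 * mapping of either range.
 */
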
/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_phys_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	unsigned long maxmem_pages;
	int i;
	int op;

	xen_parse_512gb();
	ini_nr_pages = min(xen_get_pages_limit(), xen_start_info->nr_pages);
	mem_end = PFN_PHYS(ini_nr_pages);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	if (xen_initial_domain()) {
		/*
		 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
		 * regions, so if we're using the machine memory map leave the
		 * region as RAM as it is in the pseudo-physical map.
		 *
		 * UNUSABLE regions in domUs are not handled and will need
		 * a patch in the future.
		 */
		xen_ignore_unusable();

#ifdef CONFIG_ISCSI_IBFT_FIND
		/* Reserve 0.5 MiB to 1 MiB region so iBFT can be found */
		xen_e820_table.entries[xen_e820_table.nr_entries].addr = IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].size = IBFT_END - IBFT_START;
		xen_e820_table.entries[xen_e820_table.nr_entries].type = E820_TYPE_RESERVED;
		xen_e820_table.nr_entries++;
#endif
	}

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	xen_chk_is_e820_usable(__pa_symbol(_text),
			       __pa_symbol(_end) - __pa_symbol(_text),
			       "kernel");

	/*
	 * Check for a conflict of the xen_start_info memory with the target
	 * E820 map.
	 */
	xen_chk_is_e820_usable(__pa(xen_start_info), sizeof(*xen_start_info),
			       "xen_start_info");

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(xen_count_remap_pages);

	if (max_pages > ini_nr_pages)
		extra_pages += max_pages - ini_nr_pages;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO factor of
	 * the base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
	maxmem_pages = EXTRA_MEM_RATIO * min(ini_nr_pages, PFN_DOWN(MAXMEM));
	extra_pages = min3(maxmem_pages, extra_pages, max_pages - ini_nr_pages);
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RESERVED)
			xen_pv_pci_possible = true;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_phys_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
}

void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

static void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}