v6.2
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Machine specific setup for xen
  4 *
  5 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
  6 */
  7
  8#include <linux/init.h>
  9#include <linux/sched.h>
 10#include <linux/kstrtox.h>
 11#include <linux/mm.h>
 12#include <linux/pm.h>
 13#include <linux/memblock.h>
 14#include <linux/cpuidle.h>
 15#include <linux/cpufreq.h>
 16#include <linux/memory_hotplug.h>
 17
 18#include <asm/elf.h>
 19#include <asm/vdso.h>
 20#include <asm/e820/api.h>
 21#include <asm/setup.h>
 22#include <asm/acpi.h>
 23#include <asm/numa.h>
 24#include <asm/idtentry.h>
 25#include <asm/xen/hypervisor.h>
 26#include <asm/xen/hypercall.h>
 27
 28#include <xen/xen.h>
 29#include <xen/page.h>
 30#include <xen/interface/callback.h>
 31#include <xen/interface/memory.h>
 32#include <xen/interface/physdev.h>
 33#include <xen/features.h>
 34#include <xen/hvc-console.h>
 35#include "xen-ops.h"
 36#include "mmu.h"
 37
 38#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
 39
 40/* Amount of extra memory space we add to the e820 ranges */
 41struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 42
 43/* Number of pages released from the initial allocation. */
 44unsigned long xen_released_pages;
 45
 46/* E820 map used during setting up memory. */
 47static struct e820_table xen_e820_table __initdata;
 48
 49/*
 50 * Buffer used to remap identity mapped pages. We only need the virtual space.
 51 * The physical page behind this address is remapped as needed to different
 52 * buffer pages.
 53 */
 54#define REMAP_SIZE	(P2M_PER_PAGE - 3)
 55static struct {
 56	unsigned long	next_area_mfn;
 57	unsigned long	target_pfn;
 58	unsigned long	size;
 59	unsigned long	mfns[REMAP_SIZE];
 60} xen_remap_buf __initdata __aligned(PAGE_SIZE);
 61static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
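
/*
 * Editorial note, not part of the upstream file: on x86-64 with 4 KiB pages,
 * P2M_PER_PAGE is PAGE_SIZE / sizeof(unsigned long) = 512, so REMAP_SIZE is
 * 509. The three entries subtracted correspond to next_area_mfn, target_pfn
 * and size above, which lets the whole buffer fit in exactly one page.
 */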
 62
 63/*
 64 * The maximum amount of extra memory compared to the base size.  The
 65 * main scaling factor is the size of struct page.  At extreme ratios
 66 * of base:extra, all the base memory can be filled with page
 67 * structures for the extra memory, leaving no space for anything
 68 * else.
 69 *
 70 * 10x seems like a reasonable balance between scaling flexibility and
 71 * leaving a practically usable system.
 72 */
 73#define EXTRA_MEM_RATIO		(10)
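
/*
 * Worked example (editorial): with EXTRA_MEM_RATIO == 10, a domain booted
 * with a 1 GiB initial allocation is offered at most 10 GiB of extra
 * (ballooned) space; the min3() clamp in xen_memory_setup() below can
 * shrink that further.
 */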
 74
 75static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
 76
 77static void __init xen_parse_512gb(void)
 78{
 79	bool val = false;
 80	char *arg;
 81
 82	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
 83	if (!arg)
 84		return;
 85
 86	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
 87	if (!arg)
 88		val = true;
 89	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
 90		return;
 91
 92	xen_512gb_limit = val;
 93}
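
/*
 * Illustrative usage (editorial): booting the PV guest with
 * "xen_512gb_limit" or "xen_512gb_limit=1" on the guest command line
 * enforces the 512 GB limit, "xen_512gb_limit=0" lifts it, and without
 * the option the CONFIG_XEN_512GB default kept in xen_512gb_limit applies.
 */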
 94
 95static void __init xen_add_extra_mem(unsigned long start_pfn,
 96				     unsigned long n_pfns)
 97{
 98	int i;
 99
100	/*
101	 * No need to check for zero size: it should happen rarely and would only
102	 * write a new entry that is regarded as unused due to its zero size.
103	 */
104	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
105		/* Add new region. */
106		if (xen_extra_mem[i].n_pfns == 0) {
107			xen_extra_mem[i].start_pfn = start_pfn;
108			xen_extra_mem[i].n_pfns = n_pfns;
109			break;
110		}
111		/* Append to existing region. */
112		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
113		    start_pfn) {
114			xen_extra_mem[i].n_pfns += n_pfns;
115			break;
116		}
117	}
118	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
119		printk(KERN_WARNING "Warning: not enough extra memory regions\n");
120
121	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
122}
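
/*
 * Example (editorial): two back-to-back calls covering PFNs 0x100-0x1ff
 * and then 0x200-0x2ff end up in a single xen_extra_mem[] slot, because
 * the second start_pfn equals the end of the first region and hits the
 * "append" case above.
 */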
123
124static void __init xen_del_extra_mem(unsigned long start_pfn,
125				     unsigned long n_pfns)
126{
127	int i;
128	unsigned long start_r, size_r;
129
130	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
131		start_r = xen_extra_mem[i].start_pfn;
132		size_r = xen_extra_mem[i].n_pfns;
133
134		/* Start of region. */
135		if (start_r == start_pfn) {
136			BUG_ON(n_pfns > size_r);
137			xen_extra_mem[i].start_pfn += n_pfns;
138			xen_extra_mem[i].n_pfns -= n_pfns;
139			break;
140		}
141		/* End of region. */
142		if (start_r + size_r == start_pfn + n_pfns) {
143			BUG_ON(n_pfns > size_r);
144			xen_extra_mem[i].n_pfns -= n_pfns;
145			break;
146		}
147		/* Mid of region. */
148		if (start_pfn > start_r && start_pfn < start_r + size_r) {
149			BUG_ON(start_pfn + n_pfns > start_r + size_r);
150			xen_extra_mem[i].n_pfns = start_pfn - start_r;
151			/* Calling memblock_reserve() again is okay. */
152			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
153					  (start_pfn + n_pfns));
154			break;
155		}
156	}
157	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
158}
159
160/*
161 * Called during boot before the p2m list can take entries beyond the
162 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
163 * invalid.
164 */
165unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
166{
167	int i;
168
169	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
170		if (pfn >= xen_extra_mem[i].start_pfn &&
171		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
172			return INVALID_P2M_ENTRY;
173	}
174
175	return IDENTITY_FRAME(pfn);
176}
177
178/*
179 * Mark all pfns of extra mem as invalid in p2m list.
180 */
181void __init xen_inv_extra_mem(void)
182{
183	unsigned long pfn, pfn_s, pfn_e;
184	int i;
185
186	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
187		if (!xen_extra_mem[i].n_pfns)
188			continue;
189		pfn_s = xen_extra_mem[i].start_pfn;
190		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
191		for (pfn = pfn_s; pfn < pfn_e; pfn++)
192			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
193	}
194}
195
196/*
197 * Finds the next RAM pfn available in the E820 map after min_pfn.
198 * This function updates min_pfn with the pfn found and returns
199 * the size of that range or zero if not found.
200 */
201static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
202{
203	const struct e820_entry *entry = xen_e820_table.entries;
204	unsigned int i;
205	unsigned long done = 0;
206
207	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
208		unsigned long s_pfn;
209		unsigned long e_pfn;
210
211		if (entry->type != E820_TYPE_RAM)
212			continue;
213
214		e_pfn = PFN_DOWN(entry->addr + entry->size);
215
216		/* We only care about E820 after this */
217		if (e_pfn <= *min_pfn)
218			continue;
219
220		s_pfn = PFN_UP(entry->addr);
221
222		/* If min_pfn falls within the E820 entry, we want to start
223		 * at the min_pfn PFN.
224		 */
225		if (s_pfn <= *min_pfn) {
226			done = e_pfn - *min_pfn;
227		} else {
228			done = e_pfn - s_pfn;
229			*min_pfn = s_pfn;
230		}
231		break;
232	}
233
234	return done;
235}
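
/*
 * Example (editorial): given a RAM entry spanning 0x100000-0x40000000 and
 * *min_pfn == 0x200, the PFN 0x200 already lies inside the entry, so
 * *min_pfn is left unchanged and the PFN count up to the entry's end
 * (0x40000 - 0x200) is returned.
 */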
236
237static int __init xen_free_mfn(unsigned long mfn)
238{
239	struct xen_memory_reservation reservation = {
240		.address_bits = 0,
241		.extent_order = 0,
242		.domid        = DOMID_SELF
243	};
244
245	set_xen_guest_handle(reservation.extent_start, &mfn);
246	reservation.nr_extents = 1;
247
248	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
249}
250
251/*
252 * This releases a chunk of memory and then does the identity map. It's used
253 * as a fallback if the remapping fails.
254 */
255static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
256			unsigned long end_pfn, unsigned long nr_pages)
257{
258	unsigned long pfn, end;
259	int ret;
260
261	WARN_ON(start_pfn > end_pfn);
262
263	/* Release pages first. */
264	end = min(end_pfn, nr_pages);
265	for (pfn = start_pfn; pfn < end; pfn++) {
266		unsigned long mfn = pfn_to_mfn(pfn);
267
268		/* Make sure pfn exists to start with */
269		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
270			continue;
271
272		ret = xen_free_mfn(mfn);
273		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
274
275		if (ret == 1) {
276			xen_released_pages++;
277			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
278				break;
279		} else
280			break;
281	}
282
283	set_phys_range_identity(start_pfn, end_pfn);
284}
285
286/*
287 * Helper function to update the p2m and m2p tables and kernel mapping.
288 */
289static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
290{
291	struct mmu_update update = {
292		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
293		.val = pfn
294	};
295
296	/* Update p2m */
297	if (!set_phys_to_machine(pfn, mfn)) {
298		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
299		     pfn, mfn);
300		BUG();
301	}
302
303	/* Update m2p */
304	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
305		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
306		     mfn, pfn);
307		BUG();
308	}
309
310	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
311					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
312		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
313		      mfn, pfn);
314		BUG();
315	}
316}
317
318/*
319 * This function updates the p2m and m2p tables with an identity map from
320 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
321 * original allocation at remap_pfn. The information needed for remapping is
322 * saved in the memory itself to avoid the need for allocating buffers. The
323 * complete remap information is contained in a linked list of buffer pages,
324 * each holding up to REMAP_SIZE MFNs and the target start PFN for the remap.
325 * This enables us to preserve the original mfn sequence while doing the
326 * remapping at a time when the memory management is capable of allocating
327 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
328 * its callers.
329 */
330static void __init xen_do_set_identity_and_remap_chunk(
331        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
332{
333	unsigned long buf = (unsigned long)&xen_remap_buf;
334	unsigned long mfn_save, mfn;
335	unsigned long ident_pfn_iter, remap_pfn_iter;
336	unsigned long ident_end_pfn = start_pfn + size;
337	unsigned long left = size;
338	unsigned int i, chunk;
339
340	WARN_ON(size == 0);
341
342	mfn_save = virt_to_mfn(buf);
343
344	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
345	     ident_pfn_iter < ident_end_pfn;
346	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
347		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
348
349		/* Map first pfn to xen_remap_buf */
350		mfn = pfn_to_mfn(ident_pfn_iter);
351		set_pte_mfn(buf, mfn, PAGE_KERNEL);
352
353		/* Save mapping information in page */
354		xen_remap_buf.next_area_mfn = xen_remap_mfn;
355		xen_remap_buf.target_pfn = remap_pfn_iter;
356		xen_remap_buf.size = chunk;
357		for (i = 0; i < chunk; i++)
358			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);
359
360		/* Put remap buf into list. */
361		xen_remap_mfn = mfn;
362
363		/* Set identity map */
364		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
365
366		left -= chunk;
367	}
368
369	/* Restore old xen_remap_buf mapping */
370	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
371}
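
/*
 * Worked example (editorial): remapping 1200 pages with REMAP_SIZE == 509
 * takes three iterations (509 + 509 + 182 pages). Each iteration reuses
 * the first frame of its identity range as the buffer page holding that
 * chunk's remap record, and links it into the list via xen_remap_mfn.
 */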
372
373/*
374 * This function takes a contiguous pfn range that needs to be identity mapped
375 * and:
376 *
377 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
378 *  2) Calls the do_ function to actually do the mapping/remapping work.
379 *
380 * The goal is to not allocate additional memory but to remap the existing
381 * pages. In the case of an error the underlying memory is simply released back
382 * to Xen and not remapped.
383 */
384static unsigned long __init xen_set_identity_and_remap_chunk(
385	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
386	unsigned long remap_pfn)
387{
388	unsigned long pfn;
389	unsigned long i = 0;
390	unsigned long n = end_pfn - start_pfn;
391
392	if (remap_pfn == 0)
393		remap_pfn = nr_pages;
394
395	while (i < n) {
396		unsigned long cur_pfn = start_pfn + i;
397		unsigned long left = n - i;
398		unsigned long size = left;
399		unsigned long remap_range_size;
400
401		/* Do not remap pages beyond the current allocation */
402		if (cur_pfn >= nr_pages) {
403			/* Identity map remaining pages */
404			set_phys_range_identity(cur_pfn, cur_pfn + size);
405			break;
406		}
407		if (cur_pfn + size > nr_pages)
408			size = nr_pages - cur_pfn;
409
410		remap_range_size = xen_find_pfn_range(&remap_pfn);
411		if (!remap_range_size) {
412			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
413			xen_set_identity_and_release_chunk(cur_pfn,
414						cur_pfn + left, nr_pages);
415			break;
416		}
417		/* Adjust size to fit in current e820 RAM region */
418		if (size > remap_range_size)
419			size = remap_range_size;
420
421		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);
422
423		/* Update variables to reflect new mappings. */
424		i += size;
425		remap_pfn += size;
426	}
427
428	/*
429	 * If the PFNs are currently mapped, their VA mappings need to be
430	 * zapped.
431	 */
432	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
433		(void)HYPERVISOR_update_va_mapping(
434			(unsigned long)__va(pfn << PAGE_SHIFT),
435			native_make_pte(0), 0);
436
437	return remap_pfn;
438}
439
440static unsigned long __init xen_count_remap_pages(
441	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
442	unsigned long remap_pages)
443{
444	if (start_pfn >= nr_pages)
445		return remap_pages;
446
447	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
448}
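
/*
 * Example (editorial): for a non-RAM range covering PFNs 0xa0-0x100 in a
 * domain whose initial allocation ends at nr_pages == 0xc0, only the 0x20
 * PFNs from 0xa0 to 0xc0 are backed by RAM that needs remapping; the rest
 * lie beyond the allocation and are skipped.
 */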
449
450static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
451	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
452			      unsigned long nr_pages, unsigned long last_val))
453{
454	phys_addr_t start = 0;
455	unsigned long ret_val = 0;
456	const struct e820_entry *entry = xen_e820_table.entries;
457	int i;
458
459	/*
460	 * Combine non-RAM regions and gaps until a RAM region (or the
461	 * end of the map) is reached, then call the provided function
462	 * to perform its duty on the non-RAM region.
463	 *
464	 * The combined non-RAM regions are rounded to a whole number
465	 * of pages so any partial pages are accessible via the 1:1
466	 * mapping.  This is needed for some BIOSes that put (for
467	 * example) the DMI tables in a reserved region that begins on
468	 * a non-page boundary.
469	 */
470	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
471		phys_addr_t end = entry->addr + entry->size;
472		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
473			unsigned long start_pfn = PFN_DOWN(start);
474			unsigned long end_pfn = PFN_UP(end);
475
476			if (entry->type == E820_TYPE_RAM)
477				end_pfn = PFN_UP(entry->addr);
478
479			if (start_pfn < end_pfn)
480				ret_val = func(start_pfn, end_pfn, nr_pages,
481					       ret_val);
482			start = end;
483		}
484	}
485
486	return ret_val;
487}
488
489/*
490 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
491 * The remap information (which mfn is remapped to which pfn) is contained
492 * in the memory to be remapped itself, in a linked list anchored at
493 * xen_remap_mfn. This scheme allows remapping the different chunks in
494 * arbitrary order while keeping the resulting mapping independent of that order.
495 */
496void __init xen_remap_memory(void)
497{
498	unsigned long buf = (unsigned long)&xen_remap_buf;
499	unsigned long mfn_save, pfn;
500	unsigned long remapped = 0;
501	unsigned int i;
502	unsigned long pfn_s = ~0UL;
503	unsigned long len = 0;
504
505	mfn_save = virt_to_mfn(buf);
506
507	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
508		/* Map the remap information */
509		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);
510
511		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);
512
513		pfn = xen_remap_buf.target_pfn;
514		for (i = 0; i < xen_remap_buf.size; i++) {
515			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
516			remapped++;
517			pfn++;
518		}
519		if (pfn_s == ~0UL || pfn == pfn_s) {
520			pfn_s = xen_remap_buf.target_pfn;
521			len += xen_remap_buf.size;
522		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
523			len += xen_remap_buf.size;
524		} else {
525			xen_del_extra_mem(pfn_s, len);
526			pfn_s = xen_remap_buf.target_pfn;
527			len = xen_remap_buf.size;
528		}
529		xen_remap_mfn = xen_remap_buf.next_area_mfn;
530	}
531
532	if (pfn_s != ~0UL && len)
533		xen_del_extra_mem(pfn_s, len);
534
535	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
536
537	pr_info("Remapped %ld page(s)\n", remapped);
538}
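
/*
 * Editorial note: the pfn_s/len bookkeeping above coalesces consecutive
 * target ranges, so xen_del_extra_mem() runs once per contiguous run of
 * remapped PFNs instead of once per REMAP_SIZE-sized chunk.
 */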
539
540static unsigned long __init xen_get_pages_limit(void)
541{
542	unsigned long limit;
543
544	limit = MAXMEM / PAGE_SIZE;
545	if (!xen_initial_domain() && xen_512gb_limit)
546		limit = GB(512) / PAGE_SIZE;
547
548	return limit;
549}
550
551static unsigned long __init xen_get_max_pages(void)
552{
553	unsigned long max_pages, limit;
554	domid_t domid = DOMID_SELF;
555	long ret;
556
557	limit = xen_get_pages_limit();
558	max_pages = limit;
559
560	/*
561	 * For the initial domain we use the maximum reservation as
562	 * the maximum page.
563	 *
564	 * For guest domains the current maximum reservation reflects
565	 * the current maximum rather than the static maximum. In this
566	 * case the e820 map provided to us will cover the static
567	 * maximum region.
568	 */
569	if (xen_initial_domain()) {
570		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
571		if (ret > 0)
572			max_pages = ret;
573	}
574
575	return min(max_pages, limit);
576}
577
578static void __init xen_align_and_add_e820_region(phys_addr_t start,
579						 phys_addr_t size, int type)
580{
581	phys_addr_t end = start + size;
582
583	/* Align RAM regions to page boundaries. */
584	if (type == E820_TYPE_RAM) {
585		start = PAGE_ALIGN(start);
586		end &= ~((phys_addr_t)PAGE_SIZE - 1);
587#ifdef CONFIG_MEMORY_HOTPLUG
588		/*
589		 * Don't allow adding memory not in E820 map while booting the
590		 * system. Once the balloon driver is up it will remove that
591		 * restriction again.
592		 */
593		max_mem_size = end;
594#endif
595	}
596
597	e820__range_add(start, end - start, type);
598}
599
600static void __init xen_ignore_unusable(void)
601{
602	struct e820_entry *entry = xen_e820_table.entries;
603	unsigned int i;
604
605	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
606		if (entry->type == E820_TYPE_UNUSABLE)
607			entry->type = E820_TYPE_RAM;
608	}
609}
610
611bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
612{
613	struct e820_entry *entry;
614	unsigned mapcnt;
615	phys_addr_t end;
616
617	if (!size)
618		return false;
619
620	end = start + size;
621	entry = xen_e820_table.entries;
622
623	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
624		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
625		    (entry->addr + entry->size) >= end)
626			return false;
627
628		entry++;
629	}
630
631	return true;
632}
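
/*
 * Editorial note: "reserved" here means "not fully covered by one E820 RAM
 * entry"; a range straddling two adjacent RAM entries is reported as
 * reserved even though every byte of it is RAM.
 */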
633
634/*
635 * Find a free area in physical memory that is not yet reserved and is
636 * compliant with the E820 map.
637 * Used to relocate pre-allocated areas, like the initrd or the p2m list,
638 * which conflict with the E820 map to be used.
639 * In case no area is found, return 0. Otherwise return the physical address
640 * of the area, which has already been reserved for convenience.
641 */
642phys_addr_t __init xen_find_free_area(phys_addr_t size)
643{
644	unsigned mapcnt;
645	phys_addr_t addr, start;
646	struct e820_entry *entry = xen_e820_table.entries;
647
648	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
649		if (entry->type != E820_TYPE_RAM || entry->size < size)
650			continue;
651		start = entry->addr;
652		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
653			if (!memblock_is_reserved(addr))
654				continue;
655			start = addr + PAGE_SIZE;
656			if (start + size > entry->addr + entry->size)
657				break;
658		}
659		if (addr >= start + size) {
660			memblock_reserve(start, size);
661			return start;
662		}
663	}
664
665	return 0;
666}
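
/*
 * Editorial note: whenever the scan hits a memblock-reserved page, it
 * restarts the candidate window right behind that page, so the first
 * sufficiently large unreserved gap inside a RAM entry wins and is
 * returned already memblock_reserve()d.
 */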
667
668/*
669 * Like memcpy, but with physical addresses for dest and src.
670 */
671static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
672				   phys_addr_t n)
673{
674	phys_addr_t dest_off, src_off, dest_len, src_len, len;
675	void *from, *to;
676
677	while (n) {
678		dest_off = dest & ~PAGE_MASK;
679		src_off = src & ~PAGE_MASK;
680		dest_len = n;
681		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
682			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
683		src_len = n;
684		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
685			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
686		len = min(dest_len, src_len);
687		to = early_memremap(dest - dest_off, dest_len + dest_off);
688		from = early_memremap(src - src_off, src_len + src_off);
689		memcpy(to, from, len);
690		early_memunmap(to, dest_len + dest_off);
691		early_memunmap(from, src_len + src_off);
692		n -= len;
693		dest += len;
694		src += len;
695	}
696}
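
/*
 * Editorial note: the copy proceeds in chunks bounded by the early fixmap
 * window (NR_FIX_BTMAPS pages), since early_memremap() cannot map more at
 * once this early in boot; dest and src may be misaligned relative to
 * each other, hence the separate dest_len/src_len clamping.
 */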
697
698/*
699 * Reserve Xen mfn_list.
700 */
701static void __init xen_reserve_xen_mfnlist(void)
702{
703	phys_addr_t start, size;
704
705	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
706		start = __pa(xen_start_info->mfn_list);
707		size = PFN_ALIGN(xen_start_info->nr_pages *
708				 sizeof(unsigned long));
709	} else {
710		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
711		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
712	}
713
714	memblock_reserve(start, size);
715	if (!xen_is_e820_reserved(start, size))
716		return;
717
718	xen_relocate_p2m();
719	memblock_phys_free(start, size);
720}
721
722/**
723 * xen_memory_setup - Hook for machine specific memory setup.
724 **/
725char * __init xen_memory_setup(void)
726{
727	unsigned long max_pfn, pfn_s, n_pfns;
728	phys_addr_t mem_end, addr, size, chunk_size;
729	u32 type;
730	int rc;
731	struct xen_memory_map memmap;
732	unsigned long max_pages;
733	unsigned long extra_pages = 0;
734	int i;
735	int op;
736
737	xen_parse_512gb();
738	max_pfn = xen_get_pages_limit();
739	max_pfn = min(max_pfn, xen_start_info->nr_pages);
740	mem_end = PFN_PHYS(max_pfn);
741
742	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
743	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
744
745#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
746	xen_saved_max_mem_size = max_mem_size;
747#endif
748
749	op = xen_initial_domain() ?
750		XENMEM_machine_memory_map :
751		XENMEM_memory_map;
752	rc = HYPERVISOR_memory_op(op, &memmap);
753	if (rc == -ENOSYS) {
754		BUG_ON(xen_initial_domain());
755		memmap.nr_entries = 1;
756		xen_e820_table.entries[0].addr = 0ULL;
757		xen_e820_table.entries[0].size = mem_end;
758		/* 8MB slack (to balance backend allocations). */
759		xen_e820_table.entries[0].size += 8ULL << 20;
760		xen_e820_table.entries[0].type = E820_TYPE_RAM;
761		rc = 0;
762	}
763	BUG_ON(rc);
764	BUG_ON(memmap.nr_entries == 0);
765	xen_e820_table.nr_entries = memmap.nr_entries;
766
767	/*
768	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
769	 * regions, so if we're using the machine memory map leave the
770	 * region as RAM as it is in the pseudo-physical map.
771	 *
772	 * UNUSABLE regions in domUs are not handled and will need
773	 * a patch in the future.
774	 */
775	if (xen_initial_domain())
776		xen_ignore_unusable();
777
778	/* Make sure the Xen-supplied memory map is well-ordered. */
779	e820__update_table(&xen_e820_table);
780
781	max_pages = xen_get_max_pages();
782
783	/* How many extra pages do we need due to remapping? */
784	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
785
786	if (max_pages > max_pfn)
787		extra_pages += max_pages - max_pfn;
788
789	/*
790	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
791	 * factor of the base size.
792	 *
793	 * Make sure we have no memory above max_pages, as this area
794	 * isn't handled by the p2m management.
795	 */
796	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
797			   extra_pages, max_pages - max_pfn);
798	i = 0;
799	addr = xen_e820_table.entries[0].addr;
800	size = xen_e820_table.entries[0].size;
801	while (i < xen_e820_table.nr_entries) {
802		bool discard = false;
803
804		chunk_size = size;
805		type = xen_e820_table.entries[i].type;
806
807		if (type == E820_TYPE_RAM) {
808			if (addr < mem_end) {
809				chunk_size = min(size, mem_end - addr);
810			} else if (extra_pages) {
811				chunk_size = min(size, PFN_PHYS(extra_pages));
812				pfn_s = PFN_UP(addr);
813				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
814				extra_pages -= n_pfns;
815				xen_add_extra_mem(pfn_s, n_pfns);
816				xen_max_p2m_pfn = pfn_s + n_pfns;
817			} else
818				discard = true;
819		}
820
821		if (!discard)
822			xen_align_and_add_e820_region(addr, chunk_size, type);
823
824		addr += chunk_size;
825		size -= chunk_size;
826		if (size == 0) {
827			i++;
828			if (i < xen_e820_table.nr_entries) {
829				addr = xen_e820_table.entries[i].addr;
830				size = xen_e820_table.entries[i].size;
831			}
832		}
833	}
834
835	/*
836	 * Set the rest as identity mapped, in case PCI BARs are
837	 * located here.
838	 */
839	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
840
841	/*
842	 * In domU, the ISA region is normal, usable memory, but we
843	 * reserve ISA memory anyway because too many things poke
844	 * about in there.
845	 */
846	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);
847
848	e820__update_table(e820_table);
849
850	/*
851	 * Check whether the kernel itself conflicts with the target E820 map.
852	 * Failing now is better than running into weird problems later due
853	 * to relocating (and even reusing) pages with kernel text or data.
854	 */
855	if (xen_is_e820_reserved(__pa_symbol(_text),
856			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
857		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
858		BUG();
859	}
860
861	/*
862	 * Check for a conflict of the hypervisor supplied page tables with
863	 * the target E820 map.
864	 */
865	xen_pt_check_e820();
866
867	xen_reserve_xen_mfnlist();
868
869	/* Check for a conflict of the initrd with the target E820 map. */
870	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
871				 boot_params.hdr.ramdisk_size)) {
872		phys_addr_t new_area, start, size;
873
874		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
875		if (!new_area) {
876			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
877			BUG();
878		}
879
880		start = boot_params.hdr.ramdisk_image;
881		size = boot_params.hdr.ramdisk_size;
882		xen_phys_memcpy(new_area, start, size);
883		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
884			start, start + size, new_area, new_area + size);
885		memblock_phys_free(start, size);
886		boot_params.hdr.ramdisk_image = new_area;
887		boot_params.ext_ramdisk_image = new_area >> 32;
888	}
889
890	/*
891	 * Set identity map on non-RAM pages and prepare remapping the
892	 * underlying RAM.
893	 */
894	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
895
896	pr_info("Released %ld page(s)\n", xen_released_pages);
897
898	return "Xen";
899}
900
901static int register_callback(unsigned type, const void *func)
902{
903	struct callback_register callback = {
904		.type = type,
905		.address = XEN_CALLBACK(__KERNEL_CS, func),
906		.flags = CALLBACKF_mask_events,
907	};
908
909	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
910}
911
912void xen_enable_sysenter(void)
913{
914	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
915	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
916		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
917}
918
919void xen_enable_syscall(void)
920{
921	int ret;
922
923	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
924	if (ret != 0) {
925		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
926		/* Pretty fatal; 64-bit userspace has no other
927		   mechanism for syscalls. */
928	}
929
930	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
931	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
932		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
933}
934
935static void __init xen_pvmmu_arch_setup(void)
936{
937	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
938	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
939
940	HYPERVISOR_vm_assist(VMASST_CMD_enable,
941			     VMASST_TYPE_pae_extended_cr3);
942
943	if (register_callback(CALLBACKTYPE_event,
944			      xen_asm_exc_xen_hypervisor_callback) ||
945	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
946		BUG();
947
948	xen_enable_sysenter();
949	xen_enable_syscall();
950}
951
952/* This function is not called for HVM domains */
953void __init xen_arch_setup(void)
954{
955	xen_panic_handler_init();
956	xen_pvmmu_arch_setup();
957
958#ifdef CONFIG_ACPI
959	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
960		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
961		disable_acpi();
962	}
963#endif
964
965	memcpy(boot_command_line, xen_start_info->cmd_line,
966	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
967	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
968
969	/* Set up idle, making sure it calls safe_halt() pvop */
970	disable_cpuidle();
971	disable_cpufreq();
972	WARN_ON(xen_set_default_idle());
973#ifdef CONFIG_NUMA
974	numa_off = 1;
975#endif
976}
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Machine specific setup for xen
  4 *
  5 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
  6 */
  7
  8#include <linux/init.h>
  9#include <linux/sched.h>
 10#include <linux/mm.h>
 11#include <linux/pm.h>
 12#include <linux/memblock.h>
 13#include <linux/cpuidle.h>
 14#include <linux/cpufreq.h>
 15#include <linux/memory_hotplug.h>
 16
 17#include <asm/elf.h>
 18#include <asm/vdso.h>
 19#include <asm/e820/api.h>
 20#include <asm/setup.h>
 21#include <asm/acpi.h>
 22#include <asm/numa.h>
 23#include <asm/idtentry.h>
 24#include <asm/xen/hypervisor.h>
 25#include <asm/xen/hypercall.h>
 26
 27#include <xen/xen.h>
 28#include <xen/page.h>
 29#include <xen/interface/callback.h>
 30#include <xen/interface/memory.h>
 31#include <xen/interface/physdev.h>
 32#include <xen/features.h>
 33#include <xen/hvc-console.h>
 34#include "xen-ops.h"
 35#include "mmu.h"
 36
 37#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)
 38
 39/* Amount of extra memory space we add to the e820 ranges */
 40struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
 41
 42/* Number of pages released from the initial allocation. */
 43unsigned long xen_released_pages;
 44
 45/* E820 map used during setting up memory. */
 46static struct e820_table xen_e820_table __initdata;
 47
 48/*
 49 * Buffer used to remap identity mapped pages. We only need the virtual space.
 50 * The physical page behind this address is remapped as needed to different
 51 * buffer pages.
 52 */
 53#define REMAP_SIZE	(P2M_PER_PAGE - 3)
 54static struct {
 55	unsigned long	next_area_mfn;
 56	unsigned long	target_pfn;
 57	unsigned long	size;
 58	unsigned long	mfns[REMAP_SIZE];
 59} xen_remap_buf __initdata __aligned(PAGE_SIZE);
 60static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;
 61
 62/* 
 63 * The maximum amount of extra memory compared to the base size.  The
 64 * main scaling factor is the size of struct page.  At extreme ratios
 65 * of base:extra, all the base memory can be filled with page
 66 * structures for the extra memory, leaving no space for anything
 67 * else.
 68 * 
 69 * 10x seems like a reasonable balance between scaling flexibility and
 70 * leaving a practically usable system.
 71 */
 72#define EXTRA_MEM_RATIO		(10)
 73
 74static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);
 75
 76static void __init xen_parse_512gb(void)
 77{
 78	bool val = false;
 79	char *arg;
 80
 81	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
 82	if (!arg)
 83		return;
 84
 85	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
 86	if (!arg)
 87		val = true;
 88	else if (strtobool(arg + strlen("xen_512gb_limit="), &val))
 89		return;
 90
 91	xen_512gb_limit = val;
 92}
 93
 94static void __init xen_add_extra_mem(unsigned long start_pfn,
 95				     unsigned long n_pfns)
 96{
 97	int i;
 98
 99	/*
100	 * No need to check for zero size: it should happen rarely and would only
101	 * write a new entry that is regarded as unused due to its zero size.
102	 */
103	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
104		/* Add new region. */
105		if (xen_extra_mem[i].n_pfns == 0) {
106			xen_extra_mem[i].start_pfn = start_pfn;
107			xen_extra_mem[i].n_pfns = n_pfns;
108			break;
109		}
110		/* Append to existing region. */
111		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
112		    start_pfn) {
113			xen_extra_mem[i].n_pfns += n_pfns;
114			break;
115		}
116	}
117	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
118		printk(KERN_WARNING "Warning: not enough extra memory regions\n");
119
120	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
121}
122
123static void __init xen_del_extra_mem(unsigned long start_pfn,
124				     unsigned long n_pfns)
125{
126	int i;
127	unsigned long start_r, size_r;
128
129	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
130		start_r = xen_extra_mem[i].start_pfn;
131		size_r = xen_extra_mem[i].n_pfns;
132
133		/* Start of region. */
134		if (start_r == start_pfn) {
135			BUG_ON(n_pfns > size_r);
136			xen_extra_mem[i].start_pfn += n_pfns;
137			xen_extra_mem[i].n_pfns -= n_pfns;
138			break;
139		}
140		/* End of region. */
141		if (start_r + size_r == start_pfn + n_pfns) {
142			BUG_ON(n_pfns > size_r);
143			xen_extra_mem[i].n_pfns -= n_pfns;
144			break;
145		}
146		/* Mid of region. */
147		if (start_pfn > start_r && start_pfn < start_r + size_r) {
148			BUG_ON(start_pfn + n_pfns > start_r + size_r);
149			xen_extra_mem[i].n_pfns = start_pfn - start_r;
150			/* Calling memblock_reserve() again is okay. */
151			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
152					  (start_pfn + n_pfns));
153			break;
154		}
155	}
156	memblock_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
157}
158
159/*
160 * Called during boot before the p2m list can take entries beyond the
161 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
162 * invalid.
163 */
164unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
165{
166	int i;
167
168	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
169		if (pfn >= xen_extra_mem[i].start_pfn &&
170		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
171			return INVALID_P2M_ENTRY;
172	}
173
174	return IDENTITY_FRAME(pfn);
175}
176
177/*
178 * Mark all pfns of extra mem as invalid in p2m list.
179 */
180void __init xen_inv_extra_mem(void)
181{
182	unsigned long pfn, pfn_s, pfn_e;
183	int i;
184
185	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
186		if (!xen_extra_mem[i].n_pfns)
187			continue;
188		pfn_s = xen_extra_mem[i].start_pfn;
189		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
190		for (pfn = pfn_s; pfn < pfn_e; pfn++)
191			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
192	}
193}
194
195/*
196 * Finds the next RAM pfn available in the E820 map after min_pfn.
197 * This function updates min_pfn with the pfn found and returns
198 * the size of that range or zero if not found.
199 */
200static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
201{
202	const struct e820_entry *entry = xen_e820_table.entries;
203	unsigned int i;
204	unsigned long done = 0;
205
206	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
207		unsigned long s_pfn;
208		unsigned long e_pfn;
209
210		if (entry->type != E820_TYPE_RAM)
211			continue;
212
213		e_pfn = PFN_DOWN(entry->addr + entry->size);
214
215		/* We only care about E820 after this */
216		if (e_pfn <= *min_pfn)
217			continue;
218
219		s_pfn = PFN_UP(entry->addr);
220
221		/* If min_pfn falls within the E820 entry, we want to start
222		 * at the min_pfn PFN.
223		 */
224		if (s_pfn <= *min_pfn) {
225			done = e_pfn - *min_pfn;
226		} else {
227			done = e_pfn - s_pfn;
228			*min_pfn = s_pfn;
229		}
230		break;
231	}
232
233	return done;
234}
235
236static int __init xen_free_mfn(unsigned long mfn)
237{
238	struct xen_memory_reservation reservation = {
239		.address_bits = 0,
240		.extent_order = 0,
241		.domid        = DOMID_SELF
242	};
243
244	set_xen_guest_handle(reservation.extent_start, &mfn);
245	reservation.nr_extents = 1;
246
247	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
248}
249
250/*
251 * This releases a chunk of memory and then does the identity map. It's used
252 * as a fallback if the remapping fails.
253 */
254static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
255			unsigned long end_pfn, unsigned long nr_pages)
256{
257	unsigned long pfn, end;
258	int ret;
259
260	WARN_ON(start_pfn > end_pfn);
261
262	/* Release pages first. */
263	end = min(end_pfn, nr_pages);
264	for (pfn = start_pfn; pfn < end; pfn++) {
265		unsigned long mfn = pfn_to_mfn(pfn);
266
267		/* Make sure pfn exists to start with */
268		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
269			continue;
270
271		ret = xen_free_mfn(mfn);
272		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
273
274		if (ret == 1) {
275			xen_released_pages++;
276			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
277				break;
278		} else
279			break;
280	}
281
282	set_phys_range_identity(start_pfn, end_pfn);
283}
284
285/*
286 * Helper function to update the p2m and m2p tables and kernel mapping.
287 */
288static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
289{
290	struct mmu_update update = {
291		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
292		.val = pfn
293	};
294
295	/* Update p2m */
296	if (!set_phys_to_machine(pfn, mfn)) {
297		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
298		     pfn, mfn);
299		BUG();
300	}
301
302	/* Update m2p */
303	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
304		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
305		     mfn, pfn);
306		BUG();
307	}
308
309	/* Update kernel mapping, but not for highmem. */
310	if (pfn >= PFN_UP(__pa(high_memory - 1)))
311		return;
312
313	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
314					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
315		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
316		      mfn, pfn);
317		BUG();
318	}
319}
320
321/*
322 * This function updates the p2m and m2p tables with an identity map from
323 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
324 * original allocation at remap_pfn. The information needed for remapping is
325 * saved in the memory itself to avoid the need for allocating buffers. The
326 * complete remap information is contained in a linked list of buffer pages,
327 * each holding up to REMAP_SIZE MFNs and the target start PFN for the remap.
328 * This enables us to preserve the original mfn sequence while doing the
329 * remapping at a time when the memory management is capable of allocating
330 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
331 * its callers.
332 */
333static void __init xen_do_set_identity_and_remap_chunk(
334        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
335{
336	unsigned long buf = (unsigned long)&xen_remap_buf;
337	unsigned long mfn_save, mfn;
338	unsigned long ident_pfn_iter, remap_pfn_iter;
339	unsigned long ident_end_pfn = start_pfn + size;
340	unsigned long left = size;
341	unsigned int i, chunk;
342
343	WARN_ON(size == 0);
344
345	mfn_save = virt_to_mfn(buf);
346
347	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
348	     ident_pfn_iter < ident_end_pfn;
349	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
350		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;
351
352		/* Map first pfn to xen_remap_buf */
353		mfn = pfn_to_mfn(ident_pfn_iter);
354		set_pte_mfn(buf, mfn, PAGE_KERNEL);
355
356		/* Save mapping information in page */
357		xen_remap_buf.next_area_mfn = xen_remap_mfn;
358		xen_remap_buf.target_pfn = remap_pfn_iter;
359		xen_remap_buf.size = chunk;
360		for (i = 0; i < chunk; i++)
361			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);
362
363		/* Put remap buf into list. */
364		xen_remap_mfn = mfn;
365
366		/* Set identity map */
367		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
368
369		left -= chunk;
370	}
371
372	/* Restore old xen_remap_buf mapping */
373	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
374}
375
376/*
377 * This function takes a contiguous pfn range that needs to be identity mapped
378 * and:
379 *
380 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
381 *  2) Calls the do_ function to actually do the mapping/remapping work.
382 *
383 * The goal is to not allocate additional memory but to remap the existing
384 * pages. In the case of an error the underlying memory is simply released back
385 * to Xen and not remapped.
386 */
387static unsigned long __init xen_set_identity_and_remap_chunk(
388	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
389	unsigned long remap_pfn)
390{
391	unsigned long pfn;
392	unsigned long i = 0;
393	unsigned long n = end_pfn - start_pfn;
394
395	if (remap_pfn == 0)
396		remap_pfn = nr_pages;
397
398	while (i < n) {
399		unsigned long cur_pfn = start_pfn + i;
400		unsigned long left = n - i;
401		unsigned long size = left;
402		unsigned long remap_range_size;
403
404		/* Do not remap pages beyond the current allocation */
405		if (cur_pfn >= nr_pages) {
406			/* Identity map remaining pages */
407			set_phys_range_identity(cur_pfn, cur_pfn + size);
408			break;
409		}
410		if (cur_pfn + size > nr_pages)
411			size = nr_pages - cur_pfn;
412
413		remap_range_size = xen_find_pfn_range(&remap_pfn);
414		if (!remap_range_size) {
415			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
416			xen_set_identity_and_release_chunk(cur_pfn,
417						cur_pfn + left, nr_pages);
418			break;
419		}
420		/* Adjust size to fit in current e820 RAM region */
421		if (size > remap_range_size)
422			size = remap_range_size;
423
424		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);
425
426		/* Update variables to reflect new mappings. */
427		i += size;
428		remap_pfn += size;
429	}
430
431	/*
432	 * If the PFNs are currently mapped, the VA mapping also needs
433	 * to be updated to be 1:1.
434	 */
435	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
436		(void)HYPERVISOR_update_va_mapping(
437			(unsigned long)__va(pfn << PAGE_SHIFT),
438			mfn_pte(pfn, PAGE_KERNEL_IO), 0);
439
440	return remap_pfn;
441}
442
443static unsigned long __init xen_count_remap_pages(
444	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
445	unsigned long remap_pages)
446{
447	if (start_pfn >= nr_pages)
448		return remap_pages;
449
450	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
451}
452
453static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
454	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
455			      unsigned long nr_pages, unsigned long last_val))
456{
457	phys_addr_t start = 0;
458	unsigned long ret_val = 0;
459	const struct e820_entry *entry = xen_e820_table.entries;
460	int i;
461
462	/*
463	 * Combine non-RAM regions and gaps until a RAM region (or the
464	 * end of the map) is reached, then call the provided function
465	 * to perform its duty on the non-RAM region.
466	 *
467	 * The combined non-RAM regions are rounded to a whole number
468	 * of pages so any partial pages are accessible via the 1:1
469	 * mapping.  This is needed for some BIOSes that put (for
470	 * example) the DMI tables in a reserved region that begins on
471	 * a non-page boundary.
472	 */
473	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
474		phys_addr_t end = entry->addr + entry->size;
475		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
476			unsigned long start_pfn = PFN_DOWN(start);
477			unsigned long end_pfn = PFN_UP(end);
478
479			if (entry->type == E820_TYPE_RAM)
480				end_pfn = PFN_UP(entry->addr);
481
482			if (start_pfn < end_pfn)
483				ret_val = func(start_pfn, end_pfn, nr_pages,
484					       ret_val);
485			start = end;
486		}
487	}
488
489	return ret_val;
490}
491
492/*
493 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
494 * The remap information (which mfn is remapped to which pfn) is contained
495 * in the memory to be remapped itself, in a linked list anchored at
496 * xen_remap_mfn. This scheme allows remapping the different chunks in
497 * arbitrary order while keeping the resulting mapping independent of that order.
498 */
499void __init xen_remap_memory(void)
500{
501	unsigned long buf = (unsigned long)&xen_remap_buf;
502	unsigned long mfn_save, pfn;
503	unsigned long remapped = 0;
504	unsigned int i;
505	unsigned long pfn_s = ~0UL;
506	unsigned long len = 0;
507
508	mfn_save = virt_to_mfn(buf);
509
510	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
511		/* Map the remap information */
512		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);
513
514		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);
515
516		pfn = xen_remap_buf.target_pfn;
517		for (i = 0; i < xen_remap_buf.size; i++) {
518			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
519			remapped++;
520			pfn++;
521		}
522		if (pfn_s == ~0UL || pfn == pfn_s) {
523			pfn_s = xen_remap_buf.target_pfn;
524			len += xen_remap_buf.size;
525		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
526			len += xen_remap_buf.size;
527		} else {
528			xen_del_extra_mem(pfn_s, len);
529			pfn_s = xen_remap_buf.target_pfn;
530			len = xen_remap_buf.size;
531		}
532		xen_remap_mfn = xen_remap_buf.next_area_mfn;
533	}
534
535	if (pfn_s != ~0UL && len)
536		xen_del_extra_mem(pfn_s, len);
537
538	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
539
540	pr_info("Remapped %ld page(s)\n", remapped);
541}
542
543static unsigned long __init xen_get_pages_limit(void)
544{
545	unsigned long limit;
546
547	limit = MAXMEM / PAGE_SIZE;
548	if (!xen_initial_domain() && xen_512gb_limit)
549		limit = GB(512) / PAGE_SIZE;
550
551	return limit;
552}
553
554static unsigned long __init xen_get_max_pages(void)
555{
556	unsigned long max_pages, limit;
557	domid_t domid = DOMID_SELF;
558	long ret;
559
560	limit = xen_get_pages_limit();
561	max_pages = limit;
562
563	/*
564	 * For the initial domain we use the maximum reservation as
565	 * the maximum page.
566	 *
567	 * For guest domains the current maximum reservation reflects
568	 * the current maximum rather than the static maximum. In this
569	 * case the e820 map provided to us will cover the static
570	 * maximum region.
571	 */
572	if (xen_initial_domain()) {
573		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
574		if (ret > 0)
575			max_pages = ret;
576	}
577
578	return min(max_pages, limit);
579}
580
581static void __init xen_align_and_add_e820_region(phys_addr_t start,
582						 phys_addr_t size, int type)
583{
584	phys_addr_t end = start + size;
585
586	/* Align RAM regions to page boundaries. */
587	if (type == E820_TYPE_RAM) {
588		start = PAGE_ALIGN(start);
589		end &= ~((phys_addr_t)PAGE_SIZE - 1);
590#ifdef CONFIG_MEMORY_HOTPLUG
591		/*
592		 * Don't allow adding memory not in E820 map while booting the
593		 * system. Once the balloon driver is up it will remove that
594		 * restriction again.
595		 */
596		max_mem_size = end;
597#endif
598	}
599
600	e820__range_add(start, end - start, type);
601}
602
603static void __init xen_ignore_unusable(void)
604{
605	struct e820_entry *entry = xen_e820_table.entries;
606	unsigned int i;
607
608	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
609		if (entry->type == E820_TYPE_UNUSABLE)
610			entry->type = E820_TYPE_RAM;
611	}
612}
613
614bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
615{
616	struct e820_entry *entry;
617	unsigned mapcnt;
618	phys_addr_t end;
619
620	if (!size)
621		return false;
622
623	end = start + size;
624	entry = xen_e820_table.entries;
625
626	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
627		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
628		    (entry->addr + entry->size) >= end)
629			return false;
630
631		entry++;
632	}
633
634	return true;
635}
636
637/*
638 * Find a free area in physical memory that is not yet reserved and is
639 * compliant with the E820 map.
640 * Used to relocate pre-allocated areas, like the initrd or the p2m list,
641 * which conflict with the E820 map to be used.
642 * In case no area is found, return 0. Otherwise return the physical address
643 * of the area, which has already been reserved for convenience.
644 */
645phys_addr_t __init xen_find_free_area(phys_addr_t size)
646{
647	unsigned mapcnt;
648	phys_addr_t addr, start;
649	struct e820_entry *entry = xen_e820_table.entries;
650
651	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
652		if (entry->type != E820_TYPE_RAM || entry->size < size)
653			continue;
654		start = entry->addr;
655		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
656			if (!memblock_is_reserved(addr))
657				continue;
658			start = addr + PAGE_SIZE;
659			if (start + size > entry->addr + entry->size)
660				break;
661		}
662		if (addr >= start + size) {
663			memblock_reserve(start, size);
664			return start;
665		}
666	}
667
668	return 0;
669}
670
671/*
672 * Like memcpy, but with physical addresses for dest and src.
673 */
674static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
675				   phys_addr_t n)
676{
677	phys_addr_t dest_off, src_off, dest_len, src_len, len;
678	void *from, *to;
679
680	while (n) {
681		dest_off = dest & ~PAGE_MASK;
682		src_off = src & ~PAGE_MASK;
683		dest_len = n;
684		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
685			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
686		src_len = n;
687		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
688			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
689		len = min(dest_len, src_len);
690		to = early_memremap(dest - dest_off, dest_len + dest_off);
691		from = early_memremap(src - src_off, src_len + src_off);
692		memcpy(to, from, len);
693		early_memunmap(to, dest_len + dest_off);
694		early_memunmap(from, src_len + src_off);
695		n -= len;
696		dest += len;
697		src += len;
698	}
699}
700
701/*
702 * Reserve Xen mfn_list.
703 */
704static void __init xen_reserve_xen_mfnlist(void)
705{
706	phys_addr_t start, size;
707
708	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
709		start = __pa(xen_start_info->mfn_list);
710		size = PFN_ALIGN(xen_start_info->nr_pages *
711				 sizeof(unsigned long));
712	} else {
713		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
714		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
715	}
716
717	memblock_reserve(start, size);
718	if (!xen_is_e820_reserved(start, size))
719		return;
720
721	xen_relocate_p2m();
722	memblock_free(start, size);
723}
724
725/**
726 * xen_memory_setup - Hook for machine specific memory setup.
727 **/
728char * __init xen_memory_setup(void)
729{
730	unsigned long max_pfn, pfn_s, n_pfns;
731	phys_addr_t mem_end, addr, size, chunk_size;
732	u32 type;
733	int rc;
734	struct xen_memory_map memmap;
735	unsigned long max_pages;
736	unsigned long extra_pages = 0;
737	int i;
738	int op;
739
740	xen_parse_512gb();
741	max_pfn = xen_get_pages_limit();
742	max_pfn = min(max_pfn, xen_start_info->nr_pages);
743	mem_end = PFN_PHYS(max_pfn);
744
745	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
746	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);
747
748#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
749	xen_saved_max_mem_size = max_mem_size;
750#endif
751
752	op = xen_initial_domain() ?
753		XENMEM_machine_memory_map :
754		XENMEM_memory_map;
755	rc = HYPERVISOR_memory_op(op, &memmap);
756	if (rc == -ENOSYS) {
757		BUG_ON(xen_initial_domain());
758		memmap.nr_entries = 1;
759		xen_e820_table.entries[0].addr = 0ULL;
760		xen_e820_table.entries[0].size = mem_end;
761		/* 8MB slack (to balance backend allocations). */
762		xen_e820_table.entries[0].size += 8ULL << 20;
763		xen_e820_table.entries[0].type = E820_TYPE_RAM;
764		rc = 0;
765	}
766	BUG_ON(rc);
767	BUG_ON(memmap.nr_entries == 0);
768	xen_e820_table.nr_entries = memmap.nr_entries;
769
770	/*
771	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
772	 * regions, so if we're using the machine memory map leave the
773	 * region as RAM as it is in the pseudo-physical map.
774	 *
775	 * UNUSABLE regions in domUs are not handled and will need
776	 * a patch in the future.
777	 */
778	if (xen_initial_domain())
779		xen_ignore_unusable();
780
781	/* Make sure the Xen-supplied memory map is well-ordered. */
782	e820__update_table(&xen_e820_table);
783
784	max_pages = xen_get_max_pages();
785
786	/* How many extra pages do we need due to remapping? */
787	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);
788
789	if (max_pages > max_pfn)
790		extra_pages += max_pages - max_pfn;
791
792	/*
793	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO
794	 * factor of the base size.  On non-highmem systems, the base
795	 * size is the full initial memory allocation; on highmem it
796	 * is limited to the max size of lowmem, so that it doesn't
797	 * get completely filled.
798	 *
799	 * Make sure we have no memory above max_pages, as this area
800	 * isn't handled by the p2m management.
801	 *
802	 * In principle there could be a problem in lowmem systems if
803	 * the initial memory is also very large with respect to
804	 * lowmem, but we won't try to deal with that here.
805	 */
806	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
807			   extra_pages, max_pages - max_pfn);
808	i = 0;
809	addr = xen_e820_table.entries[0].addr;
810	size = xen_e820_table.entries[0].size;
811	while (i < xen_e820_table.nr_entries) {
812		bool discard = false;
813
814		chunk_size = size;
815		type = xen_e820_table.entries[i].type;
816
817		if (type == E820_TYPE_RAM) {
818			if (addr < mem_end) {
819				chunk_size = min(size, mem_end - addr);
820			} else if (extra_pages) {
821				chunk_size = min(size, PFN_PHYS(extra_pages));
822				pfn_s = PFN_UP(addr);
823				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
824				extra_pages -= n_pfns;
825				xen_add_extra_mem(pfn_s, n_pfns);
826				xen_max_p2m_pfn = pfn_s + n_pfns;
827			} else
828				discard = true;
829		}
830
831		if (!discard)
832			xen_align_and_add_e820_region(addr, chunk_size, type);
833
834		addr += chunk_size;
835		size -= chunk_size;
836		if (size == 0) {
837			i++;
838			if (i < xen_e820_table.nr_entries) {
839				addr = xen_e820_table.entries[i].addr;
840				size = xen_e820_table.entries[i].size;
841			}
842		}
843	}
844
845	/*
846	 * Set the rest as identity mapped, in case PCI BARs are
847	 * located here.
848	 */
849	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);
850
851	/*
852	 * In domU, the ISA region is normal, usable memory, but we
853	 * reserve ISA memory anyway because too many things poke
854	 * about in there.
855	 */
856	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);
857
858	e820__update_table(e820_table);
859
860	/*
861	 * Check whether the kernel itself conflicts with the target E820 map.
862	 * Failing now is better than running into weird problems later due
863	 * to relocating (and even reusing) pages with kernel text or data.
864	 */
865	if (xen_is_e820_reserved(__pa_symbol(_text),
866			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
867		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
868		BUG();
869	}
870
871	/*
872	 * Check for a conflict of the hypervisor supplied page tables with
873	 * the target E820 map.
874	 */
875	xen_pt_check_e820();
876
877	xen_reserve_xen_mfnlist();
878
879	/* Check for a conflict of the initrd with the target E820 map. */
880	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
881				 boot_params.hdr.ramdisk_size)) {
882		phys_addr_t new_area, start, size;
883
884		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
885		if (!new_area) {
886			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
887			BUG();
888		}
889
890		start = boot_params.hdr.ramdisk_image;
891		size = boot_params.hdr.ramdisk_size;
892		xen_phys_memcpy(new_area, start, size);
893		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
894			start, start + size, new_area, new_area + size);
895		memblock_free(start, size);
896		boot_params.hdr.ramdisk_image = new_area;
897		boot_params.ext_ramdisk_image = new_area >> 32;
898	}
899
900	/*
901	 * Set identity map on non-RAM pages and prepare remapping the
902	 * underlying RAM.
903	 */
904	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);
905
906	pr_info("Released %ld page(s)\n", xen_released_pages);
907
908	return "Xen";
909}
910
911static int register_callback(unsigned type, const void *func)
912{
913	struct callback_register callback = {
914		.type = type,
915		.address = XEN_CALLBACK(__KERNEL_CS, func),
916		.flags = CALLBACKF_mask_events,
917	};
918
919	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
920}
921
922void xen_enable_sysenter(void)
923{
924	int ret;
925	unsigned sysenter_feature;
926
927	sysenter_feature = X86_FEATURE_SYSENTER32;
928
929	if (!boot_cpu_has(sysenter_feature))
930		return;
931
932	ret = register_callback(CALLBACKTYPE_sysenter, xen_sysenter_target);
933	if (ret != 0)
934		setup_clear_cpu_cap(sysenter_feature);
935}
936
937void xen_enable_syscall(void)
938{
939	int ret;
940
941	ret = register_callback(CALLBACKTYPE_syscall, xen_syscall_target);
942	if (ret != 0) {
943		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
944		/* Pretty fatal; 64-bit userspace has no other
945		   mechanism for syscalls. */
946	}
947
948	if (boot_cpu_has(X86_FEATURE_SYSCALL32)) {
949		ret = register_callback(CALLBACKTYPE_syscall32,
950					xen_syscall32_target);
951		if (ret != 0)
952			setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
953	}
954}
955
956static void __init xen_pvmmu_arch_setup(void)
957{
958	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
959	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
960
961	HYPERVISOR_vm_assist(VMASST_CMD_enable,
962			     VMASST_TYPE_pae_extended_cr3);
963
964	if (register_callback(CALLBACKTYPE_event,
965			      xen_asm_exc_xen_hypervisor_callback) ||
966	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
967		BUG();
968
969	xen_enable_sysenter();
970	xen_enable_syscall();
971}
972
973/* This function is not called for HVM domains */
974void __init xen_arch_setup(void)
975{
976	xen_panic_handler_init();
977	xen_pvmmu_arch_setup();
978
979#ifdef CONFIG_ACPI
980	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
981		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
982		disable_acpi();
983	}
984#endif
985
986	memcpy(boot_command_line, xen_start_info->cmd_line,
987	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
988	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);
989
990	/* Set up idle, making sure it calls safe_halt() pvop */
991	disable_cpuidle();
992	disable_cpufreq();
993	WARN_ON(xen_set_default_idle());
994#ifdef CONFIG_NUMA
995	numa_off = 1;
996#endif
997}