// SPDX-License-Identifier: GPL-2.0
/*
 * Machine specific setup for xen
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kstrtox.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/memblock.h>
#include <linux/cpuidle.h>
#include <linux/cpufreq.h>
#include <linux/memory_hotplug.h>

#include <asm/elf.h>
#include <asm/vdso.h>
#include <asm/e820/api.h>
#include <asm/setup.h>
#include <asm/acpi.h>
#include <asm/numa.h>
#include <asm/idtentry.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/callback.h>
#include <xen/interface/memory.h>
#include <xen/interface/physdev.h>
#include <xen/features.h>
#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"

#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024)

/* Amount of extra memory space we add to the e820 ranges */
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;

/* Number of pages released from the initial allocation. */
unsigned long xen_released_pages;

/* E820 map used during setting up memory. */
static struct e820_table xen_e820_table __initdata;

/*
 * Buffer used to remap identity mapped pages. We only need the virtual space.
 * The physical page behind this address is remapped as needed to different
 * buffer pages.
 */
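/*
 * Note: REMAP_SIZE is P2M_PER_PAGE minus 3 because the three bookkeeping
 * fields below (next_area_mfn, target_pfn, size) occupy the first three
 * unsigned longs of the page, so the structure including the mfns[] array
 * fills exactly one page.
 */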
#define REMAP_SIZE	(P2M_PER_PAGE - 3)
static struct {
	unsigned long	next_area_mfn;
	unsigned long	target_pfn;
	unsigned long	size;
	unsigned long	mfns[REMAP_SIZE];
} xen_remap_buf __initdata __aligned(PAGE_SIZE);
static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY;

/*
 * The maximum amount of extra memory compared to the base size.  The
 * main scaling factor is the size of struct page.  At extreme ratios
 * of base:extra, all the base memory can be filled with page
 * structures for the extra memory, leaving no space for anything
 * else.
 *
 * 10x seems like a reasonable balance between scaling flexibility and
 * leaving a practically usable system.
 */
#define EXTRA_MEM_RATIO		(10)

static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB);

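/*
 * Parse "xen_512gb_limit" from the Xen-supplied command line. The bare
 * option (no '=') enables the limit; "xen_512gb_limit=<bool>" sets it
 * explicitly, e.g. "xen_512gb_limit=0" to disable it. An unparsable value
 * leaves the compile-time default untouched.
 */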
static void __init xen_parse_512gb(void)
{
	bool val = false;
	char *arg;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit");
	if (!arg)
		return;

	arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit=");
	if (!arg)
		val = true;
	else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val))
		return;

	xen_512gb_limit = val;
}

static void __init xen_add_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;

	/*
	 * No need to check for a zero size: that should happen rarely, and
	 * would merely write a new entry which is regarded as unused because
	 * of its zero size.
	 */
	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		/* Add new region. */
		if (xen_extra_mem[i].n_pfns == 0) {
			xen_extra_mem[i].start_pfn = start_pfn;
			xen_extra_mem[i].n_pfns = n_pfns;
			break;
		}
		/* Append to existing region. */
		if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==
		    start_pfn) {
			xen_extra_mem[i].n_pfns += n_pfns;
			break;
		}
	}
	if (i == XEN_EXTRA_MEM_MAX_REGIONS)
		printk(KERN_WARNING "Warning: not enough extra memory regions\n");

	memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

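/*
 * Remove [start_pfn, start_pfn + n_pfns) from the extra memory regions.
 * A range carved out of the middle of a region splits it: the existing
 * entry is trimmed to the part below the hole, and the part above it is
 * re-added via xen_add_extra_mem() (whose repeated memblock_reserve() of
 * already reserved memory is harmless, as noted below).
 */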
static void __init xen_del_extra_mem(unsigned long start_pfn,
				     unsigned long n_pfns)
{
	int i;
	unsigned long start_r, size_r;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		start_r = xen_extra_mem[i].start_pfn;
		size_r = xen_extra_mem[i].n_pfns;

		/* Start of region. */
		if (start_r == start_pfn) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].start_pfn += n_pfns;
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* End of region. */
		if (start_r + size_r == start_pfn + n_pfns) {
			BUG_ON(n_pfns > size_r);
			xen_extra_mem[i].n_pfns -= n_pfns;
			break;
		}
		/* Middle of the region. */
		if (start_pfn > start_r && start_pfn < start_r + size_r) {
			BUG_ON(start_pfn + n_pfns > start_r + size_r);
			xen_extra_mem[i].n_pfns = start_pfn - start_r;
			/* Calling memblock_reserve() again is okay. */
			xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -
					  (start_pfn + n_pfns));
			break;
		}
	}
	memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));
}

/*
 * Called during boot before the p2m list can take entries beyond the
 * hypervisor supplied p2m list. Entries in extra mem are to be regarded as
 * invalid.
 */
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (pfn >= xen_extra_mem[i].start_pfn &&
		    pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns)
			return INVALID_P2M_ENTRY;
	}

	return IDENTITY_FRAME(pfn);
}

/*
 * Mark all pfns of extra mem as invalid in p2m list.
 */
void __init xen_inv_extra_mem(void)
{
	unsigned long pfn, pfn_s, pfn_e;
	int i;

	for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
		if (!xen_extra_mem[i].n_pfns)
			continue;
		pfn_s = xen_extra_mem[i].start_pfn;
		pfn_e = pfn_s + xen_extra_mem[i].n_pfns;
		for (pfn = pfn_s; pfn < pfn_e; pfn++)
			set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
	}
}

/*
 * Finds the next RAM pfn available in the E820 map after min_pfn.
 * This function updates min_pfn with the pfn found and returns
 * the size of that range or zero if not found.
 */
static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn)
{
	const struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;
	unsigned long done = 0;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		unsigned long s_pfn;
		unsigned long e_pfn;

		if (entry->type != E820_TYPE_RAM)
			continue;

		e_pfn = PFN_DOWN(entry->addr + entry->size);

		/* We only care about E820 after this */
		if (e_pfn <= *min_pfn)
			continue;

		s_pfn = PFN_UP(entry->addr);

		/* If min_pfn falls within the E820 entry, we want to start
		 * at the min_pfn PFN.
		 */
		if (s_pfn <= *min_pfn) {
			done = e_pfn - *min_pfn;
		} else {
			done = e_pfn - s_pfn;
			*min_pfn = s_pfn;
		}
		break;
	}

	return done;
}

static int __init xen_free_mfn(unsigned long mfn)
{
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	reservation.nr_extents = 1;

	return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
}

/*
 * This releases a chunk of memory and then does the identity map. It's used
 * as a fallback if the remapping fails.
 */
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
			unsigned long end_pfn, unsigned long nr_pages)
{
	unsigned long pfn, end;
	int ret;

	WARN_ON(start_pfn > end_pfn);

	/* Release pages first. */
	end = min(end_pfn, nr_pages);
	for (pfn = start_pfn; pfn < end; pfn++) {
		unsigned long mfn = pfn_to_mfn(pfn);

		/* Make sure pfn exists to start with */
		if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
			continue;

		ret = xen_free_mfn(mfn);
		WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);

		if (ret == 1) {
			xen_released_pages++;
			if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
				break;
		} else
			break;
	}

	set_phys_range_identity(start_pfn, end_pfn);
}

/*
 * Helper function to update the p2m and m2p tables and kernel mapping.
 */
static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
{
	struct mmu_update update = {
		.ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE,
		.val = pfn
	};

	/* Update p2m */
	if (!set_phys_to_machine(pfn, mfn)) {
		WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n",
		     pfn, mfn);
		BUG();
	}

	/* Update m2p */
	if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) {
		WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n",
		     mfn, pfn);
		BUG();
	}

	if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
					 mfn_pte(mfn, PAGE_KERNEL), 0)) {
		WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n",
		      mfn, pfn);
		BUG();
	}
}

/*
 * This function updates the p2m and m2p tables with an identity map from
 * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the
 * original allocation at remap_pfn. The information needed for remapping is
 * saved in the memory itself to avoid the need for allocating buffers. The
 * complete remap information is contained in a list of MFNs each containing
 * up to REMAP_SIZE MFNs and the start target PFN for doing the remap.
 * This enables us to preserve the original mfn sequence while doing the
 * remapping at a time when the memory management is capable of allocating
 * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and
 * its callers.
 */
static void __init xen_do_set_identity_and_remap_chunk(
        unsigned long start_pfn, unsigned long size, unsigned long remap_pfn)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, mfn;
	unsigned long ident_pfn_iter, remap_pfn_iter;
	unsigned long ident_end_pfn = start_pfn + size;
	unsigned long left = size;
	unsigned int i, chunk;

	WARN_ON(size == 0);

	mfn_save = virt_to_mfn(buf);

	for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn;
	     ident_pfn_iter < ident_end_pfn;
	     ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) {
		chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE;

		/* Map first pfn to xen_remap_buf */
		mfn = pfn_to_mfn(ident_pfn_iter);
		set_pte_mfn(buf, mfn, PAGE_KERNEL);

		/* Save mapping information in page */
		xen_remap_buf.next_area_mfn = xen_remap_mfn;
		xen_remap_buf.target_pfn = remap_pfn_iter;
		xen_remap_buf.size = chunk;
		for (i = 0; i < chunk; i++)
			xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i);

		/* Put remap buf into list. */
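		/*
		 * The list is built head-first: this page has recorded the
		 * old head in next_area_mfn above, and xen_remap_mfn is now
		 * advanced to point at the most recently saved chunk.
		 */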
		xen_remap_mfn = mfn;

		/* Set identity map */
		set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);

		left -= chunk;
	}

	/* Restore old xen_remap_buf mapping */
	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);
}

/*
 * This function takes a contiguous pfn range that needs to be identity mapped
 * and:
 *
 *  1) Finds a new range of pfns to use to remap based on E820 and remap_pfn.
 *  2) Calls the do_ function to actually do the mapping/remapping work.
 *
 * The goal is to not allocate additional memory but to remap the existing
 * pages. In the case of an error the underlying memory is simply released back
 * to Xen and not remapped.
 */
static unsigned long __init xen_set_identity_and_remap_chunk(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pfn)
{
	unsigned long pfn;
	unsigned long i = 0;
	unsigned long n = end_pfn - start_pfn;

	if (remap_pfn == 0)
		remap_pfn = nr_pages;

	while (i < n) {
		unsigned long cur_pfn = start_pfn + i;
		unsigned long left = n - i;
		unsigned long size = left;
		unsigned long remap_range_size;

		/* Do not remap pages beyond the current allocation */
		if (cur_pfn >= nr_pages) {
			/* Identity map remaining pages */
			set_phys_range_identity(cur_pfn, cur_pfn + size);
			break;
		}
		if (cur_pfn + size > nr_pages)
			size = nr_pages - cur_pfn;

		remap_range_size = xen_find_pfn_range(&remap_pfn);
		if (!remap_range_size) {
			pr_warn("Unable to find available pfn range, not remapping identity pages\n");
			xen_set_identity_and_release_chunk(cur_pfn,
						cur_pfn + left, nr_pages);
			break;
		}
		/* Adjust size to fit in current e820 RAM region */
		if (size > remap_range_size)
			size = remap_range_size;

		xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn);

		/* Update variables to reflect new mappings. */
		i += size;
		remap_pfn += size;
	}

	/*
	 * If the PFNs are currently mapped, their VA mappings need to be
	 * zapped.
	 */
	for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
		(void)HYPERVISOR_update_va_mapping(
			(unsigned long)__va(pfn << PAGE_SHIFT),
			native_make_pte(0), 0);

	return remap_pfn;
}

static unsigned long __init xen_count_remap_pages(
	unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
	unsigned long remap_pages)
{
	if (start_pfn >= nr_pages)
		return remap_pages;

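	/* Count only the part of the range backed by the initial allocation. */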
	return remap_pages + min(end_pfn, nr_pages) - start_pfn;
}

static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
	unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn,
			      unsigned long nr_pages, unsigned long last_val))
{
	phys_addr_t start = 0;
	unsigned long ret_val = 0;
	const struct e820_entry *entry = xen_e820_table.entries;
	int i;

	/*
	 * Combine non-RAM regions and gaps until a RAM region (or the
	 * end of the map) is reached, then call the provided function
	 * to perform its duty on the non-RAM region.
	 *
	 * The combined non-RAM regions are rounded to a whole number
	 * of pages so any partial pages are accessible via the 1:1
	 * mapping.  This is needed for some BIOSes that put (for
	 * example) the DMI tables in a reserved region that begins on
	 * a non-page boundary.
	 */
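	/*
	 * For example, given RAM below 'a', a reserved block [a, b), a gap,
	 * and RAM again from 'c' on, func() is invoked once for the whole
	 * combined non-RAM span [a, c) (modulo page rounding).
	 */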
	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		phys_addr_t end = entry->addr + entry->size;
		if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) {
			unsigned long start_pfn = PFN_DOWN(start);
			unsigned long end_pfn = PFN_UP(end);

			if (entry->type == E820_TYPE_RAM)
				end_pfn = PFN_UP(entry->addr);

			if (start_pfn < end_pfn)
				ret_val = func(start_pfn, end_pfn, nr_pages,
					       ret_val);
			start = end;
		}
	}

	return ret_val;
}

/*
 * Remap the memory prepared in xen_do_set_identity_and_remap_chunk().
 * The remap information (which mfn gets remapped to which pfn) is contained
 * in the memory to be remapped itself, in a linked list anchored at
 * xen_remap_mfn. This scheme allows the chunks to be remapped in arbitrary
 * order, with the resulting mapping being independent of that order.
 */
void __init xen_remap_memory(void)
{
	unsigned long buf = (unsigned long)&xen_remap_buf;
	unsigned long mfn_save, pfn;
	unsigned long remapped = 0;
	unsigned int i;
	unsigned long pfn_s = ~0UL;
	unsigned long len = 0;

	mfn_save = virt_to_mfn(buf);

	while (xen_remap_mfn != INVALID_P2M_ENTRY) {
		/* Map the remap information */
		set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL);

		BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]);

		pfn = xen_remap_buf.target_pfn;
		for (i = 0; i < xen_remap_buf.size; i++) {
			xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]);
			remapped++;
			pfn++;
		}
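		/*
		 * Coalesce adjacent target ranges so that xen_del_extra_mem()
		 * is called once per contiguous range rather than per chunk.
		 */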
		if (pfn_s == ~0UL || pfn == pfn_s) {
			pfn_s = xen_remap_buf.target_pfn;
			len += xen_remap_buf.size;
		} else if (pfn_s + len == xen_remap_buf.target_pfn) {
			len += xen_remap_buf.size;
		} else {
			xen_del_extra_mem(pfn_s, len);
			pfn_s = xen_remap_buf.target_pfn;
			len = xen_remap_buf.size;
		}
		xen_remap_mfn = xen_remap_buf.next_area_mfn;
	}

	if (pfn_s != ~0UL && len)
		xen_del_extra_mem(pfn_s, len);

	set_pte_mfn(buf, mfn_save, PAGE_KERNEL);

	pr_info("Remapped %ld page(s)\n", remapped);
}

static unsigned long __init xen_get_pages_limit(void)
{
	unsigned long limit;

	limit = MAXMEM / PAGE_SIZE;
	if (!xen_initial_domain() && xen_512gb_limit)
		limit = GB(512) / PAGE_SIZE;

	return limit;
}

static unsigned long __init xen_get_max_pages(void)
{
	unsigned long max_pages, limit;
	domid_t domid = DOMID_SELF;
	long ret;

	limit = xen_get_pages_limit();
	max_pages = limit;

	/*
	 * For the initial domain we use the maximum reservation as
	 * the maximum page.
	 *
	 * For guest domains the current maximum reservation reflects
	 * the current maximum rather than the static maximum. In this
	 * case the e820 map provided to us will cover the static
	 * maximum region.
	 */
	if (xen_initial_domain()) {
		ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
		if (ret > 0)
			max_pages = ret;
	}

	return min(max_pages, limit);
}

static void __init xen_align_and_add_e820_region(phys_addr_t start,
						 phys_addr_t size, int type)
{
	phys_addr_t end = start + size;

	/* Align RAM regions to page boundaries. */
	if (type == E820_TYPE_RAM) {
		start = PAGE_ALIGN(start);
		end &= ~((phys_addr_t)PAGE_SIZE - 1);
#ifdef CONFIG_MEMORY_HOTPLUG
		/*
		 * Don't allow adding memory not in E820 map while booting the
		 * system. Once the balloon driver is up it will remove that
		 * restriction again.
		 */
		max_mem_size = end;
#endif
	}

	e820__range_add(start, end - start, type);
}

static void __init xen_ignore_unusable(void)
{
	struct e820_entry *entry = xen_e820_table.entries;
	unsigned int i;

	for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) {
		if (entry->type == E820_TYPE_UNUSABLE)
			entry->type = E820_TYPE_RAM;
	}
}

bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size)
{
	struct e820_entry *entry;
	unsigned mapcnt;
	phys_addr_t end;

	if (!size)
		return false;

	end = start + size;
	entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) {
		if (entry->type == E820_TYPE_RAM && entry->addr <= start &&
		    (entry->addr + entry->size) >= end)
			return false;

		entry++;
	}

	return true;
}

/*
 * Find a free area in physical memory which is not yet reserved and is
 * compliant with the E820 map.
 * Used to relocate pre-allocated areas like the initrd or the p2m list
 * which conflict with the E820 map about to be used.
 * In case no area is found, return 0. Otherwise return the physical address
 * of the area, which is already reserved for convenience.
 */
phys_addr_t __init xen_find_free_area(phys_addr_t size)
{
	unsigned mapcnt;
	phys_addr_t addr, start;
	struct e820_entry *entry = xen_e820_table.entries;

	for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) {
		if (entry->type != E820_TYPE_RAM || entry->size < size)
			continue;
		start = entry->addr;
		for (addr = start; addr < start + size; addr += PAGE_SIZE) {
			if (!memblock_is_reserved(addr))
				continue;
			start = addr + PAGE_SIZE;
			if (start + size > entry->addr + entry->size)
				break;
		}
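		/*
		 * If the scan got through a full 'size' window without
		 * pushing 'start' forward, [start, start + size) is free.
		 */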
		if (addr >= start + size) {
			memblock_reserve(start, size);
			return start;
		}
	}

	return 0;
}

/*
 * Like memcpy, but with physical addresses for dest and src.
 */
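/*
 * The copy proceeds in chunks bounded by the early fixmap window
 * (NR_FIX_BTMAPS pages), since early_memremap() can only map a limited
 * range this early in boot.
 */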
static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src,
				   phys_addr_t n)
{
	phys_addr_t dest_off, src_off, dest_len, src_len, len;
	void *from, *to;

	while (n) {
		dest_off = dest & ~PAGE_MASK;
		src_off = src & ~PAGE_MASK;
		dest_len = n;
		if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off)
			dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off;
		src_len = n;
		if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off)
			src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off;
		len = min(dest_len, src_len);
		to = early_memremap(dest - dest_off, dest_len + dest_off);
		from = early_memremap(src - src_off, src_len + src_off);
		memcpy(to, from, len);
		early_memunmap(to, dest_len + dest_off);
		early_memunmap(from, src_len + src_off);
		n -= len;
		dest += len;
		src += len;
	}
}

/*
 * Reserve Xen mfn_list.
 */
static void __init xen_reserve_xen_mfnlist(void)
{
	phys_addr_t start, size;

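	/*
	 * The p2m list is either still at its initial location in kernel
	 * virtual address space, or (e.g. when it was placed elsewhere by
	 * the boot path) is described by first_p2m_pfn/nr_p2m_frames in
	 * the start info.
	 */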
	if (xen_start_info->mfn_list >= __START_KERNEL_map) {
		start = __pa(xen_start_info->mfn_list);
		size = PFN_ALIGN(xen_start_info->nr_pages *
				 sizeof(unsigned long));
	} else {
		start = PFN_PHYS(xen_start_info->first_p2m_pfn);
		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
	}

	memblock_reserve(start, size);
	if (!xen_is_e820_reserved(start, size))
		return;

	xen_relocate_p2m();
	memblock_phys_free(start, size);
}

/**
 * xen_memory_setup - Hook for machine specific memory setup.
 **/
char * __init xen_memory_setup(void)
{
	unsigned long max_pfn, pfn_s, n_pfns;
	phys_addr_t mem_end, addr, size, chunk_size;
	u32 type;
	int rc;
	struct xen_memory_map memmap;
	unsigned long max_pages;
	unsigned long extra_pages = 0;
	int i;
	int op;

	xen_parse_512gb();
	max_pfn = xen_get_pages_limit();
	max_pfn = min(max_pfn, xen_start_info->nr_pages);
	mem_end = PFN_PHYS(max_pfn);

	memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries);
	set_xen_guest_handle(memmap.buffer, xen_e820_table.entries);

#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON)
	xen_saved_max_mem_size = max_mem_size;
#endif

	op = xen_initial_domain() ?
		XENMEM_machine_memory_map :
		XENMEM_memory_map;
	rc = HYPERVISOR_memory_op(op, &memmap);
	if (rc == -ENOSYS) {
		BUG_ON(xen_initial_domain());
		memmap.nr_entries = 1;
		xen_e820_table.entries[0].addr = 0ULL;
		xen_e820_table.entries[0].size = mem_end;
		/* 8MB slack (to balance backend allocations). */
		xen_e820_table.entries[0].size += 8ULL << 20;
		xen_e820_table.entries[0].type = E820_TYPE_RAM;
		rc = 0;
	}
	BUG_ON(rc);
	BUG_ON(memmap.nr_entries == 0);
	xen_e820_table.nr_entries = memmap.nr_entries;

	/*
	 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
	 * regions, so if we're using the machine memory map leave the
	 * region as RAM as it is in the pseudo-physical map.
	 *
	 * UNUSABLE regions in domUs are not handled and will need
	 * a patch in the future.
	 */
	if (xen_initial_domain())
		xen_ignore_unusable();

	/* Make sure the Xen-supplied memory map is well-ordered. */
	e820__update_table(&xen_e820_table);

	max_pages = xen_get_max_pages();

	/* How many extra pages do we need due to remapping? */
	max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages);

	if (max_pages > max_pfn)
		extra_pages += max_pages - max_pfn;

	/*
	 * Clamp the amount of extra memory to an EXTRA_MEM_RATIO factor of
	 * the base size.
	 *
	 * Make sure we have no memory above max_pages, as this area
	 * isn't handled by the p2m management.
	 */
	extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
			   extra_pages, max_pages - max_pfn);
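	/*
	 * I.e. extra_pages is bounded both by EXTRA_MEM_RATIO times the
	 * usable base memory and by the headroom the p2m list can actually
	 * manage (max_pages - max_pfn).
	 */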
	i = 0;
	addr = xen_e820_table.entries[0].addr;
	size = xen_e820_table.entries[0].size;
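	/*
	 * Walk the e820 map, splitting entries into chunks: RAM below
	 * mem_end is kept, RAM above it becomes extra memory (to be
	 * populated later) while extra_pages last, and any remainder is
	 * dropped from the final map.
	 */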
	while (i < xen_e820_table.nr_entries) {
		bool discard = false;

		chunk_size = size;
		type = xen_e820_table.entries[i].type;

		if (type == E820_TYPE_RAM) {
			if (addr < mem_end) {
				chunk_size = min(size, mem_end - addr);
			} else if (extra_pages) {
				chunk_size = min(size, PFN_PHYS(extra_pages));
				pfn_s = PFN_UP(addr);
				n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s;
				extra_pages -= n_pfns;
				xen_add_extra_mem(pfn_s, n_pfns);
				xen_max_p2m_pfn = pfn_s + n_pfns;
			} else
				discard = true;
		}

		if (!discard)
			xen_align_and_add_e820_region(addr, chunk_size, type);

		addr += chunk_size;
		size -= chunk_size;
		if (size == 0) {
			i++;
			if (i < xen_e820_table.nr_entries) {
				addr = xen_e820_table.entries[i].addr;
				size = xen_e820_table.entries[i].size;
			}
		}
	}

	/*
	 * Set the rest as identity mapped, in case PCI BARs are
	 * located here.
	 */
	set_phys_range_identity(addr / PAGE_SIZE, ~0ul);

	/*
	 * In domU, the ISA region is normal, usable memory, but we
	 * reserve ISA memory anyway because too many things poke
	 * about in there.
	 */
	e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED);

	e820__update_table(e820_table);

	/*
	 * Check whether the kernel itself conflicts with the target E820 map.
	 * Failing now is better than running into weird problems later due
	 * to relocating (and even reusing) pages with kernel text or data.
	 */
	if (xen_is_e820_reserved(__pa_symbol(_text),
			__pa_symbol(__bss_stop) - __pa_symbol(_text))) {
		xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n");
		BUG();
	}

	/*
	 * Check for a conflict of the hypervisor supplied page tables with
	 * the target E820 map.
	 */
	xen_pt_check_e820();

	xen_reserve_xen_mfnlist();

	/* Check for a conflict of the initrd with the target E820 map. */
	if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image,
				 boot_params.hdr.ramdisk_size)) {
		phys_addr_t new_area, start, size;

		new_area = xen_find_free_area(boot_params.hdr.ramdisk_size);
		if (!new_area) {
			xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n");
			BUG();
		}

		start = boot_params.hdr.ramdisk_image;
		size = boot_params.hdr.ramdisk_size;
		xen_phys_memcpy(new_area, start, size);
		pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n",
			start, start + size, new_area, new_area + size);
		memblock_phys_free(start, size);
		boot_params.hdr.ramdisk_image = new_area;
		boot_params.ext_ramdisk_image = new_area >> 32;
	}

	/*
	 * Set identity map on non-RAM pages and prepare remapping the
	 * underlying RAM.
	 */
	xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk);

	pr_info("Released %ld page(s)\n", xen_released_pages);

	return "Xen";
}

static int register_callback(unsigned type, const void *func)
{
	struct callback_register callback = {
		.type = type,
		.address = XEN_CALLBACK(__KERNEL_CS, func),
		.flags = CALLBACKF_mask_events,
	};

	return HYPERVISOR_callback_op(CALLBACKOP_register, &callback);
}

void xen_enable_sysenter(void)
{
	if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) &&
	    register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSENTER32);
}

void xen_enable_syscall(void)
{
	int ret;

	ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64);
	if (ret != 0) {
		printk(KERN_ERR "Failed to set syscall callback: %d\n", ret);
		/* Pretty fatal; 64-bit userspace has no other
		   mechanism for syscalls. */
	}

	if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) &&
	    register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat))
		setup_clear_cpu_cap(X86_FEATURE_SYSCALL32);
}

static void __init xen_pvmmu_arch_setup(void)
{
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
	HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);

	HYPERVISOR_vm_assist(VMASST_CMD_enable,
			     VMASST_TYPE_pae_extended_cr3);

	if (register_callback(CALLBACKTYPE_event,
			      xen_asm_exc_xen_hypervisor_callback) ||
	    register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
		BUG();

	xen_enable_sysenter();
	xen_enable_syscall();
}

/* This function is not called for HVM domains */
void __init xen_arch_setup(void)
{
	xen_panic_handler_init();
	xen_pvmmu_arch_setup();

#ifdef CONFIG_ACPI
	if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
		printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
		disable_acpi();
	}
#endif

	memcpy(boot_command_line, xen_start_info->cmd_line,
	       MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ?
	       COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE);

	/* Set up idle, making sure it calls safe_halt() pvop */
	disable_cpuidle();
	disable_cpufreq();
	WARN_ON(xen_set_default_idle());
#ifdef CONFIG_NUMA
	numa_off = 1;
#endif
}