arch/x86/xen/mmu.c (v6.2)
// SPDX-License-Identifier: GPL-2.0

#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);
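
As a usage sketch (not part of this file; the function and buffer names below are hypothetical), a PV-aware driver holding a vmalloc'd buffer, which lies outside the linear map, could still obtain the machine frame to hand to the hypervisor:

/* Illustrative only: share_with_hypervisor() and buf are made-up names. */
static int share_with_hypervisor(void *buf)
{
	unsigned long mfn;

	/*
	 * Works for both lowmem and vmalloc addresses: the helper falls
	 * back to a full page-table walk when virt_addr_valid() is false.
	 */
	mfn = arbitrary_virt_to_mfn(buf);

	/* ... hand mfn to the hypervisor or to another domain ... */
	pr_info("sharing mfn %lx\n", mfn);
	return 0;
}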

/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_unmap_gfn_range(vma, nr, pages);

	if (!pages)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);
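
A hedged teardown sketch (the caller below is hypothetical): on auto-translated guests the pages handed out at map time must be passed back, while the PV path expects pages to be NULL and otherwise returns -EINVAL:

/* Illustrative only: undo a previously established foreign mapping. */
static void teardown_foreign_mapping(struct vm_area_struct *vma, int nr,
				     struct page **pages)
{
	/* pages must be NULL on PV guests; see the check above. */
	int rc = xen_unmap_domain_gfn_range(vma, nr, pages);

	WARN_ON(rc);
}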
arch/x86/xen/mmu.c (v4.17)
 
 
#include <linux/pfn.h>
#include <asm/xen/page.h>
#include <asm/xen/hypercall.h>
#include <xen/interface/memory.h>

#include "multicalls.h"
#include "mmu.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

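A hedged sketch of the locking pattern the comment above describes (the function name and body are made up; the real users sit elsewhere in the Xen memory code):

/* Illustrative only: serialize a reservation adjustment. */
static void adjust_reservation_sketch(void)
{
	unsigned long flags;

	spin_lock_irqsave(&xen_reservation_lock, flags);
	/* ... issue XENMEM_increase/decrease_reservation, update counters ... */
	spin_unlock_irqrestore(&xen_reservation_lock, flags);
}
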
unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

static noinline void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

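xen_flush_tlb_all() above is one instance of the multicall pattern used throughout this file: reserve a slot with xen_mc_entry(), fill in the hypercall arguments, queue it with a MULTI_* helper, and let xen_mc_issue() flush the batch (or defer it while lazy MMU mode is active). A hedged sketch of the same pattern for a local-only flush follows; it is illustrative and not part of this file, and it assumes MMUEXT_TLB_FLUSH_LOCAL, the Xen mmuext op for the current vCPU:

/* Illustrative only: batch a local TLB flush the same way. */
static void example_flush_tlb_local(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));	/* reserve space in the batch */
	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;	/* current vCPU only */
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);	/* issue now, or defer if lazy */

	preempt_enable();
}
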
#define REMAP_BATCH_SIZE 16

struct remap_data {
	xen_pfn_t *mfn;
	bool contiguous;
	pgprot_t prot;
	struct mmu_update *mmu_update;
};

static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token,
				 unsigned long addr, void *data)
{
	struct remap_data *rmd = data;
	pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot));

	/* If we have a contiguous range, just update the mfn itself,
	   else update pointer to be "next mfn". */
	if (rmd->contiguous)
		(*rmd->mfn)++;
	else
		rmd->mfn++;

	rmd->mmu_update->ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	rmd->mmu_update->val = pte_val_ma(pte);
	rmd->mmu_update++;

	return 0;
}

static int do_remap_gfn(struct vm_area_struct *vma,
			unsigned long addr,
			xen_pfn_t *gfn, int nr,
			int *err_ptr, pgprot_t prot,
			unsigned domid,
			struct page **pages)
{
	int err = 0;
	struct remap_data rmd;
	struct mmu_update mmu_update[REMAP_BATCH_SIZE];
	unsigned long range;
	int mapped = 0;

	BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO)));

	rmd.mfn = gfn;
	rmd.prot = prot;
	/* We use err_ptr to indicate whether we are doing a contiguous
	 * mapping or a discontiguous mapping. */
	rmd.contiguous = !err_ptr;

	while (nr) {
		int index = 0;
		int done = 0;
		int batch = min(REMAP_BATCH_SIZE, nr);
		int batch_left = batch;
		range = (unsigned long)batch << PAGE_SHIFT;

		rmd.mmu_update = mmu_update;
		err = apply_to_page_range(vma->vm_mm, addr, range,
					  remap_area_mfn_pte_fn, &rmd);
		if (err)
			goto out;

		/* We record the error for each page that gives an error, but
		 * continue mapping until the whole set is done */
		do {
			int i;

			err = HYPERVISOR_mmu_update(&mmu_update[index],
						    batch_left, &done, domid);

			/*
			 * @err_ptr may be the same buffer as @gfn, so
			 * only clear it after each chunk of @gfn is
			 * used.
			 */
			if (err_ptr) {
				for (i = index; i < index + done; i++)
					err_ptr[i] = 0;
			}
			if (err < 0) {
				if (!err_ptr)
					goto out;
				err_ptr[i] = err;
				done++; /* Skip failed frame. */
			} else
				mapped += done;
			batch_left -= done;
			index += done;
		} while (batch_left);

		nr -= batch;
		addr += range;
		if (err_ptr)
			err_ptr += batch;
		cond_resched();
	}
out:

	xen_flush_tlb_all();

	return err < 0 ? err : mapped;
}

int xen_remap_domain_gfn_range(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t gfn, int nr,
			       pgprot_t prot, unsigned domid,
			       struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return -EOPNOTSUPP;

	return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range);

int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
			       unsigned long addr,
			       xen_pfn_t *gfn, int nr,
			       int *err_ptr, pgprot_t prot,
			       unsigned domid, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr,
						 prot, domid, pages);

	/* We BUG_ON because passing a NULL err_ptr is a programmer error,
	 * and it is very hard to detect later what actually caused the
	 * wrong memory to be mapped in.
	 */
	BUG_ON(err_ptr == NULL);
	return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages);
}
EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array);

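A hedged sketch contrasting the two entry points (the wrapper below is hypothetical and illustrative only): xen_remap_domain_gfn_range() maps nr consecutive frames starting from a single gfn, while xen_remap_domain_gfn_array() takes one gfn per frame and requires a per-frame error array.

/* Illustrative only: map foreign frames into @vma from an mmap-style path. */
static int map_foreign_frames(struct vm_area_struct *vma, unsigned long addr,
			      xen_pfn_t *gfns, int *errs, int nr,
			      unsigned int domid)
{
	/* Contiguous range: one starting gfn, no per-frame error reporting. */
	if (nr == 1)
		return xen_remap_domain_gfn_range(vma, addr, gfns[0], 1,
						  vma->vm_page_prot, domid,
						  NULL);

	/*
	 * Scattered frames: err_ptr must be non-NULL (see the BUG_ON above).
	 * pages is passed as NULL here; auto-translated guests would need
	 * the real array allocated at map time.
	 */
	return xen_remap_domain_gfn_array(vma, addr, gfns, nr, errs,
					  vma->vm_page_prot, domid, NULL);
}
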
/* Returns: 0 success */
int xen_unmap_domain_gfn_range(struct vm_area_struct *vma,
			       int nr, struct page **pages)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return xen_xlate_unmap_gfn_range(vma, nr, pages);

	if (!pages)
		return 0;

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range);