/******************************************************************************
 * grant_table.h
 *
 * Two sets of functionality:
 * 1. Granting foreign access to our memory reservation.
 * 2. Accessing others' memory reservations via grant references.
 * (i.e., mechanisms for both sender and recipient of grant references)
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __ASM_GNTTAB_H__
#define __ASM_GNTTAB_H__

#include <asm/page.h>

#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>

#include <asm/xen/hypervisor.h>

#include <xen/features.h>
#include <xen/page.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/kernel.h>

/*
 * Technically there is no reliably invalid grant reference or grant handle,
 * so pick the value that is least likely to be observed as valid.
 */
#define INVALID_GRANT_REF    ((grant_ref_t)-1)
#define INVALID_GRANT_HANDLE ((grant_handle_t)-1)

#define GNTTAB_RESERVED_XENSTORE 1

/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
#define NR_GRANT_FRAMES 4

struct gnttab_free_callback {
        struct gnttab_free_callback *next;
        void (*fn)(void *);
        void *arg;
        u16 count;
};

struct gntab_unmap_queue_data;

typedef void (*gnttab_unmap_refs_done)(int result, struct gntab_unmap_queue_data *data);

struct gntab_unmap_queue_data
{
        struct delayed_work gnttab_work;
        void *data;
        gnttab_unmap_refs_done done;
        struct gnttab_unmap_grant_ref *unmap_ops;
        struct gnttab_unmap_grant_ref *kunmap_ops;
        struct page **pages;
        unsigned int count;
        unsigned int age;
};

int gnttab_init(void);
int gnttab_suspend(void);
int gnttab_resume(void);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
                                int readonly);

/*
 * End access through the given grant reference, iff the grant entry is no
 * longer in use.  Return 1 if the grant entry was freed, 0 if it is still in
 * use.
 */
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly);

/*
 * Eventually end access through the given grant reference, and once that
 * access has been ended, free the given page too.  Access will be ended
 * immediately iff the grant entry is not in use, otherwise it will happen
 * some time later.  page may be 0, in which case no freeing will occur.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
                               unsigned long page);
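
/*
 * Example (illustrative sketch, not part of this interface): share a single
 * page read/write with another domain and tear the grant down again later.
 * "otherend_id" is a placeholder for the domid obtained from xenbus, and
 * virt_to_gfn() is assumed to be the frame-number conversion available on the
 * target architecture; error handling is reduced to a bare minimum.
 *
 *	unsigned long vaddr = get_zeroed_page(GFP_KERNEL);
 *	int ref;
 *
 *	if (!vaddr)
 *		return -ENOMEM;
 *	ref = gnttab_grant_foreign_access(otherend_id,
 *					  virt_to_gfn((void *)vaddr), 0);
 *	if (ref < 0) {
 *		free_page(vaddr);
 *		return ref;
 *	}
 *
 *	// ... the other end maps or copies from the grant ...
 *
 *	// Ends access and frees the page once the grant is no longer in use.
 *	gnttab_end_foreign_access(ref, 0, vaddr);
 */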

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);

int gnttab_query_foreign_access(grant_ref_t ref);

/*
 * Operations on reserved batches of grant references.
 */
int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);

void gnttab_free_grant_reference(grant_ref_t ref);

void gnttab_free_grant_references(grant_ref_t head);

int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);

int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);

void gnttab_release_grant_reference(grant_ref_t *private_head,
                                    grant_ref_t release);
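
/*
 * Example (illustrative sketch): reserve a batch of grant references up
 * front, claim them one at a time, and return the batch when done.  The
 * names NR_RING_GRANTS, otherend_id, gfn and unused_ref are hypothetical
 * values supplied by the calling driver; gnttab_grant_foreign_access_ref()
 * is declared a few lines below.
 *
 *	grant_ref_t gref_head;
 *	int ref, err;
 *
 *	err = gnttab_alloc_grant_references(NR_RING_GRANTS, &gref_head);
 *	if (err < 0)
 *		return err;
 *
 *	ref = gnttab_claim_grant_reference(&gref_head);
 *	if (ref < 0)
 *		return ref;		// reserved batch exhausted
 *	gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0);
 *
 *	// A claimed but unused reference can be put back into the batch ...
 *	gnttab_release_grant_reference(&gref_head, unused_ref);
 *	// ... and at teardown the remaining batch is returned in one go.
 *	gnttab_free_grant_references(gref_head);
 */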

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
                                  void (*fn)(void *), void *arg, u16 count);
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
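
/*
 * Example (illustrative sketch): a driver that temporarily runs out of grant
 * references can ask to be notified once enough of them are free again.  The
 * callback is invoked from the context that releases references, so it should
 * only do lightweight work such as kicking the driver's own worker.  The
 * names my_tx_callback, my_dev and tx_work are hypothetical.
 *
 *	static struct gnttab_free_callback my_callback;
 *
 *	static void my_tx_callback(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		schedule_work(&dev->tx_work);
 *	}
 *
 *	// Request notification once 16 grant references are available.
 *	gnttab_request_free_callback(&my_callback, my_tx_callback, dev, 16);
 *	// If the driver is torn down first, the request must be cancelled.
 *	gnttab_cancel_free_callback(&my_callback);
 */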

void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
                                     unsigned long frame, int readonly);

/* Give access to the first 4K of the page */
static inline void gnttab_page_grant_foreign_access_ref_one(
        grant_ref_t ref, domid_t domid,
        struct page *page, int readonly)
{
        gnttab_grant_foreign_access_ref(ref, domid, xen_page_to_gfn(page),
                                        readonly);
}

void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
                                       unsigned long pfn);

static inline void
gnttab_set_map_op(struct gnttab_map_grant_ref *map, phys_addr_t addr,
                  uint32_t flags, grant_ref_t ref, domid_t domid)
{
        if (flags & GNTMAP_contains_pte)
                map->host_addr = addr;
        else if (xen_feature(XENFEAT_auto_translated_physmap))
                map->host_addr = __pa(addr);
        else
                map->host_addr = addr;

        map->flags = flags;
        map->ref = ref;
        map->dom = domid;
        map->status = 1; /* arbitrary positive value */
}

static inline void
gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
                    uint32_t flags, grant_handle_t handle)
{
        if (flags & GNTMAP_contains_pte)
                unmap->host_addr = addr;
        else if (xen_feature(XENFEAT_auto_translated_physmap))
                unmap->host_addr = __pa(addr);
        else
                unmap->host_addr = addr;

        unmap->handle = handle;
        unmap->dev_bus_addr = 0;
}
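
/*
 * Example (illustrative sketch): map one foreign grant into a locally
 * allocated page and unmap it again, using the two helpers above together
 * with gnttab_map_refs()/gnttab_unmap_refs() declared further down.  Error
 * paths are trimmed, and "ref"/"otherend_id" are placeholders supplied by
 * the caller.
 *
 *	struct gnttab_map_grant_ref map_op;
 *	struct gnttab_unmap_grant_ref unmap_op;
 *	struct page *page;
 *	unsigned long addr;
 *	int err;
 *
 *	err = gnttab_alloc_pages(1, &page);
 *	if (err)
 *		return err;
 *	addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 *
 *	gnttab_set_map_op(&map_op, addr, GNTMAP_host_map, ref, otherend_id);
 *	err = gnttab_map_refs(&map_op, NULL, &page, 1);
 *	if (err || map_op.status != GNTST_okay)
 *		goto out;
 *
 *	// ... access the mapped page ...
 *
 *	gnttab_set_unmap_op(&unmap_op, addr, GNTMAP_host_map, map_op.handle);
 *	gnttab_unmap_refs(&unmap_op, NULL, &page, 1);
 * out:
 *	gnttab_free_pages(1, &page);
 */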

int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           void **__shared);
int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           grant_status_t **__shared);
void arch_gnttab_unmap(void *shared, unsigned long nr_gframes);

struct grant_frames {
        xen_pfn_t *pfn;
        unsigned int count;
        void *vaddr;
};
extern struct grant_frames xen_auto_xlat_grant_frames;
unsigned int gnttab_max_grant_frames(void);
int gnttab_setup_auto_xlat_frames(phys_addr_t addr);
void gnttab_free_auto_xlat_frames(void);

#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))

int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);

struct gnttab_page_cache {
        spinlock_t lock;
#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
        struct page *pages;
#else
        struct list_head pages;
#endif
        unsigned int num_pages;
};

void gnttab_page_cache_init(struct gnttab_page_cache *cache);
int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page);
void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
                           unsigned int num);
void gnttab_page_cache_shrink(struct gnttab_page_cache *cache,
                              unsigned int num);
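
/*
 * Example (illustrative sketch): a backend can keep a small cache of pages
 * used for grant mappings instead of going through gnttab_alloc_pages()/
 * gnttab_free_pages() for every request.  "my_cache" is a hypothetical
 * per-device cache.
 *
 *	static struct gnttab_page_cache my_cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&my_cache);
 *
 *	// Take a page from the cache, falling back to a fresh allocation.
 *	if (gnttab_page_cache_get(&my_cache, &page))
 *		if (gnttab_alloc_pages(1, &page))
 *			return -ENOMEM;
 *
 *	// ... use the page for a grant mapping ...
 *
 *	// Return the page to the cache instead of freeing it ...
 *	gnttab_page_cache_put(&my_cache, &page, 1);
 *	// ... and trim the cache to at most 16 spare pages when idle.
 *	gnttab_page_cache_shrink(&my_cache, 16);
 */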

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
struct gnttab_dma_alloc_args {
        /* Device for which DMA memory will be/was allocated. */
        struct device *dev;
        /* If set, the DMA buffer is coherent; otherwise it is write-combine. */
        bool coherent;

        int nr_pages;
        struct page **pages;
        xen_pfn_t *frames;
        void *vaddr;
        dma_addr_t dev_bus_addr;
};

int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
#endif

int gnttab_pages_set_private(int nr_pages, struct page **pages);
void gnttab_pages_clear_private(int nr_pages, struct page **pages);

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
                    struct gnttab_map_grant_ref *kmap_ops,
                    struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
                      struct gnttab_unmap_grant_ref *kunmap_ops,
                      struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);


/* Perform a batch of grant map/copy operations.  Retry every batch slot
 * for which the hypervisor returns GNTST_eagain.  This is typically due
 * to paged-out target frames.
 *
 * Will retry for 1, 2, ... 255 ms, i.e. 256 times during 32 seconds.
 *
 * The return value in each and every status field of the batch is
 * guaranteed not to be GNTST_eagain.
 */
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count);
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count);
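
/*
 * Example (illustrative sketch): copy the first 1024 bytes out of a foreign
 * grant into a local frame via gnttab_batch_copy(), which transparently
 * retries GNTST_eagain as described above.  "ref", "otherend_id" and
 * "local_gfn" are placeholders the caller already owns.
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref  = ref,
 *		.source.domid  = otherend_id,
 *		.source.offset = 0,
 *		.dest.u.gmfn   = local_gfn,
 *		.dest.domid    = DOMID_SELF,
 *		.dest.offset   = 0,
 *		.len           = 1024,
 *		.flags         = GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		return -EIO;
 */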


struct xen_page_foreign {
        domid_t domid;
        grant_ref_t gref;
};

static inline struct xen_page_foreign *xen_page_foreign(struct page *page)
{
        if (!PageForeign(page))
                return NULL;
#if BITS_PER_LONG < 64
        return (struct xen_page_foreign *)page->private;
#else
        BUILD_BUG_ON(sizeof(struct xen_page_foreign) > BITS_PER_LONG);
        return (struct xen_page_foreign *)&page->private;
#endif
}
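
/*
 * Example (illustrative sketch): pages mapped from another domain are marked
 * as foreign, and xen_page_foreign() recovers which domain and grant
 * reference they belong to, e.g. when a backend needs to know the origin of
 * a page it is handling.
 *
 *	struct xen_page_foreign *foreign = xen_page_foreign(page);
 *
 *	if (foreign)
 *		pr_debug("page belongs to dom%u, gref %u\n",
 *			 foreign->domid, foreign->gref);
 */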

/* Split a Linux page into grant-sized chunks and call fn on each chunk.
 *
 * Parameters of fn:
 *	gfn: guest frame number
 *	offset: offset in the grant
 *	len: length of the data in the grant
 *	data: internal information
 */
typedef void (*xen_grant_fn_t)(unsigned long gfn, unsigned int offset,
                               unsigned int len, void *data);

void gnttab_foreach_grant_in_range(struct page *page,
                                   unsigned int offset,
                                   unsigned int len,
                                   xen_grant_fn_t fn,
                                   void *data);

/* Helper to call fn only on the first "grant chunk" */
static inline void gnttab_for_one_grant(struct page *page, unsigned int offset,
                                        unsigned len, xen_grant_fn_t fn,
                                        void *data)
{
        /* The first request is limited to the size of one grant */
        len = min_t(unsigned int, XEN_PAGE_SIZE - (offset & ~XEN_PAGE_MASK),
                    len);

        gnttab_foreach_grant_in_range(page, offset, len, fn, data);
}

/* Get @nr_grefs grants from an array of pages and call fn for each grant */
void gnttab_foreach_grant(struct page **pages,
                          unsigned int nr_grefs,
                          xen_grant_fn_t fn,
                          void *data);

/* Get the number of grants in a specified region.
 *
 * start: offset from the beginning of the first page
 * len: total length of data (can cross multiple pages)
 */
static inline unsigned int gnttab_count_grant(unsigned int start,
                                              unsigned int len)
{
        return XEN_PFN_UP(xen_offset_in_page(start) + len);
}
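
/*
 * Example (illustrative sketch): walk a buffer that may span several grants
 * (on arm64, for instance, a 64 KiB Linux page covers multiple 4 KiB Xen
 * grants).  The callback "setup_one_grant" and its "struct setup_ctx" are
 * hypothetical.
 *
 *	static void setup_one_grant(unsigned long gfn, unsigned int offset,
 *				    unsigned int len, void *data)
 *	{
 *		struct setup_ctx *ctx = data;
 *
 *		// Grant "len" bytes at "offset" within frame "gfn".
 *		...
 *	}
 *
 *	unsigned int nr_grefs = gnttab_count_grant(offset, len);
 *
 *	// Reserve nr_grefs grant references, then walk the region:
 *	gnttab_foreach_grant_in_range(page, offset, len, setup_one_grant, &ctx);
 */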

#endif /* __ASM_GNTTAB_H__ */