// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen frontend/backend page directory based shared buffer
 * helper module.
 *
 * Copyright (C) 2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>

#include <xen/xen-front-pgdir-shbuf.h>

/**
 * This structure describes the layout of a shared page that holds
 * grant references to the pages of the shared buffer. The layout is
 * common to many Xen para-virtualized protocols at
 * include/xen/interface/io/
 */
struct xen_page_directory {
        grant_ref_t gref_dir_next_page;
#define XEN_GREF_LIST_END       0
        grant_ref_t gref[];     /* Variable length */
};
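
/*
 * Illustrative layout of a chained page directory (a sketch, not part
 * of the protocol headers): each directory page stores the gref of the
 * next directory page followed by up to XEN_NUM_GREFS_PER_PAGE grefs
 * of buffer pages; the last directory page terminates the chain with
 * XEN_GREF_LIST_END:
 *
 *   +--------------------+      +--------------------+
 *   | gref_dir_next_page | ---> | XEN_GREF_LIST_END  |
 *   | gref[0]            |      | gref[0]            |
 *   | ...                |      | ...                |
 *   +--------------------+      +--------------------+
 */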

/**
 * Shared buffer ops which are differently implemented
 * depending on the allocation mode, e.g. if the buffer
 * is allocated by the corresponding backend or frontend.
 * Some of the operations are optional.
 */
struct xen_front_pgdir_shbuf_ops {
        /*
         * Calculate number of grefs required to handle this buffer,
         * e.g. if grefs are required for page directory only or the buffer
         * pages as well.
         */
        void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);

        /* Fill page directory according to para-virtual display protocol. */
        void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);

        /* Claim grant references for the pages of the buffer. */
        int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
                                     grant_ref_t *priv_gref_head, int gref_idx);

        /* Map grant references of the buffer. */
        int (*map)(struct xen_front_pgdir_shbuf *buf);

        /* Unmap grant references of the buffer. */
        int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};

/**
 * Get the granted reference to the very first page of the
 * page directory. Usually this is passed to the backend,
 * so it can find/fill the grant references to the buffer's
 * pages.
 *
 * \param buf shared buffer whose page directory is of interest.
 * \return granted reference to the very first page of the
 * page directory.
 */
grant_ref_t
xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
{
        if (!buf->grefs)
                return INVALID_GRANT_REF;

        return buf->grefs[0];
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);

/**
 * Map granted references of the shared buffer.
 *
 * Depending on the shared buffer mode of allocation
 * (be_alloc flag) this can either do nothing (for buffers
 * shared by the frontend itself) or map the provided granted
 * references onto the backing storage (buf->pages).
 *
 * \param buf shared buffer whose grants are to be mapped.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
{
        if (buf->ops && buf->ops->map)
                return buf->ops->map(buf);

        /* No need to map own grant references. */
        return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);

/**
 * Unmap granted references of the shared buffer.
 *
 * Depending on the shared buffer mode of allocation
 * (be_alloc flag) this can either do nothing (for buffers
 * shared by the frontend itself) or unmap the provided granted
 * references.
 *
 * \param buf shared buffer whose grants are to be unmapped.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
{
        if (buf->ops && buf->ops->unmap)
                return buf->ops->unmap(buf);

        /* No need to unmap own grant references. */
        return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);

/**
 * Free all the resources of the shared buffer.
 *
 * \param buf shared buffer whose resources are to be freed.
 */
void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
{
        if (buf->grefs) {
                int i;

                for (i = 0; i < buf->num_grefs; i++)
                        if (buf->grefs[i] != INVALID_GRANT_REF)
                                gnttab_end_foreign_access(buf->grefs[i], NULL);
        }
        kfree(buf->grefs);
        kfree(buf->directory);
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);

/*
 * Number of grefs a page can hold with respect to the
 * struct xen_page_directory header.
 */
#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
                                 offsetof(struct xen_page_directory, \
                                          gref)) / sizeof(grant_ref_t))
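
/*
 * For example (illustrative arithmetic, assuming 4 KiB pages and a
 * 32-bit grant_ref_t): the directory header is a single grant_ref_t,
 * so each directory page holds (4096 - 4) / 4 = 1023 buffer grefs.
 */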

/**
 * Get the number of pages the page directory consumes itself.
 *
 * \param buf shared buffer.
 */
static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
{
        return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}

/**
 * Calculate the number of grant references needed to share the buffer
 * and its pages when the backend allocates the buffer.
 *
 * \param buf shared buffer.
 */
static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
        /* Only for pages the page directory consumes itself. */
        buf->num_grefs = get_num_pages_dir(buf);
}

/**
 * Calculate the number of grant references needed to share the buffer
 * and its pages when the frontend allocates the buffer.
 *
 * \param buf shared buffer.
 */
static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
        /*
         * Number of pages the page directory consumes itself
         * plus grefs for the buffer pages.
         */
        buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}
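
/*
 * Worked example (illustrative, assuming 4 KiB pages and 1023 grefs
 * per directory page): a 1 MiB frontend-allocated buffer spans 256
 * pages and needs one directory page, so num_grefs = 1 + 256 = 257;
 * a backend-allocated buffer of the same size needs only the single
 * directory gref.
 */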

#define xen_page_to_vaddr(page) \
        ((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))

/**
 * Unmap the buffer previously mapped with grant references
 * provided by the backend.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
{
        struct gnttab_unmap_grant_ref *unmap_ops;
        int i, ret;

        if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
                return 0;

        unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
                            GFP_KERNEL);
        if (!unmap_ops)
                return -ENOMEM;

        for (i = 0; i < buf->num_pages; i++) {
                phys_addr_t addr;

                addr = xen_page_to_vaddr(buf->pages[i]);
                gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
                                    buf->backend_map_handles[i]);
        }

        ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
                                buf->num_pages);

        for (i = 0; i < buf->num_pages; i++) {
                if (unlikely(unmap_ops[i].status != GNTST_okay))
                        dev_err(&buf->xb_dev->dev,
                                "Failed to unmap page %d: %d\n",
                                i, unmap_ops[i].status);
        }

        if (ret)
                dev_err(&buf->xb_dev->dev,
                        "Failed to unmap grant references, ret %d", ret);

        kfree(unmap_ops);
        kfree(buf->backend_map_handles);
        buf->backend_map_handles = NULL;
        return ret;
}

/**
 * Map the buffer with grant references provided by the backend.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int backend_map(struct xen_front_pgdir_shbuf *buf)
{
        struct gnttab_map_grant_ref *map_ops = NULL;
        unsigned char *ptr;
        int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

        map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
        if (!map_ops)
                return -ENOMEM;

        buf->backend_map_handles = kcalloc(buf->num_pages,
                                           sizeof(*buf->backend_map_handles),
                                           GFP_KERNEL);
        if (!buf->backend_map_handles) {
                kfree(map_ops);
                return -ENOMEM;
        }

        /*
         * Read the page directory to get grefs from the backend: for an
         * external buffer we only allocate buf->grefs for the page
         * directory, so buf->num_grefs holds the number of pages in the
         * page directory itself.
         */
        ptr = buf->directory;
        grefs_left = buf->num_pages;
        cur_page = 0;
        for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
                struct xen_page_directory *page_dir =
                        (struct xen_page_directory *)ptr;
                int to_copy = XEN_NUM_GREFS_PER_PAGE;

                if (to_copy > grefs_left)
                        to_copy = grefs_left;

                for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
                        phys_addr_t addr;

                        addr = xen_page_to_vaddr(buf->pages[cur_page]);
                        gnttab_set_map_op(&map_ops[cur_page], addr,
                                          GNTMAP_host_map,
                                          page_dir->gref[cur_gref],
                                          buf->xb_dev->otherend_id);
                        cur_page++;
                }

                grefs_left -= to_copy;
                ptr += PAGE_SIZE;
        }
        ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

        /* Save handles even if an error occurred, so we can unmap. */
        for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
                if (likely(map_ops[cur_page].status == GNTST_okay)) {
                        buf->backend_map_handles[cur_page] =
                                map_ops[cur_page].handle;
                } else {
                        buf->backend_map_handles[cur_page] =
                                INVALID_GRANT_HANDLE;
                        if (!ret)
                                ret = -ENXIO;
                        dev_err(&buf->xb_dev->dev,
                                "Failed to map page %d: %d\n",
                                cur_page, map_ops[cur_page].status);
                }
        }

        if (ret) {
                dev_err(&buf->xb_dev->dev,
                        "Failed to map grant references, ret %d", ret);
                backend_unmap(buf);
        }

        kfree(map_ops);
        return ret;
}

/**
 * Fill the page directory with grant references to the pages of the
 * page directory itself.
 *
 * The grant references to the buffer pages are provided by the
 * backend in this case.
 *
 * \param buf shared buffer.
 */
static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
        struct xen_page_directory *page_dir;
        unsigned char *ptr;
        int i, num_pages_dir;

        ptr = buf->directory;
        num_pages_dir = get_num_pages_dir(buf);

        /* Fill only grefs for the page directory itself. */
        for (i = 0; i < num_pages_dir - 1; i++) {
                page_dir = (struct xen_page_directory *)ptr;

                page_dir->gref_dir_next_page = buf->grefs[i + 1];
                ptr += PAGE_SIZE;
        }
        /* The last page must say there are no more pages. */
        page_dir = (struct xen_page_directory *)ptr;
        page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
}

/**
 * Fill the page directory with grant references to the pages of the
 * page directory and the buffer we share with the backend.
 *
 * \param buf shared buffer.
 */
static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
        unsigned char *ptr;
        int cur_gref, grefs_left, to_copy, i, num_pages_dir;

        ptr = buf->directory;
        num_pages_dir = get_num_pages_dir(buf);

        /*
         * While copying, skip the grefs at the start: they are for the
         * pages granted for the page directory itself.
         */
        cur_gref = num_pages_dir;
        grefs_left = buf->num_pages;
        for (i = 0; i < num_pages_dir; i++) {
                struct xen_page_directory *page_dir =
                        (struct xen_page_directory *)ptr;

                if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
                        to_copy = grefs_left;
                        page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
                } else {
                        to_copy = XEN_NUM_GREFS_PER_PAGE;
                        page_dir->gref_dir_next_page = buf->grefs[i + 1];
                }
                memcpy(&page_dir->gref, &buf->grefs[cur_gref],
                       to_copy * sizeof(grant_ref_t));
                ptr += PAGE_SIZE;
                grefs_left -= to_copy;
                cur_gref += to_copy;
        }
}
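
/*
 * Note on the buf->grefs layout for frontend-allocated buffers (as
 * built by grant_references() below): the grefs of the page directory
 * pages come first, immediately followed by the grefs of the buffer
 * pages, i.e. grefs[0..num_pages_dir - 1] describe the directory and
 * grefs[num_pages_dir..] describe the data pages.
 */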

/**
 * Grant references to the frontend's buffer pages.
 *
 * These will be shared with the backend, so it can
 * access the buffer's data.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
                                       grant_ref_t *priv_gref_head,
                                       int gref_idx)
{
        int i, cur_ref, otherend_id;

        otherend_id = buf->xb_dev->otherend_id;
        for (i = 0; i < buf->num_pages; i++) {
                cur_ref = gnttab_claim_grant_reference(priv_gref_head);
                if (cur_ref < 0)
                        return cur_ref;

                gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
                                                xen_page_to_gfn(buf->pages[i]),
                                                0);
                buf->grefs[gref_idx++] = cur_ref;
        }
        return 0;
}

/**
 * Grant all the references needed to share the buffer.
 *
 * Grant references to the page directory pages and, if
 * needed, also to the pages of the shared buffer data.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int grant_references(struct xen_front_pgdir_shbuf *buf)
{
        grant_ref_t priv_gref_head;
        int ret, i, j, cur_ref;
        int otherend_id, num_pages_dir;

        ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
        if (ret < 0) {
                dev_err(&buf->xb_dev->dev,
                        "Cannot allocate grant references\n");
                return ret;
        }

        otherend_id = buf->xb_dev->otherend_id;
        j = 0;
        num_pages_dir = get_num_pages_dir(buf);
        for (i = 0; i < num_pages_dir; i++) {
                unsigned long frame;

                cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
                if (cur_ref < 0)
                        return cur_ref;

                frame = xen_page_to_gfn(virt_to_page(buf->directory +
                                                     PAGE_SIZE * i));
                gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
                buf->grefs[j++] = cur_ref;
        }

        if (buf->ops->grant_refs_for_buffer) {
                ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
                if (ret)
                        return ret;
        }

        gnttab_free_grant_references(priv_gref_head);
        return 0;
}

/**
 * Allocate all required structures to manage the shared buffer.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
{
        buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
        if (!buf->grefs)
                return -ENOMEM;

        buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
        if (!buf->directory)
                return -ENOMEM;

        return 0;
}

/*
 * For backend allocated buffers we don't need grant_refs_for_buffer
 * as those grant references are allocated on the backend side.
 */
static const struct xen_front_pgdir_shbuf_ops backend_ops = {
        .calc_num_grefs = backend_calc_num_grefs,
        .fill_page_dir = backend_fill_page_dir,
        .map = backend_map,
        .unmap = backend_unmap
};

/*
 * For locally granted references we do not need to map/unmap
 * the references.
 */
static const struct xen_front_pgdir_shbuf_ops local_ops = {
        .calc_num_grefs = guest_calc_num_grefs,
        .fill_page_dir = guest_fill_page_dir,
        .grant_refs_for_buffer = guest_grant_refs_for_buffer,
};

/**
 * Allocate a new instance of a shared buffer.
 *
 * \param cfg configuration to be used while allocating a new shared buffer.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
{
        struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
        int ret;

        if (cfg->be_alloc)
                buf->ops = &backend_ops;
        else
                buf->ops = &local_ops;
        buf->xb_dev = cfg->xb_dev;
        buf->num_pages = cfg->num_pages;
        buf->pages = cfg->pages;

        buf->ops->calc_num_grefs(buf);

        ret = alloc_storage(buf);
        if (ret)
                goto fail;

        ret = grant_references(buf);
        if (ret)
                goto fail;

        buf->ops->fill_page_dir(buf);

        return 0;

fail:
        xen_front_pgdir_shbuf_free(buf);
        return ret;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);
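
/*
 * Minimal usage sketch (illustrative only: xb_dev, pages and num_pages
 * are assumed to be provided by the calling frontend driver, and error
 * handling is elided):
 *
 *      struct xen_front_pgdir_shbuf shbuf = {};
 *      struct xen_front_pgdir_shbuf_cfg cfg = {
 *              .xb_dev = xb_dev,          // frontend's xenbus device
 *              .num_pages = num_pages,    // number of pages to share
 *              .pages = pages,            // struct page *pages[num_pages]
 *              .pgdir = &shbuf,
 *              .be_alloc = false,         // frontend-allocated buffer
 *      };
 *
 *      if (xen_front_pgdir_shbuf_alloc(&cfg))
 *              goto fail;
 *      // Publish the directory start gref to the backend, e.g. via
 *      // XenStore; for be_alloc buffers then map the backend's grants
 *      // (a no-op for frontend-allocated buffers):
 *      gref = xen_front_pgdir_shbuf_get_dir_start(&shbuf);
 *      xen_front_pgdir_shbuf_map(&shbuf);
 *      ...
 *      xen_front_pgdir_shbuf_unmap(&shbuf);
 *      xen_front_pgdir_shbuf_free(&shbuf);
 */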

MODULE_DESCRIPTION("Xen frontend/backend page directory based shared buffer handling");
MODULE_AUTHOR("Oleksandr Andrushchenko");
MODULE_LICENSE("GPL");