// SPDX-License-Identifier: GPL-2.0 OR MIT

/*
 * Xen frontend/backend page directory based shared buffer
 * helper module.
 *
 * Copyright (C) 2018 EPAM Systems Inc.
 *
 * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>

#include <asm/xen/hypervisor.h>
#include <xen/balloon.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/ring.h>

#include <xen/xen-front-pgdir-shbuf.h>

#ifndef GRANT_INVALID_REF
/*
 * FIXME: usage of grant reference 0 as an invalid grant reference:
 * grant reference 0 is valid, but never exposed to a PV driver,
 * because it is already in use/reserved by the PV console.
 */
#define GRANT_INVALID_REF	0
#endif

/**
 * This structure describes the layout of a shared page
 * that contains grant references to the pages of the shared
 * buffer. This structure is common to many Xen para-virtualized
 * protocols at include/xen/interface/io/.
 */
struct xen_page_directory {
	grant_ref_t gref_dir_next_page;
	grant_ref_t gref[1]; /* Variable length */
};

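/*
 * Editor's illustration (not part of the original file): the page
 * directory forms a singly linked list of PAGE_SIZE pages, each
 * starting with the header above, e.g. for a two-page directory:
 *
 *	dir page 0: [gref_dir_next_page -> dir page 1][gref 0 .. N-1]
 *	dir page 1: [gref_dir_next_page = GRANT_INVALID_REF][gref N ..]
 */
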
/**
 * Shared buffer ops which are differently implemented
 * depending on the allocation mode, e.g. if the buffer
 * is allocated by the corresponding backend or frontend.
 * Some of the operations are optional.
 */
struct xen_front_pgdir_shbuf_ops {
	/*
	 * Calculate the number of grefs required to handle this buffer,
	 * e.g. whether grefs are required for the page directory only
	 * or for the buffer pages as well.
	 */
	void (*calc_num_grefs)(struct xen_front_pgdir_shbuf *buf);

	/* Fill the page directory according to the para-virtualized protocol. */
	void (*fill_page_dir)(struct xen_front_pgdir_shbuf *buf);

	/* Claim grant references for the pages of the buffer. */
	int (*grant_refs_for_buffer)(struct xen_front_pgdir_shbuf *buf,
				     grant_ref_t *priv_gref_head, int gref_idx);

	/* Map grant references of the buffer. */
	int (*map)(struct xen_front_pgdir_shbuf *buf);

	/* Unmap grant references of the buffer. */
	int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};

/**
 * Get the granted reference to the very first page of the
 * page directory. Usually this is passed to the backend,
 * so it can find/fill the grant references to the buffer's
 * pages.
 *
 * \param buf shared buffer whose page directory is of interest.
 * \return granted reference to the very first page of the
 * page directory.
 */
grant_ref_t
xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
{
	if (!buf->grefs)
		return GRANT_INVALID_REF;

	return buf->grefs[0];
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);

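/*
 * Example (editor's sketch, not part of the original file): a frontend
 * typically publishes this gref to the backend via XenStore; the node
 * name below is hypothetical and protocol specific, error handling is
 * omitted:
 *
 *	grant_ref_t gref = xen_front_pgdir_shbuf_get_dir_start(buf);
 *
 *	if (gref != GRANT_INVALID_REF)
 *		xenbus_printf(XBT_NIL, xb_dev->nodename,
 *			      "buf-gref", "%u", gref);
 */
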
/**
 * Map granted references of the shared buffer.
 *
 * Depending on the shared buffer mode of allocation
 * (be_alloc flag) this can either do nothing (for buffers
 * shared by the frontend itself) or map the provided granted
 * references onto the backing storage (buf->pages).
 *
 * \param buf shared buffer whose grants are to be mapped.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->ops && buf->ops->map)
		return buf->ops->map(buf);

	/* No need to map own grant references. */
	return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);

/**
 * Unmap granted references of the shared buffer.
 *
 * Depending on the shared buffer mode of allocation
 * (be_alloc flag) this can either do nothing (for buffers
 * shared by the frontend itself) or unmap the provided granted
 * references.
 *
 * \param buf shared buffer whose grants are to be unmapped.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->ops && buf->ops->unmap)
		return buf->ops->unmap(buf);

	/* No need to unmap own grant references. */
	return 0;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);

/**
 * Free all the resources of the shared buffer.
 *
 * \param buf shared buffer whose resources are to be freed.
 */
void xen_front_pgdir_shbuf_free(struct xen_front_pgdir_shbuf *buf)
{
	if (buf->grefs) {
		int i;

		for (i = 0; i < buf->num_grefs; i++)
			if (buf->grefs[i] != GRANT_INVALID_REF)
				gnttab_end_foreign_access(buf->grefs[i],
							  0, 0UL);
	}
	kfree(buf->grefs);
	kfree(buf->directory);
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);

/*
 * Number of grefs a page can hold with respect to the
 * struct xen_page_directory header.
 */
#define XEN_NUM_GREFS_PER_PAGE ((PAGE_SIZE - \
				 offsetof(struct xen_page_directory, \
					  gref)) / sizeof(grant_ref_t))

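/*
 * Worked example (editor's note): with PAGE_SIZE == 4096 and
 * sizeof(grant_ref_t) == 4, one directory page holds
 * (4096 - 4) / 4 = 1023 grefs, so e.g. a 4 MiB buffer (1024 pages)
 * needs DIV_ROUND_UP(1024, 1023) = 2 directory pages.
 */
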
/**
 * Get the number of pages the page directory itself consumes.
 *
 * \param buf shared buffer.
 */
static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
{
	return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}

/**
 * Calculate the number of grant references needed to share the buffer
 * and its pages when the backend allocates the buffer.
 *
 * \param buf shared buffer.
 */
static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
	/* Only for pages the page directory consumes itself. */
	buf->num_grefs = get_num_pages_dir(buf);
}

/**
 * Calculate the number of grant references needed to share the buffer
 * and its pages when the frontend allocates the buffer.
 *
 * \param buf shared buffer.
 */
static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
{
	/*
	 * Number of pages the page directory consumes itself
	 * plus grefs for the buffer pages.
	 */
	buf->num_grefs = get_num_pages_dir(buf) + buf->num_pages;
}

#define xen_page_to_vaddr(page) \
	((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))

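/*
 * Editor's note: page_to_xen_pfn() (rather than page_to_pfn()) is used
 * here so the conversion also behaves on configurations where the
 * kernel page size differs from Xen's 4 KiB grant granularity, e.g.
 * Arm kernels built with 64 KiB pages.
 */
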
/**
 * Unmap the buffer previously mapped with grant references
 * provided by the backend.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
{
	struct gnttab_unmap_grant_ref *unmap_ops;
	int i, ret;

	if (!buf->pages || !buf->backend_map_handles || !buf->grefs)
		return 0;

	unmap_ops = kcalloc(buf->num_pages, sizeof(*unmap_ops),
			    GFP_KERNEL);
	if (!unmap_ops)
		return -ENOMEM;

	for (i = 0; i < buf->num_pages; i++) {
		phys_addr_t addr;

		addr = xen_page_to_vaddr(buf->pages[i]);
		gnttab_set_unmap_op(&unmap_ops[i], addr, GNTMAP_host_map,
				    buf->backend_map_handles[i]);
	}

	ret = gnttab_unmap_refs(unmap_ops, NULL, buf->pages,
				buf->num_pages);

	for (i = 0; i < buf->num_pages; i++) {
		if (unlikely(unmap_ops[i].status != GNTST_okay))
			dev_err(&buf->xb_dev->dev,
				"Failed to unmap page %d: %d\n",
				i, unmap_ops[i].status);
	}

	if (ret)
		dev_err(&buf->xb_dev->dev,
			"Failed to unmap grant references, ret %d", ret);

	kfree(unmap_ops);
	kfree(buf->backend_map_handles);
	buf->backend_map_handles = NULL;
	return ret;
}

/**
 * Map the buffer with grant references provided by the backend.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int backend_map(struct xen_front_pgdir_shbuf *buf)
{
	struct gnttab_map_grant_ref *map_ops = NULL;
	unsigned char *ptr;
	int ret, cur_gref, cur_dir_page, cur_page, grefs_left;

	map_ops = kcalloc(buf->num_pages, sizeof(*map_ops), GFP_KERNEL);
	if (!map_ops)
		return -ENOMEM;

	buf->backend_map_handles = kcalloc(buf->num_pages,
					   sizeof(*buf->backend_map_handles),
					   GFP_KERNEL);
	if (!buf->backend_map_handles) {
		kfree(map_ops);
		return -ENOMEM;
	}

	/*
	 * Read the page directory to get grefs from the backend: for an
	 * externally allocated buffer we only allocate buf->grefs for the
	 * page directory, so buf->num_grefs holds the number of pages in
	 * the page directory itself.
	 */
	ptr = buf->directory;
	grefs_left = buf->num_pages;
	cur_page = 0;
	for (cur_dir_page = 0; cur_dir_page < buf->num_grefs; cur_dir_page++) {
		struct xen_page_directory *page_dir =
			(struct xen_page_directory *)ptr;
		int to_copy = XEN_NUM_GREFS_PER_PAGE;

		if (to_copy > grefs_left)
			to_copy = grefs_left;

		for (cur_gref = 0; cur_gref < to_copy; cur_gref++) {
			phys_addr_t addr;

			addr = xen_page_to_vaddr(buf->pages[cur_page]);
			gnttab_set_map_op(&map_ops[cur_page], addr,
					  GNTMAP_host_map,
					  page_dir->gref[cur_gref],
					  buf->xb_dev->otherend_id);
			cur_page++;
		}

		grefs_left -= to_copy;
		ptr += PAGE_SIZE;
	}
	ret = gnttab_map_refs(map_ops, NULL, buf->pages, buf->num_pages);

	/* Save handles even on error, so we can unmap. */
	for (cur_page = 0; cur_page < buf->num_pages; cur_page++) {
		buf->backend_map_handles[cur_page] = map_ops[cur_page].handle;
		if (unlikely(map_ops[cur_page].status != GNTST_okay))
			dev_err(&buf->xb_dev->dev,
				"Failed to map page %d: %d\n",
				cur_page, map_ops[cur_page].status);
	}

	if (ret) {
		dev_err(&buf->xb_dev->dev,
			"Failed to map grant references, ret %d", ret);
		backend_unmap(buf);
	}

	kfree(map_ops);
	return ret;
}

/**
 * Fill the page directory with grant references to the pages of the
 * page directory itself.
 *
 * The grant references to the buffer pages are provided by the
 * backend in this case.
 *
 * \param buf shared buffer.
 */
static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
	struct xen_page_directory *page_dir;
	unsigned char *ptr;
	int i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/* Fill only grefs for the page directory itself. */
	for (i = 0; i < num_pages_dir - 1; i++) {
		page_dir = (struct xen_page_directory *)ptr;

		page_dir->gref_dir_next_page = buf->grefs[i + 1];
		ptr += PAGE_SIZE;
	}
	/* The last page must say there are no more pages. */
	page_dir = (struct xen_page_directory *)ptr;
	page_dir->gref_dir_next_page = GRANT_INVALID_REF;
}

/**
 * Fill the page directory with grant references to the pages of the
 * page directory and the buffer we share with the backend.
 *
 * \param buf shared buffer.
 */
static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
{
	unsigned char *ptr;
	int cur_gref, grefs_left, to_copy, i, num_pages_dir;

	ptr = buf->directory;
	num_pages_dir = get_num_pages_dir(buf);

	/*
	 * While copying, skip grefs at the start: they are for pages
	 * granted for the page directory itself.
	 */
	cur_gref = num_pages_dir;
	grefs_left = buf->num_pages;
	for (i = 0; i < num_pages_dir; i++) {
		struct xen_page_directory *page_dir =
			(struct xen_page_directory *)ptr;

		if (grefs_left <= XEN_NUM_GREFS_PER_PAGE) {
			to_copy = grefs_left;
			page_dir->gref_dir_next_page = GRANT_INVALID_REF;
		} else {
			to_copy = XEN_NUM_GREFS_PER_PAGE;
			page_dir->gref_dir_next_page = buf->grefs[i + 1];
		}
		memcpy(&page_dir->gref, &buf->grefs[cur_gref],
		       to_copy * sizeof(grant_ref_t));
		ptr += PAGE_SIZE;
		grefs_left -= to_copy;
		cur_gref += to_copy;
	}
}

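/*
 * Editor's note (continuing the worked example above): for a
 * frontend-allocated 1024-page buffer, buf->grefs[0..1] reference the
 * two directory pages and buf->grefs[2..1025] the buffer pages, so
 * directory page 0 carries grefs for buffer pages 0..1022 and links to
 * directory page 1, which carries the last gref and terminates the list.
 */
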
/**
 * Grant references to the frontend's buffer pages.
 *
 * These will be shared with the backend, so it can
 * access the buffer's data.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
				       grant_ref_t *priv_gref_head,
				       int gref_idx)
{
	int i, cur_ref, otherend_id;

	otherend_id = buf->xb_dev->otherend_id;
	for (i = 0; i < buf->num_pages; i++) {
		cur_ref = gnttab_claim_grant_reference(priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		gnttab_grant_foreign_access_ref(cur_ref, otherend_id,
						xen_page_to_gfn(buf->pages[i]),
						0);
		buf->grefs[gref_idx++] = cur_ref;
	}
	return 0;
}

/**
 * Grant all the references needed to share the buffer.
 *
 * Grant references to the page directory pages and, if
 * needed, also to the pages of the shared buffer data.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int grant_references(struct xen_front_pgdir_shbuf *buf)
{
	grant_ref_t priv_gref_head;
	int ret, i, j, cur_ref;
	int otherend_id, num_pages_dir;

	ret = gnttab_alloc_grant_references(buf->num_grefs, &priv_gref_head);
	if (ret < 0) {
		dev_err(&buf->xb_dev->dev,
			"Cannot allocate grant references\n");
		return ret;
	}

	otherend_id = buf->xb_dev->otherend_id;
	j = 0;
	num_pages_dir = get_num_pages_dir(buf);
	for (i = 0; i < num_pages_dir; i++) {
		unsigned long frame;

		cur_ref = gnttab_claim_grant_reference(&priv_gref_head);
		if (cur_ref < 0)
			return cur_ref;

		frame = xen_page_to_gfn(virt_to_page(buf->directory +
						     PAGE_SIZE * i));
		gnttab_grant_foreign_access_ref(cur_ref, otherend_id, frame, 0);
		buf->grefs[j++] = cur_ref;
	}

	if (buf->ops->grant_refs_for_buffer) {
		ret = buf->ops->grant_refs_for_buffer(buf, &priv_gref_head, j);
		if (ret)
			return ret;
	}

	gnttab_free_grant_references(priv_gref_head);
	return 0;
}

/**
 * Allocate all required structures to manage the shared buffer.
 *
 * \param buf shared buffer.
 * \return zero on success or a negative number on failure.
 */
static int alloc_storage(struct xen_front_pgdir_shbuf *buf)
{
	buf->grefs = kcalloc(buf->num_grefs, sizeof(*buf->grefs), GFP_KERNEL);
	if (!buf->grefs)
		return -ENOMEM;

	buf->directory = kcalloc(get_num_pages_dir(buf), PAGE_SIZE, GFP_KERNEL);
	if (!buf->directory)
		return -ENOMEM;

	return 0;
}

/*
 * For backend allocated buffers we don't need grant_refs_for_buffer,
 * as those grant references are allocated on the backend side.
 */
static const struct xen_front_pgdir_shbuf_ops backend_ops = {
	.calc_num_grefs = backend_calc_num_grefs,
	.fill_page_dir = backend_fill_page_dir,
	.map = backend_map,
	.unmap = backend_unmap
};

/*
 * For locally granted references we do not need to map/unmap
 * the references.
 */
static const struct xen_front_pgdir_shbuf_ops local_ops = {
	.calc_num_grefs = guest_calc_num_grefs,
	.fill_page_dir = guest_fill_page_dir,
	.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};

/**
 * Allocate a new instance of a shared buffer.
 *
 * \param cfg configuration to be used while allocating a new shared buffer.
 * \return zero on success or a negative number on failure.
 */
int xen_front_pgdir_shbuf_alloc(struct xen_front_pgdir_shbuf_cfg *cfg)
{
	struct xen_front_pgdir_shbuf *buf = cfg->pgdir;
	int ret;

	if (cfg->be_alloc)
		buf->ops = &backend_ops;
	else
		buf->ops = &local_ops;
	buf->xb_dev = cfg->xb_dev;
	buf->num_pages = cfg->num_pages;
	buf->pages = cfg->pages;

	buf->ops->calc_num_grefs(buf);

	ret = alloc_storage(buf);
	if (ret)
		goto fail;

	ret = grant_references(buf);
	if (ret)
		goto fail;

	buf->ops->fill_page_dir(buf);

	return 0;

fail:
	xen_front_pgdir_shbuf_free(buf);
	return ret;
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_alloc);

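/*
 * Typical usage (editor's sketch, not part of the original file; all
 * local variable names are hypothetical, the cfg fields are taken from
 * this module's API). Note that map() is a no-op for frontend-allocated
 * buffers, so it can be called unconditionally:
 *
 *	struct xen_front_pgdir_shbuf shbuf = {};
 *	struct xen_front_pgdir_shbuf_cfg cfg = {
 *		.xb_dev = xb_dev,
 *		.num_pages = num_pages,
 *		.pages = pages,
 *		.be_alloc = be_alloc,
 *		.pgdir = &shbuf,
 *	};
 *	int ret = xen_front_pgdir_shbuf_alloc(&cfg);
 *
 *	if (!ret)
 *		ret = xen_front_pgdir_shbuf_map(&shbuf);
 *	...
 *	xen_front_pgdir_shbuf_unmap(&shbuf);
 *	xen_front_pgdir_shbuf_free(&shbuf);
 */
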
MODULE_DESCRIPTION("Xen frontend/backend page directory based "
		   "shared buffer handling");
MODULE_AUTHOR("Oleksandr Andrushchenko");
MODULE_LICENSE("GPL");