v3.1
  1/******************************************************************************
  2 * gntdev.c
  3 *
  4 * Device for accessing (in user-space) pages that have been granted by other
  5 * domains.
  6 *
  7 * Copyright (c) 2006-2007, D G Murray.
  8 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
  9 *
 10 * This program is distributed in the hope that it will be useful,
 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13 * GNU General Public License for more details.
 14 *
 15 * You should have received a copy of the GNU General Public License
 16 * along with this program; if not, write to the Free Software
 17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 18 */
 19
 20#undef DEBUG
 21
 22#include <linux/module.h>
 23#include <linux/kernel.h>
 24#include <linux/init.h>
 25#include <linux/miscdevice.h>
 26#include <linux/fs.h>
 27#include <linux/mm.h>
 28#include <linux/mman.h>
 29#include <linux/mmu_notifier.h>
 30#include <linux/types.h>
 31#include <linux/uaccess.h>
 32#include <linux/sched.h>
 33#include <linux/spinlock.h>
 34#include <linux/slab.h>
 35#include <linux/highmem.h>
 36
 37#include <xen/xen.h>
 38#include <xen/grant_table.h>
 39#include <xen/balloon.h>
 40#include <xen/gntdev.h>
 41#include <xen/events.h>
 42#include <asm/xen/hypervisor.h>
 43#include <asm/xen/hypercall.h>
 44#include <asm/xen/page.h>
 45
 46MODULE_LICENSE("GPL");
 47MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
 48	      "Gerd Hoffmann <kraxel@redhat.com>");
 49MODULE_DESCRIPTION("User-space granted page access driver");
 50
 51static int limit = 1024*1024;
 52module_param(limit, int, 0644);
 53MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
 54		"the gntdev device");
 55
 56static atomic_t pages_mapped = ATOMIC_INIT(0);
 57
 58static int use_ptemod;
 59
 60struct gntdev_priv {
 61	struct list_head maps;
 62	/* lock protects maps from concurrent changes */
 63	spinlock_t lock;
 64	struct mm_struct *mm;
 65	struct mmu_notifier mn;
 66};
 67
 68struct unmap_notify {
 69	int flags;
 70	/* Address relative to the start of the grant_map */
 71	int addr;
 72	int event;
 73};
 74
 75struct grant_map {
 76	struct list_head next;
 77	struct vm_area_struct *vma;
 78	int index;
 79	int count;
 80	int flags;
 81	atomic_t users;
 82	struct unmap_notify notify;
 83	struct ioctl_gntdev_grant_ref *grants;
 84	struct gnttab_map_grant_ref   *map_ops;
 85	struct gnttab_unmap_grant_ref *unmap_ops;
 86	struct page **pages;
 87};
 88
 89static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
 90
 91/* ------------------------------------------------------------------ */
 92
 93static void gntdev_print_maps(struct gntdev_priv *priv,
 94			      char *text, int text_index)
 95{
 96#ifdef DEBUG
 97	struct grant_map *map;
 98
 99	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
100	list_for_each_entry(map, &priv->maps, next)
101		pr_debug("  index %2d, count %2d %s\n",
102		       map->index, map->count,
103		       map->index == text_index && text ? text : "");
104#endif
105}
106
107static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
108{
109	struct grant_map *add;
110	int i;
111
112	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
113	if (NULL == add)
114		return NULL;
115
116	add->grants    = kzalloc(sizeof(add->grants[0])    * count, GFP_KERNEL);
117	add->map_ops   = kzalloc(sizeof(add->map_ops[0])   * count, GFP_KERNEL);
118	add->unmap_ops = kzalloc(sizeof(add->unmap_ops[0]) * count, GFP_KERNEL);
119	add->pages     = kzalloc(sizeof(add->pages[0])     * count, GFP_KERNEL);
120	if (NULL == add->grants    ||
121	    NULL == add->map_ops   ||
122	    NULL == add->unmap_ops ||
123	    NULL == add->pages)
124		goto err;
125
126	if (alloc_xenballooned_pages(count, add->pages))
127		goto err;
128
129	for (i = 0; i < count; i++) {
130		add->map_ops[i].handle = -1;
131		add->unmap_ops[i].handle = -1;
132	}
133
134	add->index = 0;
135	add->count = count;
136	atomic_set(&add->users, 1);
137
138	return add;
139
140err:
141	kfree(add->pages);
142	kfree(add->grants);
143	kfree(add->map_ops);
144	kfree(add->unmap_ops);
145	kfree(add);
146	return NULL;
147}
148
149static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
150{
151	struct grant_map *map;
152
153	list_for_each_entry(map, &priv->maps, next) {
154		if (add->index + add->count < map->index) {
155			list_add_tail(&add->next, &map->next);
156			goto done;
157		}
158		add->index = map->index + map->count;
159	}
160	list_add_tail(&add->next, &priv->maps);
161
162done:
163	gntdev_print_maps(priv, "[new]", add->index);
164}
165
166static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
167		int index, int count)
168{
169	struct grant_map *map;
170
171	list_for_each_entry(map, &priv->maps, next) {
172		if (map->index != index)
173			continue;
174		if (count && map->count != count)
175			continue;
176		return map;
177	}
178	return NULL;
179}
180
181static void gntdev_put_map(struct grant_map *map)
182{
183	if (!map)
184		return;
185
186	if (!atomic_dec_and_test(&map->users))
187		return;
188
189	atomic_sub(map->count, &pages_mapped);
190
191	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
192		notify_remote_via_evtchn(map->notify.event);
193	}
194
195	if (map->pages) {
196		if (!use_ptemod)
197			unmap_grant_pages(map, 0, map->count);
198
199		free_xenballooned_pages(map->count, map->pages);
200	}
201	kfree(map->pages);
202	kfree(map->grants);
203	kfree(map->map_ops);
204	kfree(map->unmap_ops);
205	kfree(map);
206}
207
208/* ------------------------------------------------------------------ */
209
210static int find_grant_ptes(pte_t *pte, pgtable_t token,
211		unsigned long addr, void *data)
212{
213	struct grant_map *map = data;
214	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
215	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
216	u64 pte_maddr;
217
218	BUG_ON(pgnr >= map->count);
219	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
220
221	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
222			  map->grants[pgnr].ref,
223			  map->grants[pgnr].domid);
224	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
225			    -1 /* handle */);
226	return 0;
227}
228
229static int map_grant_pages(struct grant_map *map)
230{
231	int i, err = 0;
232
233	if (!use_ptemod) {
234		/* Note: it could already be mapped */
235		if (map->map_ops[0].handle != -1)
236			return 0;
237		for (i = 0; i < map->count; i++) {
238			unsigned long addr = (unsigned long)
239				pfn_to_kaddr(page_to_pfn(map->pages[i]));
240			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
241				map->grants[i].ref,
242				map->grants[i].domid);
243			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
244				map->flags, -1 /* handle */);
245		}
246	}
247
248	pr_debug("map %d+%d\n", map->index, map->count);
249	err = gnttab_map_refs(map->map_ops, map->pages, map->count);
250	if (err)
251		return err;
252
253	for (i = 0; i < map->count; i++) {
254		if (map->map_ops[i].status)
255			err = -EINVAL;
256		else {
257			BUG_ON(map->map_ops[i].handle == -1);
258			map->unmap_ops[i].handle = map->map_ops[i].handle;
259			pr_debug("map handle=%d\n", map->map_ops[i].handle);
260		}
261	}
262	return err;
263}
264
265static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
266{
267	int i, err = 0;
268
269	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
270		int pgno = (map->notify.addr >> PAGE_SHIFT);
271		if (pgno >= offset && pgno < offset + pages && use_ptemod) {
272			void __user *tmp = (void __user *)
273				map->vma->vm_start + map->notify.addr;
274			err = copy_to_user(tmp, &err, 1);
275			if (err)
276				return -EFAULT;
277			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
278		} else if (pgno >= offset && pgno < offset + pages) {
279			uint8_t *tmp = kmap(map->pages[pgno]);
280			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
281			kunmap(map->pages[pgno]);
282			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
283		}
284	}
285
286	err = gnttab_unmap_refs(map->unmap_ops + offset, map->pages + offset, pages);
287	if (err)
288		return err;
289
290	for (i = 0; i < pages; i++) {
291		if (map->unmap_ops[offset+i].status)
292			err = -EINVAL;
293		pr_debug("unmap handle=%d st=%d\n",
294			map->unmap_ops[offset+i].handle,
295			map->unmap_ops[offset+i].status);
296		map->unmap_ops[offset+i].handle = -1;
297	}
298	return err;
299}
300
301static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
302{
303	int range, err = 0;
304
305	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
306
307	/* It is possible the requested range will have a "hole" where we
308	 * already unmapped some of the grants. Only unmap valid ranges.
309	 */
310	while (pages && !err) {
311		while (pages && map->unmap_ops[offset].handle == -1) {
312			offset++;
313			pages--;
314		}
315		range = 0;
316		while (range < pages) {
317			if (map->unmap_ops[offset+range].handle == -1) {
318				range--;
319				break;
320			}
321			range++;
322		}
323		err = __unmap_grant_pages(map, offset, range);
324		offset += range;
325		pages -= range;
326	}
327
328	return err;
329}
330
331/* ------------------------------------------------------------------ */
332
333static void gntdev_vma_open(struct vm_area_struct *vma)
334{
335	struct grant_map *map = vma->vm_private_data;
336
337	pr_debug("gntdev_vma_open %p\n", vma);
338	atomic_inc(&map->users);
339}
340
341static void gntdev_vma_close(struct vm_area_struct *vma)
342{
343	struct grant_map *map = vma->vm_private_data;
344
345	pr_debug("gntdev_vma_close %p\n", vma);
346	map->vma = NULL;
347	vma->vm_private_data = NULL;
348	gntdev_put_map(map);
349}
350
351static struct vm_operations_struct gntdev_vmops = {
352	.open = gntdev_vma_open,
353	.close = gntdev_vma_close,
354};
355
356/* ------------------------------------------------------------------ */
357
358static void mn_invl_range_start(struct mmu_notifier *mn,
359				struct mm_struct *mm,
360				unsigned long start, unsigned long end)
361{
362	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
363	struct grant_map *map;
364	unsigned long mstart, mend;
365	int err;
366
367	spin_lock(&priv->lock);
368	list_for_each_entry(map, &priv->maps, next) {
369		if (!map->vma)
370			continue;
371		if (map->vma->vm_start >= end)
372			continue;
373		if (map->vma->vm_end <= start)
374			continue;
375		mstart = max(start, map->vma->vm_start);
376		mend   = min(end,   map->vma->vm_end);
377		pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
378				map->index, map->count,
379				map->vma->vm_start, map->vma->vm_end,
380				start, end, mstart, mend);
381		err = unmap_grant_pages(map,
382					(mstart - map->vma->vm_start) >> PAGE_SHIFT,
383					(mend - mstart) >> PAGE_SHIFT);
384		WARN_ON(err);
385	}
386	spin_unlock(&priv->lock);
387}
388
389static void mn_invl_page(struct mmu_notifier *mn,
390			 struct mm_struct *mm,
391			 unsigned long address)
392{
393	mn_invl_range_start(mn, mm, address, address + PAGE_SIZE);
394}
395
396static void mn_release(struct mmu_notifier *mn,
397		       struct mm_struct *mm)
398{
399	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
400	struct grant_map *map;
401	int err;
402
403	spin_lock(&priv->lock);
404	list_for_each_entry(map, &priv->maps, next) {
405		if (!map->vma)
406			continue;
407		pr_debug("map %d+%d (%lx %lx)\n",
408				map->index, map->count,
409				map->vma->vm_start, map->vma->vm_end);
410		err = unmap_grant_pages(map, /* offset */ 0, map->count);
411		WARN_ON(err);
412	}
413	spin_unlock(&priv->lock);
414}
415
416struct mmu_notifier_ops gntdev_mmu_ops = {
417	.release                = mn_release,
418	.invalidate_page        = mn_invl_page,
419	.invalidate_range_start = mn_invl_range_start,
420};
421
422/* ------------------------------------------------------------------ */
423
424static int gntdev_open(struct inode *inode, struct file *flip)
425{
426	struct gntdev_priv *priv;
427	int ret = 0;
428
429	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
430	if (!priv)
431		return -ENOMEM;
432
433	INIT_LIST_HEAD(&priv->maps);
434	spin_lock_init(&priv->lock);
435
436	if (use_ptemod) {
437		priv->mm = get_task_mm(current);
438		if (!priv->mm) {
439			kfree(priv);
440			return -ENOMEM;
441		}
442		priv->mn.ops = &gntdev_mmu_ops;
443		ret = mmu_notifier_register(&priv->mn, priv->mm);
444		mmput(priv->mm);
445	}
446
447	if (ret) {
448		kfree(priv);
449		return ret;
450	}
451
452	flip->private_data = priv;
453	pr_debug("priv %p\n", priv);
454
455	return 0;
456}
457
458static int gntdev_release(struct inode *inode, struct file *flip)
459{
460	struct gntdev_priv *priv = flip->private_data;
461	struct grant_map *map;
462
463	pr_debug("priv %p\n", priv);
464
465	spin_lock(&priv->lock);
466	while (!list_empty(&priv->maps)) {
467		map = list_entry(priv->maps.next, struct grant_map, next);
468		list_del(&map->next);
469		gntdev_put_map(map);
470	}
471	spin_unlock(&priv->lock);
472
473	if (use_ptemod)
474		mmu_notifier_unregister(&priv->mn, priv->mm);
475	kfree(priv);
476	return 0;
477}
478
479static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
480				       struct ioctl_gntdev_map_grant_ref __user *u)
481{
482	struct ioctl_gntdev_map_grant_ref op;
483	struct grant_map *map;
484	int err;
485
486	if (copy_from_user(&op, u, sizeof(op)) != 0)
487		return -EFAULT;
488	pr_debug("priv %p, add %d\n", priv, op.count);
489	if (unlikely(op.count <= 0))
490		return -EINVAL;
491
492	err = -ENOMEM;
493	map = gntdev_alloc_map(priv, op.count);
494	if (!map)
495		return err;
496
497	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
498		pr_debug("can't map: over limit\n");
499		gntdev_put_map(map);
500		return err;
501	}
502
503	if (copy_from_user(map->grants, &u->refs,
504			   sizeof(map->grants[0]) * op.count) != 0) {
505		gntdev_put_map(map);
506		return err;
507	}
508
509	spin_lock(&priv->lock);
510	gntdev_add_map(priv, map);
511	op.index = map->index << PAGE_SHIFT;
512	spin_unlock(&priv->lock);
513
514	if (copy_to_user(u, &op, sizeof(op)) != 0)
515		return -EFAULT;
516
517	return 0;
518}
519
520static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
521					 struct ioctl_gntdev_unmap_grant_ref __user *u)
522{
523	struct ioctl_gntdev_unmap_grant_ref op;
524	struct grant_map *map;
525	int err = -ENOENT;
526
527	if (copy_from_user(&op, u, sizeof(op)) != 0)
528		return -EFAULT;
529	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
530
531	spin_lock(&priv->lock);
532	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
533	if (map) {
534		list_del(&map->next);
535		gntdev_put_map(map);
536		err = 0;
537	}
538	spin_unlock(&priv->lock);
539	return err;
540}
541
542static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
543					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
544{
545	struct ioctl_gntdev_get_offset_for_vaddr op;
546	struct vm_area_struct *vma;
547	struct grant_map *map;
548
549	if (copy_from_user(&op, u, sizeof(op)) != 0)
550		return -EFAULT;
551	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
552
553	vma = find_vma(current->mm, op.vaddr);
554	if (!vma || vma->vm_ops != &gntdev_vmops)
555		return -EINVAL;
556
557	map = vma->vm_private_data;
558	if (!map)
559		return -EINVAL;
560
561	op.offset = map->index << PAGE_SHIFT;
562	op.count = map->count;
563
564	if (copy_to_user(u, &op, sizeof(op)) != 0)
565		return -EFAULT;
566	return 0;
567}
568
569static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
570{
571	struct ioctl_gntdev_unmap_notify op;
572	struct grant_map *map;
573	int rc;
574
575	if (copy_from_user(&op, u, sizeof(op)))
576		return -EFAULT;
577
578	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
579		return -EINVAL;
580
581	spin_lock(&priv->lock);
582
583	list_for_each_entry(map, &priv->maps, next) {
584		uint64_t begin = map->index << PAGE_SHIFT;
585		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
586		if (op.index >= begin && op.index < end)
587			goto found;
588	}
589	rc = -ENOENT;
590	goto unlock_out;
591
592 found:
593	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
594			(map->flags & GNTMAP_readonly)) {
595		rc = -EINVAL;
596		goto unlock_out;
597	}
598
599	map->notify.flags = op.action;
600	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
601	map->notify.event = op.event_channel_port;
602	rc = 0;
603 unlock_out:
604	spin_unlock(&priv->lock);
605	return rc;
606}
607
608static long gntdev_ioctl(struct file *flip,
609			 unsigned int cmd, unsigned long arg)
610{
611	struct gntdev_priv *priv = flip->private_data;
612	void __user *ptr = (void __user *)arg;
613
614	switch (cmd) {
615	case IOCTL_GNTDEV_MAP_GRANT_REF:
616		return gntdev_ioctl_map_grant_ref(priv, ptr);
617
618	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
619		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
620
621	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
622		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
623
624	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
625		return gntdev_ioctl_notify(priv, ptr);
626
627	default:
628		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
629		return -ENOIOCTLCMD;
630	}
631
632	return 0;
633}
634
635static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
636{
637	struct gntdev_priv *priv = flip->private_data;
638	int index = vma->vm_pgoff;
639	int count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
640	struct grant_map *map;
641	int i, err = -EINVAL;
642
643	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
644		return -EINVAL;
645
646	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
647			index, count, vma->vm_start, vma->vm_pgoff);
648
649	spin_lock(&priv->lock);
650	map = gntdev_find_map_index(priv, index, count);
651	if (!map)
652		goto unlock_out;
653	if (use_ptemod && map->vma)
654		goto unlock_out;
655	if (use_ptemod && priv->mm != vma->vm_mm) {
656		printk(KERN_WARNING "Huh? Other mm?\n");
657		goto unlock_out;
658	}
659
660	atomic_inc(&map->users);
661
662	vma->vm_ops = &gntdev_vmops;
663
664	vma->vm_flags |= VM_RESERVED|VM_DONTEXPAND;
665
666	if (use_ptemod)
667		vma->vm_flags |= VM_DONTCOPY|VM_PFNMAP;
668
669	vma->vm_private_data = map;
670
671	if (use_ptemod)
672		map->vma = vma;
673
674	if (map->flags) {
675		if ((vma->vm_flags & VM_WRITE) &&
676				(map->flags & GNTMAP_readonly))
677			goto out_unlock_put;
678	} else {
679		map->flags = GNTMAP_host_map;
680		if (!(vma->vm_flags & VM_WRITE))
681			map->flags |= GNTMAP_readonly;
682	}
683
684	spin_unlock(&priv->lock);
685
686	if (use_ptemod) {
687		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
688					  vma->vm_end - vma->vm_start,
689					  find_grant_ptes, map);
690		if (err) {
691			printk(KERN_WARNING "find_grant_ptes() failure.\n");
692			goto out_put_map;
693		}
694	}
695
696	err = map_grant_pages(map);
697	if (err)
698		goto out_put_map;
699
700	if (!use_ptemod) {
701		for (i = 0; i < count; i++) {
702			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
703				map->pages[i]);
704			if (err)
705				goto out_put_map;
706		}
707	}
708
709	return 0;
710
711unlock_out:
712	spin_unlock(&priv->lock);
713	return err;
714
715out_unlock_put:
716	spin_unlock(&priv->lock);
717out_put_map:
718	if (use_ptemod)
719		map->vma = NULL;
720	gntdev_put_map(map);
721	return err;
722}
723
724static const struct file_operations gntdev_fops = {
725	.owner = THIS_MODULE,
726	.open = gntdev_open,
727	.release = gntdev_release,
728	.mmap = gntdev_mmap,
729	.unlocked_ioctl = gntdev_ioctl
730};
731
732static struct miscdevice gntdev_miscdev = {
733	.minor        = MISC_DYNAMIC_MINOR,
734	.name         = "xen/gntdev",
735	.fops         = &gntdev_fops,
736};
737
738/* ------------------------------------------------------------------ */
739
740static int __init gntdev_init(void)
741{
742	int err;
743
744	if (!xen_domain())
745		return -ENODEV;
746
747	use_ptemod = xen_pv_domain();
748
749	err = misc_register(&gntdev_miscdev);
750	if (err != 0) {
751		printk(KERN_ERR "Could not register gntdev device\n");
752		return err;
753	}
754	return 0;
755}
756
757static void __exit gntdev_exit(void)
758{
759	misc_deregister(&gntdev_miscdev);
760}
761
762module_init(gntdev_init);
763module_exit(gntdev_exit);
764
765/* ------------------------------------------------------------------ */
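
For orientation, here is how a user-space program drives the ioctl surface defined above. This is a minimal sketch, not part of the kernel source: it assumes the uapi structures are installed as <xen/gntdev.h>, a 4 KiB page size, and that `domid` and `ref` were obtained out of band (e.g. via XenStore). Error handling is reduced to the bare minimum.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <xen/gntdev.h>

/* Map one page granted by domain `domid` under grant reference `ref`,
 * touch it, then tear the mapping down again. */
static int demo_map_one(uint32_t domid, uint32_t ref)
{
	struct ioctl_gntdev_map_grant_ref map;
	struct ioctl_gntdev_unmap_grant_ref unmap;
	void *addr;
	int fd = open("/dev/xen/gntdev", O_RDWR);

	if (fd < 0)
		return -1;

	memset(&map, 0, sizeof(map));
	map.count = 1;			/* handled by gntdev_ioctl_map_grant_ref() */
	map.refs[0].domid = domid;
	map.refs[0].ref   = ref;
	if (ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map))
		goto err;

	/* map.index is the file offset that gntdev_mmap() resolves back
	 * to the grant_map via gntdev_find_map_index(). MAP_SHARED is
	 * required for writable mappings (see the VM_WRITE check). */
	addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, map.index);
	if (addr == MAP_FAILED)
		goto err;

	((volatile char *)addr)[0] = 1;	/* touch the shared page */
	munmap(addr, 4096);

	memset(&unmap, 0, sizeof(unmap));
	unmap.index = map.index;
	unmap.count = 1;
	ioctl(fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &unmap);
	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}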
v4.17
   1/******************************************************************************
   2 * gntdev.c
   3 *
   4 * Device for accessing (in user-space) pages that have been granted by other
   5 * domains.
   6 *
   7 * Copyright (c) 2006-2007, D G Murray.
   8 *           (c) 2009 Gerd Hoffmann <kraxel@redhat.com>
   9 *
  10 * This program is distributed in the hope that it will be useful,
  11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  13 * GNU General Public License for more details.
  14 *
  15 * You should have received a copy of the GNU General Public License
  16 * along with this program; if not, write to the Free Software
  17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  18 */
  19
  20#undef DEBUG
  21
  22#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  23
  24#include <linux/module.h>
  25#include <linux/kernel.h>
  26#include <linux/init.h>
  27#include <linux/miscdevice.h>
  28#include <linux/fs.h>
  29#include <linux/mm.h>
  30#include <linux/mman.h>
  31#include <linux/mmu_notifier.h>
  32#include <linux/types.h>
  33#include <linux/uaccess.h>
  34#include <linux/sched.h>
  35#include <linux/sched/mm.h>
  36#include <linux/spinlock.h>
  37#include <linux/slab.h>
  38#include <linux/highmem.h>
  39#include <linux/refcount.h>
  40
  41#include <xen/xen.h>
  42#include <xen/grant_table.h>
  43#include <xen/balloon.h>
  44#include <xen/gntdev.h>
  45#include <xen/events.h>
  46#include <xen/page.h>
  47#include <asm/xen/hypervisor.h>
  48#include <asm/xen/hypercall.h>
  49
  50MODULE_LICENSE("GPL");
  51MODULE_AUTHOR("Derek G. Murray <Derek.Murray@cl.cam.ac.uk>, "
  52	      "Gerd Hoffmann <kraxel@redhat.com>");
  53MODULE_DESCRIPTION("User-space granted page access driver");
  54
  55static int limit = 1024*1024;
  56module_param(limit, int, 0644);
  57MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by "
  58		"the gntdev device");
  59
  60static atomic_t pages_mapped = ATOMIC_INIT(0);
  61
  62static int use_ptemod;
  63#define populate_freeable_maps use_ptemod
  64
  65struct gntdev_priv {
  66	/* maps with visible offsets in the file descriptor */
  67	struct list_head maps;
  68	/* maps that are not visible; will be freed on munmap.
  69	 * Only populated if populate_freeable_maps == 1 */
  70	struct list_head freeable_maps;
  71	/* lock protects maps and freeable_maps */
  72	struct mutex lock;
  73	struct mm_struct *mm;
  74	struct mmu_notifier mn;
  75};
  76
  77struct unmap_notify {
  78	int flags;
  79	/* Address relative to the start of the grant_map */
  80	int addr;
  81	int event;
  82};
  83
  84struct grant_map {
  85	struct list_head next;
  86	struct vm_area_struct *vma;
  87	int index;
  88	int count;
  89	int flags;
  90	refcount_t users;
  91	struct unmap_notify notify;
  92	struct ioctl_gntdev_grant_ref *grants;
  93	struct gnttab_map_grant_ref   *map_ops;
  94	struct gnttab_unmap_grant_ref *unmap_ops;
  95	struct gnttab_map_grant_ref   *kmap_ops;
  96	struct gnttab_unmap_grant_ref *kunmap_ops;
  97	struct page **pages;
  98	unsigned long pages_vm_start;
  99};
 100
 101static int unmap_grant_pages(struct grant_map *map, int offset, int pages);
 102
 103/* ------------------------------------------------------------------ */
 104
 105static void gntdev_print_maps(struct gntdev_priv *priv,
 106			      char *text, int text_index)
 107{
 108#ifdef DEBUG
 109	struct grant_map *map;
 110
 111	pr_debug("%s: maps list (priv %p)\n", __func__, priv);
 112	list_for_each_entry(map, &priv->maps, next)
 113		pr_debug("  index %2d, count %2d %s\n",
 114		       map->index, map->count,
 115		       map->index == text_index && text ? text : "");
 116#endif
 117}
 118
 119static void gntdev_free_map(struct grant_map *map)
 120{
 121	if (map == NULL)
 122		return;
 123
 124	if (map->pages)
 125		gnttab_free_pages(map->count, map->pages);
 126	kfree(map->pages);
 127	kfree(map->grants);
 128	kfree(map->map_ops);
 129	kfree(map->unmap_ops);
 130	kfree(map->kmap_ops);
 131	kfree(map->kunmap_ops);
 132	kfree(map);
 133}
 134
 135static struct grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count)
 136{
 137	struct grant_map *add;
 138	int i;
 139
 140	add = kzalloc(sizeof(struct grant_map), GFP_KERNEL);
 141	if (NULL == add)
 142		return NULL;
 143
 144	add->grants    = kcalloc(count, sizeof(add->grants[0]), GFP_KERNEL);
 145	add->map_ops   = kcalloc(count, sizeof(add->map_ops[0]), GFP_KERNEL);
 146	add->unmap_ops = kcalloc(count, sizeof(add->unmap_ops[0]), GFP_KERNEL);
 147	add->kmap_ops  = kcalloc(count, sizeof(add->kmap_ops[0]), GFP_KERNEL);
 148	add->kunmap_ops = kcalloc(count, sizeof(add->kunmap_ops[0]), GFP_KERNEL);
 149	add->pages     = kcalloc(count, sizeof(add->pages[0]), GFP_KERNEL);
 150	if (NULL == add->grants    ||
 151	    NULL == add->map_ops   ||
 152	    NULL == add->unmap_ops ||
 153	    NULL == add->kmap_ops  ||
 154	    NULL == add->kunmap_ops ||
 155	    NULL == add->pages)
 156		goto err;
 157
 158	if (gnttab_alloc_pages(count, add->pages))
 159		goto err;
 160
 161	for (i = 0; i < count; i++) {
 162		add->map_ops[i].handle = -1;
 163		add->unmap_ops[i].handle = -1;
 164		add->kmap_ops[i].handle = -1;
 165		add->kunmap_ops[i].handle = -1;
 166	}
 167
 168	add->index = 0;
 169	add->count = count;
 170	refcount_set(&add->users, 1);
 171
 172	return add;
 173
 174err:
 175	gntdev_free_map(add);
 176	return NULL;
 177}
 178
 179static void gntdev_add_map(struct gntdev_priv *priv, struct grant_map *add)
 180{
 181	struct grant_map *map;
 182
 183	list_for_each_entry(map, &priv->maps, next) {
 184		if (add->index + add->count < map->index) {
 185			list_add_tail(&add->next, &map->next);
 186			goto done;
 187		}
 188		add->index = map->index + map->count;
 189	}
 190	list_add_tail(&add->next, &priv->maps);
 191
 192done:
 193	gntdev_print_maps(priv, "[new]", add->index);
 194}
 195
 196static struct grant_map *gntdev_find_map_index(struct gntdev_priv *priv,
 197		int index, int count)
 198{
 199	struct grant_map *map;
 200
 201	list_for_each_entry(map, &priv->maps, next) {
 202		if (map->index != index)
 203			continue;
 204		if (count && map->count != count)
 205			continue;
 206		return map;
 207	}
 208	return NULL;
 209}
 210
 211static void gntdev_put_map(struct gntdev_priv *priv, struct grant_map *map)
 212{
 213	if (!map)
 214		return;
 215
 216	if (!refcount_dec_and_test(&map->users))
 217		return;
 218
 219	atomic_sub(map->count, &pages_mapped);
 220
 221	if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) {
 222		notify_remote_via_evtchn(map->notify.event);
 223		evtchn_put(map->notify.event);
 224	}
 225
 226	if (populate_freeable_maps && priv) {
 227		mutex_lock(&priv->lock);
 228		list_del(&map->next);
 229		mutex_unlock(&priv->lock);
 230	}
 231
 232	if (map->pages && !use_ptemod)
 233		unmap_grant_pages(map, 0, map->count);
 234	gntdev_free_map(map);
 235}
 236
 237/* ------------------------------------------------------------------ */
 238
 239static int find_grant_ptes(pte_t *pte, pgtable_t token,
 240		unsigned long addr, void *data)
 241{
 242	struct grant_map *map = data;
 243	unsigned int pgnr = (addr - map->vma->vm_start) >> PAGE_SHIFT;
 244	int flags = map->flags | GNTMAP_application_map | GNTMAP_contains_pte;
 245	u64 pte_maddr;
 246
 247	BUG_ON(pgnr >= map->count);
 248	pte_maddr = arbitrary_virt_to_machine(pte).maddr;
 249
 250	/*
  251	 * Set the PTE as special to force get_user_pages_fast() to fall
 252	 * back to the slow path.  If this is not supported as part of
 253	 * the grant map, it will be done afterwards.
 254	 */
 255	if (xen_feature(XENFEAT_gnttab_map_avail_bits))
 256		flags |= (1 << _GNTMAP_guest_avail0);
 257
 258	gnttab_set_map_op(&map->map_ops[pgnr], pte_maddr, flags,
 259			  map->grants[pgnr].ref,
 260			  map->grants[pgnr].domid);
 261	gnttab_set_unmap_op(&map->unmap_ops[pgnr], pte_maddr, flags,
 262			    -1 /* handle */);
 263	return 0;
 264}
 265
 266#ifdef CONFIG_X86
 267static int set_grant_ptes_as_special(pte_t *pte, pgtable_t token,
 268				     unsigned long addr, void *data)
 269{
 270	set_pte_at(current->mm, addr, pte, pte_mkspecial(*pte));
 271	return 0;
 272}
 273#endif
 274
 275static int map_grant_pages(struct grant_map *map)
 276{
 277	int i, err = 0;
 278
 279	if (!use_ptemod) {
 280		/* Note: it could already be mapped */
 281		if (map->map_ops[0].handle != -1)
 282			return 0;
 283		for (i = 0; i < map->count; i++) {
 284			unsigned long addr = (unsigned long)
 285				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 286			gnttab_set_map_op(&map->map_ops[i], addr, map->flags,
 287				map->grants[i].ref,
 288				map->grants[i].domid);
 289			gnttab_set_unmap_op(&map->unmap_ops[i], addr,
 290				map->flags, -1 /* handle */);
 291		}
 292	} else {
 293		/*
  294		 * Set up the map_ops corresponding to the pte entries pointing
 295		 * to the kernel linear addresses of the struct pages.
 296		 * These ptes are completely different from the user ptes dealt
  297		 * with by find_grant_ptes.
 298		 */
 299		for (i = 0; i < map->count; i++) {
 300			unsigned long address = (unsigned long)
 301				pfn_to_kaddr(page_to_pfn(map->pages[i]));
 302			BUG_ON(PageHighMem(map->pages[i]));
 303
 304			gnttab_set_map_op(&map->kmap_ops[i], address,
 305				map->flags | GNTMAP_host_map,
 306				map->grants[i].ref,
 307				map->grants[i].domid);
 308			gnttab_set_unmap_op(&map->kunmap_ops[i], address,
 309				map->flags | GNTMAP_host_map, -1);
 310		}
 311	}
 312
 313	pr_debug("map %d+%d\n", map->index, map->count);
 314	err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
 315			map->pages, map->count);
 316	if (err)
 317		return err;
 318
 319	for (i = 0; i < map->count; i++) {
 320		if (map->map_ops[i].status) {
 321			err = -EINVAL;
 322			continue;
 323		}
 324
 325		map->unmap_ops[i].handle = map->map_ops[i].handle;
 326		if (use_ptemod)
 327			map->kunmap_ops[i].handle = map->kmap_ops[i].handle;
 328	}
 329	return err;
 330}
 331
 332static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
 333{
 334	int i, err = 0;
 335	struct gntab_unmap_queue_data unmap_data;
 336
 337	if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
 338		int pgno = (map->notify.addr >> PAGE_SHIFT);
 339		if (pgno >= offset && pgno < offset + pages) {
 340			/* No need for kmap, pages are in lowmem */
 341			uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno]));
 342			tmp[map->notify.addr & (PAGE_SIZE-1)] = 0;
 343			map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE;
 344		}
 345	}
 346
 347	unmap_data.unmap_ops = map->unmap_ops + offset;
 348	unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL;
 349	unmap_data.pages = map->pages + offset;
 350	unmap_data.count = pages;
 351
 352	err = gnttab_unmap_refs_sync(&unmap_data);
 353	if (err)
 354		return err;
 355
 356	for (i = 0; i < pages; i++) {
 357		if (map->unmap_ops[offset+i].status)
 358			err = -EINVAL;
 359		pr_debug("unmap handle=%d st=%d\n",
 360			map->unmap_ops[offset+i].handle,
 361			map->unmap_ops[offset+i].status);
 362		map->unmap_ops[offset+i].handle = -1;
 363	}
 364	return err;
 365}
 366
 367static int unmap_grant_pages(struct grant_map *map, int offset, int pages)
 368{
 369	int range, err = 0;
 370
 371	pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages);
 372
 373	/* It is possible the requested range will have a "hole" where we
 374	 * already unmapped some of the grants. Only unmap valid ranges.
 375	 */
 376	while (pages && !err) {
 377		while (pages && map->unmap_ops[offset].handle == -1) {
 378			offset++;
 379			pages--;
 380		}
 381		range = 0;
 382		while (range < pages) {
 383			if (map->unmap_ops[offset+range].handle == -1)
 384				break;
 385			range++;
 386		}
 387		err = __unmap_grant_pages(map, offset, range);
 388		offset += range;
 389		pages -= range;
 390	}
 391
 392	return err;
 393}
 394
 395/* ------------------------------------------------------------------ */
 396
 397static void gntdev_vma_open(struct vm_area_struct *vma)
 398{
 399	struct grant_map *map = vma->vm_private_data;
 400
 401	pr_debug("gntdev_vma_open %p\n", vma);
 402	refcount_inc(&map->users);
 403}
 404
 405static void gntdev_vma_close(struct vm_area_struct *vma)
 406{
 407	struct grant_map *map = vma->vm_private_data;
 408	struct file *file = vma->vm_file;
 409	struct gntdev_priv *priv = file->private_data;
 410
 411	pr_debug("gntdev_vma_close %p\n", vma);
 412	if (use_ptemod) {
 413		/* It is possible that an mmu notifier could be running
 414		 * concurrently, so take priv->lock to ensure that the vma won't
  415		 * vanish during the unmap_grant_pages call, since we will
 416		 * spin here until that completes. Such a concurrent call will
 417		 * not do any unmapping, since that has been done prior to
 418		 * closing the vma, but it may still iterate the unmap_ops list.
 419		 */
 420		mutex_lock(&priv->lock);
 421		map->vma = NULL;
 422		mutex_unlock(&priv->lock);
 423	}
 424	vma->vm_private_data = NULL;
 425	gntdev_put_map(priv, map);
 426}
 427
 428static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 429						 unsigned long addr)
 430{
 431	struct grant_map *map = vma->vm_private_data;
 432
 433	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
 434}
 435
 436static const struct vm_operations_struct gntdev_vmops = {
 437	.open = gntdev_vma_open,
 438	.close = gntdev_vma_close,
 439	.find_special_page = gntdev_vma_find_special_page,
 440};
 441
 442/* ------------------------------------------------------------------ */
 443
 444static void unmap_if_in_range(struct grant_map *map,
 445			      unsigned long start, unsigned long end)
 446{
 447	unsigned long mstart, mend;
 448	int err;
 449
 450	if (!map->vma)
 451		return;
 452	if (map->vma->vm_start >= end)
 453		return;
 454	if (map->vma->vm_end <= start)
 455		return;
 456	mstart = max(start, map->vma->vm_start);
 457	mend   = min(end,   map->vma->vm_end);
 458	pr_debug("map %d+%d (%lx %lx), range %lx %lx, mrange %lx %lx\n",
 459			map->index, map->count,
 460			map->vma->vm_start, map->vma->vm_end,
 461			start, end, mstart, mend);
 462	err = unmap_grant_pages(map,
 463				(mstart - map->vma->vm_start) >> PAGE_SHIFT,
 464				(mend - mstart) >> PAGE_SHIFT);
 465	WARN_ON(err);
 466}
 467
 468static void mn_invl_range_start(struct mmu_notifier *mn,
 469				struct mm_struct *mm,
 470				unsigned long start, unsigned long end)
 471{
 472	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 473	struct grant_map *map;
 474
 475	mutex_lock(&priv->lock);
 476	list_for_each_entry(map, &priv->maps, next) {
 477		unmap_if_in_range(map, start, end);
 478	}
 479	list_for_each_entry(map, &priv->freeable_maps, next) {
 480		unmap_if_in_range(map, start, end);
 481	}
 482	mutex_unlock(&priv->lock);
 483}
 484
 485static void mn_release(struct mmu_notifier *mn,
 486		       struct mm_struct *mm)
 487{
 488	struct gntdev_priv *priv = container_of(mn, struct gntdev_priv, mn);
 489	struct grant_map *map;
 490	int err;
 491
 492	mutex_lock(&priv->lock);
 493	list_for_each_entry(map, &priv->maps, next) {
 494		if (!map->vma)
 495			continue;
 496		pr_debug("map %d+%d (%lx %lx)\n",
 497				map->index, map->count,
 498				map->vma->vm_start, map->vma->vm_end);
 499		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 500		WARN_ON(err);
 501	}
 502	list_for_each_entry(map, &priv->freeable_maps, next) {
 503		if (!map->vma)
 504			continue;
 505		pr_debug("map %d+%d (%lx %lx)\n",
 506				map->index, map->count,
 507				map->vma->vm_start, map->vma->vm_end);
 508		err = unmap_grant_pages(map, /* offset */ 0, map->count);
 509		WARN_ON(err);
 510	}
 511	mutex_unlock(&priv->lock);
 512}
 513
 514static const struct mmu_notifier_ops gntdev_mmu_ops = {
 515	.release                = mn_release,
 516	.invalidate_range_start = mn_invl_range_start,
 517};
 518
 519/* ------------------------------------------------------------------ */
 520
 521static int gntdev_open(struct inode *inode, struct file *flip)
 522{
 523	struct gntdev_priv *priv;
 524	int ret = 0;
 525
 526	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 527	if (!priv)
 528		return -ENOMEM;
 529
 530	INIT_LIST_HEAD(&priv->maps);
 531	INIT_LIST_HEAD(&priv->freeable_maps);
 532	mutex_init(&priv->lock);
 533
 534	if (use_ptemod) {
 535		priv->mm = get_task_mm(current);
 536		if (!priv->mm) {
 537			kfree(priv);
 538			return -ENOMEM;
 539		}
 540		priv->mn.ops = &gntdev_mmu_ops;
 541		ret = mmu_notifier_register(&priv->mn, priv->mm);
 542		mmput(priv->mm);
 543	}
 544
 545	if (ret) {
 546		kfree(priv);
 547		return ret;
 548	}
 549
 550	flip->private_data = priv;
 551	pr_debug("priv %p\n", priv);
 552
 553	return 0;
 554}
 555
 556static int gntdev_release(struct inode *inode, struct file *flip)
 557{
 558	struct gntdev_priv *priv = flip->private_data;
 559	struct grant_map *map;
 560
 561	pr_debug("priv %p\n", priv);
 562
 563	mutex_lock(&priv->lock);
 564	while (!list_empty(&priv->maps)) {
 565		map = list_entry(priv->maps.next, struct grant_map, next);
 566		list_del(&map->next);
 567		gntdev_put_map(NULL /* already removed */, map);
 568	}
 569	WARN_ON(!list_empty(&priv->freeable_maps));
 570	mutex_unlock(&priv->lock);
 571
 572	if (use_ptemod)
 573		mmu_notifier_unregister(&priv->mn, priv->mm);
 574	kfree(priv);
 575	return 0;
 576}
 577
 578static long gntdev_ioctl_map_grant_ref(struct gntdev_priv *priv,
 579				       struct ioctl_gntdev_map_grant_ref __user *u)
 580{
 581	struct ioctl_gntdev_map_grant_ref op;
 582	struct grant_map *map;
 583	int err;
 584
 585	if (copy_from_user(&op, u, sizeof(op)) != 0)
 586		return -EFAULT;
 587	pr_debug("priv %p, add %d\n", priv, op.count);
 588	if (unlikely(op.count <= 0))
 589		return -EINVAL;
 590
 591	err = -ENOMEM;
 592	map = gntdev_alloc_map(priv, op.count);
 593	if (!map)
 594		return err;
 595
 596	if (unlikely(atomic_add_return(op.count, &pages_mapped) > limit)) {
 597		pr_debug("can't map: over limit\n");
 598		gntdev_put_map(NULL, map);
 599		return err;
 600	}
 601
 602	if (copy_from_user(map->grants, &u->refs,
 603			   sizeof(map->grants[0]) * op.count) != 0) {
 604		gntdev_put_map(NULL, map);
 605		return -EFAULT;
 606	}
 607
 608	mutex_lock(&priv->lock);
 609	gntdev_add_map(priv, map);
 610	op.index = map->index << PAGE_SHIFT;
 611	mutex_unlock(&priv->lock);
 612
 613	if (copy_to_user(u, &op, sizeof(op)) != 0)
 614		return -EFAULT;
 615
 616	return 0;
 617}
 618
 619static long gntdev_ioctl_unmap_grant_ref(struct gntdev_priv *priv,
 620					 struct ioctl_gntdev_unmap_grant_ref __user *u)
 621{
 622	struct ioctl_gntdev_unmap_grant_ref op;
 623	struct grant_map *map;
 624	int err = -ENOENT;
 625
 626	if (copy_from_user(&op, u, sizeof(op)) != 0)
 627		return -EFAULT;
 628	pr_debug("priv %p, del %d+%d\n", priv, (int)op.index, (int)op.count);
 629
 630	mutex_lock(&priv->lock);
 631	map = gntdev_find_map_index(priv, op.index >> PAGE_SHIFT, op.count);
 632	if (map) {
 633		list_del(&map->next);
 634		if (populate_freeable_maps)
 635			list_add_tail(&map->next, &priv->freeable_maps);
 636		err = 0;
 637	}
 638	mutex_unlock(&priv->lock);
 639	if (map)
 640		gntdev_put_map(priv, map);
 641	return err;
 642}
 643
 644static long gntdev_ioctl_get_offset_for_vaddr(struct gntdev_priv *priv,
 645					      struct ioctl_gntdev_get_offset_for_vaddr __user *u)
 646{
 647	struct ioctl_gntdev_get_offset_for_vaddr op;
 648	struct vm_area_struct *vma;
 649	struct grant_map *map;
 650	int rv = -EINVAL;
 651
 652	if (copy_from_user(&op, u, sizeof(op)) != 0)
 653		return -EFAULT;
 654	pr_debug("priv %p, offset for vaddr %lx\n", priv, (unsigned long)op.vaddr);
 655
 656	down_read(&current->mm->mmap_sem);
 657	vma = find_vma(current->mm, op.vaddr);
 658	if (!vma || vma->vm_ops != &gntdev_vmops)
 659		goto out_unlock;
 660
 661	map = vma->vm_private_data;
 662	if (!map)
 663		goto out_unlock;
 664
 665	op.offset = map->index << PAGE_SHIFT;
 666	op.count = map->count;
 667	rv = 0;
 668
 669 out_unlock:
 670	up_read(&current->mm->mmap_sem);
 671
 672	if (rv == 0 && copy_to_user(u, &op, sizeof(op)) != 0)
 673		return -EFAULT;
 674	return rv;
 675}
 676
 677static long gntdev_ioctl_notify(struct gntdev_priv *priv, void __user *u)
 678{
 679	struct ioctl_gntdev_unmap_notify op;
 680	struct grant_map *map;
 681	int rc;
 682	int out_flags;
 683	unsigned int out_event;
 684
 685	if (copy_from_user(&op, u, sizeof(op)))
 686		return -EFAULT;
 687
 688	if (op.action & ~(UNMAP_NOTIFY_CLEAR_BYTE|UNMAP_NOTIFY_SEND_EVENT))
 689		return -EINVAL;
 690
 691	/* We need to grab a reference to the event channel we are going to use
 692	 * to send the notify before releasing the reference we may already have
 693	 * (if someone has called this ioctl twice). This is required so that
 694	 * it is possible to change the clear_byte part of the notification
 695	 * without disturbing the event channel part, which may now be the last
 696	 * reference to that event channel.
 697	 */
 698	if (op.action & UNMAP_NOTIFY_SEND_EVENT) {
 699		if (evtchn_get(op.event_channel_port))
 700			return -EINVAL;
 701	}
 702
 703	out_flags = op.action;
 704	out_event = op.event_channel_port;
 705
 706	mutex_lock(&priv->lock);
 707
 708	list_for_each_entry(map, &priv->maps, next) {
 709		uint64_t begin = map->index << PAGE_SHIFT;
 710		uint64_t end = (map->index + map->count) << PAGE_SHIFT;
 711		if (op.index >= begin && op.index < end)
 712			goto found;
 713	}
 714	rc = -ENOENT;
 715	goto unlock_out;
 716
 717 found:
 718	if ((op.action & UNMAP_NOTIFY_CLEAR_BYTE) &&
 719			(map->flags & GNTMAP_readonly)) {
 720		rc = -EINVAL;
 721		goto unlock_out;
 722	}
 723
 724	out_flags = map->notify.flags;
 725	out_event = map->notify.event;
 726
 727	map->notify.flags = op.action;
 728	map->notify.addr = op.index - (map->index << PAGE_SHIFT);
 729	map->notify.event = op.event_channel_port;
 730
 731	rc = 0;
 732
 733 unlock_out:
 734	mutex_unlock(&priv->lock);
 735
 736	/* Drop the reference to the event channel we did not save in the map */
 737	if (out_flags & UNMAP_NOTIFY_SEND_EVENT)
 738		evtchn_put(out_event);
 739
 740	return rc;
 741}
 742
 743#define GNTDEV_COPY_BATCH 16
 744
 745struct gntdev_copy_batch {
 746	struct gnttab_copy ops[GNTDEV_COPY_BATCH];
 747	struct page *pages[GNTDEV_COPY_BATCH];
 748	s16 __user *status[GNTDEV_COPY_BATCH];
 749	unsigned int nr_ops;
 750	unsigned int nr_pages;
 751};
 752
 753static int gntdev_get_page(struct gntdev_copy_batch *batch, void __user *virt,
 754			   bool writeable, unsigned long *gfn)
 755{
 756	unsigned long addr = (unsigned long)virt;
 757	struct page *page;
 758	unsigned long xen_pfn;
 759	int ret;
 760
 761	ret = get_user_pages_fast(addr, 1, writeable, &page);
 762	if (ret < 0)
 763		return ret;
 764
 765	batch->pages[batch->nr_pages++] = page;
 766
 767	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(addr & ~PAGE_MASK);
 768	*gfn = pfn_to_gfn(xen_pfn);
 769
 770	return 0;
 771}
 772
 773static void gntdev_put_pages(struct gntdev_copy_batch *batch)
 774{
 775	unsigned int i;
 776
 777	for (i = 0; i < batch->nr_pages; i++)
 778		put_page(batch->pages[i]);
 779	batch->nr_pages = 0;
 780}
 781
 782static int gntdev_copy(struct gntdev_copy_batch *batch)
 783{
 784	unsigned int i;
 785
 786	gnttab_batch_copy(batch->ops, batch->nr_ops);
 787	gntdev_put_pages(batch);
 788
 789	/*
 790	 * For each completed op, update the status if the op failed
 791	 * and all previous ops for the segment were successful.
 792	 */
 793	for (i = 0; i < batch->nr_ops; i++) {
 794		s16 status = batch->ops[i].status;
 795		s16 old_status;
 796
 797		if (status == GNTST_okay)
 798			continue;
 799
 800		if (__get_user(old_status, batch->status[i]))
 801			return -EFAULT;
 802
 803		if (old_status != GNTST_okay)
 804			continue;
 805
 806		if (__put_user(status, batch->status[i]))
 807			return -EFAULT;
 808	}
 809
 810	batch->nr_ops = 0;
 811	return 0;
 812}
 813
 814static int gntdev_grant_copy_seg(struct gntdev_copy_batch *batch,
 815				 struct gntdev_grant_copy_segment *seg,
 816				 s16 __user *status)
 817{
 818	uint16_t copied = 0;
 819
 820	/*
 821	 * Disallow local -> local copies since there is only space in
 822	 * batch->pages for one page per-op and this would be a very
 823	 * expensive memcpy().
 824	 */
 825	if (!(seg->flags & (GNTCOPY_source_gref | GNTCOPY_dest_gref)))
 826		return -EINVAL;
 827
 828	/* Can't cross page if source/dest is a grant ref. */
 829	if (seg->flags & GNTCOPY_source_gref) {
 830		if (seg->source.foreign.offset + seg->len > XEN_PAGE_SIZE)
 831			return -EINVAL;
 832	}
 833	if (seg->flags & GNTCOPY_dest_gref) {
 834		if (seg->dest.foreign.offset + seg->len > XEN_PAGE_SIZE)
 835			return -EINVAL;
 836	}
 837
 838	if (put_user(GNTST_okay, status))
 839		return -EFAULT;
 840
 841	while (copied < seg->len) {
 842		struct gnttab_copy *op;
 843		void __user *virt;
 844		size_t len, off;
 845		unsigned long gfn;
 846		int ret;
 847
 848		if (batch->nr_ops >= GNTDEV_COPY_BATCH) {
 849			ret = gntdev_copy(batch);
 850			if (ret < 0)
 851				return ret;
 852		}
 853
 854		len = seg->len - copied;
 855
 856		op = &batch->ops[batch->nr_ops];
 857		op->flags = 0;
 858
 859		if (seg->flags & GNTCOPY_source_gref) {
 860			op->source.u.ref = seg->source.foreign.ref;
 861			op->source.domid = seg->source.foreign.domid;
 862			op->source.offset = seg->source.foreign.offset + copied;
 863			op->flags |= GNTCOPY_source_gref;
 864		} else {
 865			virt = seg->source.virt + copied;
 866			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 867			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 868
 869			ret = gntdev_get_page(batch, virt, false, &gfn);
 870			if (ret < 0)
 871				return ret;
 872
 873			op->source.u.gmfn = gfn;
 874			op->source.domid = DOMID_SELF;
 875			op->source.offset = off;
 876		}
 877
 878		if (seg->flags & GNTCOPY_dest_gref) {
 879			op->dest.u.ref = seg->dest.foreign.ref;
 880			op->dest.domid = seg->dest.foreign.domid;
 881			op->dest.offset = seg->dest.foreign.offset + copied;
 882			op->flags |= GNTCOPY_dest_gref;
 883		} else {
 884			virt = seg->dest.virt + copied;
 885			off = (unsigned long)virt & ~XEN_PAGE_MASK;
 886			len = min(len, (size_t)XEN_PAGE_SIZE - off);
 887
 888			ret = gntdev_get_page(batch, virt, true, &gfn);
 889			if (ret < 0)
 890				return ret;
 891
 892			op->dest.u.gmfn = gfn;
 893			op->dest.domid = DOMID_SELF;
 894			op->dest.offset = off;
 895		}
 896
 897		op->len = len;
 898		copied += len;
 899
 900		batch->status[batch->nr_ops] = status;
 901		batch->nr_ops++;
 902	}
 903
 904	return 0;
 905}
 906
 907static long gntdev_ioctl_grant_copy(struct gntdev_priv *priv, void __user *u)
 908{
 909	struct ioctl_gntdev_grant_copy copy;
 910	struct gntdev_copy_batch batch;
 911	unsigned int i;
 912	int ret = 0;
 913
 914	if (copy_from_user(&copy, u, sizeof(copy)))
 915		return -EFAULT;
 916
 917	batch.nr_ops = 0;
 918	batch.nr_pages = 0;
 919
 920	for (i = 0; i < copy.count; i++) {
 921		struct gntdev_grant_copy_segment seg;
 922
 923		if (copy_from_user(&seg, &copy.segments[i], sizeof(seg))) {
 924			ret = -EFAULT;
 925			goto out;
 926		}
 927
 928		ret = gntdev_grant_copy_seg(&batch, &seg, &copy.segments[i].status);
 929		if (ret < 0)
 930			goto out;
 931
 932		cond_resched();
 933	}
 934	if (batch.nr_ops)
 935		ret = gntdev_copy(&batch);
 936	return ret;
 937
 938  out:
 939	gntdev_put_pages(&batch);
 940	return ret;
 941}
 942
 943static long gntdev_ioctl(struct file *flip,
 944			 unsigned int cmd, unsigned long arg)
 945{
 946	struct gntdev_priv *priv = flip->private_data;
 947	void __user *ptr = (void __user *)arg;
 948
 949	switch (cmd) {
 950	case IOCTL_GNTDEV_MAP_GRANT_REF:
 951		return gntdev_ioctl_map_grant_ref(priv, ptr);
 952
 953	case IOCTL_GNTDEV_UNMAP_GRANT_REF:
 954		return gntdev_ioctl_unmap_grant_ref(priv, ptr);
 955
 956	case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
 957		return gntdev_ioctl_get_offset_for_vaddr(priv, ptr);
 958
 959	case IOCTL_GNTDEV_SET_UNMAP_NOTIFY:
 960		return gntdev_ioctl_notify(priv, ptr);
 961
 962	case IOCTL_GNTDEV_GRANT_COPY:
 963		return gntdev_ioctl_grant_copy(priv, ptr);
 964
 965	default:
 966		pr_debug("priv %p, unknown cmd %x\n", priv, cmd);
 967		return -ENOIOCTLCMD;
 968	}
 969
 970	return 0;
 971}
 972
 973static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma)
 974{
 975	struct gntdev_priv *priv = flip->private_data;
 976	int index = vma->vm_pgoff;
 977	int count = vma_pages(vma);
 978	struct grant_map *map;
 979	int i, err = -EINVAL;
 980
 981	if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED))
 982		return -EINVAL;
 983
 984	pr_debug("map %d+%d at %lx (pgoff %lx)\n",
 985			index, count, vma->vm_start, vma->vm_pgoff);
 986
 987	mutex_lock(&priv->lock);
 988	map = gntdev_find_map_index(priv, index, count);
 989	if (!map)
 990		goto unlock_out;
 991	if (use_ptemod && map->vma)
 992		goto unlock_out;
 993	if (use_ptemod && priv->mm != vma->vm_mm) {
 994		pr_warn("Huh? Other mm?\n");
 995		goto unlock_out;
 996	}
 997
 998	refcount_inc(&map->users);
 999
1000	vma->vm_ops = &gntdev_vmops;
1001
1002	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP | VM_MIXEDMAP;
1003
1004	if (use_ptemod)
1005		vma->vm_flags |= VM_DONTCOPY;
1006
1007	vma->vm_private_data = map;
1008
1009	if (use_ptemod)
1010		map->vma = vma;
1011
1012	if (map->flags) {
1013		if ((vma->vm_flags & VM_WRITE) &&
1014				(map->flags & GNTMAP_readonly))
1015			goto out_unlock_put;
1016	} else {
1017		map->flags = GNTMAP_host_map;
1018		if (!(vma->vm_flags & VM_WRITE))
1019			map->flags |= GNTMAP_readonly;
1020	}
1021
1022	mutex_unlock(&priv->lock);
1023
1024	if (use_ptemod) {
1025		map->pages_vm_start = vma->vm_start;
1026		err = apply_to_page_range(vma->vm_mm, vma->vm_start,
1027					  vma->vm_end - vma->vm_start,
1028					  find_grant_ptes, map);
1029		if (err) {
1030			pr_warn("find_grant_ptes() failure.\n");
1031			goto out_put_map;
1032		}
1033	}
1034
1035	err = map_grant_pages(map);
1036	if (err)
1037		goto out_put_map;
1038
1039	if (!use_ptemod) {
1040		for (i = 0; i < count; i++) {
1041			err = vm_insert_page(vma, vma->vm_start + i*PAGE_SIZE,
1042				map->pages[i]);
1043			if (err)
1044				goto out_put_map;
1045		}
1046	} else {
1047#ifdef CONFIG_X86
1048		/*
1049		 * If the PTEs were not made special by the grant map
1050		 * hypercall, do so here.
1051		 *
1052		 * This is racy since the mapping is already visible
1053		 * to userspace but userspace should be well-behaved
1054		 * enough to not touch it until the mmap() call
1055		 * returns.
1056		 */
1057		if (!xen_feature(XENFEAT_gnttab_map_avail_bits)) {
1058			apply_to_page_range(vma->vm_mm, vma->vm_start,
1059					    vma->vm_end - vma->vm_start,
1060					    set_grant_ptes_as_special, NULL);
1061		}
1062#endif
1063	}
1064
1065	return 0;
1066
1067unlock_out:
1068	mutex_unlock(&priv->lock);
1069	return err;
1070
1071out_unlock_put:
1072	mutex_unlock(&priv->lock);
1073out_put_map:
1074	if (use_ptemod) {
1075		map->vma = NULL;
1076		unmap_grant_pages(map, 0, map->count);
1077	}
1078	gntdev_put_map(priv, map);
1079	return err;
1080}
1081
1082static const struct file_operations gntdev_fops = {
1083	.owner = THIS_MODULE,
1084	.open = gntdev_open,
1085	.release = gntdev_release,
1086	.mmap = gntdev_mmap,
1087	.unlocked_ioctl = gntdev_ioctl
1088};
1089
1090static struct miscdevice gntdev_miscdev = {
1091	.minor        = MISC_DYNAMIC_MINOR,
1092	.name         = "xen/gntdev",
1093	.fops         = &gntdev_fops,
1094};
1095
1096/* ------------------------------------------------------------------ */
1097
1098static int __init gntdev_init(void)
1099{
1100	int err;
1101
1102	if (!xen_domain())
1103		return -ENODEV;
1104
1105	use_ptemod = !xen_feature(XENFEAT_auto_translated_physmap);
1106
1107	err = misc_register(&gntdev_miscdev);
1108	if (err != 0) {
1109		pr_err("Could not register gntdev device\n");
1110		return err;
1111	}
1112	return 0;
1113}
1114
1115static void __exit gntdev_exit(void)
1116{
1117	misc_deregister(&gntdev_miscdev);
1118}
1119
1120module_init(gntdev_init);
1121module_exit(gntdev_exit);
1122
1123/* ------------------------------------------------------------------ */
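
The main user-visible addition in v4.17 is IOCTL_GNTDEV_GRANT_COPY, which lets small transfers go through the GNTTABOP_copy hypercall without mapping anything into the caller's address space. A minimal user-space sketch follows, again assuming the uapi header is available as <xen/gntdev.h>, that `fd` is an open /dev/xen/gntdev descriptor, and that `domid` and `ref` come from elsewhere; the field names follow struct gntdev_grant_copy_segment as consumed by gntdev_grant_copy_seg() above.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <xen/gntdev.h>

/* Copy the first 64 bytes of a foreign granted page into a local
 * buffer. At least one side must be a grant reference:
 * gntdev_grant_copy_seg() rejects local->local copies. */
static int demo_grant_copy(int fd, uint32_t domid, uint32_t ref, void *buf)
{
	struct gntdev_grant_copy_segment seg;
	struct ioctl_gntdev_grant_copy copy;

	memset(&seg, 0, sizeof(seg));
	seg.flags = GNTCOPY_source_gref;
	seg.source.foreign.ref    = ref;
	seg.source.foreign.domid  = domid;
	seg.source.foreign.offset = 0;	/* must stay within one Xen page */
	seg.dest.virt = buf;
	seg.len = 64;

	copy.count = 1;
	copy.segments = &seg;
	if (ioctl(fd, IOCTL_GNTDEV_GRANT_COPY, &copy))
		return -1;
	return seg.status;	/* GNTST_okay (0) on success */
}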