/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <asm/xen/hypercall.h>

#include <asm/pgtable.h>
#include <asm/sync_bitops.h>


/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff
#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static unsigned int boot_max_nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
unsigned long xen_hvm_resume_frames;
EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);

static struct grant_entry *shared;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
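
/*
 * The free list of unused grant references is threaded through gnttab_list
 * itself: gnttab_list is a two-level array of page-sized chunks, each
 * holding RPP grant_ref_t slots, and gnttab_entry(ref) holds the reference
 * that follows 'ref' on the free list (GNTTAB_LIST_END marks the tail).
 */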

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void update_grant_entry(grant_ref_t ref, domid_t domid,
			       unsigned long frame, unsigned flags)
{
	/*
	 * Introducing a valid entry into the grant table:
	 *  1. Write ent->domid.
	 *  2. Write ent->frame:
	 *      GTF_permit_access:   Frame to which access is permitted.
	 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
	 *                           frame, or zero if none.
	 *  3. Write memory barrier (WMB).
	 *  4. Write ent->flags, inc. valid type.
	 */
	shared[ref].frame = frame;
	shared[ref].domid = domid;
	wmb();
	shared[ref].flags = flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	update_grant_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

int gnttab_query_foreign_access(grant_ref_t ref)
{
	u16 nflags;

	nflags = shared[ref].flags;

	return (nflags & (GTF_reading|GTF_writing));
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;

	nflags = shared[ref].flags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing)) {
			printk(KERN_ALERT "WARNING: g.e. still in use!\n");
			return 0;
		}
	} while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);

	return 1;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else {
		/* XXX This needs to be fixed so that the ref and page are
		   placed on a list to be freed up later. */
		printk(KERN_WARNING
		       "WARNING: leaking g.e. and page still in use!\n");
	}
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
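
/*
 * Illustrative usage sketch (hypothetical frontend code, not a real in-tree
 * driver): sharing one page with a backend domain via the interface above.
 * The backend domid would normally come from xenbus, and the grant
 * reference would be advertised to the backend, e.g. through xenstore.
 */
static int __maybe_unused example_share_page(domid_t otherend_id,
					     unsigned long *vaddr_out,
					     grant_ref_t *ref_out)
{
	unsigned long vaddr;
	int ref;

	vaddr = __get_free_page(GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* Grant the backend read/write access to the page's machine frame. */
	ref = gnttab_grant_foreign_access(otherend_id, virt_to_mfn(vaddr), 0);
	if (ref < 0) {
		free_page(vaddr);
		return ref;
	}

	*vaddr_out = vaddr;
	*ref_out = ref;
	return 0;
}

static void __maybe_unused example_unshare_page(grant_ref_t ref,
						unsigned long vaddr)
{
	/*
	 * Revoke the grant; gnttab_end_foreign_access() frees the page if the
	 * backend no longer has it in use, and otherwise warns and leaks both
	 * the reference and the page.
	 */
	gnttab_end_foreign_access(ref, 0, vaddr);
}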

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;

	/*
	 * If a transfer has not even started yet, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = shared[ref].flags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = shared[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
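
/*
 * Transfer flow implemented above: the local domain nominates a
 * pseudo-physical frame slot (pfn) and marks it GTF_accept_transfer for a
 * specific peer.  Once the peer has committed a machine frame into the
 * grant, gnttab_end_foreign_transfer() returns that frame (or 0 if the
 * transfer never started, in which case the grant entry was reclaimed);
 * hooking the returned frame into the physmap is left to the caller.
 */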

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;
	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
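
/*
 * Illustrative usage sketch (hypothetical code): the interface above lets a
 * driver reserve a private pool of references up front and then claim and
 * release them without taking gnttab_list_lock on every I/O, which is how
 * the ring-based frontends (netfront, blkfront) use it.  EXAMPLE_POOL_SIZE
 * and the mfn argument are placeholders.
 */
#define EXAMPLE_POOL_SIZE 16

static int __maybe_unused example_use_ref_pool(domid_t otherend_id,
					       unsigned long mfn)
{
	grant_ref_t pool_head;
	int ref, err;

	/* Reserve EXAMPLE_POOL_SIZE references as a private free list. */
	err = gnttab_alloc_grant_references(EXAMPLE_POOL_SIZE, &pool_head);
	if (err)
		return err;

	/* Claim one reference from the private pool (no global locking). */
	ref = gnttab_claim_grant_reference(&pool_head);
	if (ref < 0) {
		gnttab_free_grant_references(pool_head);
		return ref;
	}

	gnttab_grant_foreign_access_ref(ref, otherend_id, mfn, 0);

	/* ... the reference is used for I/O here ... */

	if (gnttab_end_foreign_access_ref(ref, 0))
		gnttab_release_grant_reference(&pool_head, ref);

	/* Hand whatever is left in the pool back to the global free list. */
	gnttab_free_grant_references(pool_head);
	return 0;
}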

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	if (callback->next)
		goto out;
	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
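
/*
 * Illustrative usage sketch (hypothetical driver state): a driver that runs
 * out of grant references can ask to be notified once at least 'count'
 * references are free again instead of polling; the Xen block frontend uses
 * this to restart its request queue.  Note that the callback is invoked
 * with gnttab_list_lock held and interrupts off, so it must not sleep.
 */
struct example_dev {
	struct gnttab_free_callback gref_cb;
	/* ... per-device state ... */
};

static void example_refill(void *arg)
{
	struct example_dev *dev = arg;

	/* Kick the device's I/O path now that references are available. */
	(void)dev;
}

static void __maybe_unused example_wait_for_grefs(struct example_dev *dev,
						  u16 needed)
{
	gnttab_request_free_callback(&dev->gref_cb, example_refill,
				     dev, needed);
}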

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries       = more_frames * GREFS_PER_GRANT_FRAME;

	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
	new_nr_glist_frames =
		(new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}


	for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
	     i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	for ( ; i >= nr_glist_frames; i--)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;
	pte_t *pte;
	unsigned long mfn;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		/* Do not add to override if the map failed. */
		if (map_ops[i].status)
			continue;

		if (map_ops[i].flags & GNTMAP_contains_pte) {
			pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
				(map_ops[i].host_addr & ~PAGE_MASK));
			mfn = pte_mfn(*pte);
		} else {
			/* If you really wanted to do this:
			 * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
			 *
			 * The reason we do not implement it is because on the
			 * unmap path (gnttab_unmap_refs) we have no means of
			 * checking whether the page is !GNTMAP_contains_pte.
			 *
			 * That is without some extra data-structure to carry
			 * the struct page, bool clear_pte, and list_head next
			 * tuples and deal with allocation/deallocation, etc.
			 *
			 * The users of this API set the GNTMAP_contains_pte
			 * flag, so let's just return not supported until it
			 * becomes necessary to implement.
			 */
			return -EOPNOTSUPP;
		}
		ret = m2p_add_override(mfn, pages[i],
				       map_ops[i].flags & GNTMAP_contains_pte);
		if (ret)
			return ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return ret;

	for (i = 0; i < count; i++) {
		ret = m2p_remove_override(pages[i], true /* clear the PTE */);
		if (ret)
			return ret;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
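
/*
 * Illustrative usage sketch (hypothetical, heavily simplified): driving the
 * two helpers above the way the gntdev driver does.  As the comment in
 * gnttab_map_refs() notes, this version only handles maps that carry
 * GNTMAP_contains_pte, so the caller is assumed to have already located the
 * PTE that should receive the mapping and to pass its machine address;
 * gnttab_set_map_op()/gnttab_set_unmap_op() are the helpers declared in
 * <xen/grant_table.h>.
 */
static int __maybe_unused example_map_foreign_page(pte_t *pte,
						   struct page *page,
						   grant_ref_t ref,
						   domid_t otherend_id)
{
	struct gnttab_map_grant_ref map_op;
	struct gnttab_unmap_grant_ref unmap_op;
	u64 pte_maddr = arbitrary_virt_to_machine(pte).maddr;
	int err;

	gnttab_set_map_op(&map_op, pte_maddr,
			  GNTMAP_host_map | GNTMAP_contains_pte,
			  ref, otherend_id);

	err = gnttab_map_refs(&map_op, &page, 1);
	if (err)
		return err;
	if (map_op.status != GNTST_okay)
		return -EINVAL;

	/* ... the foreign page is accessible through the mapped PTE ... */

	gnttab_set_unmap_op(&unmap_op, pte_maddr,
			    GNTMAP_host_map | GNTMAP_contains_pte,
			    map_op.handle);
	return gnttab_unmap_refs(&unmap_op, &page, 1);
}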

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_hvm_domain()) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				printk(KERN_WARNING
						"grant table add_to_physmap failed, err=%d\n", rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(),
				    &shared);
	BUG_ON(rc);

	kfree(frames);

	return 0;
}

int gnttab_resume(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_pv_domain())
		return gnttab_map(0, nr_grant_frames - 1);

	if (!shared) {
		shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes);
		if (shared == NULL) {
			printk(KERN_WARNING
					"Failed to ioremap gnttab shared frames!");
			return -ENOMEM;
		}
	}

	gnttab_map(0, nr_grant_frames - 1);

	return 0;
}

int gnttab_suspend(void)
{
	arch_gnttab_unmap_shared(shared, nr_grant_frames);
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
		 GREFS_PER_GRANT_FRAME);
	if (cur + extra > gnttab_max_grant_frames())
		return -ENOSPC;

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;

	nr_grant_frames = 1;
	boot_max_nr_grant_frames = __max_nr_grant_frames();

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = (boot_max_nr_grant_frames *
			       GREFS_PER_GRANT_FRAME / RPP);

	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
			      GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL)
			goto ini_nomem;
	}

	if (gnttab_resume() < 0)
		return -ENODEV;

	nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head  = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __devinit __gnttab_init(void)
{
	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain())
		return 0;

	if (!xen_pv_domain())
		return -ENODEV;

	return gnttab_init();
}

core_initcall(__gnttab_init);