/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;

/*
 * Handling of free grants:
 *
 * Free grants are kept in a simple list anchored in gnttab_free_head. They
 * are linked by grant ref; the last element contains GNTTAB_LIST_END. The
 * number of free entries is stored in gnttab_free_count.
 * Additionally there is a bitmap of free entries anchored in
 * gnttab_free_bitmap. It is used to simplify the allocation of multiple
 * consecutive grants, which is needed e.g. to support virtio.
 * gnttab_last_free is used to add free entries of new frames at the end of
 * the free list.
 * gnttab_free_tail_ptr specifies the variable which references the start
 * of consecutive free grants ending with gnttab_last_free. This pointer is
 * updated in a rather defensive way, in order to avoid performance hits in
 * hot paths.
 * All those variables are protected by gnttab_list_lock.
 */
static int gnttab_free_count;
static unsigned int gnttab_size;
static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
static grant_ref_t *gnttab_free_tail_ptr;
static unsigned long *gnttab_free_bitmap;
static DEFINE_SPINLOCK(gnttab_list_lock);

struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing the grant entries. The frames
	 * parameter holds the grant table frame addresses once the table is
	 * set up, and nr_gframes is the number of frames to map. Returns
	 * GNTST_okay on success; a negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames which was mapped in map_frames for the
	 * grant entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this entry to a domain for access. The ref parameter is the
	 * reference of the introduced grant entry, domid is the id of the
	 * granted domain, frame is the page frame to be granted, and flags
	 * is the status the grant entry is to be updated with.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for access. The ref
	 * parameter is the reference of the grant entry whose access is to
	 * be revoked. If the grant entry is currently mapped for reading or
	 * writing, just return failure (==0) directly and don't tear down
	 * the grant access. Otherwise, stop the grant access for this entry
	 * and return success (==1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
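
/*
 * Worked example (illustrative, assuming 4 KiB pages and a 4-byte
 * grant_ref_t, so RPP == 1024): grant ref 2500 lives in the third page of
 * the indirection table, i.e. gnttab_list[2][452], since 2500 / 1024 == 2
 * and 2500 % 1024 == 452.
 */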

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count--) {
		bitmap_clear(gnttab_free_bitmap, head, 1);
		if (gnttab_free_tail_ptr == __gnttab_entry(head))
			gnttab_free_tail_ptr = &gnttab_free_head;
		if (count)
			head = gnttab_entry(head);
	}
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	if (!gnttab_free_count) {
		gnttab_last_free = GNTTAB_LIST_END;
		gnttab_free_tail_ptr = NULL;
	}

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static int get_seq_entry_count(void)
{
	if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
	    *gnttab_free_tail_ptr == GNTTAB_LIST_END)
		return 0;

	return gnttab_last_free - *gnttab_free_tail_ptr + 1;
}

/* Rebuilds the free grant list and tries to find count consecutive entries. */
static int get_free_seq(unsigned int count)
{
	int ret = -ENOSPC;
	unsigned int from, to;
	grant_ref_t *last;

	gnttab_free_tail_ptr = &gnttab_free_head;
	last = &gnttab_free_head;

	for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
	     from < gnttab_size;
	     from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
		to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
					from + 1);
		if (ret < 0 && to - from >= count) {
			ret = from;
			bitmap_clear(gnttab_free_bitmap, ret, count);
			from += count;
			gnttab_free_count -= count;
			if (from == to)
				continue;
		}

		/*
		 * Recreate the free list in order to have it properly
		 * sorted. This is needed to make sure that the free tail
		 * has the maximum possible size.
		 */
		while (from < to) {
			*last = from;
			last = __gnttab_entry(from);
			gnttab_last_free = from;
			from++;
		}
		if (to < gnttab_size)
			gnttab_free_tail_ptr = __gnttab_entry(to - 1);
	}

	*last = GNTTAB_LIST_END;
	if (gnttab_last_free != gnttab_size - 1)
		gnttab_free_tail_ptr = NULL;

	return ret;
}

static int get_free_entries_seq(unsigned int count)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if (gnttab_free_count < count) {
		ret = gnttab_expand(count - gnttab_free_count);
		if (ret < 0)
			goto out;
	}

	if (get_seq_entry_count() < count) {
		ret = get_free_seq(count);
		if (ret >= 0)
			goto out;
		ret = gnttab_expand(count - get_seq_entry_count());
		if (ret < 0)
			goto out;
	}

	ret = *gnttab_free_tail_ptr;
	*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
	gnttab_free_count -= count;
	if (!gnttab_free_count)
		gnttab_free_tail_ptr = NULL;
	bitmap_clear(gnttab_free_bitmap, ret, count);

 out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ret;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry_locked(grant_ref_t ref)
{
	if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
		return;

	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	if (!gnttab_free_count)
		gnttab_last_free = ref;
	if (gnttab_free_tail_ptr == &gnttab_free_head)
		gnttab_free_tail_ptr = __gnttab_entry(ref);
	gnttab_free_count++;
	bitmap_set(gnttab_free_bitmap, ref, 1);
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	put_free_entry_locked(ref);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_set_free(unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = start; i < start + n - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = GNTTAB_LIST_END;
	if (!gnttab_free_count) {
		gnttab_free_head = start;
		gnttab_free_tail_ptr = &gnttab_free_head;
	} else {
		gnttab_entry(gnttab_last_free) = start;
	}
	gnttab_free_count += n;
	gnttab_last_free = i;

	bitmap_set(gnttab_free_bitmap, start, n);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame: Frame to which access is permitted.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
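
/*
 * Illustrative usage sketch (assumption: a frontend driver granting one of
 * its pages, "page", to a backend domain "backend_id"; neither name is
 * defined in this file):
 *
 *	int ref = gnttab_grant_foreign_access(backend_id,
 *					      xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		return ref;
 *	(advertise ref to the backend, e.g. via xenstore; later revoke it:)
 *	gnttab_end_foreign_access(ref, page);
 */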

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_access_ref(ref);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	if (_gnttab_end_foreign_access_ref(ref))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref)) {
			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
				 entry->ref, page_to_pfn(entry->page));
			put_page(entry->page);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	const char *what = KERN_WARNING "leaking";

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page)
			put_page(page);
	} else
		gnttab_add_deferred(ref, page);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (head != GNTTAB_LIST_END) {
		ref = gnttab_entry(head);
		put_free_entry_locked(head);
		head = ref;
	}
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (i = count; i > 0; i--)
		put_free_entry_locked(head + i - 1);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
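
/*
 * Illustrative sketch of the reservation API (hypothetical caller code,
 * not part of this file; "backend_id" and "gfn" are placeholders): a
 * driver reserves a batch of references up front and claims them one at a
 * time from its private head, so the hot path never takes
 * gnttab_list_lock.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(16, &head))
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref >= 0)
 *		gnttab_grant_foreign_access_ref(ref, backend_id, gfn, 0);
 *	(... later, return any unclaimed references:)
 *	gnttab_free_grant_references(head);
 */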

int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
{
	int h;

	if (count == 1)
		h = get_free_entries(1);
	else
		h = get_free_entries_seq(count);

	if (h < 0)
		return -ENOSPC;

	*first = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}
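
/*
 * Worked example (illustrative, assuming the v1 ABI with 4 KiB Xen pages):
 * grefs_per_grant_frame is 4096 / sizeof(struct grant_entry_v1) == 512 and
 * RPP is 1024, so gnttab_frames(4, RPP) == (4 * 512 + 1023) / 1024 == 2,
 * i.e. two indirection-list pages cover four grant frames.
 */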

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	gnttab_set_free(gnttab_size, extra_entries);

	if (!gnttab_free_tail_ptr)
		gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);

	nr_grant_frames = new_nr_grant_frames;
	gnttab_size += extra_entries;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long)gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4;	/* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		memunmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	memunmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
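
/*
 * Illustrative page-cache usage (hypothetical caller, not part of this
 * file): backends that map and unmap grants at a high rate keep a
 * per-device cache so they do not hit the unpopulated-page allocator on
 * every request.
 *
 *	struct gnttab_page_cache cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&cache);
 *	if (gnttab_page_cache_get(&cache, &page))
 *		return -ENOMEM;
 *	(map a grant onto page, use it, unmap it, then recycle it:)
 *	gnttab_page_cache_put(&cache, &page, 1);
 *	gnttab_page_cache_shrink(&cache, 16);
 */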

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
			const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
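
/*
 * Illustrative callback for the helper above (hypothetical caller code;
 * "setup_copy" and the gnttab_copy destination layout are assumptions, not
 * part of this file): the helper splits a buffer that may span several
 * XEN_PAGE_SIZE chunks within one (possibly larger) kernel page, invoking
 * the callback once per chunk.
 *
 *	static void setup_copy(unsigned long gfn, unsigned int offset,
 *			       unsigned int len, void *data)
 *	{
 *		struct gnttab_copy *copy = data;
 *
 *		copy->dest.u.gmfn = gfn;
 *		copy->dest.offset = offset;
 *		copy->len = len;
 *	}
 *
 * called as gnttab_foreach_grant_in_range(page, offset, len, setup_copy,
 * &copy).
 */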

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);

			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
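
/*
 * Illustrative synchronous-unmap call (hypothetical caller state; the item
 * fields mirror a preceding gnttab_map_refs() call):
 *
 *	struct gntab_unmap_queue_data unmap_data = {
 *		.unmap_ops  = unmap_ops,
 *		.kunmap_ops = NULL,
 *		.pages      = pages,
 *		.count      = count,
 *	};
 *
 *	rc = gnttab_unmap_refs_sync(&unmap_data);
 *
 * gnttab_unmap_refs_sync() keeps deferring the unmap while any page still
 * holds extra references, so it must not be called from atomic context.
 */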

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as the array is initialized by the following
	 * GNTTABOP_get_status_frames hypercall.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as the array is initialized by the following
	 * GNTTABOP_setup_table hypercall.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version = 1,
	.grefs_per_grant_frame = XEN_PAGE_SIZE /
				 sizeof(struct grant_entry_v1),
	.map_frames = gnttab_map_frames_v1,
	.unmap_frames = gnttab_unmap_frames_v1,
	.update_entry = gnttab_update_entry_v1,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
	.read_frame = gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version = 2,
	.grefs_per_grant_frame = XEN_PAGE_SIZE /
				 sizeof(union grant_entry_v2),
	.map_frames = gnttab_map_frames_v2,
	.unmap_frames = gnttab_unmap_frames_v2,
	.update_entry = gnttab_update_entry_v2,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
	.read_frame = gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}
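
/*
 * Rationale (informational): struct grant_entry_v1 stores the frame number
 * in a 32-bit field, so it can only describe machine addresses below
 * 1 << (32 + PAGE_SHIFT), i.e. 16 TiB with 4 KiB pages. Hosts that may hand
 * out frames above that limit need the v2 layout with its 64-bit frame
 * field, which is what the checks above probe for.
 */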

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames are not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames, max_nr_grefs;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	max_nr_grefs = max_nr_grant_frames *
		       gnttab_interface->grefs_per_grant_frame;
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = max_nr_grefs / RPP;

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
	if (!gnttab_free_bitmap) {
		ret = -ENOMEM;
		goto ini_nomem;
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;

	gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
			gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);

	printk("Grant table initialized\n");
	return 0;

ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	bitmap_free(gnttab_free_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);
1/******************************************************************************
2 * grant_table.c
3 *
4 * Granting foreign access to our memory reservation.
5 *
6 * Copyright (c) 2005-2006, Christopher Clark
7 * Copyright (c) 2004-2005, K A Fraser
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35
36#include <linux/module.h>
37#include <linux/sched.h>
38#include <linux/mm.h>
39#include <linux/slab.h>
40#include <linux/vmalloc.h>
41#include <linux/uaccess.h>
42#include <linux/io.h>
43#include <linux/delay.h>
44#include <linux/hardirq.h>
45
46#include <xen/xen.h>
47#include <xen/interface/xen.h>
48#include <xen/page.h>
49#include <xen/grant_table.h>
50#include <xen/interface/memory.h>
51#include <xen/hvc-console.h>
52#include <xen/swiotlb-xen.h>
53#include <asm/xen/hypercall.h>
54#include <asm/xen/interface.h>
55
56#include <asm/pgtable.h>
57#include <asm/sync_bitops.h>
58
59/* External tools reserve first few grant table entries. */
60#define NR_RESERVED_ENTRIES 8
61#define GNTTAB_LIST_END 0xffffffff
62
63static grant_ref_t **gnttab_list;
64static unsigned int nr_grant_frames;
65static int gnttab_free_count;
66static grant_ref_t gnttab_free_head;
67static DEFINE_SPINLOCK(gnttab_list_lock);
68struct grant_frames xen_auto_xlat_grant_frames;
69
70static union {
71 struct grant_entry_v1 *v1;
72 union grant_entry_v2 *v2;
73 void *addr;
74} gnttab_shared;
75
76/*This is a structure of function pointers for grant table*/
77struct gnttab_ops {
78 /*
79 * Mapping a list of frames for storing grant entries. Frames parameter
80 * is used to store grant table address when grant table being setup,
81 * nr_gframes is the number of frames to map grant table. Returning
82 * GNTST_okay means success and negative value means failure.
83 */
84 int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
85 /*
86 * Release a list of frames which are mapped in map_frames for grant
87 * entry status.
88 */
89 void (*unmap_frames)(void);
90 /*
91 * Introducing a valid entry into the grant table, granting the frame of
92 * this grant entry to domain for accessing or transfering. Ref
93 * parameter is reference of this introduced grant entry, domid is id of
94 * granted domain, frame is the page frame to be granted, and flags is
95 * status of the grant entry to be updated.
96 */
97 void (*update_entry)(grant_ref_t ref, domid_t domid,
98 unsigned long frame, unsigned flags);
99 /*
100 * Stop granting a grant entry to domain for accessing. Ref parameter is
101 * reference of a grant entry whose grant access will be stopped,
102 * readonly is not in use in this function. If the grant entry is
103 * currently mapped for reading or writing, just return failure(==0)
104 * directly and don't tear down the grant access. Otherwise, stop grant
105 * access for this entry and return success(==1).
106 */
107 int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
108 /*
109 * Stop granting a grant entry to domain for transfer. Ref parameter is
110 * reference of a grant entry whose grant transfer will be stopped. If
111 * tranfer has not started, just reclaim the grant entry and return
112 * failure(==0). Otherwise, wait for the transfer to complete and then
113 * return the frame.
114 */
115 unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
116 /*
117 * Query the status of a grant entry. Ref parameter is reference of
118 * queried grant entry, return value is the status of queried entry.
119 * Detailed status(writing/reading) can be gotten from the return value
120 * by bit operations.
121 */
122 int (*query_foreign_access)(grant_ref_t ref);
123 /*
124 * Grant a domain to access a range of bytes within the page referred by
125 * an available grant entry. Ref parameter is reference of a grant entry
126 * which will be sub-page accessed, domid is id of grantee domain, frame
127 * is frame address of subpage grant, flags is grant type and flag
128 * information, page_off is offset of the range of bytes, and length is
129 * length of bytes to be accessed.
130 */
131 void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
132 unsigned long frame, int flags,
133 unsigned page_off, unsigned length);
134 /*
135 * Redirect an available grant entry on domain A to another grant
136 * reference of domain B, then allow domain C to use grant reference
137 * of domain B transitively. Ref parameter is an available grant entry
138 * reference on domain A, domid is id of domain C which accesses grant
139 * entry transitively, flags is grant type and flag information,
140 * trans_domid is id of domain B whose grant entry is finally accessed
141 * transitively, trans_gref is grant entry transitive reference of
142 * domain B.
143 */
144 void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
145 domid_t trans_domid, grant_ref_t trans_gref);
146};
147
148static struct gnttab_ops *gnttab_interface;
149
150/*This reflects status of grant entries, so act as a global value*/
151static grant_status_t *grstatus;
152
153static int grant_table_version;
154static int grefs_per_grant_frame;
155
156static struct gnttab_free_callback *gnttab_free_callback_list;
157
158static int gnttab_expand(unsigned int req_entries);
159
160#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
161#define SPP (PAGE_SIZE / sizeof(grant_status_t))
162
163static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
164{
165 return &gnttab_list[(entry) / RPP][(entry) % RPP];
166}
167/* This can be used as an l-value */
168#define gnttab_entry(entry) (*__gnttab_entry(entry))
169
static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * The following applies to gnttab_update_entry_v1 and
 * gnttab_update_entry_v2. Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);

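/*
 * Illustrative usage sketch (assumptions: a PV frontend sharing one page
 * with a hypothetical backend domain 'otherend_id'; virt_to_mfn() is the
 * appropriate address translation for the guest type):
 *
 *	void *shared = (void *)__get_free_page(GFP_KERNEL);
 *	int ref = gnttab_grant_foreign_access(otherend_id,
 *					      virt_to_mfn(shared), 0);
 *	if (ref < 0)
 *		return ref;	// free list exhausted and could not grow
 *	// ... publish ref to the backend, e.g. via xenstore ...
 *	// When done, revoke the grant and free the page in one call:
 *	gnttab_end_foreign_access(ref, 0, (unsigned long)shared);
 */
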
static void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
					   unsigned long frame, int flags,
					   unsigned page_off, unsigned length)
{
	gnttab_shared.v2[ref].sub_page.frame = frame;
	gnttab_shared.v2[ref].sub_page.page_off = page_off;
	gnttab_shared.v2[ref].sub_page.length = length;
	gnttab_shared.v2[ref].hdr.domid = domid;
	wmb();
	gnttab_shared.v2[ref].hdr.flags =
				GTF_permit_access | GTF_sub_page | flags;
}

int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
					    unsigned long frame, int flags,
					    unsigned page_off,
					    unsigned length)
{
	if (flags & (GTF_accept_transfer | GTF_reading |
		     GTF_writing | GTF_transitive))
		return -EPERM;

	if (gnttab_interface->update_subpage_entry == NULL)
		return -ENOSYS;

	gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
					       page_off, length);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);

int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
					int flags, unsigned page_off,
					unsigned length)
{
	int ref, rc;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
						     page_off, length);
	if (rc < 0) {
		put_free_entry(ref);
		return rc;
	}

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);

bool gnttab_subpage_grants_available(void)
{
	return gnttab_interface->update_subpage_entry != NULL;
}
EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);

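/*
 * Illustrative sketch (assumption: grant table v2 is in use; callers must
 * check availability first, since v1 has no sub-page entries). Grants a
 * hypothetical peer 'otherend_id' read-only access to 64 bytes at offset
 * 128 within frame 'mfn':
 *
 *	if (!gnttab_subpage_grants_available())
 *		return -ENOSYS;
 *	ref = gnttab_grant_foreign_access_subpage(otherend_id, mfn,
 *						  GTF_readonly, 128, 64);
 */
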
static void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
					 int flags, domid_t trans_domid,
					 grant_ref_t trans_gref)
{
	gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
	gnttab_shared.v2[ref].transitive.gref = trans_gref;
	gnttab_shared.v2[ref].hdr.domid = domid;
	wmb();
	gnttab_shared.v2[ref].hdr.flags =
				GTF_permit_access | GTF_transitive | flags;
}

int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
					  int flags, domid_t trans_domid,
					  grant_ref_t trans_gref)
{
	if (flags & (GTF_accept_transfer | GTF_reading |
		     GTF_writing | GTF_sub_page))
		return -EPERM;

	if (gnttab_interface->update_trans_entry == NULL)
		return -ENOSYS;

	gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
					     trans_gref);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);

int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
				      domid_t trans_domid,
				      grant_ref_t trans_gref)
{
	int ref, rc;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
						   trans_domid, trans_gref);
	if (rc < 0) {
		put_free_entry(ref);
		return rc;
	}

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);

bool gnttab_trans_grants_available(void)
{
	return gnttab_interface->update_trans_entry != NULL;
}
EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);

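/*
 * Illustrative sketch (assumption: v2 only, as with sub-page grants).
 * Domain A re-exports to a hypothetical domain C a grant 'gref_b' that
 * domain B already issued to A; when C uses A's new reference, it
 * transparently reaches B's page:
 *
 *	if (!gnttab_trans_grants_available())
 *		return -ENOSYS;
 *	ref = gnttab_grant_foreign_access_trans(domC_id, 0,
 *						domB_id, gref_b);
 */
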
static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
	return grstatus[ref] & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings. On other
		 * architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(unsigned long);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);

static void gnttab_handle_deferred(unsigned long unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

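/*
 * Illustrative sketch: because the remote domain may still have the page
 * mapped, revocation can fail transiently; gnttab_end_foreign_access()
 * hides that by deferring the free. A hypothetical frontend teardown path
 * therefore needs no retry loop of its own:
 *
 *	// Revokes 'ref'; frees the page now if unused, otherwise the
 *	// deferred_timer retries roughly once per second until the
 *	// peer lets go (or warns and leaks on persistent failure).
 *	gnttab_end_foreign_access(ref, 0, (unsigned long)shared);
 */
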
int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer has not even started yet, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer has not even started yet, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

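/*
 * Illustrative sketch (assumptions: a hypothetical peer 'otherend_id'
 * that donates a frame into our pseudo-phys slot 'pfn'). A transfer
 * moves frame ownership rather than sharing it:
 *
 *	ref = gnttab_grant_foreign_transfer(otherend_id, pfn);
 *	// ... the peer issues GNTTABOP_transfer against 'ref' ...
 *	frame = gnttab_end_foreign_transfer(ref);
 *	if (!frame)
 *		;	// transfer never started; the ref was reclaimed
 */
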
void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);

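/*
 * Illustrative sketch of the reservation pattern these helpers support
 * (assumption: a hypothetical driver pre-allocating refs for a 16-entry
 * ring so its hot path never sleeps or fails):
 *
 *	grant_ref_t pool;
 *
 *	if (gnttab_alloc_grant_references(16, &pool))
 *		return -ENOSPC;
 *	...
 *	ref = gnttab_claim_grant_reference(&pool);	// per request
 *	...
 *	gnttab_release_grant_reference(&pool, ref);	// on completion
 *	...
 *	gnttab_free_grant_references(pool);		// teardown
 */
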
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

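/*
 * Illustrative sketch (assumption: a hypothetical netfront-style driver
 * with a gnttab_free_callback embedded in its device struct). When
 * get_free_entries() runs dry, a driver can ask to be poked once enough
 * refs are free again:
 *
 *	static void my_refill(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *		// refs are available again; re-arm the ring
 *	}
 *
 *	gnttab_request_free_callback(&dev->gnttab_cb, my_refill, dev, 16);
 *	// ... and on device teardown:
 *	gnttab_cancel_free_callback(&dev->gnttab_cb);
 */
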
static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;

	BUG_ON(grefs_per_grant_frame == 0);

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_grant_frame;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	new_nr_glist_frames =
		(new_nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_grant_frame * nr_grant_frames;
	     i < grefs_per_grant_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_grant_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	for ( ; i >= nr_glist_frames; i--)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = xen_max;

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
			const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);

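/*
 * Illustrative sketch (assumptions: a hypothetical backend copying one
 * request's payload out of a granted frontend page into a page-resident
 * local buffer 'buf'). The batch helpers retry GNTST_eagain internally,
 * so callers only check the final status:
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref = req_gref,
 *		.source.domid = frontend_id,
 *		.dest.u.gmfn = virt_to_mfn(buf),
 *		.dest.domid = DOMID_SELF,
 *		.len = len,
 *		.flags = GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		;	// copy failed
 */
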
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	/* Retry eagain maps */
	for (i = 0; i < count; i++)
		if (map_ops[i].status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_map_grant_ref *kmap_ops,
		      struct page **pages, unsigned int count)
{
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	return clear_foreign_p2m_mapping(unmap_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);

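/*
 * Illustrative sketch (assumptions: a hypothetical backend mapping one
 * granted page into its address space; 'page' came from the balloon
 * allocator and 'vaddr' is its mapping address; gnttab_set_map_op() and
 * gnttab_set_unmap_op() are the helpers from xen/grant_table.h):
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *
 *	gnttab_set_map_op(&map, vaddr, GNTMAP_host_map, gref, frontend_id);
 *	if (gnttab_map_refs(&map, NULL, &page, 1) || map.status)
 *		return -EFAULT;
 *	// ... use the mapping, then tear it down with the saved handle:
 *	gnttab_set_unmap_op(&unmap, vaddr, GNTMAP_host_map, map.handle);
 *	gnttab_unmap_refs(&unmap, NULL, &page, 1);
 */
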
static unsigned nr_status_frames(unsigned nr_grant_frames)
{
	BUG_ON(grefs_per_grant_frame == 0);
	return (nr_grant_frames * grefs_per_grant_frame + SPP - 1) / SPP;
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/*
	 * No need for kzalloc as the array is initialized by the following
	 * GNTTABOP_get_status_frames hypercall.
	 */
	sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/*
	 * No need for kzalloc as the array is initialized by the following
	 * GNTTABOP_setup_table hypercall.
	 */
	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static struct gnttab_ops gnttab_v1_ops = {
	.map_frames = gnttab_map_frames_v1,
	.unmap_frames = gnttab_unmap_frames_v1,
	.update_entry = gnttab_update_entry_v1,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access = gnttab_query_foreign_access_v1,
};

static struct gnttab_ops gnttab_v2_ops = {
	.map_frames = gnttab_map_frames_v2,
	.unmap_frames = gnttab_unmap_frames_v2,
	.update_entry = gnttab_update_entry_v2,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref = gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access = gnttab_query_foreign_access_v2,
	.update_subpage_entry = gnttab_update_subpage_entry_v2,
	.update_trans_entry = gnttab_update_trans_entry_v2,
};

static void gnttab_request_version(void)
{
	int rc;
	struct gnttab_set_version gsv;

	gsv.version = 1;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2) {
		grant_table_version = 2;
		grefs_per_grant_frame = PAGE_SIZE / sizeof(union grant_entry_v2);
		gnttab_interface = &gnttab_v2_ops;
	} else if (grant_table_version == 2) {
		/*
		 * If we've already used version 2 features,
		 * but then suddenly discover that they're not
		 * available (e.g. migrating to an older
		 * version of Xen), almost unbounded badness
		 * can happen.
		 */
		panic("we need grant tables version 2, but only version 1 is available");
	} else {
		grant_table_version = 1;
		grefs_per_grant_frame = PAGE_SIZE / sizeof(struct grant_entry_v1);
		gnttab_interface = &gnttab_v1_ops;
	}
	pr_info("Grant tables using version %d layout\n", grant_table_version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
				(unsigned long)xen_auto_xlat_grant_frames.vaddr);
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	BUG_ON(grefs_per_grant_frame == 0);
	cur = nr_grant_frames;
	extra = ((req_entries + (grefs_per_grant_frame - 1)) /
		 grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames())
		return -ENOSPC;

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	nr_grant_frames = 1;

	/*
	 * Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	BUG_ON(grefs_per_grant_frame == 0);
	max_nr_glist_frames = (gnttab_max_grant_frames() *
			       grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
			      GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = (nr_grant_frames * grefs_per_grant_frame + RPP - 1) / RPP;
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames * grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head = NR_RESERVED_ENTRIES;

	pr_info("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain())
		return 0;

	if (!xen_pv_domain())
		return -ENODEV;

	return gnttab_init();
}
/*
 * Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames.
 */
core_initcall_sync(__gnttab_init);