Loading...
1/******************************************************************************
2 * grant_table.c
3 *
4 * Granting foreign access to our memory reservation.
5 *
6 * Copyright (c) 2005-2006, Christopher Clark
7 * Copyright (c) 2004-2005, K A Fraser
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35
36#include <linux/memblock.h>
37#include <linux/sched.h>
38#include <linux/mm.h>
39#include <linux/slab.h>
40#include <linux/vmalloc.h>
41#include <linux/uaccess.h>
42#include <linux/io.h>
43#include <linux/delay.h>
44#include <linux/hardirq.h>
45#include <linux/workqueue.h>
46#include <linux/ratelimit.h>
47#include <linux/moduleparam.h>
48#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
49#include <linux/dma-mapping.h>
50#endif
51
52#include <xen/xen.h>
53#include <xen/interface/xen.h>
54#include <xen/page.h>
55#include <xen/grant_table.h>
56#include <xen/interface/memory.h>
57#include <xen/hvc-console.h>
58#include <xen/swiotlb-xen.h>
59#include <xen/balloon.h>
60#ifdef CONFIG_X86
61#include <asm/xen/cpuid.h>
62#endif
63#include <xen/mem-reservation.h>
64#include <asm/xen/hypercall.h>
65#include <asm/xen/interface.h>
66
67#include <asm/sync_bitops.h>
68
/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
/* Sentinel terminating the free list threaded through gnttab_list. */
#define GNTTAB_LIST_END 0xffffffff

/* Two-level table of grant refs; each leaf page holds RPP link entries. */
static grant_ref_t **gnttab_list;
/* Number of grant-table frames currently mapped from the hypervisor. */
static unsigned int nr_grant_frames;
/* Number of entries currently on the free list. */
static int gnttab_free_count;
/* Head of the free list; entries are chained via gnttab_entry(). */
static grant_ref_t gnttab_free_head;
/* Protects the free list, free count and the free-callback list. */
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
/* Grant table version requested via module parameter (0 presumably means
 * auto-select -- selection logic is outside this chunk, confirm there). */
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);
81
/*
 * The shared grant table, mapped from the hypervisor. The same mapping is
 * viewed through the v1 or v2 entry layout depending on the active version.
 */
static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;
87
/* This is a structure of function pointers for grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Mapping a list of frames for storing grant entries. Frames parameter
	 * is used to store grant table address when grant table being setup,
	 * nr_gframes is the number of frames to map grant table. Returning
	 * GNTST_okay means success and negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release a list of frames which are mapped in map_frames for grant
	 * entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introducing a valid entry into the grant table, granting the frame of
	 * this grant entry to domain for accessing or transferring. Ref
	 * parameter is reference of this introduced grant entry, domid is id of
	 * granted domain, frame is the page frame to be granted, and flags is
	 * status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to domain for accessing. Ref parameter is
	 * reference of a grant entry whose grant access will be stopped,
	 * readonly is not in use in this function. If the grant entry is
	 * currently mapped for reading or writing, just return failure(==0)
	 * directly and don't tear down the grant access. Otherwise, stop grant
	 * access for this entry and return success(==1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to domain for transfer. Ref parameter is
	 * reference of a grant entry whose grant transfer will be stopped. If
	 * transfer has not started, just reclaim the grant entry and return
	 * failure(==0). Otherwise, wait for the transfer to complete and then
	 * return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. Ref parameter is reference of
	 * queried grant entry, return value is the status of queried entry.
	 * Detailed status(writing/reading) can be gotten from the return value
	 * by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};
144
/* Completion/result pair used by gnttab_unmap_refs_sync() to wait. */
struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};
149
/* The active set of version-specific operations (v1 or v2). */
static const struct gnttab_ops *gnttab_interface;

/* This reflects status of grant entries, so act as a global value. */
static grant_status_t *grstatus;

/* Callbacks waiting for free entries; protected by gnttab_list_lock. */
static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

/* Grant-ref link entries per page, and status entries per page. */
#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))
161
/* Locate the free-list link slot for @entry in the two-level table. */
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
168
/*
 * Pop @count refs off the free list, expanding the grant table if it
 * cannot satisfy the request. Returns the first ref of the detached
 * chain (the rest are linked via gnttab_entry()), or a negative errno
 * if the table could not be expanded.
 */
static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Grow the table first if the free list is too short. */
	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	/* Detach a chain of @count entries from the head of the list. */
	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	/* Terminate the detached chain. */
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}
194
/*
 * Invoke queued free-space callbacks whose demand can now be met.
 * Callbacks that still cannot be satisfied are re-queued. Caller must
 * hold gnttab_list_lock.
 */
static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	/* Take the whole list; unsatisfied entries are pushed back below. */
	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			/* Clear ->next so the callback may re-register itself. */
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}
214
215static inline void check_free_callbacks(void)
216{
217 if (unlikely(gnttab_free_callback_list))
218 do_free_callbacks();
219}
220
/* Return a single grant ref to the head of the free list. */
static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	/* A ref was freed: waiting callbacks may now be satisfiable. */
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
231
/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	/* Publish domid/frame before flags make the entry valid. */
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}
251
/* v2 counterpart of gnttab_update_entry_v1; see ordering comment above. */
static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}
260
261/*
262 * Public grant-issuing interface functions
263 */
264void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
265 unsigned long frame, int readonly)
266{
267 gnttab_interface->update_entry(ref, domid, frame,
268 GTF_permit_access | (readonly ? GTF_readonly : 0));
269}
270EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
271
272int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
273 int readonly)
274{
275 int ref;
276
277 ref = get_free_entries(1);
278 if (unlikely(ref < 0))
279 return -ENOSPC;
280
281 gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
282
283 return ref;
284}
285EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
286
287static int gnttab_query_foreign_access_v1(grant_ref_t ref)
288{
289 return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
290}
291
292static int gnttab_query_foreign_access_v2(grant_ref_t ref)
293{
294 return grstatus[ref] & (GTF_reading|GTF_writing);
295}
296
297int gnttab_query_foreign_access(grant_ref_t ref)
298{
299 return gnttab_interface->query_foreign_access(ref);
300}
301EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
302
/*
 * v1: atomically clear the entry's flags, but refuse (return 0) if the
 * remote domain still has the grant mapped for reading or writing.
 */
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		/* Still mapped by the peer: cannot revoke yet. */
		if (flags & (GTF_reading|GTF_writing))
			return 0;
		/* Retry if the hypervisor changed flags under us. */
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}
318
/*
 * v2: clear the entry's flags first, then check the status array to see
 * whether the peer still has the grant mapped (in which case fail with 0).
 */
static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 *  On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 *  On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}
341
/* Dispatch to the version-specific end-of-access handler. */
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}
346
347int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
348{
349 if (_gnttab_end_foreign_access_ref(ref, readonly))
350 return 1;
351 pr_warn("WARNING: g.e. %#x still in use!\n", ref);
352 return 0;
353}
354EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
355
/*
 * A grant that could not be revoked immediately (still mapped by the
 * peer); retried periodically from a timer until it can be reclaimed.
 */
struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	/* Countdown of retries before the next "still pending" message. */
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
366
/*
 * Timer handler: retry up to 10 deferred revocations per tick. Entries
 * that still cannot be revoked are rotated to the list tail; the timer
 * is re-armed (1s) while any remain. The list lock is dropped around
 * the revocation attempt since that may take time and free memory.
 */
static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		/* Wrapped around to the first still-pending entry: stop. */
		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				put_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		/* Still pending: rotate to the tail for a later retry. */
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
411
/*
 * Queue a still-in-use grant for deferred revocation. If the tracking
 * entry cannot be allocated the grant (and its page) is leaked, with a
 * warning.
 */
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	/* GFP_ATOMIC: may be called from contexts that cannot sleep. */
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		/* Roughly one "still pending" message per minute of retries. */
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	/*
	 * NOTE(review): the KERN_* prefix is passed through "%s" rather than
	 * at the start of the format string -- presumably intentional here,
	 * but verify printk still honors the level in this form.
	 */
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}
437
/*
 * Revoke access granted via @ref and release @page (a kernel virtual
 * address, or 0 for none). If the peer still has the grant mapped, the
 * revocation -- and the page release -- are deferred to a timer.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
450
451int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
452{
453 int ref;
454
455 ref = get_free_entries(1);
456 if (unlikely(ref < 0))
457 return -ENOSPC;
458 gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
459
460 return ref;
461}
462EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
463
/* Mark entry @ref as accepting a page transfer from @domid into @pfn. */
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
470
/*
 * v1: complete (or reclaim) a transfer grant. Returns the transferred
 * frame, or 0 if the transfer had not started and the entry was reclaimed.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}
501
/* v2 counterpart of gnttab_end_foreign_transfer_ref_v1; same protocol. */
static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16 flags;
	u16 *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}
532
533unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
534{
535 return gnttab_interface->end_foreign_transfer_ref(ref);
536}
537EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
538
539unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
540{
541 unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
542 put_free_entry(ref);
543 return frame;
544}
545EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
546
/* Return a single grant reference to the global free list. */
void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
552
/*
 * Return a whole private chain of references (as produced by
 * gnttab_alloc_grant_references()) to the global free list in one go.
 */
void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;
	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	/* Walk to the tail of the chain, counting entries as we go. */
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	/* Splice the chain onto the front of the free list. */
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
573
574int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
575{
576 int h = get_free_entries(count);
577
578 if (h < 0)
579 return -ENOSPC;
580
581 *head = h;
582
583 return 0;
584}
585EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
586
587int gnttab_empty_grant_references(const grant_ref_t *private_head)
588{
589 return (*private_head == GNTTAB_LIST_END);
590}
591EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
592
/*
 * Pop one reference off a private chain. Returns the ref, or -ENOSPC
 * when the chain is empty. No locking: the chain is caller-private.
 */
int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
602
/* Push @release back onto the head of a caller-private chain. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
610
/*
 * Register @callback to be invoked once at least @count grant refs are
 * free. Registering an already-queued callback is a no-op (its fn/arg/
 * count are NOT updated in that case).
 */
void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	/* The demand may already be satisfiable; fire immediately if so. */
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
637
/* Unlink @callback from the pending list, if present. */
void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	/* Walk via a pointer-to-link so removal needs no prev pointer. */
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
653
654static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
655{
656 return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
657 align;
658}
659
/*
 * Extend the bookkeeping for @more_frames additional grant frames:
 * allocate any new gnttab_list leaf pages and thread the new entries
 * onto the free list. Caller holds gnttab_list_lock (hence GFP_ATOMIC).
 * Returns 0, or -ENOMEM after freeing the pages allocated so far.
 */
static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	/* Allocate the extra leaf pages of the two-level ref table. */
	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}


	/* Chain the new entries together... */
	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	/* ...and splice the chain onto the head of the free list. */
	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	/* Unwind only the pages allocated in this call. */
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}
699
700static unsigned int __max_nr_grant_frames(void)
701{
702 struct gnttab_query_size query;
703 int rc;
704
705 query.dom = DOMID_SELF;
706
707 rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
708 if ((rc < 0) || (query.status != GNTST_okay))
709 return 4; /* Legacy max supported number of frames */
710
711 return query.max_nr_frames;
712}
713
/*
 * Maximum number of grant frames this kernel will use: the smaller of
 * the hypervisor's current limit and the limit latched at first call
 * (so the effective value can only shrink after boot, never grow).
 */
unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/*
	 * First time, initialize it properly. Reuse the value we just
	 * queried instead of issuing a second, identical hypercall.
	 */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = xen_max;

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
727EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
728
/*
 * For auto-translated guests: remap the grant-table area at physical
 * address @addr and record its vaddr/pfn array in
 * xen_auto_xlat_grant_frames. Returns 0, -EINVAL if already set up, or
 * -ENOMEM on mapping/allocation failure.
 */
int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	/* Only one setup is allowed; callers must free before re-doing it. */
	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	/* Frames are physically contiguous starting at @addr. */
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
760
761void gnttab_free_auto_xlat_frames(void)
762{
763 if (!xen_auto_xlat_grant_frames.count)
764 return;
765 kfree(xen_auto_xlat_grant_frames.pfn);
766 xen_unmap(xen_auto_xlat_grant_frames.vaddr);
767
768 xen_auto_xlat_grant_frames.pfn = NULL;
769 xen_auto_xlat_grant_frames.count = 0;
770 xen_auto_xlat_grant_frames.vaddr = NULL;
771}
772EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
773
/*
 * Attach per-page private data used by the foreign-mapping tracking.
 * On 32-bit, a separately allocated xen_page_foreign is hung off
 * page_private(). Returns -ENOMEM on allocation failure; pages already
 * processed keep their private flag (callers clean up via
 * gnttab_pages_clear_private()/gnttab_free_pages()).
 */
int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
794
/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret = xen_alloc_unpopulated_pages(nr_pages, pages);

	if (ret < 0)
		return ret;

	/* On failure release everything, including the pages just got. */
	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
815
/* Release the per-page data attached by gnttab_pages_set_private(). */
void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pages[i];

		if (!PagePrivate(page))
			continue;
#if BITS_PER_LONG < 64
		kfree((void *)page_private(page));
#endif
		ClearPagePrivate(page);
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
830
/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);
842
843#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 *
 * Allocates a DMA buffer, then swaps its backing frames out of this
 * domain's reservation so they can be granted/ballooned. On any failure
 * after allocation, everything is unwound via gnttab_dma_free_pages().
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	/* Record page/frame for each backing page and scrub its contents. */
	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	/* Give the frames back to the hypervisor. */
	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
898
/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 *
 * Reverse of gnttab_dma_alloc_pages(): reclaim the frames into this
 * domain's reservation, restore the VA mappings and free the DMA buffer.
 * Returns 0, or -EFAULT if the reservation could not be fully restored
 * (the buffer is freed regardless).
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	/* Take the frames back from the hypervisor. */
	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
934#endif
935
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
/*
 * Re-issue a single grant op while it reports GNTST_eagain (target page
 * paged out), sleeping with linearly increasing delay between attempts.
 * Gives up after ~MAX_DELAY attempts, replacing the status with
 * GNTST_bad_page so callers see a definitive error.
 */
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}
955
956void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
957{
958 struct gnttab_map_grant_ref *op;
959
960 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
961 BUG();
962 for (op = batch; op < batch + count; op++)
963 if (op->status == GNTST_eagain)
964 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
965 &op->status, __func__);
966}
967EXPORT_SYMBOL_GPL(gnttab_batch_map);
968
969void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
970{
971 struct gnttab_copy *op;
972
973 if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
974 BUG();
975 for (op = batch; op < batch + count; op++)
976 if (op->status == GNTST_eagain)
977 gnttab_retry_eagain_gop(GNTTABOP_copy, op,
978 &op->status, __func__);
979}
980EXPORT_SYMBOL_GPL(gnttab_batch_copy);
981
982void gnttab_foreach_grant_in_range(struct page *page,
983 unsigned int offset,
984 unsigned int len,
985 xen_grant_fn_t fn,
986 void *data)
987{
988 unsigned int goffset;
989 unsigned int glen;
990 unsigned long xen_pfn;
991
992 len = min_t(unsigned int, PAGE_SIZE - offset, len);
993 goffset = xen_offset_in_page(offset);
994
995 xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
996
997 while (len) {
998 glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
999 fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1000
1001 goffset = 0;
1002 xen_pfn++;
1003 len -= glen;
1004 }
1005}
1006EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
1007
1008void gnttab_foreach_grant(struct page **pages,
1009 unsigned int nr_grefs,
1010 xen_grant_fn_t fn,
1011 void *data)
1012{
1013 unsigned int goffset = 0;
1014 unsigned long xen_pfn = 0;
1015 unsigned int i;
1016
1017 for (i = 0; i < nr_grefs; i++) {
1018 if ((i % XEN_PFN_PER_PAGE) == 0) {
1019 xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1020 goffset = 0;
1021 }
1022
1023 fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1024
1025 goffset += XEN_PAGE_SIZE;
1026 xen_pfn++;
1027 }
1028}
1029
/*
 * Map a batch of foreign grants into @pages, marking each successfully
 * mapped page as foreign and recording its owner/ref. GNTST_eagain
 * entries are retried synchronously. Finishes by updating the p2m for
 * all entries.
 */
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
1074
/*
 * Unmap a batch of foreign grants previously mapped with
 * gnttab_map_refs(), clearing the foreign marking and the p2m entries.
 */
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
1092
/* Base retry delay (ms); multiplied by the item's age for backoff. */
#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

/* Delayed-work handler: age the item and retry its async unmap. */
static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	/* Saturate the age counter instead of wrapping. */
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}
1107
/*
 * Try to unmap the batch now; if any page still has extra references
 * (page_count > 1, i.e. someone besides us holds it), defer the whole
 * batch via delayed work.  The retry delay grows linearly with the
 * item's age so repeated failures back off.
 */
static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			/* Page still in use elsewhere: retry later. */
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	/* All pages are exclusively ours: unmap and report completion. */
	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}
1126
/*
 * Kick off an asynchronous unmap of @item.  item->done() is invoked
 * (possibly much later, from workqueue context) with the final status.
 */
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;	/* fresh request: no back-off yet */

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1135
1136static void unmap_refs_callback(int result,
1137 struct gntab_unmap_queue_data *data)
1138{
1139 struct unmap_refs_callback_data *d = data->data;
1140
1141 d->result = result;
1142 complete(&d->completion);
1143}
1144
/*
 * Synchronous wrapper around gnttab_unmap_refs_async(): blocks until the
 * (possibly retried) unmap completes and returns its status.  May sleep.
 */
int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
1158
/* Number of status frames (v2 layout) needed to cover @nr_grant_frames
 * grant frames; SPP is status entries per page. */
static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}
1163
/*
 * v1: map the shared grant frames into our address space.  Mapping
 * failure here is unrecoverable, hence the BUG_ON.
 */
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}
1175
/* v1: tear down the shared grant frame mapping (e.g. on suspend). */
static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
1180
/*
 * v2: in addition to the shared frames, fetch and map the grant status
 * frames via GNTTABOP_get_status_frames.  Returns -ENOSYS if the
 * hypervisor lacks the op (caller then falls back to v1), -ENOMEM on
 * allocation failure.  Any other hypercall/mapping failure is fatal.
 */
static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		/* Hypervisor has no v2 support; caller falls back to v1. */
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}
1223
/* v2: unmap both the shared grant frames and the status frames. */
static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}
1229
/*
 * Map grant-table frames [start_idx, end_idx] (inclusive) from the
 * hypervisor into the guest.  Auto-translated (HVM/PVH) guests populate
 * the pre-allocated GFNs via XENMEM_add_to_physmap; PV guests use
 * GNTTABOP_setup_table plus the per-version map_frames hook.
 */
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	/* Version-specific mapping (v1: shared only; v2: + status frames). */
	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}
1287
/* Operations for the v1 grant-table layout (struct grant_entry_v1). */
static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};
1299
/* Operations for the v2 grant-table layout (union grant_entry_v2 plus
 * separate status frames). */
static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access		= gnttab_query_foreign_access_v2,
};
1311
/*
 * Decide whether the v2 layout is required: v1 entries hold a 32-bit
 * frame number, so v2 is needed once machine/guest frame numbers can
 * exceed 32 bits.  On x86 PV the machine address width comes from the
 * Xen CPUID leaves; otherwise we check the maximum possible guest pfn.
 */
static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}
1328
1329static void gnttab_request_version(void)
1330{
1331 long rc;
1332 struct gnttab_set_version gsv;
1333
1334 if (gnttab_need_v2())
1335 gsv.version = 2;
1336 else
1337 gsv.version = 1;
1338
1339 /* Boot parameter overrides automatic selection. */
1340 if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1341 gsv.version = xen_gnttab_version;
1342
1343 rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1344 if (rc == 0 && gsv.version == 2)
1345 gnttab_interface = &gnttab_v2_ops;
1346 else
1347 gnttab_interface = &gnttab_v1_ops;
1348 pr_info("Grant tables using version %d layout\n",
1349 gnttab_interface->version);
1350}
1351
/*
 * (Re)establish the grant-table mapping for the current nr_grant_frames.
 * For auto-translated guests the shared area must already have been set
 * up in xen_auto_xlat_grant_frames by platform code.
 */
static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames is not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}
1369
/* Resume after suspend/migration: re-negotiate the table version with
 * the (possibly different) hypervisor, then remap the table. */
int gnttab_resume(void)
{
	int rc;

	gnttab_request_version();
	rc = gnttab_setup();

	return rc;
}
1375
/* Suspend hook: PV guests unmap the table frames; auto-translated
 * guests keep their physmap entries. */
int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}
1382
1383static int gnttab_expand(unsigned int req_entries)
1384{
1385 int rc;
1386 unsigned int cur, extra;
1387
1388 cur = nr_grant_frames;
1389 extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1390 gnttab_interface->grefs_per_grant_frame);
1391 if (cur + extra > gnttab_max_grant_frames()) {
1392 pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1393 " cur=%u extra=%u limit=%u"
1394 " gnttab_free_count=%u req_entries=%u\n",
1395 cur, extra, gnttab_max_grant_frames(),
1396 gnttab_free_count, req_entries);
1397 return -ENOSPC;
1398 }
1399
1400 rc = gnttab_map(cur, cur + extra - 1);
1401 if (rc == 0)
1402 rc = grow_gnttab_list(extra);
1403
1404 return rc;
1405}
1406
1407int gnttab_init(void)
1408{
1409 int i;
1410 unsigned long max_nr_grant_frames;
1411 unsigned int max_nr_glist_frames, nr_glist_frames;
1412 unsigned int nr_init_grefs;
1413 int ret;
1414
1415 gnttab_request_version();
1416 max_nr_grant_frames = gnttab_max_grant_frames();
1417 nr_grant_frames = 1;
1418
1419 /* Determine the maximum number of frames required for the
1420 * grant reference free list on the current hypervisor.
1421 */
1422 max_nr_glist_frames = (max_nr_grant_frames *
1423 gnttab_interface->grefs_per_grant_frame / RPP);
1424
1425 gnttab_list = kmalloc_array(max_nr_glist_frames,
1426 sizeof(grant_ref_t *),
1427 GFP_KERNEL);
1428 if (gnttab_list == NULL)
1429 return -ENOMEM;
1430
1431 nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1432 for (i = 0; i < nr_glist_frames; i++) {
1433 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1434 if (gnttab_list[i] == NULL) {
1435 ret = -ENOMEM;
1436 goto ini_nomem;
1437 }
1438 }
1439
1440 ret = arch_gnttab_init(max_nr_grant_frames,
1441 nr_status_frames(max_nr_grant_frames));
1442 if (ret < 0)
1443 goto ini_nomem;
1444
1445 if (gnttab_setup() < 0) {
1446 ret = -ENODEV;
1447 goto ini_nomem;
1448 }
1449
1450 nr_init_grefs = nr_grant_frames *
1451 gnttab_interface->grefs_per_grant_frame;
1452
1453 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1454 gnttab_entry(i) = i + 1;
1455
1456 gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1457 gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1458 gnttab_free_head = NR_RESERVED_ENTRIES;
1459
1460 printk("Grant table initialized\n");
1461 return 0;
1462
1463 ini_nomem:
1464 for (i--; i >= 0; i--)
1465 free_page((unsigned long)gnttab_list[i]);
1466 kfree(gnttab_list);
1467 return ret;
1468}
1469EXPORT_SYMBOL_GPL(gnttab_init);
1470
/* Initcall entry point: run gnttab_init() for PV/PVH domains only;
 * plain HVM guests initialise the grant table later from platform-pci. */
static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);
1/******************************************************************************
2 * grant_table.c
3 *
4 * Granting foreign access to our memory reservation.
5 *
6 * Copyright (c) 2005-2006, Christopher Clark
7 * Copyright (c) 2004-2005, K A Fraser
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/sched.h>
36#include <linux/mm.h>
37#include <linux/slab.h>
38#include <linux/vmalloc.h>
39#include <linux/uaccess.h>
40#include <linux/io.h>
41#include <linux/hardirq.h>
42
43#include <xen/xen.h>
44#include <xen/interface/xen.h>
45#include <xen/page.h>
46#include <xen/grant_table.h>
47#include <xen/interface/memory.h>
48#include <xen/hvc-console.h>
49#include <asm/xen/hypercall.h>
50
51#include <asm/pgtable.h>
52#include <asm/sync_bitops.h>
53
54/* External tools reserve first few grant table entries. */
55#define NR_RESERVED_ENTRIES 8
56#define GNTTAB_LIST_END 0xffffffff
57#define GREFS_PER_GRANT_FRAME \
58(grant_table_version == 1 ? \
59(PAGE_SIZE / sizeof(struct grant_entry_v1)) : \
60(PAGE_SIZE / sizeof(union grant_entry_v2)))
61
62static grant_ref_t **gnttab_list;
63static unsigned int nr_grant_frames;
64static unsigned int boot_max_nr_grant_frames;
65static int gnttab_free_count;
66static grant_ref_t gnttab_free_head;
67static DEFINE_SPINLOCK(gnttab_list_lock);
68unsigned long xen_hvm_resume_frames;
69EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
70
71static union {
72 struct grant_entry_v1 *v1;
73 union grant_entry_v2 *v2;
74 void *addr;
75} gnttab_shared;
76
77/*This is a structure of function pointers for grant table*/
78struct gnttab_ops {
79 /*
80 * Mapping a list of frames for storing grant entries. Frames parameter
81 * is used to store grant table address when grant table being setup,
82 * nr_gframes is the number of frames to map grant table. Returning
83 * GNTST_okay means success and negative value means failure.
84 */
85 int (*map_frames)(unsigned long *frames, unsigned int nr_gframes);
86 /*
87 * Release a list of frames which are mapped in map_frames for grant
88 * entry status.
89 */
90 void (*unmap_frames)(void);
91 /*
92 * Introducing a valid entry into the grant table, granting the frame of
93 * this grant entry to domain for accessing or transfering. Ref
94 * parameter is reference of this introduced grant entry, domid is id of
95 * granted domain, frame is the page frame to be granted, and flags is
96 * status of the grant entry to be updated.
97 */
98 void (*update_entry)(grant_ref_t ref, domid_t domid,
99 unsigned long frame, unsigned flags);
100 /*
101 * Stop granting a grant entry to domain for accessing. Ref parameter is
102 * reference of a grant entry whose grant access will be stopped,
103 * readonly is not in use in this function. If the grant entry is
104 * currently mapped for reading or writing, just return failure(==0)
105 * directly and don't tear down the grant access. Otherwise, stop grant
106 * access for this entry and return success(==1).
107 */
108 int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
109 /*
110 * Stop granting a grant entry to domain for transfer. Ref parameter is
111 * reference of a grant entry whose grant transfer will be stopped. If
112 * tranfer has not started, just reclaim the grant entry and return
113 * failure(==0). Otherwise, wait for the transfer to complete and then
114 * return the frame.
115 */
116 unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
117 /*
118 * Query the status of a grant entry. Ref parameter is reference of
119 * queried grant entry, return value is the status of queried entry.
120 * Detailed status(writing/reading) can be gotten from the return value
121 * by bit operations.
122 */
123 int (*query_foreign_access)(grant_ref_t ref);
124 /*
125 * Grant a domain to access a range of bytes within the page referred by
126 * an available grant entry. Ref parameter is reference of a grant entry
127 * which will be sub-page accessed, domid is id of grantee domain, frame
128 * is frame address of subpage grant, flags is grant type and flag
129 * information, page_off is offset of the range of bytes, and length is
130 * length of bytes to be accessed.
131 */
132 void (*update_subpage_entry)(grant_ref_t ref, domid_t domid,
133 unsigned long frame, int flags,
134 unsigned page_off, unsigned length);
135 /*
136 * Redirect an available grant entry on domain A to another grant
137 * reference of domain B, then allow domain C to use grant reference
138 * of domain B transitively. Ref parameter is an available grant entry
139 * reference on domain A, domid is id of domain C which accesses grant
140 * entry transitively, flags is grant type and flag information,
141 * trans_domid is id of domain B whose grant entry is finally accessed
142 * transitively, trans_gref is grant entry transitive reference of
143 * domain B.
144 */
145 void (*update_trans_entry)(grant_ref_t ref, domid_t domid, int flags,
146 domid_t trans_domid, grant_ref_t trans_gref);
147};
148
149static struct gnttab_ops *gnttab_interface;
150
151/*This reflects status of grant entries, so act as a global value*/
152static grant_status_t *grstatus;
153
154static int grant_table_version;
155
156static struct gnttab_free_callback *gnttab_free_callback_list;
157
158static int gnttab_expand(unsigned int req_entries);
159
160#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
161#define SPP (PAGE_SIZE / sizeof(grant_status_t))
162
/* Locate the free-list slot for @entry: gnttab_list is an array of
 * pages, each holding RPP grant_ref_t link values. */
static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))
169
/*
 * Pop @count references off the global free list, expanding the grant
 * table first if it is short.  Returns the head reference of the popped
 * chain (the chain stays linked via gnttab_entry()) or a negative errno.
 */
static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	/* Walk count-1 links, then detach the chain and terminate it. */
	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}
195
196static void do_free_callbacks(void)
197{
198 struct gnttab_free_callback *callback, *next;
199
200 callback = gnttab_free_callback_list;
201 gnttab_free_callback_list = NULL;
202
203 while (callback != NULL) {
204 next = callback->next;
205 if (gnttab_free_count >= callback->count) {
206 callback->next = NULL;
207 callback->fn(callback->arg);
208 } else {
209 callback->next = gnttab_free_callback_list;
210 gnttab_free_callback_list = callback;
211 }
212 callback = next;
213 }
214}
215
/* Fire pending callbacks, if any.  Caller holds gnttab_list_lock. */
static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}
221
/* Return @ref to the head of the global free list and notify any
 * waiters that free entries became available. */
static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
232
233/*
234 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
235 * Introducing a valid entry into the grant table:
236 * 1. Write ent->domid.
237 * 2. Write ent->frame:
238 * GTF_permit_access: Frame to which access is permitted.
239 * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
240 * frame, or zero if none.
241 * 3. Write memory barrier (WMB).
242 * 4. Write ent->flags, inc. valid type.
243 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	/* flags must become visible last: writing them marks the entry
	 * valid to the remote domain (see ordering comment above). */
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}
252
static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	/* Publish flags last; until then the entry is invalid. */
	wmb();
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}
261
262/*
263 * Public grant-issuing interface functions
264 */
/* Grant @domid (read-only if @readonly) access to @frame using the
 * already-allocated reference @ref. */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
272
273int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
274 int readonly)
275{
276 int ref;
277
278 ref = get_free_entries(1);
279 if (unlikely(ref < 0))
280 return -ENOSPC;
281
282 gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
283
284 return ref;
285}
286EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
287
/* v2-only: fill a sub-page grant entry covering @length bytes at
 * @page_off within @frame; flags are published last (wmb). */
void gnttab_update_subpage_entry_v2(grant_ref_t ref, domid_t domid,
				    unsigned long frame, int flags,
				    unsigned page_off,
				    unsigned length)
{
	gnttab_shared.v2[ref].sub_page.frame = frame;
	gnttab_shared.v2[ref].sub_page.page_off = page_off;
	gnttab_shared.v2[ref].sub_page.length = length;
	gnttab_shared.v2[ref].hdr.domid = domid;
	wmb();
	gnttab_shared.v2[ref].hdr.flags =
				GTF_permit_access | GTF_sub_page | flags;
}
301
/*
 * Set up a sub-page grant on an existing reference.  Rejects flag bits
 * that conflict with a sub-page access grant; returns -ENOSYS when the
 * active table version has no sub-page support (v1).
 */
int gnttab_grant_foreign_access_subpage_ref(grant_ref_t ref, domid_t domid,
					    unsigned long frame, int flags,
					    unsigned page_off,
					    unsigned length)
{
	if (flags & (GTF_accept_transfer | GTF_reading |
		     GTF_writing | GTF_transitive))
		return -EPERM;

	if (gnttab_interface->update_subpage_entry == NULL)
		return -ENOSYS;

	gnttab_interface->update_subpage_entry(ref, domid, frame, flags,
					       page_off, length);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage_ref);
320
321int gnttab_grant_foreign_access_subpage(domid_t domid, unsigned long frame,
322 int flags, unsigned page_off,
323 unsigned length)
324{
325 int ref, rc;
326
327 ref = get_free_entries(1);
328 if (unlikely(ref < 0))
329 return -ENOSPC;
330
331 rc = gnttab_grant_foreign_access_subpage_ref(ref, domid, frame, flags,
332 page_off, length);
333 if (rc < 0) {
334 put_free_entry(ref);
335 return rc;
336 }
337
338 return ref;
339}
340EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_subpage);
341
/* True when the active table version supports sub-page grants (v2). */
bool gnttab_subpage_grants_available(void)
{
	return gnttab_interface->update_subpage_entry != NULL;
}
EXPORT_SYMBOL_GPL(gnttab_subpage_grants_available);
347
/* v2-only: fill a transitive grant entry redirecting to @trans_gref in
 * @trans_domid; flags are published last (wmb). */
void gnttab_update_trans_entry_v2(grant_ref_t ref, domid_t domid,
				  int flags, domid_t trans_domid,
				  grant_ref_t trans_gref)
{
	gnttab_shared.v2[ref].transitive.trans_domid = trans_domid;
	gnttab_shared.v2[ref].transitive.gref = trans_gref;
	gnttab_shared.v2[ref].hdr.domid = domid;
	wmb();
	gnttab_shared.v2[ref].hdr.flags =
				GTF_permit_access | GTF_transitive | flags;
}
359
/*
 * Set up a transitive grant on an existing reference.  Rejects flag
 * bits incompatible with a transitive grant; returns -ENOSYS when the
 * active table version lacks transitive support (v1).
 */
int gnttab_grant_foreign_access_trans_ref(grant_ref_t ref, domid_t domid,
					  int flags, domid_t trans_domid,
					  grant_ref_t trans_gref)
{
	if (flags & (GTF_accept_transfer | GTF_reading |
		     GTF_writing | GTF_sub_page))
		return -EPERM;

	if (gnttab_interface->update_trans_entry == NULL)
		return -ENOSYS;

	gnttab_interface->update_trans_entry(ref, domid, flags, trans_domid,
					     trans_gref);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans_ref);
377
378int gnttab_grant_foreign_access_trans(domid_t domid, int flags,
379 domid_t trans_domid,
380 grant_ref_t trans_gref)
381{
382 int ref, rc;
383
384 ref = get_free_entries(1);
385 if (unlikely(ref < 0))
386 return -ENOSPC;
387
388 rc = gnttab_grant_foreign_access_trans_ref(ref, domid, flags,
389 trans_domid, trans_gref);
390 if (rc < 0) {
391 put_free_entry(ref);
392 return rc;
393 }
394
395 return ref;
396}
397EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_trans);
398
/* True when the active table version supports transitive grants (v2). */
bool gnttab_trans_grants_available(void)
{
	return gnttab_interface->update_trans_entry != NULL;
}
EXPORT_SYMBOL_GPL(gnttab_trans_grants_available);
404
/* v1: in-use status lives in the shared entry's flags word. */
static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}
409
/* v2: in-use status lives in the separate status frame array. */
static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
	return grstatus[ref] & (GTF_reading|GTF_writing);
}
414
/* Nonzero if the remote domain currently has @ref mapped for
 * reading and/or writing. */
int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
420
/*
 * v1: atomically clear the entry's flags iff the remote side is not
 * mapping it.  The cmpxchg loop retries when the flags change under us;
 * returns 0 (still in use) or 1 (access revoked).
 */
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;	/* remote still has it mapped */
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}
436
/*
 * v2: invalidate the entry first, then (after a full barrier) check the
 * status frame; returns 0 if the remote side still has it mapped,
 * 1 if access was revoked.
 */
static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* clear flags before sampling the remote-use status */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/* The read of grstatus needs to have acquire
		   semantics.  On x86, reads already have
		   that, and we just need to protect against
		   compiler reorderings.  On other
		   architectures we may need a full
		   barrier. */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}
459
/* Version-dispatched revoke; 1 on success, 0 if still in use. */
static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}
464
/* Public revoke wrapper: warns (but does not block) when the grant is
 * still mapped by the remote domain. */
int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
473
474struct deferred_entry {
475 struct list_head list;
476 grant_ref_t ref;
477 bool ro;
478 uint16_t warn_delay;
479 struct page *page;
480};
481static LIST_HEAD(deferred_list);
482static void gnttab_handle_deferred(unsigned long);
483static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
484
/*
 * Timer handler: retry up to 10 deferred grant revocations per run.
 * Entries that succeed are freed (entry, reference, and backing page);
 * entries still in use are re-queued at the tail, and the first such
 * entry is remembered so we stop after one full pass.  The timer is
 * re-armed for 1s later while the list is non-empty.
 */
static void gnttab_handle_deferred(unsigned long unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;	/* wrapped around: everything retried once */
		list_del(&entry->list);
		/* Drop the lock while talking to the grant table. */
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				__free_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;	/* consumed: do not re-queue */
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n",
					entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
530
/*
 * Queue a still-in-use grant for deferred revocation by the timer
 * handler above.  If the tracking allocation fails the reference (and
 * page) are deliberately leaked rather than reused while mapped.
 */
static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;	/* warn after ~60 retries */
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}
556
/*
 * Revoke @ref and free its backing page (if @page != 0).  When the
 * remote domain still has the grant mapped, the reference and page are
 * handed to the deferred-free machinery instead of being freed now.
 */
void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			free_page(page);
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
569
570int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
571{
572 int ref;
573
574 ref = get_free_entries(1);
575 if (unlikely(ref < 0))
576 return -ENOSPC;
577 gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
578
579 return ref;
580}
581EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
582
/* Mark @ref as accepting a page transfer from @domid into slot @pfn. */
void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
589
/*
 * v1: finish (or cancel) a page transfer on @ref.  Spins until either
 * the entry can be reclaimed (transfer never committed -> return 0) or
 * the transfer completes, in which case the received frame is returned.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}
620
/*
 * v2 variant of the above; identical protocol, but the flags live in
 * the entry header and the frame in the full_page layout.
 */
static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If a transfer is not even yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}
651
/* Version-dispatched transfer completion; the reference stays allocated. */
unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
657
/* As gnttab_end_foreign_transfer_ref(), but also frees the reference. */
unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
665
/* Return a single reference to the global free list. */
void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
671
/*
 * Return an entire pre-linked chain of references (as produced by
 * gnttab_alloc_grant_references()) to the global free list in one
 * splice, then wake any free-space waiters.
 */
void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;
	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	/* Walk to the tail of the chain, counting entries. */
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	/* Splice the chain onto the front of the free list. */
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
692
693int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
694{
695 int h = get_free_entries(count);
696
697 if (h < 0)
698 return -ENOSPC;
699
700 *head = h;
701
702 return 0;
703}
704EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
705
706int gnttab_empty_grant_references(const grant_ref_t *private_head)
707{
708 return (*private_head == GNTTAB_LIST_END);
709}
710EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
711
712int gnttab_claim_grant_reference(grant_ref_t *private_head)
713{
714 grant_ref_t g = *private_head;
715 if (unlikely(g == GNTTAB_LIST_END))
716 return -ENOSPC;
717 *private_head = gnttab_entry(g);
718 return g;
719}
720EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
721
/* Push @release back onto the private reference chain at *@private_head. */
void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	/* Link the released entry to the old head, then make it the head. */
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
729
730void gnttab_request_free_callback(struct gnttab_free_callback *callback,
731 void (*fn)(void *), void *arg, u16 count)
732{
733 unsigned long flags;
734 spin_lock_irqsave(&gnttab_list_lock, flags);
735 if (callback->next)
736 goto out;
737 callback->fn = fn;
738 callback->arg = arg;
739 callback->count = count;
740 callback->next = gnttab_free_callback_list;
741 gnttab_free_callback_list = callback;
742 check_free_callbacks();
743out:
744 spin_unlock_irqrestore(&gnttab_list_lock, flags);
745}
746EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
747
748void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
749{
750 struct gnttab_free_callback **pcb;
751 unsigned long flags;
752
753 spin_lock_irqsave(&gnttab_list_lock, flags);
754 for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
755 if (*pcb == callback) {
756 *pcb = callback->next;
757 break;
758 }
759 }
760 spin_unlock_irqrestore(&gnttab_list_lock, flags);
761}
762EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
763
/*
 * Extend the grant table by @more_frames frames: allocate any additional
 * pages required for the free-list bookkeeping (gnttab_list) and thread
 * the new references onto the free list.  GFP_ATOMIC is used because
 * this appears to run in atomic context — NOTE(review): confirm callers
 * hold gnttab_list_lock.  Returns 0 or -ENOMEM.
 */
static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * GREFS_PER_GRANT_FRAME;

	/* gnttab_list pages needed before/after the grow (RPP refs per page). */
	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
	new_nr_glist_frames =
		(new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}


	/* Chain the new entries together... */
	for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
	     i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	/* ...and splice them onto the front of the free list. */
	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	/* The additional entries may satisfy pending free-space callbacks. */
	check_free_callbacks();

	return 0;

grow_nomem:
	/* Free everything allocated above; the failing slot i is NULL and
	 * free_page(0) is a no-op, so including it here is harmless. */
	for ( ; i >= nr_glist_frames; i--)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}
801
802static unsigned int __max_nr_grant_frames(void)
803{
804 struct gnttab_query_size query;
805 int rc;
806
807 query.dom = DOMID_SELF;
808
809 rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
810 if ((rc < 0) || (query.status != GNTST_okay))
811 return 4; /* Legacy max supported number of frames */
812
813 return query.max_nr_frames;
814}
815
816unsigned int gnttab_max_grant_frames(void)
817{
818 unsigned int xen_max = __max_nr_grant_frames();
819
820 if (xen_max > boot_max_nr_grant_frames)
821 return boot_max_nr_grant_frames;
822 return xen_max;
823}
824EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
825
826int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
827 struct gnttab_map_grant_ref *kmap_ops,
828 struct page **pages, unsigned int count)
829{
830 int i, ret;
831 bool lazy = false;
832 pte_t *pte;
833 unsigned long mfn;
834
835 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
836 if (ret)
837 return ret;
838
839 if (xen_feature(XENFEAT_auto_translated_physmap))
840 return ret;
841
842 if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
843 arch_enter_lazy_mmu_mode();
844 lazy = true;
845 }
846
847 for (i = 0; i < count; i++) {
848 /* Do not add to override if the map failed. */
849 if (map_ops[i].status)
850 continue;
851
852 if (map_ops[i].flags & GNTMAP_contains_pte) {
853 pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
854 (map_ops[i].host_addr & ~PAGE_MASK));
855 mfn = pte_mfn(*pte);
856 } else {
857 mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
858 }
859 ret = m2p_add_override(mfn, pages[i], kmap_ops ?
860 &kmap_ops[i] : NULL);
861 if (ret)
862 return ret;
863 }
864
865 if (lazy)
866 arch_leave_lazy_mmu_mode();
867
868 return ret;
869}
870EXPORT_SYMBOL_GPL(gnttab_map_refs);
871
872int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
873 struct gnttab_map_grant_ref *kmap_ops,
874 struct page **pages, unsigned int count)
875{
876 int i, ret;
877 bool lazy = false;
878
879 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
880 if (ret)
881 return ret;
882
883 if (xen_feature(XENFEAT_auto_translated_physmap))
884 return ret;
885
886 if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
887 arch_enter_lazy_mmu_mode();
888 lazy = true;
889 }
890
891 for (i = 0; i < count; i++) {
892 ret = m2p_remove_override(pages[i], kmap_ops ?
893 &kmap_ops[i] : NULL);
894 if (ret)
895 return ret;
896 }
897
898 if (lazy)
899 arch_leave_lazy_mmu_mode();
900
901 return ret;
902}
903EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
904
905static unsigned nr_status_frames(unsigned nr_grant_frames)
906{
907 return (nr_grant_frames * GREFS_PER_GRANT_FRAME + SPP - 1) / SPP;
908}
909
910static int gnttab_map_frames_v1(unsigned long *frames, unsigned int nr_gframes)
911{
912 int rc;
913
914 rc = arch_gnttab_map_shared(frames, nr_gframes,
915 gnttab_max_grant_frames(),
916 &gnttab_shared.addr);
917 BUG_ON(rc);
918
919 return 0;
920}
921
/* Unmap the v1 shared grant-table frames. */
static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}
926
/*
 * Map the v2 shared grant-table frames plus the accompanying status
 * frames.  Returns 0 on success, -ENOMEM on allocation failure or
 * -ENOSYS if the hypervisor lacks the status-frames operation; any
 * other hypercall or mapping failure is treated as fatal (BUG).
 */
static int gnttab_map_frames_v2(unsigned long *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc(nr_sframes * sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	/* Ask Xen for the machine frames backing the status array. */
	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	/* Map the status frames... */
	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	/* ...and then the shared grant-entry frames themselves. */
	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}
969
/* Unmap both the v2 shared frames and their status frames. */
static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}
975
/*
 * Make grant-table frames [start_idx, end_idx] available: HVM domains
 * populate them into the physmap via XENMEM_add_to_physmap; PV domains
 * obtain the frame list with GNTTABOP_setup_table and map it through
 * the version-specific backend.  Returns 0 or a negative error.
 */
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	unsigned long *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_hvm_domain()) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				printk(KERN_WARNING
						"grant table add_to_physmap failed, err=%d\n", rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	/* Hand the frame list to the active v1/v2 backend for mapping. */
	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}
1032
/* Backend operations for the version 1 grant-table layout. */
static struct gnttab_ops gnttab_v1_ops = {
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};
1041
/*
 * Backend operations for the version 2 layout; additionally provides
 * the sub-page and transitive entry updaters (v2-only operations).
 */
static struct gnttab_ops gnttab_v2_ops = {
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access		= gnttab_query_foreign_access_v2,
	.update_subpage_entry		= gnttab_update_subpage_entry_v2,
	.update_trans_entry		= gnttab_update_trans_entry_v2,
};
1052
/*
 * Negotiate the grant-table ABI version with the hypervisor and select
 * the matching ops table.  HVM domains request v1; others try v2 and
 * fall back to v1.  Losing v2 after having used it (e.g. following
 * migration to an older Xen) is unrecoverable, hence the panic.
 */
static void gnttab_request_version(void)
{
	int rc;
	struct gnttab_set_version gsv;

	if (xen_hvm_domain())
		gsv.version = 1;
	else
		gsv.version = 2;
	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2) {
		grant_table_version = 2;
		gnttab_interface = &gnttab_v2_ops;
	} else if (grant_table_version == 2) {
		/*
		 * If we've already used version 2 features,
		 * but then suddenly discover that they're not
		 * available (e.g. migrating to an older
		 * version of Xen), almost unbounded badness
		 * can happen.
		 */
		panic("we need grant tables version 2, but only version 1 is available");
	} else {
		grant_table_version = 1;
		gnttab_interface = &gnttab_v1_ops;
	}
	printk(KERN_INFO "Grant tables using version %d layout.\n",
	       grant_table_version);
}
1082
1083int gnttab_resume(void)
1084{
1085 unsigned int max_nr_gframes;
1086
1087 gnttab_request_version();
1088 max_nr_gframes = gnttab_max_grant_frames();
1089 if (max_nr_gframes < nr_grant_frames)
1090 return -ENOSYS;
1091
1092 if (xen_pv_domain())
1093 return gnttab_map(0, nr_grant_frames - 1);
1094
1095 if (gnttab_shared.addr == NULL) {
1096 gnttab_shared.addr = ioremap(xen_hvm_resume_frames,
1097 PAGE_SIZE * max_nr_gframes);
1098 if (gnttab_shared.addr == NULL) {
1099 printk(KERN_WARNING
1100 "Failed to ioremap gnttab share frames!");
1101 return -ENOMEM;
1102 }
1103 }
1104
1105 gnttab_map(0, nr_grant_frames - 1);
1106
1107 return 0;
1108}
1109
/* Tear down the grant-table frame mappings ahead of suspend. */
int gnttab_suspend(void)
{
	gnttab_interface->unmap_frames();
	return 0;
}
1115
1116static int gnttab_expand(unsigned int req_entries)
1117{
1118 int rc;
1119 unsigned int cur, extra;
1120
1121 cur = nr_grant_frames;
1122 extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
1123 GREFS_PER_GRANT_FRAME);
1124 if (cur + extra > gnttab_max_grant_frames())
1125 return -ENOSPC;
1126
1127 rc = gnttab_map(cur, cur + extra - 1);
1128 if (rc == 0)
1129 rc = grow_gnttab_list(extra);
1130
1131 return rc;
1132}
1133
/*
 * One-time grant-table initialization: allocate the free-list pages,
 * map the initial grant frame and thread all non-reserved entries onto
 * the free list.  Returns 0 on success, negative errno otherwise.
 */
int gnttab_init(void)
{
	int i;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	nr_grant_frames = 1;
	boot_max_nr_grant_frames = __max_nr_grant_frames();

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = (boot_max_nr_grant_frames *
			       GREFS_PER_GRANT_FRAME / RPP);

	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
			      GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	/* Pages backing the free list for the initial frame(s). */
	nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	/* Negotiate the version and map the initial frame(s). */
	if (gnttab_resume() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;

	/* Chain all non-reserved entries into the free list. */
	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head  = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

 ini_nomem:
	/* Unwind: i indexes the first slot that was NOT allocated. */
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);
1188
1189static int __devinit __gnttab_init(void)
1190{
1191 /* Delay grant-table initialization in the PV on HVM case */
1192 if (xen_hvm_domain())
1193 return 0;
1194
1195 if (!xen_pv_domain())
1196 return -ENODEV;
1197
1198 return gnttab_init();
1199}
1200
1201core_initcall(__gnttab_init);