1/******************************************************************************
2 * grant_table.c
3 *
4 * Granting foreign access to our memory reservation.
5 *
6 * Copyright (c) 2005-2006, Christopher Clark
7 * Copyright (c) 2004-2005, K A Fraser
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#include <linux/module.h>
35#include <linux/sched.h>
36#include <linux/mm.h>
37#include <linux/slab.h>
38#include <linux/vmalloc.h>
39#include <linux/uaccess.h>
40#include <linux/io.h>
41
42#include <xen/xen.h>
43#include <xen/interface/xen.h>
44#include <xen/page.h>
45#include <xen/grant_table.h>
46#include <xen/interface/memory.h>
47#include <asm/xen/hypercall.h>
48
49#include <asm/pgtable.h>
50#include <asm/sync_bitops.h>
51
52
53/* External tools reserve first few grant table entries. */
54#define NR_RESERVED_ENTRIES 8
55#define GNTTAB_LIST_END 0xffffffff
56#define GREFS_PER_GRANT_FRAME (PAGE_SIZE / sizeof(struct grant_entry))
57
58static grant_ref_t **gnttab_list;
59static unsigned int nr_grant_frames;
60static unsigned int boot_max_nr_grant_frames;
61static int gnttab_free_count;
62static grant_ref_t gnttab_free_head;
63static DEFINE_SPINLOCK(gnttab_list_lock);
64unsigned long xen_hvm_resume_frames;
65EXPORT_SYMBOL_GPL(xen_hvm_resume_frames);
66
67static struct grant_entry *shared;
68
69static struct gnttab_free_callback *gnttab_free_callback_list;
70
71static int gnttab_expand(unsigned int req_entries);
72
73#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
74
75static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
76{
77 return &gnttab_list[(entry) / RPP][(entry) % RPP];
78}
79/* This can be used as an l-value */
80#define gnttab_entry(entry) (*__gnttab_entry(entry))
81
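/*
 * Allocate 'count' grant references from the free list, expanding the grant
 * table if necessary.  Returns the first reference of the allocated chain
 * (the remaining references stay linked via gnttab_entry(), terminated by
 * GNTTAB_LIST_END), or a negative errno if the table cannot be expanded.
 */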
82static int get_free_entries(unsigned count)
83{
84 unsigned long flags;
85 int ref, rc = 0;
86 grant_ref_t head;
87
88 spin_lock_irqsave(&gnttab_list_lock, flags);
89
90 if ((gnttab_free_count < count) &&
91 ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
92 spin_unlock_irqrestore(&gnttab_list_lock, flags);
93 return rc;
94 }
95
96 ref = head = gnttab_free_head;
97 gnttab_free_count -= count;
98 while (count-- > 1)
99 head = gnttab_entry(head);
100 gnttab_free_head = gnttab_entry(head);
101 gnttab_entry(head) = GNTTAB_LIST_END;
102
103 spin_unlock_irqrestore(&gnttab_list_lock, flags);
104
105 return ref;
106}
107
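/*
 * Run pending free callbacks whose requested number of free entries is now
 * available; callbacks that still cannot be satisfied are requeued.
 * Called with gnttab_list_lock held.
 */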
108static void do_free_callbacks(void)
109{
110 struct gnttab_free_callback *callback, *next;
111
112 callback = gnttab_free_callback_list;
113 gnttab_free_callback_list = NULL;
114
115 while (callback != NULL) {
116 next = callback->next;
117 if (gnttab_free_count >= callback->count) {
118 callback->next = NULL;
119 callback->fn(callback->arg);
120 } else {
121 callback->next = gnttab_free_callback_list;
122 gnttab_free_callback_list = callback;
123 }
124 callback = next;
125 }
126}
127
128static inline void check_free_callbacks(void)
129{
130 if (unlikely(gnttab_free_callback_list))
131 do_free_callbacks();
132}
133
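/* Return a grant reference to the free list and run any waiting callbacks. */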
134static void put_free_entry(grant_ref_t ref)
135{
136 unsigned long flags;
137 spin_lock_irqsave(&gnttab_list_lock, flags);
138 gnttab_entry(ref) = gnttab_free_head;
139 gnttab_free_head = ref;
140 gnttab_free_count++;
141 check_free_callbacks();
142 spin_unlock_irqrestore(&gnttab_list_lock, flags);
143}
144
145static void update_grant_entry(grant_ref_t ref, domid_t domid,
146 unsigned long frame, unsigned flags)
147{
148 /*
149 * Introducing a valid entry into the grant table:
150 * 1. Write ent->domid.
151 * 2. Write ent->frame:
152 * GTF_permit_access: Frame to which access is permitted.
153 * GTF_accept_transfer: Pseudo-phys frame slot being filled by new
154 * frame, or zero if none.
155 * 3. Write memory barrier (WMB).
156 * 4. Write ent->flags, inc. valid type.
157 */
158 shared[ref].frame = frame;
159 shared[ref].domid = domid;
160 wmb();
161 shared[ref].flags = flags;
162}
163
164/*
165 * Public grant-issuing interface functions
166 */
167void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
168 unsigned long frame, int readonly)
169{
170 update_grant_entry(ref, domid, frame,
171 GTF_permit_access | (readonly ? GTF_readonly : 0));
172}
173EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
174
175int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
176 int readonly)
177{
178 int ref;
179
180 ref = get_free_entries(1);
181 if (unlikely(ref < 0))
182 return -ENOSPC;
183
184 gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
185
186 return ref;
187}
188EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
189
190int gnttab_query_foreign_access(grant_ref_t ref)
191{
192 u16 nflags;
193
194 nflags = shared[ref].flags;
195
196 return (nflags & (GTF_reading|GTF_writing));
197}
198EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
199
200int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
201{
202 u16 flags, nflags;
203
204 nflags = shared[ref].flags;
205 do {
206 flags = nflags;
207 if (flags & (GTF_reading|GTF_writing)) {
208 printk(KERN_ALERT "WARNING: g.e. still in use!\n");
209 return 0;
210 }
211 } while ((nflags = sync_cmpxchg(&shared[ref].flags, flags, 0)) != flags);
212
213 return 1;
214}
215EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
216
217void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
218 unsigned long page)
219{
220 if (gnttab_end_foreign_access_ref(ref, readonly)) {
221 put_free_entry(ref);
222 if (page != 0)
223 free_page(page);
224 } else {
225 /* XXX This needs to be fixed so that the ref and page are
226 placed on a list to be freed up later. */
227 printk(KERN_WARNING
228 "WARNING: leaking g.e. and page still in use!\n");
229 }
230}
231EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
232
233int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
234{
235 int ref;
236
237 ref = get_free_entries(1);
238 if (unlikely(ref < 0))
239 return -ENOSPC;
240 gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
241
242 return ref;
243}
244EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
245
246void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
247 unsigned long pfn)
248{
249 update_grant_entry(ref, domid, pfn, GTF_accept_transfer);
250}
251EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
252
253unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
254{
255 unsigned long frame;
256 u16 flags;
257
258 /*
259 * If a transfer is not even yet started, try to reclaim the grant
260 * reference and return failure (== 0).
261 */
262 while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
263 if (sync_cmpxchg(&shared[ref].flags, flags, 0) == flags)
264 return 0;
265 cpu_relax();
266 }
267
268 /* If a transfer is in progress then wait until it is completed. */
269 while (!(flags & GTF_transfer_completed)) {
270 flags = shared[ref].flags;
271 cpu_relax();
272 }
273
274 rmb(); /* Read the frame number /after/ reading completion status. */
275 frame = shared[ref].frame;
276 BUG_ON(frame == 0);
277
278 return frame;
279}
280EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
281
282unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
283{
284 unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
285 put_free_entry(ref);
286 return frame;
287}
288EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
289
290void gnttab_free_grant_reference(grant_ref_t ref)
291{
292 put_free_entry(ref);
293}
294EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
295
296void gnttab_free_grant_references(grant_ref_t head)
297{
298 grant_ref_t ref;
299 unsigned long flags;
300 int count = 1;
301 if (head == GNTTAB_LIST_END)
302 return;
303 spin_lock_irqsave(&gnttab_list_lock, flags);
304 ref = head;
305 while (gnttab_entry(ref) != GNTTAB_LIST_END) {
306 ref = gnttab_entry(ref);
307 count++;
308 }
309 gnttab_entry(ref) = gnttab_free_head;
310 gnttab_free_head = head;
311 gnttab_free_count += count;
312 check_free_callbacks();
313 spin_unlock_irqrestore(&gnttab_list_lock, flags);
314}
315EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
316
317int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
318{
319 int h = get_free_entries(count);
320
321 if (h < 0)
322 return -ENOSPC;
323
324 *head = h;
325
326 return 0;
327}
328EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
329
330int gnttab_empty_grant_references(const grant_ref_t *private_head)
331{
332 return (*private_head == GNTTAB_LIST_END);
333}
334EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
335
336int gnttab_claim_grant_reference(grant_ref_t *private_head)
337{
338 grant_ref_t g = *private_head;
339 if (unlikely(g == GNTTAB_LIST_END))
340 return -ENOSPC;
341 *private_head = gnttab_entry(g);
342 return g;
343}
344EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
345
346void gnttab_release_grant_reference(grant_ref_t *private_head,
347 grant_ref_t release)
348{
349 gnttab_entry(release) = *private_head;
350 *private_head = release;
351}
352EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
353
354void gnttab_request_free_callback(struct gnttab_free_callback *callback,
355 void (*fn)(void *), void *arg, u16 count)
356{
357 unsigned long flags;
358 spin_lock_irqsave(&gnttab_list_lock, flags);
359 if (callback->next)
360 goto out;
361 callback->fn = fn;
362 callback->arg = arg;
363 callback->count = count;
364 callback->next = gnttab_free_callback_list;
365 gnttab_free_callback_list = callback;
366 check_free_callbacks();
367out:
368 spin_unlock_irqrestore(&gnttab_list_lock, flags);
369}
370EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
371
372void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
373{
374 struct gnttab_free_callback **pcb;
375 unsigned long flags;
376
377 spin_lock_irqsave(&gnttab_list_lock, flags);
378 for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
379 if (*pcb == callback) {
380 *pcb = callback->next;
381 break;
382 }
383 }
384 spin_unlock_irqrestore(&gnttab_list_lock, flags);
385}
386EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
387
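/*
 * Extend the grant reference free list to cover 'more_frames' additional
 * grant frames, allocating further gnttab_list pages as needed and chaining
 * the new entries onto the free list.
 */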
388static int grow_gnttab_list(unsigned int more_frames)
389{
390 unsigned int new_nr_grant_frames, extra_entries, i;
391 unsigned int nr_glist_frames, new_nr_glist_frames;
392
393 new_nr_grant_frames = nr_grant_frames + more_frames;
394 extra_entries = more_frames * GREFS_PER_GRANT_FRAME;
395
396 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
397 new_nr_glist_frames =
398 (new_nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
399 for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
400 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
401 if (!gnttab_list[i])
402 goto grow_nomem;
403 }
404
405
406 for (i = GREFS_PER_GRANT_FRAME * nr_grant_frames;
407 i < GREFS_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
408 gnttab_entry(i) = i + 1;
409
410 gnttab_entry(i) = gnttab_free_head;
411 gnttab_free_head = GREFS_PER_GRANT_FRAME * nr_grant_frames;
412 gnttab_free_count += extra_entries;
413
414 nr_grant_frames = new_nr_grant_frames;
415
416 check_free_callbacks();
417
418 return 0;
419
420grow_nomem:
421 for ( ; i >= nr_glist_frames; i--)
422 free_page((unsigned long) gnttab_list[i]);
423 return -ENOMEM;
424}
425
426static unsigned int __max_nr_grant_frames(void)
427{
428 struct gnttab_query_size query;
429 int rc;
430
431 query.dom = DOMID_SELF;
432
433 rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
434 if ((rc < 0) || (query.status != GNTST_okay))
435 return 4; /* Legacy max supported number of frames */
436
437 return query.max_nr_frames;
438}
439
440unsigned int gnttab_max_grant_frames(void)
441{
442 unsigned int xen_max = __max_nr_grant_frames();
443
444 if (xen_max > boot_max_nr_grant_frames)
445 return boot_max_nr_grant_frames;
446 return xen_max;
447}
448EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
449
450int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
451 struct page **pages, unsigned int count)
452{
453 int i, ret;
454 pte_t *pte;
455 unsigned long mfn;
456
457 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
458 if (ret)
459 return ret;
460
461 if (xen_feature(XENFEAT_auto_translated_physmap))
462 return ret;
463
464 for (i = 0; i < count; i++) {
465 /* Do not add to override if the map failed. */
466 if (map_ops[i].status)
467 continue;
468
469 if (map_ops[i].flags & GNTMAP_contains_pte) {
470 pte = (pte_t *) (mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) +
471 (map_ops[i].host_addr & ~PAGE_MASK));
472 mfn = pte_mfn(*pte);
473 } else {
			/* If you really wanted to do this:
			 * mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
			 *
			 * The reason we do not implement it is because on the
			 * unmap path (gnttab_unmap_refs) we have no means of
			 * checking whether the page is !GNTMAP_contains_pte.
			 *
			 * That is without some extra data-structure to carry
			 * the struct page, bool clear_pte, and list_head next
			 * tuples and deal with allocation/deallocation, etc.
			 *
			 * The users of this API set the GNTMAP_contains_pte
			 * flag so let's just return "not supported" until it
			 * becomes necessary to implement.
			 */
			return -EOPNOTSUPP;
		}
491 ret = m2p_add_override(mfn, pages[i],
492 map_ops[i].flags & GNTMAP_contains_pte);
493 if (ret)
494 return ret;
495 }
496
497 return ret;
498}
499EXPORT_SYMBOL_GPL(gnttab_map_refs);
500
501int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
502 struct page **pages, unsigned int count)
503{
504 int i, ret;
505
506 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
507 if (ret)
508 return ret;
509
510 if (xen_feature(XENFEAT_auto_translated_physmap))
511 return ret;
512
513 for (i = 0; i < count; i++) {
514 ret = m2p_remove_override(pages[i], true /* clear the PTE */);
515 if (ret)
516 return ret;
517 }
518
519 return ret;
520}
521EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
522
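/*
 * Map grant table frames [start_idx, end_idx] into the kernel.  HVM domains
 * populate the frames via XENMEM_add_to_physmap; PV domains query them with
 * GNTTABOP_setup_table and map them through arch_gnttab_map_shared().
 */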
523static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
524{
525 struct gnttab_setup_table setup;
526 unsigned long *frames;
527 unsigned int nr_gframes = end_idx + 1;
528 int rc;
529
530 if (xen_hvm_domain()) {
531 struct xen_add_to_physmap xatp;
532 unsigned int i = end_idx;
533 rc = 0;
534 /*
535 * Loop backwards, so that the first hypercall has the largest
536 * index, ensuring that the table will grow only once.
537 */
538 do {
539 xatp.domid = DOMID_SELF;
540 xatp.idx = i;
541 xatp.space = XENMAPSPACE_grant_table;
542 xatp.gpfn = (xen_hvm_resume_frames >> PAGE_SHIFT) + i;
543 rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
544 if (rc != 0) {
545 printk(KERN_WARNING
546 "grant table add_to_physmap failed, err=%d\n", rc);
547 break;
548 }
549 } while (i-- > start_idx);
550
551 return rc;
552 }
553
554 frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
555 if (!frames)
556 return -ENOMEM;
557
558 setup.dom = DOMID_SELF;
559 setup.nr_frames = nr_gframes;
560 set_xen_guest_handle(setup.frame_list, frames);
561
562 rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
563 if (rc == -ENOSYS) {
564 kfree(frames);
565 return -ENOSYS;
566 }
567
568 BUG_ON(rc || setup.status);
569
570 rc = arch_gnttab_map_shared(frames, nr_gframes, gnttab_max_grant_frames(),
571 &shared);
572 BUG_ON(rc);
573
574 kfree(frames);
575
576 return 0;
577}
578
579int gnttab_resume(void)
580{
581 unsigned int max_nr_gframes;
582
583 max_nr_gframes = gnttab_max_grant_frames();
584 if (max_nr_gframes < nr_grant_frames)
585 return -ENOSYS;
586
587 if (xen_pv_domain())
588 return gnttab_map(0, nr_grant_frames - 1);
589
590 if (!shared) {
591 shared = ioremap(xen_hvm_resume_frames, PAGE_SIZE * max_nr_gframes);
592 if (shared == NULL) {
593 printk(KERN_WARNING
594 "Failed to ioremap gnttab share frames!");
595 return -ENOMEM;
596 }
597 }
598
599 gnttab_map(0, nr_grant_frames - 1);
600
601 return 0;
602}
603
604int gnttab_suspend(void)
605{
606 arch_gnttab_unmap_shared(shared, nr_grant_frames);
607 return 0;
608}
609
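/*
 * Grow the grant table by enough frames to provide at least 'req_entries'
 * additional grant references and extend the free list accordingly.
 * Called with gnttab_list_lock held.
 */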
610static int gnttab_expand(unsigned int req_entries)
611{
612 int rc;
613 unsigned int cur, extra;
614
615 cur = nr_grant_frames;
616 extra = ((req_entries + (GREFS_PER_GRANT_FRAME-1)) /
617 GREFS_PER_GRANT_FRAME);
618 if (cur + extra > gnttab_max_grant_frames())
619 return -ENOSPC;
620
621 rc = gnttab_map(cur, cur + extra - 1);
622 if (rc == 0)
623 rc = grow_gnttab_list(extra);
624
625 return rc;
626}
627
628int gnttab_init(void)
629{
630 int i;
631 unsigned int max_nr_glist_frames, nr_glist_frames;
632 unsigned int nr_init_grefs;
633
634 nr_grant_frames = 1;
635 boot_max_nr_grant_frames = __max_nr_grant_frames();
636
637 /* Determine the maximum number of frames required for the
638 * grant reference free list on the current hypervisor.
639 */
640 max_nr_glist_frames = (boot_max_nr_grant_frames *
641 GREFS_PER_GRANT_FRAME / RPP);
642
643 gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
644 GFP_KERNEL);
645 if (gnttab_list == NULL)
646 return -ENOMEM;
647
648 nr_glist_frames = (nr_grant_frames * GREFS_PER_GRANT_FRAME + RPP - 1) / RPP;
649 for (i = 0; i < nr_glist_frames; i++) {
650 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
651 if (gnttab_list[i] == NULL)
652 goto ini_nomem;
653 }
654
655 if (gnttab_resume() < 0)
656 return -ENODEV;
657
658 nr_init_grefs = nr_grant_frames * GREFS_PER_GRANT_FRAME;
659
660 for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
661 gnttab_entry(i) = i + 1;
662
663 gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
664 gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
665 gnttab_free_head = NR_RESERVED_ENTRIES;
666
667 printk("Grant table initialized\n");
668 return 0;
669
670 ini_nomem:
671 for (i--; i >= 0; i--)
672 free_page((unsigned long)gnttab_list[i]);
673 kfree(gnttab_list);
674 return -ENOMEM;
675}
676EXPORT_SYMBOL_GPL(gnttab_init);
677
678static int __devinit __gnttab_init(void)
679{
680 /* Delay grant-table initialization in the PV on HVM case */
681 if (xen_hvm_domain())
682 return 0;
683
684 if (!xen_pv_domain())
685 return -ENODEV;
686
687 return gnttab_init();
688}
689
690core_initcall(__gnttab_init);
1/******************************************************************************
2 * grant_table.c
3 *
4 * Granting foreign access to our memory reservation.
5 *
6 * Copyright (c) 2005-2006, Christopher Clark
7 * Copyright (c) 2004-2005, K A Fraser
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License version 2
11 * as published by the Free Software Foundation; or, when distributed
12 * separately from the Linux kernel or incorporated into other
13 * software packages, subject to the following license:
14 *
15 * Permission is hereby granted, free of charge, to any person obtaining a copy
16 * of this source file (the "Software"), to deal in the Software without
17 * restriction, including without limitation the rights to use, copy, modify,
18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
19 * and to permit persons to whom the Software is furnished to do so, subject to
20 * the following conditions:
21 *
22 * The above copyright notice and this permission notice shall be included in
23 * all copies or substantial portions of the Software.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
31 * IN THE SOFTWARE.
32 */
33
34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
35
36#include <linux/bitmap.h>
37#include <linux/memblock.h>
38#include <linux/sched.h>
39#include <linux/mm.h>
40#include <linux/slab.h>
41#include <linux/vmalloc.h>
42#include <linux/uaccess.h>
43#include <linux/io.h>
44#include <linux/delay.h>
45#include <linux/hardirq.h>
46#include <linux/workqueue.h>
47#include <linux/ratelimit.h>
48#include <linux/moduleparam.h>
49#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
50#include <linux/dma-mapping.h>
51#endif
52
53#include <xen/xen.h>
54#include <xen/interface/xen.h>
55#include <xen/page.h>
56#include <xen/grant_table.h>
57#include <xen/interface/memory.h>
58#include <xen/hvc-console.h>
59#include <xen/swiotlb-xen.h>
60#include <xen/balloon.h>
61#ifdef CONFIG_X86
62#include <asm/xen/cpuid.h>
63#endif
64#include <xen/mem-reservation.h>
65#include <asm/xen/hypercall.h>
66#include <asm/xen/interface.h>
67
68#include <asm/sync_bitops.h>
69
70#define GNTTAB_LIST_END 0xffffffff
71
72static grant_ref_t **gnttab_list;
73static unsigned int nr_grant_frames;
74
75/*
76 * Handling of free grants:
77 *
78 * Free grants are in a simple list anchored in gnttab_free_head. They are
79 * linked by grant ref, the last element contains GNTTAB_LIST_END. The number
80 * of free entries is stored in gnttab_free_count.
81 * Additionally there is a bitmap of free entries anchored in
82 * gnttab_free_bitmap. This is being used for simplifying allocation of
83 * multiple consecutive grants, which is needed e.g. for support of virtio.
84 * gnttab_last_free is used to add free entries of new frames at the end of
85 * the free list.
86 * gnttab_free_tail_ptr specifies the variable which references the start
87 * of consecutive free grants ending with gnttab_last_free. This pointer is
88 * updated in a rather defensive way, in order to avoid performance hits in
89 * hot paths.
90 * All those variables are protected by gnttab_list_lock.
91 */
92static int gnttab_free_count;
93static unsigned int gnttab_size;
94static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
95static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
96static grant_ref_t *gnttab_free_tail_ptr;
97static unsigned long *gnttab_free_bitmap;
98static DEFINE_SPINLOCK(gnttab_list_lock);
99
100struct grant_frames xen_auto_xlat_grant_frames;
101static unsigned int xen_gnttab_version;
102module_param_named(version, xen_gnttab_version, uint, 0);
103
104static union {
105 struct grant_entry_v1 *v1;
106 union grant_entry_v2 *v2;
107 void *addr;
108} gnttab_shared;
109
/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames parameter
	 * holds the addresses of the grant table frames while the table is
	 * being set up, and nr_gframes is the number of frames to map.
	 * Returning GNTST_okay means success; a negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames that were mapped by map_frames for the
	 * grant entries.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame of
	 * this grant entry to a domain for access. The ref parameter is the
	 * reference of the introduced grant entry, domid is the id of the
	 * grantee domain, frame is the page frame to be granted, and flags is
	 * the status to be set on the grant entry.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting access via a grant entry. The ref parameter is the
	 * reference of the grant entry whose access will be stopped.
	 * If the grant entry is currently mapped for reading or writing,
	 * return failure (== 0) directly and don't tear down the grant
	 * access. Otherwise, stop grant access for this entry and return
	 * success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};
154
155struct unmap_refs_callback_data {
156 struct completion completion;
157 int result;
158};
159
160static const struct gnttab_ops *gnttab_interface;
161
/* This reflects the status of grant entries, so it acts as a global value. */
163static grant_status_t *grstatus;
164
165static struct gnttab_free_callback *gnttab_free_callback_list;
166
167static int gnttab_expand(unsigned int req_entries);
168
169#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
170#define SPP (PAGE_SIZE / sizeof(grant_status_t))
171
172static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
173{
174 return &gnttab_list[(entry) / RPP][(entry) % RPP];
175}
176/* This can be used as an l-value */
177#define gnttab_entry(entry) (*__gnttab_entry(entry))
178
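/*
 * Allocate 'count' grant references from the free list, expanding the grant
 * table if necessary.  The allocated entries are cleared in the free bitmap;
 * the first reference of the chain is returned, or a negative errno if the
 * table cannot be expanded.
 */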
179static int get_free_entries(unsigned count)
180{
181 unsigned long flags;
182 int ref, rc = 0;
183 grant_ref_t head;
184
185 spin_lock_irqsave(&gnttab_list_lock, flags);
186
187 if ((gnttab_free_count < count) &&
188 ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
189 spin_unlock_irqrestore(&gnttab_list_lock, flags);
190 return rc;
191 }
192
193 ref = head = gnttab_free_head;
194 gnttab_free_count -= count;
195 while (count--) {
196 bitmap_clear(gnttab_free_bitmap, head, 1);
197 if (gnttab_free_tail_ptr == __gnttab_entry(head))
198 gnttab_free_tail_ptr = &gnttab_free_head;
199 if (count)
200 head = gnttab_entry(head);
201 }
202 gnttab_free_head = gnttab_entry(head);
203 gnttab_entry(head) = GNTTAB_LIST_END;
204
205 if (!gnttab_free_count) {
206 gnttab_last_free = GNTTAB_LIST_END;
207 gnttab_free_tail_ptr = NULL;
208 }
209
210 spin_unlock_irqrestore(&gnttab_list_lock, flags);
211
212 return ref;
213}
214
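/*
 * Number of free entries in the consecutive run ending at gnttab_last_free,
 * or 0 if no such run is currently tracked.
 */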
215static int get_seq_entry_count(void)
216{
217 if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
218 *gnttab_free_tail_ptr == GNTTAB_LIST_END)
219 return 0;
220
221 return gnttab_last_free - *gnttab_free_tail_ptr + 1;
222}
223
224/* Rebuilds the free grant list and tries to find count consecutive entries. */
225static int get_free_seq(unsigned int count)
226{
227 int ret = -ENOSPC;
228 unsigned int from, to;
229 grant_ref_t *last;
230
231 gnttab_free_tail_ptr = &gnttab_free_head;
232 last = &gnttab_free_head;
233
234 for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
235 from < gnttab_size;
236 from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
237 to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
238 from + 1);
239 if (ret < 0 && to - from >= count) {
240 ret = from;
241 bitmap_clear(gnttab_free_bitmap, ret, count);
242 from += count;
243 gnttab_free_count -= count;
244 if (from == to)
245 continue;
246 }
247
248 /*
249 * Recreate the free list in order to have it properly sorted.
250 * This is needed to make sure that the free tail has the maximum
251 * possible size.
252 */
253 while (from < to) {
254 *last = from;
255 last = __gnttab_entry(from);
256 gnttab_last_free = from;
257 from++;
258 }
259 if (to < gnttab_size)
260 gnttab_free_tail_ptr = __gnttab_entry(to - 1);
261 }
262
263 *last = GNTTAB_LIST_END;
264 if (gnttab_last_free != gnttab_size - 1)
265 gnttab_free_tail_ptr = NULL;
266
267 return ret;
268}
269
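/*
 * Allocate 'count' consecutive grant references, expanding the grant table
 * and/or rebuilding the free list as needed.  Returns the first reference of
 * the run, or a negative errno on failure.
 */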
270static int get_free_entries_seq(unsigned int count)
271{
272 unsigned long flags;
273 int ret = 0;
274
275 spin_lock_irqsave(&gnttab_list_lock, flags);
276
277 if (gnttab_free_count < count) {
278 ret = gnttab_expand(count - gnttab_free_count);
279 if (ret < 0)
280 goto out;
281 }
282
283 if (get_seq_entry_count() < count) {
284 ret = get_free_seq(count);
285 if (ret >= 0)
286 goto out;
287 ret = gnttab_expand(count - get_seq_entry_count());
288 if (ret < 0)
289 goto out;
290 }
291
292 ret = *gnttab_free_tail_ptr;
293 *gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
294 gnttab_free_count -= count;
295 if (!gnttab_free_count)
296 gnttab_free_tail_ptr = NULL;
297 bitmap_clear(gnttab_free_bitmap, ret, count);
298
299 out:
300 spin_unlock_irqrestore(&gnttab_list_lock, flags);
301
302 return ret;
303}
304
305static void do_free_callbacks(void)
306{
307 struct gnttab_free_callback *callback, *next;
308
309 callback = gnttab_free_callback_list;
310 gnttab_free_callback_list = NULL;
311
312 while (callback != NULL) {
313 next = callback->next;
314 if (gnttab_free_count >= callback->count) {
315 callback->next = NULL;
316 callback->fn(callback->arg);
317 } else {
318 callback->next = gnttab_free_callback_list;
319 gnttab_free_callback_list = callback;
320 }
321 callback = next;
322 }
323}
324
325static inline void check_free_callbacks(void)
326{
327 if (unlikely(gnttab_free_callback_list))
328 do_free_callbacks();
329}
330
331static void put_free_entry_locked(grant_ref_t ref)
332{
333 if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
334 return;
335
336 gnttab_entry(ref) = gnttab_free_head;
337 gnttab_free_head = ref;
338 if (!gnttab_free_count)
339 gnttab_last_free = ref;
340 if (gnttab_free_tail_ptr == &gnttab_free_head)
341 gnttab_free_tail_ptr = __gnttab_entry(ref);
342 gnttab_free_count++;
343 bitmap_set(gnttab_free_bitmap, ref, 1);
344}
345
346static void put_free_entry(grant_ref_t ref)
347{
348 unsigned long flags;
349
350 spin_lock_irqsave(&gnttab_list_lock, flags);
351 put_free_entry_locked(ref);
352 check_free_callbacks();
353 spin_unlock_irqrestore(&gnttab_list_lock, flags);
354}
355
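/*
 * Append the 'n' entries starting at 'start' to the free list and mark them
 * in the free bitmap; used when new grant frames become available.
 */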
356static void gnttab_set_free(unsigned int start, unsigned int n)
357{
358 unsigned int i;
359
360 for (i = start; i < start + n - 1; i++)
361 gnttab_entry(i) = i + 1;
362
363 gnttab_entry(i) = GNTTAB_LIST_END;
364 if (!gnttab_free_count) {
365 gnttab_free_head = start;
366 gnttab_free_tail_ptr = &gnttab_free_head;
367 } else {
368 gnttab_entry(gnttab_last_free) = start;
369 }
370 gnttab_free_count += n;
371 gnttab_last_free = i;
372
373 bitmap_set(gnttab_free_bitmap, start, n);
374}
375
376/*
377 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
378 * Introducing a valid entry into the grant table:
379 * 1. Write ent->domid.
380 * 2. Write ent->frame: Frame to which access is permitted.
381 * 3. Write memory barrier (WMB).
382 * 4. Write ent->flags, inc. valid type.
383 */
384static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
385 unsigned long frame, unsigned flags)
386{
387 gnttab_shared.v1[ref].domid = domid;
388 gnttab_shared.v1[ref].frame = frame;
389 wmb();
390 gnttab_shared.v1[ref].flags = flags;
391}
392
393static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
394 unsigned long frame, unsigned int flags)
395{
396 gnttab_shared.v2[ref].hdr.domid = domid;
397 gnttab_shared.v2[ref].full_page.frame = frame;
398 wmb(); /* Hypervisor concurrent accesses. */
399 gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
400}
401
402/*
403 * Public grant-issuing interface functions
404 */
405void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
406 unsigned long frame, int readonly)
407{
408 gnttab_interface->update_entry(ref, domid, frame,
409 GTF_permit_access | (readonly ? GTF_readonly : 0));
410}
411EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
412
413int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
414 int readonly)
415{
416 int ref;
417
418 ref = get_free_entries(1);
419 if (unlikely(ref < 0))
420 return -ENOSPC;
421
422 gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
423
424 return ref;
425}
426EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
427
428static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
429{
430 u16 flags, nflags;
431 u16 *pflags;
432
433 pflags = &gnttab_shared.v1[ref].flags;
434 nflags = *pflags;
435 do {
436 flags = nflags;
437 if (flags & (GTF_reading|GTF_writing))
438 return 0;
439 } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
440
441 return 1;
442}
443
444static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
445{
446 gnttab_shared.v2[ref].hdr.flags = 0;
447 mb(); /* Concurrent access by hypervisor. */
448 if (grstatus[ref] & (GTF_reading|GTF_writing)) {
449 return 0;
450 } else {
451 /*
452 * The read of grstatus needs to have acquire semantics.
453 * On x86, reads already have that, and we just need to
454 * protect against compiler reorderings.
455 * On other architectures we may need a full barrier.
456 */
457#ifdef CONFIG_X86
458 barrier();
459#else
460 mb();
461#endif
462 }
463
464 return 1;
465}
466
467static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
468{
469 return gnttab_interface->end_foreign_access_ref(ref);
470}
471
472int gnttab_end_foreign_access_ref(grant_ref_t ref)
473{
474 if (_gnttab_end_foreign_access_ref(ref))
475 return 1;
476 pr_warn("WARNING: g.e. %#x still in use!\n", ref);
477 return 0;
478}
479EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
480
481static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
482{
483 return gnttab_shared.v1[ref].frame;
484}
485
486static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
487{
488 return gnttab_shared.v2[ref].full_page.frame;
489}
490
491struct deferred_entry {
492 struct list_head list;
493 grant_ref_t ref;
494 uint16_t warn_delay;
495 struct page *page;
496};
497static LIST_HEAD(deferred_list);
498static void gnttab_handle_deferred(struct timer_list *);
499static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
500
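/*
 * Timer callback: retry ending foreign access for deferred grant references,
 * handling up to 10 entries per run and re-arming the timer if any remain.
 */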
501static void gnttab_handle_deferred(struct timer_list *unused)
502{
503 unsigned int nr = 10;
504 struct deferred_entry *first = NULL;
505 unsigned long flags;
506
507 spin_lock_irqsave(&gnttab_list_lock, flags);
508 while (nr--) {
509 struct deferred_entry *entry
510 = list_first_entry(&deferred_list,
511 struct deferred_entry, list);
512
513 if (entry == first)
514 break;
515 list_del(&entry->list);
516 spin_unlock_irqrestore(&gnttab_list_lock, flags);
517 if (_gnttab_end_foreign_access_ref(entry->ref)) {
518 put_free_entry(entry->ref);
519 pr_debug("freeing g.e. %#x (pfn %#lx)\n",
520 entry->ref, page_to_pfn(entry->page));
521 put_page(entry->page);
522 kfree(entry);
523 entry = NULL;
524 } else {
525 if (!--entry->warn_delay)
526 pr_info("g.e. %#x still pending\n", entry->ref);
527 if (!first)
528 first = entry;
529 }
530 spin_lock_irqsave(&gnttab_list_lock, flags);
531 if (entry)
532 list_add_tail(&entry->list, &deferred_list);
533 else if (list_empty(&deferred_list))
534 break;
535 }
536 if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
537 deferred_timer.expires = jiffies + HZ;
538 add_timer(&deferred_timer);
539 }
540 spin_unlock_irqrestore(&gnttab_list_lock, flags);
541}
542
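/*
 * Queue a grant reference (and its backing page) that is still in use by the
 * remote domain, so that freeing it can be retried later from a timer.
 */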
543static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
544{
545 struct deferred_entry *entry;
546 gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
547 const char *what = KERN_WARNING "leaking";
548
549 entry = kmalloc(sizeof(*entry), gfp);
550 if (!page) {
551 unsigned long gfn = gnttab_interface->read_frame(ref);
552
553 page = pfn_to_page(gfn_to_pfn(gfn));
554 get_page(page);
555 }
556
557 if (entry) {
558 unsigned long flags;
559
560 entry->ref = ref;
561 entry->page = page;
562 entry->warn_delay = 60;
563 spin_lock_irqsave(&gnttab_list_lock, flags);
564 list_add_tail(&entry->list, &deferred_list);
565 if (!timer_pending(&deferred_timer)) {
566 deferred_timer.expires = jiffies + HZ;
567 add_timer(&deferred_timer);
568 }
569 spin_unlock_irqrestore(&gnttab_list_lock, flags);
570 what = KERN_DEBUG "deferring";
571 }
572 printk("%s g.e. %#x (pfn %#lx)\n",
573 what, ref, page ? page_to_pfn(page) : -1);
574}
575
576int gnttab_try_end_foreign_access(grant_ref_t ref)
577{
578 int ret = _gnttab_end_foreign_access_ref(ref);
579
580 if (ret)
581 put_free_entry(ref);
582
583 return ret;
584}
585EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
586
587void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
588{
589 if (gnttab_try_end_foreign_access(ref)) {
590 if (page)
591 put_page(page);
592 } else
593 gnttab_add_deferred(ref, page);
594}
595EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
596
597void gnttab_free_grant_reference(grant_ref_t ref)
598{
599 put_free_entry(ref);
600}
601EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
602
603void gnttab_free_grant_references(grant_ref_t head)
604{
605 grant_ref_t ref;
606 unsigned long flags;
607
608 spin_lock_irqsave(&gnttab_list_lock, flags);
609 while (head != GNTTAB_LIST_END) {
610 ref = gnttab_entry(head);
611 put_free_entry_locked(head);
612 head = ref;
613 }
614 check_free_callbacks();
615 spin_unlock_irqrestore(&gnttab_list_lock, flags);
616}
617EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
618
619void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
620{
621 unsigned long flags;
622 unsigned int i;
623
624 spin_lock_irqsave(&gnttab_list_lock, flags);
625 for (i = count; i > 0; i--)
626 put_free_entry_locked(head + i - 1);
627 check_free_callbacks();
628 spin_unlock_irqrestore(&gnttab_list_lock, flags);
629}
630EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);
631
632int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
633{
634 int h = get_free_entries(count);
635
636 if (h < 0)
637 return -ENOSPC;
638
639 *head = h;
640
641 return 0;
642}
643EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
644
645int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
646{
647 int h;
648
649 if (count == 1)
650 h = get_free_entries(1);
651 else
652 h = get_free_entries_seq(count);
653
654 if (h < 0)
655 return -ENOSPC;
656
657 *first = h;
658
659 return 0;
660}
661EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);
662
663int gnttab_empty_grant_references(const grant_ref_t *private_head)
664{
665 return (*private_head == GNTTAB_LIST_END);
666}
667EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
668
669int gnttab_claim_grant_reference(grant_ref_t *private_head)
670{
671 grant_ref_t g = *private_head;
672 if (unlikely(g == GNTTAB_LIST_END))
673 return -ENOSPC;
674 *private_head = gnttab_entry(g);
675 return g;
676}
677EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
678
679void gnttab_release_grant_reference(grant_ref_t *private_head,
680 grant_ref_t release)
681{
682 gnttab_entry(release) = *private_head;
683 *private_head = release;
684}
685EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
686
687void gnttab_request_free_callback(struct gnttab_free_callback *callback,
688 void (*fn)(void *), void *arg, u16 count)
689{
690 unsigned long flags;
691 struct gnttab_free_callback *cb;
692
693 spin_lock_irqsave(&gnttab_list_lock, flags);
694
695 /* Check if the callback is already on the list */
696 cb = gnttab_free_callback_list;
697 while (cb) {
698 if (cb == callback)
699 goto out;
700 cb = cb->next;
701 }
702
703 callback->fn = fn;
704 callback->arg = arg;
705 callback->count = count;
706 callback->next = gnttab_free_callback_list;
707 gnttab_free_callback_list = callback;
708 check_free_callbacks();
709out:
710 spin_unlock_irqrestore(&gnttab_list_lock, flags);
711}
712EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
713
714void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
715{
716 struct gnttab_free_callback **pcb;
717 unsigned long flags;
718
719 spin_lock_irqsave(&gnttab_list_lock, flags);
720 for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
721 if (*pcb == callback) {
722 *pcb = callback->next;
723 break;
724 }
725 }
726 spin_unlock_irqrestore(&gnttab_list_lock, flags);
727}
728EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
729
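/*
 * Number of frames needed to hold the entries of 'frames' grant frames,
 * given 'align' entries per frame (RPP for grant refs, SPP for status).
 */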
730static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
731{
732 return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
733 align;
734}
735
736static int grow_gnttab_list(unsigned int more_frames)
737{
738 unsigned int new_nr_grant_frames, extra_entries, i;
739 unsigned int nr_glist_frames, new_nr_glist_frames;
740 unsigned int grefs_per_frame;
741
742 grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
743
744 new_nr_grant_frames = nr_grant_frames + more_frames;
745 extra_entries = more_frames * grefs_per_frame;
746
747 nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
748 new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
749 for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
750 gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
751 if (!gnttab_list[i])
752 goto grow_nomem;
753 }
754
755 gnttab_set_free(gnttab_size, extra_entries);
756
757 if (!gnttab_free_tail_ptr)
758 gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);
759
760 nr_grant_frames = new_nr_grant_frames;
761 gnttab_size += extra_entries;
762
763 check_free_callbacks();
764
765 return 0;
766
767grow_nomem:
768 while (i-- > nr_glist_frames)
769 free_page((unsigned long) gnttab_list[i]);
770 return -ENOMEM;
771}
772
773static unsigned int __max_nr_grant_frames(void)
774{
775 struct gnttab_query_size query;
776 int rc;
777
778 query.dom = DOMID_SELF;
779
780 rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
781 if ((rc < 0) || (query.status != GNTST_okay))
782 return 4; /* Legacy max supported number of frames */
783
784 return query.max_nr_frames;
785}
786
787unsigned int gnttab_max_grant_frames(void)
788{
789 unsigned int xen_max = __max_nr_grant_frames();
790 static unsigned int boot_max_nr_grant_frames;
791
792 /* First time, initialize it properly. */
793 if (!boot_max_nr_grant_frames)
794 boot_max_nr_grant_frames = __max_nr_grant_frames();
795
796 if (xen_max > boot_max_nr_grant_frames)
797 return boot_max_nr_grant_frames;
798 return xen_max;
799}
800EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
801
802int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
803{
804 xen_pfn_t *pfn;
805 unsigned int max_nr_gframes = __max_nr_grant_frames();
806 unsigned int i;
807 void *vaddr;
808
809 if (xen_auto_xlat_grant_frames.count)
810 return -EINVAL;
811
812 vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
813 if (vaddr == NULL) {
814 pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
815 &addr);
816 return -ENOMEM;
817 }
818 pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
819 if (!pfn) {
820 memunmap(vaddr);
821 return -ENOMEM;
822 }
823 for (i = 0; i < max_nr_gframes; i++)
824 pfn[i] = XEN_PFN_DOWN(addr) + i;
825
826 xen_auto_xlat_grant_frames.vaddr = vaddr;
827 xen_auto_xlat_grant_frames.pfn = pfn;
828 xen_auto_xlat_grant_frames.count = max_nr_gframes;
829
830 return 0;
831}
832EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
833
834void gnttab_free_auto_xlat_frames(void)
835{
836 if (!xen_auto_xlat_grant_frames.count)
837 return;
838 kfree(xen_auto_xlat_grant_frames.pfn);
839 memunmap(xen_auto_xlat_grant_frames.vaddr);
840
841 xen_auto_xlat_grant_frames.pfn = NULL;
842 xen_auto_xlat_grant_frames.count = 0;
843 xen_auto_xlat_grant_frames.vaddr = NULL;
844}
845EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
846
847int gnttab_pages_set_private(int nr_pages, struct page **pages)
848{
849 int i;
850
851 for (i = 0; i < nr_pages; i++) {
852#if BITS_PER_LONG < 64
853 struct xen_page_foreign *foreign;
854
855 foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
856 if (!foreign)
857 return -ENOMEM;
858
859 set_page_private(pages[i], (unsigned long)foreign);
860#endif
861 SetPagePrivate(pages[i]);
862 }
863
864 return 0;
865}
866EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
867
868/**
869 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
870 * @nr_pages: number of pages to alloc
871 * @pages: returns the pages
872 */
873int gnttab_alloc_pages(int nr_pages, struct page **pages)
874{
875 int ret;
876
877 ret = xen_alloc_unpopulated_pages(nr_pages, pages);
878 if (ret < 0)
879 return ret;
880
881 ret = gnttab_pages_set_private(nr_pages, pages);
882 if (ret < 0)
883 gnttab_free_pages(nr_pages, pages);
884
885 return ret;
886}
887EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
888
889#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
890static inline void cache_init(struct gnttab_page_cache *cache)
891{
892 cache->pages = NULL;
893}
894
895static inline bool cache_empty(struct gnttab_page_cache *cache)
896{
897 return !cache->pages;
898}
899
900static inline struct page *cache_deq(struct gnttab_page_cache *cache)
901{
902 struct page *page;
903
904 page = cache->pages;
905 cache->pages = page->zone_device_data;
906
907 return page;
908}
909
910static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
911{
912 page->zone_device_data = cache->pages;
913 cache->pages = page;
914}
915#else
916static inline void cache_init(struct gnttab_page_cache *cache)
917{
918 INIT_LIST_HEAD(&cache->pages);
919}
920
921static inline bool cache_empty(struct gnttab_page_cache *cache)
922{
923 return list_empty(&cache->pages);
924}
925
926static inline struct page *cache_deq(struct gnttab_page_cache *cache)
927{
928 struct page *page;
929
930 page = list_first_entry(&cache->pages, struct page, lru);
931 list_del(&page->lru);
932
933 return page;
934}
935
936static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
937{
938 list_add(&page->lru, &cache->pages);
939}
940#endif
941
942void gnttab_page_cache_init(struct gnttab_page_cache *cache)
943{
944 spin_lock_init(&cache->lock);
945 cache_init(cache);
946 cache->num_pages = 0;
947}
948EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
949
950int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
951{
952 unsigned long flags;
953
954 spin_lock_irqsave(&cache->lock, flags);
955
956 if (cache_empty(cache)) {
957 spin_unlock_irqrestore(&cache->lock, flags);
958 return gnttab_alloc_pages(1, page);
959 }
960
961 page[0] = cache_deq(cache);
962 cache->num_pages--;
963
964 spin_unlock_irqrestore(&cache->lock, flags);
965
966 return 0;
967}
968EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
969
970void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
971 unsigned int num)
972{
973 unsigned long flags;
974 unsigned int i;
975
976 spin_lock_irqsave(&cache->lock, flags);
977
978 for (i = 0; i < num; i++)
979 cache_enq(cache, page[i]);
980 cache->num_pages += num;
981
982 spin_unlock_irqrestore(&cache->lock, flags);
983}
984EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
985
986void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
987{
988 struct page *page[10];
989 unsigned int i = 0;
990 unsigned long flags;
991
992 spin_lock_irqsave(&cache->lock, flags);
993
994 while (cache->num_pages > num) {
995 page[i] = cache_deq(cache);
996 cache->num_pages--;
997 if (++i == ARRAY_SIZE(page)) {
998 spin_unlock_irqrestore(&cache->lock, flags);
999 gnttab_free_pages(i, page);
1000 i = 0;
1001 spin_lock_irqsave(&cache->lock, flags);
1002 }
1003 }
1004
1005 spin_unlock_irqrestore(&cache->lock, flags);
1006
1007 if (i != 0)
1008 gnttab_free_pages(i, page);
1009}
1010EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
1011
1012void gnttab_pages_clear_private(int nr_pages, struct page **pages)
1013{
1014 int i;
1015
1016 for (i = 0; i < nr_pages; i++) {
1017 if (PagePrivate(pages[i])) {
1018#if BITS_PER_LONG < 64
1019 kfree((void *)page_private(pages[i]));
1020#endif
1021 ClearPagePrivate(pages[i]);
1022 }
1023 }
1024}
1025EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
1026
/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
1032void gnttab_free_pages(int nr_pages, struct page **pages)
1033{
1034 gnttab_pages_clear_private(nr_pages, pages);
1035 xen_free_unpopulated_pages(nr_pages, pages);
1036}
1037EXPORT_SYMBOL_GPL(gnttab_free_pages);
1038
1039#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
1040/**
1041 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
1042 * @args: arguments to the function
1043 */
1044int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
1045{
1046 unsigned long pfn, start_pfn;
1047 size_t size;
1048 int i, ret;
1049
1050 if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
1051 return -ENOMEM;
1052
1053 size = args->nr_pages << PAGE_SHIFT;
1054 if (args->coherent)
1055 args->vaddr = dma_alloc_coherent(args->dev, size,
1056 &args->dev_bus_addr,
1057 GFP_KERNEL | __GFP_NOWARN);
1058 else
1059 args->vaddr = dma_alloc_wc(args->dev, size,
1060 &args->dev_bus_addr,
1061 GFP_KERNEL | __GFP_NOWARN);
1062 if (!args->vaddr) {
1063 pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
1064 return -ENOMEM;
1065 }
1066
1067 start_pfn = __phys_to_pfn(args->dev_bus_addr);
1068 for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
1069 pfn++, i++) {
1070 struct page *page = pfn_to_page(pfn);
1071
1072 args->pages[i] = page;
1073 args->frames[i] = xen_page_to_gfn(page);
1074 xenmem_reservation_scrub_page(page);
1075 }
1076
1077 xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
1078
1079 ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
1080 if (ret != args->nr_pages) {
1081 pr_debug("Failed to decrease reservation for DMA buffer\n");
1082 ret = -EFAULT;
1083 goto fail;
1084 }
1085
1086 ret = gnttab_pages_set_private(args->nr_pages, args->pages);
1087 if (ret < 0)
1088 goto fail;
1089
1090 return 0;
1091
1092fail:
1093 gnttab_dma_free_pages(args);
1094 return ret;
1095}
1096EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
1097
1098/**
1099 * gnttab_dma_free_pages - free DMAable pages
1100 * @args: arguments to the function
1101 */
1102int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
1103{
1104 size_t size;
1105 int i, ret;
1106
1107 gnttab_pages_clear_private(args->nr_pages, args->pages);
1108
1109 for (i = 0; i < args->nr_pages; i++)
1110 args->frames[i] = page_to_xen_pfn(args->pages[i]);
1111
1112 ret = xenmem_reservation_increase(args->nr_pages, args->frames);
1113 if (ret != args->nr_pages) {
1114 pr_debug("Failed to increase reservation for DMA buffer\n");
1115 ret = -EFAULT;
1116 } else {
1117 ret = 0;
1118 }
1119
1120 xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
1121 args->frames);
1122
1123 size = args->nr_pages << PAGE_SHIFT;
1124 if (args->coherent)
1125 dma_free_coherent(args->dev, size,
1126 args->vaddr, args->dev_bus_addr);
1127 else
1128 dma_free_wc(args->dev, size,
1129 args->vaddr, args->dev_bus_addr);
1130 return ret;
1131}
1132EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
1133#endif
1134
1135/* Handling of paged out grant targets (GNTST_eagain) */
1136#define MAX_DELAY 256
1137static inline void
1138gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
1139 const char *func)
1140{
1141 unsigned delay = 1;
1142
1143 do {
1144 BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
1145 if (*status == GNTST_eagain)
1146 msleep(delay++);
1147 } while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
1148
1149 if (delay >= MAX_DELAY) {
1150 pr_err("%s: %s eagain grant\n", func, current->comm);
1151 *status = GNTST_bad_page;
1152 }
1153}
1154
1155void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
1156{
1157 struct gnttab_map_grant_ref *op;
1158
1159 if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
1160 BUG();
1161 for (op = batch; op < batch + count; op++)
1162 if (op->status == GNTST_eagain)
1163 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
1164 &op->status, __func__);
1165}
1166EXPORT_SYMBOL_GPL(gnttab_batch_map);
1167
1168void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
1169{
1170 struct gnttab_copy *op;
1171
1172 if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
1173 BUG();
1174 for (op = batch; op < batch + count; op++)
1175 if (op->status == GNTST_eagain)
1176 gnttab_retry_eagain_gop(GNTTABOP_copy, op,
1177 &op->status, __func__);
1178}
1179EXPORT_SYMBOL_GPL(gnttab_batch_copy);
1180
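/*
 * Split the byte range [offset, offset + len) of a page (which may be larger
 * than a Xen page) into Xen-page-sized chunks and invoke fn() once per chunk
 * with the corresponding grant frame, offset and length.
 */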
1181void gnttab_foreach_grant_in_range(struct page *page,
1182 unsigned int offset,
1183 unsigned int len,
1184 xen_grant_fn_t fn,
1185 void *data)
1186{
1187 unsigned int goffset;
1188 unsigned int glen;
1189 unsigned long xen_pfn;
1190
1191 len = min_t(unsigned int, PAGE_SIZE - offset, len);
1192 goffset = xen_offset_in_page(offset);
1193
1194 xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
1195
1196 while (len) {
1197 glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
1198 fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1199
1200 goffset = 0;
1201 xen_pfn++;
1202 len -= glen;
1203 }
1204}
1205EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
1206
1207void gnttab_foreach_grant(struct page **pages,
1208 unsigned int nr_grefs,
1209 xen_grant_fn_t fn,
1210 void *data)
1211{
1212 unsigned int goffset = 0;
1213 unsigned long xen_pfn = 0;
1214 unsigned int i;
1215
1216 for (i = 0; i < nr_grefs; i++) {
1217 if ((i % XEN_PFN_PER_PAGE) == 0) {
1218 xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1219 goffset = 0;
1220 }
1221
1222 fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1223
1224 goffset += XEN_PAGE_SIZE;
1225 xen_pfn++;
1226 }
1227}
1228
1229int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1230 struct gnttab_map_grant_ref *kmap_ops,
1231 struct page **pages, unsigned int count)
1232{
1233 int i, ret;
1234
1235 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1236 if (ret)
1237 return ret;
1238
1239 for (i = 0; i < count; i++) {
1240 switch (map_ops[i].status) {
1241 case GNTST_okay:
1242 {
1243 struct xen_page_foreign *foreign;
1244
1245 SetPageForeign(pages[i]);
1246 foreign = xen_page_foreign(pages[i]);
1247 foreign->domid = map_ops[i].dom;
1248 foreign->gref = map_ops[i].ref;
1249 break;
1250 }
1251
1252 case GNTST_no_device_space:
1253 pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1254 break;
1255
1256 case GNTST_eagain:
1257 /* Retry eagain maps */
1258 gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1259 map_ops + i,
1260 &map_ops[i].status, __func__);
1261 /* Test status in next loop iteration. */
1262 i--;
1263 break;
1264
1265 default:
1266 break;
1267 }
1268 }
1269
1270 return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1271}
1272EXPORT_SYMBOL_GPL(gnttab_map_refs);
1273
1274int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1275 struct gnttab_unmap_grant_ref *kunmap_ops,
1276 struct page **pages, unsigned int count)
1277{
1278 unsigned int i;
1279 int ret;
1280
1281 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1282 if (ret)
1283 return ret;
1284
1285 for (i = 0; i < count; i++)
1286 ClearPageForeign(pages[i]);
1287
1288 return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1289}
1290EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
1291
1292#define GNTTAB_UNMAP_REFS_DELAY 5
1293
1294static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
1295
1296static void gnttab_unmap_work(struct work_struct *work)
1297{
1298 struct gntab_unmap_queue_data
1299 *unmap_data = container_of(work,
1300 struct gntab_unmap_queue_data,
1301 gnttab_work.work);
1302 if (unmap_data->age != UINT_MAX)
1303 unmap_data->age++;
1304 __gnttab_unmap_refs_async(unmap_data);
1305}
1306
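/*
 * If any of the pages is still referenced elsewhere, postpone the unmap and
 * retry via delayed work with a delay that grows with the item's age;
 * otherwise unmap now and invoke the completion callback.
 */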
1307static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1308{
1309 int ret;
1310 int pc;
1311
1312 for (pc = 0; pc < item->count; pc++) {
1313 if (page_count(item->pages[pc]) > 1) {
1314 unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1315 schedule_delayed_work(&item->gnttab_work,
1316 msecs_to_jiffies(delay));
1317 return;
1318 }
1319 }
1320
1321 ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1322 item->pages, item->count);
1323 item->done(ret, item);
1324}
1325
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

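/*
 * Synchronous variant of the above: unmap_refs_callback() records the
 * result and gnttab_unmap_refs_sync() sleeps on a completion until the
 * asynchronous request has finished.
 */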
static void unmap_refs_callback(int result,
				struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);

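/*
 * Number of status frames needed to cover nr_grant_frames shared frames
 * when the version 2 layout is in use (SPP is the number of grant status
 * entries that fit in one page).
 */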
static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

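/*
 * The version 2 layout keeps the status bits in separate frames, so in
 * addition to the shared frames the status frame list is fetched with
 * GNTTABOP_get_status_frames and mapped at grstatus.
 */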
static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc: the array is fully initialized by the
	 * GNTTABOP_get_status_frames hypercall below.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom = DOMID_SELF;
	getframes.nr_frames = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

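/*
 * Make grant table frames start_idx..end_idx available to this domain.
 * Auto-translated guests populate their physmap directly with
 * XENMEM_add_to_physmap; other guests ask for the frame addresses with
 * GNTTABOP_setup_table and then map them through the per-version
 * map_frames() hook.
 */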
static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc: the array is fully initialized by the
	 * GNTTABOP_setup_table hypercall below.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom = DOMID_SELF;
	setup.nr_frames = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version = 1,
	.grefs_per_grant_frame = XEN_PAGE_SIZE /
				 sizeof(struct grant_entry_v1),
	.map_frames = gnttab_map_frames_v1,
	.unmap_frames = gnttab_unmap_frames_v1,
	.update_entry = gnttab_update_entry_v1,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v1,
	.read_frame = gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version = 2,
	.grefs_per_grant_frame = XEN_PAGE_SIZE /
				 sizeof(union grant_entry_v2),
	.map_frames = gnttab_map_frames_v2,
	.unmap_frames = gnttab_unmap_frames_v2,
	.update_entry = gnttab_update_entry_v2,
	.end_foreign_access_ref = gnttab_end_foreign_access_ref_v2,
	.read_frame = gnttab_read_frame_v2,
};

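/*
 * Version 1 grant entries store the frame number in a 32-bit field, so
 * the version 2 layout is required once frame numbers can exceed 32 bits:
 * on x86 PV this is derived from the machine address width reported by
 * the Xen CPUID leaves, otherwise from max_possible_pfn.
 */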
static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

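/*
 * Map the grant frames currently in use, bailing out if the hypervisor
 * does not offer enough frames.  Auto-translated domains reuse the shared
 * area prepared beforehand in xen_auto_xlat_grant_frames.
 */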
static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("grant table shared frames are not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

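/*
 * Grow the grant table by enough frames to provide at least req_entries
 * additional references, then extend the free list to match.  Fails with
 * -ENOSPC once gnttab_max_grant_frames() would be exceeded.
 */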
static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

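/*
 * Full initialization: negotiate the table version, size the free list
 * and free bitmap for the maximum number of references the hypervisor
 * allows, map the initial grant frame and mark everything beyond the
 * reserved entries as free.
 */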
int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames, max_nr_grefs;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	max_nr_grefs = max_nr_grant_frames *
		       gnttab_interface->grefs_per_grant_frame;
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = max_nr_grefs / RPP;

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
	if (!gnttab_free_bitmap) {
		ret = -ENOMEM;
		goto ini_nomem;
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;

	gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
			gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);

	pr_info("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	bitmap_free(gnttab_free_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);