v6.2
   1/******************************************************************************
   2 * grant_table.c
   3 *
   4 * Granting foreign access to our memory reservation.
   5 *
   6 * Copyright (c) 2005-2006, Christopher Clark
   7 * Copyright (c) 2004-2005, K A Fraser
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License version 2
  11 * as published by the Free Software Foundation; or, when distributed
  12 * separately from the Linux kernel or incorporated into other
  13 * software packages, subject to the following license:
  14 *
  15 * Permission is hereby granted, free of charge, to any person obtaining a copy
  16 * of this source file (the "Software"), to deal in the Software without
  17 * restriction, including without limitation the rights to use, copy, modify,
  18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  19 * and to permit persons to whom the Software is furnished to do so, subject to
  20 * the following conditions:
  21 *
  22 * The above copyright notice and this permission notice shall be included in
  23 * all copies or substantial portions of the Software.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  31 * IN THE SOFTWARE.
  32 */
  33
  34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  35
  36#include <linux/bitmap.h>
  37#include <linux/memblock.h>
  38#include <linux/sched.h>
  39#include <linux/mm.h>
  40#include <linux/slab.h>
  41#include <linux/vmalloc.h>
  42#include <linux/uaccess.h>
  43#include <linux/io.h>
  44#include <linux/delay.h>
  45#include <linux/hardirq.h>
  46#include <linux/workqueue.h>
  47#include <linux/ratelimit.h>
  48#include <linux/moduleparam.h>
  49#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
  50#include <linux/dma-mapping.h>
  51#endif
  52
  53#include <xen/xen.h>
  54#include <xen/interface/xen.h>
  55#include <xen/page.h>
  56#include <xen/grant_table.h>
  57#include <xen/interface/memory.h>
  58#include <xen/hvc-console.h>
  59#include <xen/swiotlb-xen.h>
  60#include <xen/balloon.h>
  61#ifdef CONFIG_X86
  62#include <asm/xen/cpuid.h>
  63#endif
  64#include <xen/mem-reservation.h>
  65#include <asm/xen/hypercall.h>
  66#include <asm/xen/interface.h>
  67
  68#include <asm/sync_bitops.h>
  69
  70#define GNTTAB_LIST_END 0xffffffff
  71
  72static grant_ref_t **gnttab_list;
  73static unsigned int nr_grant_frames;
  74
  75/*
  76 * Handling of free grants:
  77 *
  78 * Free grants are in a simple list anchored in gnttab_free_head. They are
  79 * linked by grant ref, the last element contains GNTTAB_LIST_END. The number
  80 * of free entries is stored in gnttab_free_count.
  81 * Additionally there is a bitmap of free entries anchored in
   82 * gnttab_free_bitmap. It is used to simplify the allocation of multiple
   83 * consecutive grants, which is needed e.g. to support virtio.
  84 * gnttab_last_free is used to add free entries of new frames at the end of
  85 * the free list.
  86 * gnttab_free_tail_ptr specifies the variable which references the start
  87 * of consecutive free grants ending with gnttab_last_free. This pointer is
  88 * updated in a rather defensive way, in order to avoid performance hits in
  89 * hot paths.
  90 * All those variables are protected by gnttab_list_lock.
  91 */
  92static int gnttab_free_count;
  93static unsigned int gnttab_size;
  94static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
  95static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
  96static grant_ref_t *gnttab_free_tail_ptr;
  97static unsigned long *gnttab_free_bitmap;
  98static DEFINE_SPINLOCK(gnttab_list_lock);
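/*
 * Editorial illustration (not from the upstream sources): if, say, entries
 * 5, 9 and 10 were the only free grants and were linked in that order, the
 * bookkeeping above would hold
 *
 *	gnttab_free_head  == 5
 *	gnttab_entry(5)   == 9
 *	gnttab_entry(9)   == 10
 *	gnttab_entry(10)  == GNTTAB_LIST_END
 *	gnttab_free_count == 3
 *
 * with bits 5, 9 and 10 set in gnttab_free_bitmap.
 */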
  99
 100struct grant_frames xen_auto_xlat_grant_frames;
 101static unsigned int xen_gnttab_version;
 102module_param_named(version, xen_gnttab_version, uint, 0);
 103
 104static union {
 105	struct grant_entry_v1 *v1;
 106	union grant_entry_v2 *v2;
 107	void *addr;
 108} gnttab_shared;
 109
  110/* This is a structure of function pointers for the grant table. */
 111struct gnttab_ops {
 112	/*
 113	 * Version of the grant interface.
 114	 */
 115	unsigned int version;
 116	/*
 117	 * Grant refs per grant frame.
 118	 */
 119	unsigned int grefs_per_grant_frame;
 120	/*
  121	 * Map a list of frames for storing grant entries. The frames parameter
  122	 * holds the grant table frame addresses while the grant table is being
  123	 * set up, and nr_gframes is the number of frames to map. Returning
  124	 * GNTST_okay means success; a negative value means failure.
 125	 */
 126	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
 127	/*
  128	 * Release the list of frames which was mapped in map_frames(),
  129	 * including any grant entry status frames.
 130	 */
 131	void (*unmap_frames)(void);
 132	/*
  133	 * Introduce a valid entry into the grant table, granting the frame of
  134	 * this grant entry to a domain for access. The ref parameter is the
  135	 * reference of the introduced grant entry, domid is the id of the
  136	 * domain being granted access, frame is the page frame to be granted,
  137	 * and flags is the status to be set in the grant entry.
 138	 */
 139	void (*update_entry)(grant_ref_t ref, domid_t domid,
 140			     unsigned long frame, unsigned flags);
 141	/*
  142	 * Stop granting access to a domain through a grant entry. The ref
  143	 * parameter is the reference of the grant entry whose access will be
  144	 * stopped. If the grant entry is currently mapped for reading or
  145	 * writing, just return failure (== 0) without tearing down the grant.
  146	 * Otherwise, stop grant access for this entry and return success (== 1).
  147	 */
 148	int (*end_foreign_access_ref)(grant_ref_t ref);
 149	/*
 150	 * Read the frame number related to a given grant reference.
 151	 */
 152	unsigned long (*read_frame)(grant_ref_t ref);
 153};
 154
 155struct unmap_refs_callback_data {
 156	struct completion completion;
 157	int result;
 158};
 159
 160static const struct gnttab_ops *gnttab_interface;
 161
  162/* This reflects the status of grant entries, so it acts as a global value. */
 163static grant_status_t *grstatus;
 164
 165static struct gnttab_free_callback *gnttab_free_callback_list;
 166
 167static int gnttab_expand(unsigned int req_entries);
 168
 169#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
 170#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 171
 172static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 173{
 174	return &gnttab_list[(entry) / RPP][(entry) % RPP];
 175}
 176/* This can be used as an l-value */
 177#define gnttab_entry(entry) (*__gnttab_entry(entry))
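/*
 * Worked example (editorial addition): assuming 4 KiB pages and the 4-byte
 * grant_ref_t, RPP is 1024, so grant reference 2500 is stored at
 * gnttab_list[2][452], because 2500 / 1024 == 2 and 2500 % 1024 == 452.
 */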
 178
 179static int get_free_entries(unsigned count)
 180{
 181	unsigned long flags;
 182	int ref, rc = 0;
 183	grant_ref_t head;
 184
 185	spin_lock_irqsave(&gnttab_list_lock, flags);
 186
 187	if ((gnttab_free_count < count) &&
 188	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
 189		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 190		return rc;
 191	}
 192
 193	ref = head = gnttab_free_head;
 194	gnttab_free_count -= count;
 195	while (count--) {
 196		bitmap_clear(gnttab_free_bitmap, head, 1);
 197		if (gnttab_free_tail_ptr == __gnttab_entry(head))
 198			gnttab_free_tail_ptr = &gnttab_free_head;
 199		if (count)
 200			head = gnttab_entry(head);
 201	}
 202	gnttab_free_head = gnttab_entry(head);
 203	gnttab_entry(head) = GNTTAB_LIST_END;
 204
 205	if (!gnttab_free_count) {
 206		gnttab_last_free = GNTTAB_LIST_END;
 207		gnttab_free_tail_ptr = NULL;
 208	}
 209
 210	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 211
 212	return ref;
 213}
 214
 215static int get_seq_entry_count(void)
 216{
 217	if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
 218	    *gnttab_free_tail_ptr == GNTTAB_LIST_END)
 219		return 0;
 220
 221	return gnttab_last_free - *gnttab_free_tail_ptr + 1;
 222}
 223
 224/* Rebuilds the free grant list and tries to find count consecutive entries. */
 225static int get_free_seq(unsigned int count)
 226{
 227	int ret = -ENOSPC;
 228	unsigned int from, to;
 229	grant_ref_t *last;
 230
 231	gnttab_free_tail_ptr = &gnttab_free_head;
 232	last = &gnttab_free_head;
 233
 234	for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
 235	     from < gnttab_size;
 236	     from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
 237		to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
 238					from + 1);
 239		if (ret < 0 && to - from >= count) {
 240			ret = from;
 241			bitmap_clear(gnttab_free_bitmap, ret, count);
 242			from += count;
 243			gnttab_free_count -= count;
 244			if (from == to)
 245				continue;
 246		}
 247
 248		/*
 249		 * Recreate the free list in order to have it properly sorted.
 250		 * This is needed to make sure that the free tail has the maximum
 251		 * possible size.
 252		 */
 253		while (from < to) {
 254			*last = from;
 255			last = __gnttab_entry(from);
 256			gnttab_last_free = from;
 257			from++;
 258		}
 259		if (to < gnttab_size)
 260			gnttab_free_tail_ptr = __gnttab_entry(to - 1);
 261	}
 262
 263	*last = GNTTAB_LIST_END;
 264	if (gnttab_last_free != gnttab_size - 1)
 265		gnttab_free_tail_ptr = NULL;
 266
 267	return ret;
 268}
 269
 270static int get_free_entries_seq(unsigned int count)
 271{
 272	unsigned long flags;
 273	int ret = 0;
 274
 275	spin_lock_irqsave(&gnttab_list_lock, flags);
 276
 277	if (gnttab_free_count < count) {
 278		ret = gnttab_expand(count - gnttab_free_count);
 279		if (ret < 0)
 280			goto out;
 281	}
 282
 283	if (get_seq_entry_count() < count) {
 284		ret = get_free_seq(count);
 285		if (ret >= 0)
 286			goto out;
 287		ret = gnttab_expand(count - get_seq_entry_count());
 288		if (ret < 0)
 289			goto out;
 290	}
 291
 292	ret = *gnttab_free_tail_ptr;
 293	*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
 294	gnttab_free_count -= count;
 295	if (!gnttab_free_count)
 296		gnttab_free_tail_ptr = NULL;
 297	bitmap_clear(gnttab_free_bitmap, ret, count);
 298
 299 out:
 300	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 301
 302	return ret;
 303}
 304
 305static void do_free_callbacks(void)
 306{
 307	struct gnttab_free_callback *callback, *next;
 308
 309	callback = gnttab_free_callback_list;
 310	gnttab_free_callback_list = NULL;
 311
 312	while (callback != NULL) {
 313		next = callback->next;
 314		if (gnttab_free_count >= callback->count) {
 315			callback->next = NULL;
 316			callback->fn(callback->arg);
 317		} else {
 318			callback->next = gnttab_free_callback_list;
 319			gnttab_free_callback_list = callback;
 320		}
 321		callback = next;
 322	}
 323}
 324
 325static inline void check_free_callbacks(void)
 326{
 327	if (unlikely(gnttab_free_callback_list))
 328		do_free_callbacks();
 329}
 330
 331static void put_free_entry_locked(grant_ref_t ref)
 332{
 333	if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
 334		return;
 335
 336	gnttab_entry(ref) = gnttab_free_head;
 337	gnttab_free_head = ref;
 338	if (!gnttab_free_count)
 339		gnttab_last_free = ref;
 340	if (gnttab_free_tail_ptr == &gnttab_free_head)
 341		gnttab_free_tail_ptr = __gnttab_entry(ref);
 342	gnttab_free_count++;
 343	bitmap_set(gnttab_free_bitmap, ref, 1);
 344}
 345
 346static void put_free_entry(grant_ref_t ref)
 347{
 348	unsigned long flags;
 349
 350	spin_lock_irqsave(&gnttab_list_lock, flags);
 351	put_free_entry_locked(ref);
 352	check_free_callbacks();
 353	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 354}
 355
 356static void gnttab_set_free(unsigned int start, unsigned int n)
 357{
 358	unsigned int i;
 359
 360	for (i = start; i < start + n - 1; i++)
 361		gnttab_entry(i) = i + 1;
 362
 363	gnttab_entry(i) = GNTTAB_LIST_END;
 364	if (!gnttab_free_count) {
 365		gnttab_free_head = start;
 366		gnttab_free_tail_ptr = &gnttab_free_head;
 367	} else {
 368		gnttab_entry(gnttab_last_free) = start;
 369	}
 370	gnttab_free_count += n;
 371	gnttab_last_free = i;
 372
 373	bitmap_set(gnttab_free_bitmap, start, n);
 374}
 375
 376/*
  377 * The following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 378 * Introducing a valid entry into the grant table:
 379 *  1. Write ent->domid.
 380 *  2. Write ent->frame: Frame to which access is permitted.
 381 *  3. Write memory barrier (WMB).
 382 *  4. Write ent->flags, inc. valid type.
 383 */
 384static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
 385				   unsigned long frame, unsigned flags)
 386{
 387	gnttab_shared.v1[ref].domid = domid;
 388	gnttab_shared.v1[ref].frame = frame;
 389	wmb();
 390	gnttab_shared.v1[ref].flags = flags;
 391}
 392
 393static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
 394				   unsigned long frame, unsigned int flags)
 395{
 396	gnttab_shared.v2[ref].hdr.domid = domid;
 397	gnttab_shared.v2[ref].full_page.frame = frame;
 398	wmb();	/* Hypervisor concurrent accesses. */
 399	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
 400}
 401
 402/*
 403 * Public grant-issuing interface functions
 404 */
 405void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
 406				     unsigned long frame, int readonly)
 407{
 408	gnttab_interface->update_entry(ref, domid, frame,
 409			   GTF_permit_access | (readonly ? GTF_readonly : 0));
 410}
 411EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
 412
 413int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 414				int readonly)
 415{
 416	int ref;
 417
 418	ref = get_free_entries(1);
 419	if (unlikely(ref < 0))
 420		return -ENOSPC;
 421
 422	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
 423
 424	return ref;
 425}
 426EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
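/*
 * Usage sketch (editorial addition, not upstream code): a frontend granting
 * one of its pages read/write to a backend domain and revoking the grant
 * again. "otherend_id" and "page" are hypothetical caller-owned values;
 * xen_page_to_gfn() comes from <xen/page.h>.
 *
 *	int ref = gnttab_grant_foreign_access(otherend_id,
 *					      xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		return ref;
 *	... advertise "ref" to the backend, e.g. via xenstore ...
 *	gnttab_end_foreign_access(ref, page);
 *
 * If the backend still has the grant mapped at that point,
 * gnttab_end_foreign_access() defers the release and takes over the page,
 * as implemented further below.
 */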
 427
 428static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
 429{
 430	u16 flags, nflags;
 431	u16 *pflags;
 432
 433	pflags = &gnttab_shared.v1[ref].flags;
 434	nflags = *pflags;
 435	do {
 436		flags = nflags;
 437		if (flags & (GTF_reading|GTF_writing))
 438			return 0;
 439	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
 440
 441	return 1;
 442}
 443
 444static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
 445{
 446	gnttab_shared.v2[ref].hdr.flags = 0;
 447	mb();	/* Concurrent access by hypervisor. */
 448	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
 449		return 0;
 450	} else {
 451		/*
 452		 * The read of grstatus needs to have acquire semantics.
 453		 *  On x86, reads already have that, and we just need to
 454		 * protect against compiler reorderings.
 455		 * On other architectures we may need a full barrier.
 456		 */
 457#ifdef CONFIG_X86
 458		barrier();
 459#else
 460		mb();
 461#endif
 462	}
 463
 464	return 1;
 465}
 466
 467static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
 468{
 469	return gnttab_interface->end_foreign_access_ref(ref);
 470}
 471
 472int gnttab_end_foreign_access_ref(grant_ref_t ref)
 473{
 474	if (_gnttab_end_foreign_access_ref(ref))
 475		return 1;
 476	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
 477	return 0;
 478}
 479EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 480
 481static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
 482{
 483	return gnttab_shared.v1[ref].frame;
 484}
 485
 486static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
 487{
 488	return gnttab_shared.v2[ref].full_page.frame;
 489}
 490
 491struct deferred_entry {
 492	struct list_head list;
 493	grant_ref_t ref;
 494	uint16_t warn_delay;
 495	struct page *page;
 496};
 497static LIST_HEAD(deferred_list);
 498static void gnttab_handle_deferred(struct timer_list *);
 499static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
 500
 501static void gnttab_handle_deferred(struct timer_list *unused)
 502{
 503	unsigned int nr = 10;
 504	struct deferred_entry *first = NULL;
 505	unsigned long flags;
 506
 507	spin_lock_irqsave(&gnttab_list_lock, flags);
 508	while (nr--) {
 509		struct deferred_entry *entry
 510			= list_first_entry(&deferred_list,
 511					   struct deferred_entry, list);
 512
 513		if (entry == first)
 514			break;
 515		list_del(&entry->list);
 516		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 517		if (_gnttab_end_foreign_access_ref(entry->ref)) {
 518			put_free_entry(entry->ref);
 519			pr_debug("freeing g.e. %#x (pfn %#lx)\n",
 520				 entry->ref, page_to_pfn(entry->page));
 521			put_page(entry->page);
 522			kfree(entry);
 523			entry = NULL;
 524		} else {
 525			if (!--entry->warn_delay)
 526				pr_info("g.e. %#x still pending\n", entry->ref);
 527			if (!first)
 528				first = entry;
 529		}
 530		spin_lock_irqsave(&gnttab_list_lock, flags);
 531		if (entry)
 532			list_add_tail(&entry->list, &deferred_list);
 533		else if (list_empty(&deferred_list))
 534			break;
 535	}
 536	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
 537		deferred_timer.expires = jiffies + HZ;
 538		add_timer(&deferred_timer);
 539	}
 540	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 541}
 542
 543static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
 544{
 545	struct deferred_entry *entry;
 546	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
 547	const char *what = KERN_WARNING "leaking";
 548
 549	entry = kmalloc(sizeof(*entry), gfp);
 550	if (!page) {
 551		unsigned long gfn = gnttab_interface->read_frame(ref);
 552
 553		page = pfn_to_page(gfn_to_pfn(gfn));
 554		get_page(page);
 555	}
 556
 557	if (entry) {
 558		unsigned long flags;
 559
 560		entry->ref = ref;
 561		entry->page = page;
 562		entry->warn_delay = 60;
 563		spin_lock_irqsave(&gnttab_list_lock, flags);
 564		list_add_tail(&entry->list, &deferred_list);
 565		if (!timer_pending(&deferred_timer)) {
 566			deferred_timer.expires = jiffies + HZ;
 567			add_timer(&deferred_timer);
 568		}
 569		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 570		what = KERN_DEBUG "deferring";
 571	}
 572	printk("%s g.e. %#x (pfn %#lx)\n",
 573	       what, ref, page ? page_to_pfn(page) : -1);
 574}
 575
 576int gnttab_try_end_foreign_access(grant_ref_t ref)
 577{
 578	int ret = _gnttab_end_foreign_access_ref(ref);
 579
 580	if (ret)
 581		put_free_entry(ref);
 582
 583	return ret;
 584}
 585EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);
 586
 587void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
 588{
 589	if (gnttab_try_end_foreign_access(ref)) {
 590		if (page)
 591			put_page(page);
 592	} else
 593		gnttab_add_deferred(ref, page);
 594}
 595EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
 596
 597void gnttab_free_grant_reference(grant_ref_t ref)
 598{
 599	put_free_entry(ref);
 600}
 601EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
 602
 603void gnttab_free_grant_references(grant_ref_t head)
 604{
 605	grant_ref_t ref;
 606	unsigned long flags;
 607
 608	spin_lock_irqsave(&gnttab_list_lock, flags);
 609	while (head != GNTTAB_LIST_END) {
 610		ref = gnttab_entry(head);
 611		put_free_entry_locked(head);
 612		head = ref;
 613	}
 614	check_free_callbacks();
 615	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 616}
 617EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 618
 619void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
 620{
 621	unsigned long flags;
 622	unsigned int i;
 623
 624	spin_lock_irqsave(&gnttab_list_lock, flags);
 625	for (i = count; i > 0; i--)
 626		put_free_entry_locked(head + i - 1);
 627	check_free_callbacks();
 628	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 629}
 630EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);
 631
 632int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
 633{
 634	int h = get_free_entries(count);
 635
 636	if (h < 0)
 637		return -ENOSPC;
 638
 639	*head = h;
 640
 641	return 0;
 642}
 643EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
 644
 645int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
 646{
 647	int h;
 648
 649	if (count == 1)
 650		h = get_free_entries(1);
 651	else
 652		h = get_free_entries_seq(count);
 653
 654	if (h < 0)
 655		return -ENOSPC;
 656
 657	*first = h;
 658
 659	return 0;
 660}
 661EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);
 662
 663int gnttab_empty_grant_references(const grant_ref_t *private_head)
 664{
 665	return (*private_head == GNTTAB_LIST_END);
 666}
 667EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
 668
 669int gnttab_claim_grant_reference(grant_ref_t *private_head)
 670{
 671	grant_ref_t g = *private_head;
 672	if (unlikely(g == GNTTAB_LIST_END))
 673		return -ENOSPC;
 674	*private_head = gnttab_entry(g);
 675	return g;
 676}
 677EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
 678
 679void gnttab_release_grant_reference(grant_ref_t *private_head,
 680				    grant_ref_t release)
 681{
 682	gnttab_entry(release) = *private_head;
 683	*private_head = release;
 684}
 685EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
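/*
 * Usage sketch (editorial addition): the pre-allocation pattern used by
 * drivers that must not fail while building a request. "otherend_id" and
 * "page" are hypothetical; all functions used are the ones defined above.
 *
 *	grant_ref_t gref_head;
 *	int ref, err;
 *
 *	err = gnttab_alloc_grant_references(16, &gref_head);
 *	if (err)
 *		return err;
 *
 *	ref = gnttab_claim_grant_reference(&gref_head);
 *	if (ref >= 0)
 *		gnttab_grant_foreign_access_ref(ref, otherend_id,
 *						xen_page_to_gfn(page), 0);
 *
 *	... later, once gnttab_end_foreign_access_ref(ref) has succeeded ...
 *	gnttab_release_grant_reference(&gref_head, ref);
 *	gnttab_free_grant_references(gref_head);
 */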
 686
 687void gnttab_request_free_callback(struct gnttab_free_callback *callback,
 688				  void (*fn)(void *), void *arg, u16 count)
 689{
 690	unsigned long flags;
 691	struct gnttab_free_callback *cb;
 692
 693	spin_lock_irqsave(&gnttab_list_lock, flags);
 694
 695	/* Check if the callback is already on the list */
 696	cb = gnttab_free_callback_list;
 697	while (cb) {
 698		if (cb == callback)
 699			goto out;
 700		cb = cb->next;
 701	}
 702
 703	callback->fn = fn;
 704	callback->arg = arg;
 705	callback->count = count;
 706	callback->next = gnttab_free_callback_list;
 707	gnttab_free_callback_list = callback;
 708	check_free_callbacks();
 709out:
 710	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 711}
 712EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
 713
 714void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
 715{
 716	struct gnttab_free_callback **pcb;
 717	unsigned long flags;
 718
 719	spin_lock_irqsave(&gnttab_list_lock, flags);
 720	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
 721		if (*pcb == callback) {
 722			*pcb = callback->next;
 723			break;
 724		}
 725	}
 726	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 727}
 728EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
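/*
 * Usage sketch (editorial addition): a driver that ran out of grant
 * references can ask to be notified once at least "count" of them are free
 * again. The callback runs under gnttab_list_lock with interrupts disabled,
 * so it must not sleep; kicking a workqueue is a typical pattern. "my_cb"
 * and "dev" are hypothetical.
 *
 *	static void my_cb(void *arg)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		schedule_work(&dev->resume_work);
 *	}
 *
 *	gnttab_request_free_callback(&dev->free_cb, my_cb, dev, 16);
 *	...
 *	gnttab_cancel_free_callback(&dev->free_cb);
 */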
 729
 730static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
 731{
 732	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
 733	       align;
 734}
 735
 736static int grow_gnttab_list(unsigned int more_frames)
 737{
 738	unsigned int new_nr_grant_frames, extra_entries, i;
 739	unsigned int nr_glist_frames, new_nr_glist_frames;
 740	unsigned int grefs_per_frame;
 741
 742	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 743
 744	new_nr_grant_frames = nr_grant_frames + more_frames;
 745	extra_entries = more_frames * grefs_per_frame;
 746
 747	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
 748	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
 749	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
 750		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
 751		if (!gnttab_list[i])
 752			goto grow_nomem;
 753	}
 754
 755	gnttab_set_free(gnttab_size, extra_entries);
 756
 757	if (!gnttab_free_tail_ptr)
 758		gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);
 759
 760	nr_grant_frames = new_nr_grant_frames;
 761	gnttab_size += extra_entries;
 762
 763	check_free_callbacks();
 764
 765	return 0;
 766
 767grow_nomem:
 768	while (i-- > nr_glist_frames)
 769		free_page((unsigned long) gnttab_list[i]);
 770	return -ENOMEM;
 771}
 772
 773static unsigned int __max_nr_grant_frames(void)
 774{
 775	struct gnttab_query_size query;
 776	int rc;
 777
 778	query.dom = DOMID_SELF;
 779
 780	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
 781	if ((rc < 0) || (query.status != GNTST_okay))
 782		return 4; /* Legacy max supported number of frames */
 783
 784	return query.max_nr_frames;
 785}
 786
 787unsigned int gnttab_max_grant_frames(void)
 788{
 789	unsigned int xen_max = __max_nr_grant_frames();
 790	static unsigned int boot_max_nr_grant_frames;
 791
 792	/* First time, initialize it properly. */
 793	if (!boot_max_nr_grant_frames)
 794		boot_max_nr_grant_frames = __max_nr_grant_frames();
 795
 796	if (xen_max > boot_max_nr_grant_frames)
 797		return boot_max_nr_grant_frames;
 798	return xen_max;
 799}
 800EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 801
 802int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 803{
 804	xen_pfn_t *pfn;
 805	unsigned int max_nr_gframes = __max_nr_grant_frames();
 806	unsigned int i;
 807	void *vaddr;
 808
 809	if (xen_auto_xlat_grant_frames.count)
 810		return -EINVAL;
 811
 812	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
 813	if (vaddr == NULL) {
 814		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
 815			&addr);
 816		return -ENOMEM;
 817	}
 818	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
 819	if (!pfn) {
 820		memunmap(vaddr);
 821		return -ENOMEM;
 822	}
 823	for (i = 0; i < max_nr_gframes; i++)
 824		pfn[i] = XEN_PFN_DOWN(addr) + i;
 825
 826	xen_auto_xlat_grant_frames.vaddr = vaddr;
 827	xen_auto_xlat_grant_frames.pfn = pfn;
 828	xen_auto_xlat_grant_frames.count = max_nr_gframes;
 829
 830	return 0;
 831}
 832EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
 833
 834void gnttab_free_auto_xlat_frames(void)
 835{
 836	if (!xen_auto_xlat_grant_frames.count)
 837		return;
 838	kfree(xen_auto_xlat_grant_frames.pfn);
 839	memunmap(xen_auto_xlat_grant_frames.vaddr);
 840
 841	xen_auto_xlat_grant_frames.pfn = NULL;
 842	xen_auto_xlat_grant_frames.count = 0;
 843	xen_auto_xlat_grant_frames.vaddr = NULL;
 844}
 845EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 846
 847int gnttab_pages_set_private(int nr_pages, struct page **pages)
 848{
 849	int i;
 850
 851	for (i = 0; i < nr_pages; i++) {
 852#if BITS_PER_LONG < 64
 853		struct xen_page_foreign *foreign;
 854
 855		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
 856		if (!foreign)
 857			return -ENOMEM;
 858
 859		set_page_private(pages[i], (unsigned long)foreign);
 860#endif
 861		SetPagePrivate(pages[i]);
 862	}
 863
 864	return 0;
 865}
 866EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
 867
 868/**
  869 * gnttab_alloc_pages - alloc pages suitable for mapping foreign grants into
 870 * @nr_pages: number of pages to alloc
 871 * @pages: returns the pages
 872 */
 873int gnttab_alloc_pages(int nr_pages, struct page **pages)
 874{
 875	int ret;
 876
 877	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
 878	if (ret < 0)
 879		return ret;
 880
 881	ret = gnttab_pages_set_private(nr_pages, pages);
 882	if (ret < 0)
 883		gnttab_free_pages(nr_pages, pages);
 884
 885	return ret;
 886}
 887EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
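/*
 * Usage sketch (editorial addition): allocating backing pages for mapping
 * foreign grants into and releasing them again; error handling trimmed.
 *
 *	struct page *pages[8];
 *	int err;
 *
 *	err = gnttab_alloc_pages(ARRAY_SIZE(pages), pages);
 *	if (err)
 *		return err;
 *	... map foreign grants into these pages, use them, unmap them ...
 *	gnttab_free_pages(ARRAY_SIZE(pages), pages);
 */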
 888
 889#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
 890static inline void cache_init(struct gnttab_page_cache *cache)
 891{
 892	cache->pages = NULL;
 893}
 894
 895static inline bool cache_empty(struct gnttab_page_cache *cache)
 896{
 897	return !cache->pages;
 898}
 899
 900static inline struct page *cache_deq(struct gnttab_page_cache *cache)
 901{
 902	struct page *page;
 903
 904	page = cache->pages;
 905	cache->pages = page->zone_device_data;
 906
 907	return page;
 908}
 909
 910static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
 911{
 912	page->zone_device_data = cache->pages;
 913	cache->pages = page;
 914}
 915#else
 916static inline void cache_init(struct gnttab_page_cache *cache)
 917{
 918	INIT_LIST_HEAD(&cache->pages);
 919}
 920
 921static inline bool cache_empty(struct gnttab_page_cache *cache)
 922{
 923	return list_empty(&cache->pages);
 924}
 925
 926static inline struct page *cache_deq(struct gnttab_page_cache *cache)
 927{
 928	struct page *page;
 929
 930	page = list_first_entry(&cache->pages, struct page, lru);
 931	list_del(&page->lru);
 932
 933	return page;
 934}
 935
 936static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
 937{
 938	list_add(&page->lru, &cache->pages);
 939}
 940#endif
 941
 942void gnttab_page_cache_init(struct gnttab_page_cache *cache)
 943{
 944	spin_lock_init(&cache->lock);
 945	cache_init(cache);
 946	cache->num_pages = 0;
 947}
 948EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
 949
 950int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
 951{
 952	unsigned long flags;
 953
 954	spin_lock_irqsave(&cache->lock, flags);
 955
 956	if (cache_empty(cache)) {
 957		spin_unlock_irqrestore(&cache->lock, flags);
 958		return gnttab_alloc_pages(1, page);
 959	}
 960
 961	page[0] = cache_deq(cache);
 962	cache->num_pages--;
 963
 964	spin_unlock_irqrestore(&cache->lock, flags);
 965
 966	return 0;
 967}
 968EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
 969
 970void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
 971			   unsigned int num)
 972{
 973	unsigned long flags;
 974	unsigned int i;
 975
 976	spin_lock_irqsave(&cache->lock, flags);
 977
 978	for (i = 0; i < num; i++)
 979		cache_enq(cache, page[i]);
 980	cache->num_pages += num;
 981
 982	spin_unlock_irqrestore(&cache->lock, flags);
 983}
 984EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
 985
 986void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
 987{
 988	struct page *page[10];
 989	unsigned int i = 0;
 990	unsigned long flags;
 991
 992	spin_lock_irqsave(&cache->lock, flags);
 993
 994	while (cache->num_pages > num) {
 995		page[i] = cache_deq(cache);
 996		cache->num_pages--;
 997		if (++i == ARRAY_SIZE(page)) {
 998			spin_unlock_irqrestore(&cache->lock, flags);
 999			gnttab_free_pages(i, page);
1000			i = 0;
1001			spin_lock_irqsave(&cache->lock, flags);
1002		}
1003	}
1004
1005	spin_unlock_irqrestore(&cache->lock, flags);
1006
1007	if (i != 0)
1008		gnttab_free_pages(i, page);
1009}
1010EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
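/*
 * Usage sketch (editorial addition): a backend driver keeping a small cache
 * of grant pages instead of going to the allocator for every request. "vif"
 * is a hypothetical driver structure embedding a struct gnttab_page_cache.
 *
 *	gnttab_page_cache_init(&vif->page_cache);
 *
 *	Per request:
 *		struct page *page;
 *
 *		if (gnttab_page_cache_get(&vif->page_cache, &page))
 *			return -ENOMEM;
 *		...
 *		gnttab_page_cache_put(&vif->page_cache, &page, 1);
 *
 *	Occasionally, e.g. from a timer, trim the cache back to 16 pages:
 *		gnttab_page_cache_shrink(&vif->page_cache, 16);
 */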
1011
1012void gnttab_pages_clear_private(int nr_pages, struct page **pages)
1013{
1014	int i;
1015
1016	for (i = 0; i < nr_pages; i++) {
1017		if (PagePrivate(pages[i])) {
1018#if BITS_PER_LONG < 64
1019			kfree((void *)page_private(pages[i]));
1020#endif
1021			ClearPagePrivate(pages[i]);
1022		}
1023	}
1024}
1025EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
1026
1027/**
1028 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 1029 * @nr_pages: number of pages to free
1030 * @pages: the pages
1031 */
1032void gnttab_free_pages(int nr_pages, struct page **pages)
1033{
1034	gnttab_pages_clear_private(nr_pages, pages);
1035	xen_free_unpopulated_pages(nr_pages, pages);
1036}
1037EXPORT_SYMBOL_GPL(gnttab_free_pages);
1038
1039#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
1040/**
1041 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
1042 * @args: arguments to the function
1043 */
1044int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
1045{
1046	unsigned long pfn, start_pfn;
1047	size_t size;
1048	int i, ret;
1049
1050	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
1051		return -ENOMEM;
1052
1053	size = args->nr_pages << PAGE_SHIFT;
1054	if (args->coherent)
1055		args->vaddr = dma_alloc_coherent(args->dev, size,
1056						 &args->dev_bus_addr,
1057						 GFP_KERNEL | __GFP_NOWARN);
1058	else
1059		args->vaddr = dma_alloc_wc(args->dev, size,
1060					   &args->dev_bus_addr,
1061					   GFP_KERNEL | __GFP_NOWARN);
1062	if (!args->vaddr) {
1063		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
1064		return -ENOMEM;
1065	}
1066
1067	start_pfn = __phys_to_pfn(args->dev_bus_addr);
1068	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
1069			pfn++, i++) {
1070		struct page *page = pfn_to_page(pfn);
1071
1072		args->pages[i] = page;
1073		args->frames[i] = xen_page_to_gfn(page);
1074		xenmem_reservation_scrub_page(page);
1075	}
1076
1077	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
1078
1079	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
1080	if (ret != args->nr_pages) {
1081		pr_debug("Failed to decrease reservation for DMA buffer\n");
1082		ret = -EFAULT;
1083		goto fail;
1084	}
1085
1086	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
1087	if (ret < 0)
1088		goto fail;
1089
1090	return 0;
1091
1092fail:
1093	gnttab_dma_free_pages(args);
1094	return ret;
1095}
1096EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
1097
1098/**
1099 * gnttab_dma_free_pages - free DMAable pages
1100 * @args: arguments to the function
1101 */
1102int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
1103{
1104	size_t size;
1105	int i, ret;
1106
1107	gnttab_pages_clear_private(args->nr_pages, args->pages);
1108
1109	for (i = 0; i < args->nr_pages; i++)
1110		args->frames[i] = page_to_xen_pfn(args->pages[i]);
1111
1112	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
1113	if (ret != args->nr_pages) {
1114		pr_debug("Failed to increase reservation for DMA buffer\n");
1115		ret = -EFAULT;
1116	} else {
1117		ret = 0;
1118	}
1119
1120	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
1121					     args->frames);
1122
1123	size = args->nr_pages << PAGE_SHIFT;
1124	if (args->coherent)
1125		dma_free_coherent(args->dev, size,
1126				  args->vaddr, args->dev_bus_addr);
1127	else
1128		dma_free_wc(args->dev, size,
1129			    args->vaddr, args->dev_bus_addr);
1130	return ret;
1131}
1132EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
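/*
 * Usage sketch (editorial addition): allocating a DMA-able buffer that can
 * then be granted to another domain. "dev", "nr", "pages" and "frames" are
 * hypothetical caller-provided values; the field names are those used by
 * struct gnttab_dma_alloc_args above.
 *
 *	struct gnttab_dma_alloc_args args = {
 *		.dev = dev,
 *		.coherent = true,
 *		.nr_pages = nr,
 *		.pages = pages,
 *		.frames = frames,
 *	};
 *	int err;
 *
 *	err = gnttab_dma_alloc_pages(&args);
 *	if (err)
 *		return err;
 *	... grant args.frames[] to the other domain and do the I/O ...
 *	err = gnttab_dma_free_pages(&args);
 */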
1133#endif
1134
1135/* Handling of paged out grant targets (GNTST_eagain) */
1136#define MAX_DELAY 256
1137static inline void
1138gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
1139						const char *func)
1140{
1141	unsigned delay = 1;
1142
1143	do {
1144		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
1145		if (*status == GNTST_eagain)
1146			msleep(delay++);
1147	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
1148
1149	if (delay >= MAX_DELAY) {
1150		pr_err("%s: %s eagain grant\n", func, current->comm);
1151		*status = GNTST_bad_page;
1152	}
1153}
1154
1155void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
1156{
1157	struct gnttab_map_grant_ref *op;
1158
1159	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
1160		BUG();
1161	for (op = batch; op < batch + count; op++)
1162		if (op->status == GNTST_eagain)
1163			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
1164						&op->status, __func__);
1165}
1166EXPORT_SYMBOL_GPL(gnttab_batch_map);
1167
1168void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
1169{
1170	struct gnttab_copy *op;
1171
1172	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
1173		BUG();
1174	for (op = batch; op < batch + count; op++)
1175		if (op->status == GNTST_eagain)
1176			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
1177						&op->status, __func__);
1178}
1179EXPORT_SYMBOL_GPL(gnttab_batch_copy);
1180
1181void gnttab_foreach_grant_in_range(struct page *page,
1182				   unsigned int offset,
1183				   unsigned int len,
1184				   xen_grant_fn_t fn,
1185				   void *data)
1186{
1187	unsigned int goffset;
1188	unsigned int glen;
1189	unsigned long xen_pfn;
1190
1191	len = min_t(unsigned int, PAGE_SIZE - offset, len);
1192	goffset = xen_offset_in_page(offset);
1193
1194	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
1195
1196	while (len) {
1197		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
1198		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1199
1200		goffset = 0;
1201		xen_pfn++;
1202		len -= glen;
1203	}
1204}
1205EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
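/*
 * Usage sketch (editorial addition): splitting a buffer that lives in one
 * kernel page into the XEN_PAGE_SIZE-sized grants backing it (relevant when
 * PAGE_SIZE > XEN_PAGE_SIZE, e.g. with 64 KiB kernel pages). "setup_one_grant"
 * and "struct my_ctx" are hypothetical; the callback signature is assumed to
 * match xen_grant_fn_t from <xen/grant_table.h>.
 *
 *	static void setup_one_grant(unsigned long gfn, unsigned int offset,
 *				    unsigned int len, void *data)
 *	{
 *		struct my_ctx *ctx = data;
 *
 *		... record gfn/offset/len in the ring request ...
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, setup_one_grant, ctx);
 */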
1206
1207void gnttab_foreach_grant(struct page **pages,
1208			  unsigned int nr_grefs,
1209			  xen_grant_fn_t fn,
1210			  void *data)
1211{
1212	unsigned int goffset = 0;
1213	unsigned long xen_pfn = 0;
1214	unsigned int i;
1215
1216	for (i = 0; i < nr_grefs; i++) {
1217		if ((i % XEN_PFN_PER_PAGE) == 0) {
1218			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1219			goffset = 0;
1220		}
1221
1222		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1223
1224		goffset += XEN_PAGE_SIZE;
1225		xen_pfn++;
1226	}
1227}
1228
1229int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1230		    struct gnttab_map_grant_ref *kmap_ops,
1231		    struct page **pages, unsigned int count)
1232{
1233	int i, ret;
1234
1235	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1236	if (ret)
1237		return ret;
1238
1239	for (i = 0; i < count; i++) {
1240		switch (map_ops[i].status) {
1241		case GNTST_okay:
1242		{
1243			struct xen_page_foreign *foreign;
1244
1245			SetPageForeign(pages[i]);
1246			foreign = xen_page_foreign(pages[i]);
1247			foreign->domid = map_ops[i].dom;
1248			foreign->gref = map_ops[i].ref;
1249			break;
1250		}
1251
1252		case GNTST_no_device_space:
1253			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1254			break;
1255
1256		case GNTST_eagain:
1257			/* Retry eagain maps */
1258			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1259						map_ops + i,
1260						&map_ops[i].status, __func__);
1261			/* Test status in next loop iteration. */
1262			i--;
1263			break;
1264
1265		default:
1266			break;
1267		}
1268	}
1269
1270	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1271}
1272EXPORT_SYMBOL_GPL(gnttab_map_refs);
1273
1274int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1275		      struct gnttab_unmap_grant_ref *kunmap_ops,
1276		      struct page **pages, unsigned int count)
1277{
1278	unsigned int i;
1279	int ret;
1280
1281	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1282	if (ret)
1283		return ret;
1284
1285	for (i = 0; i < count; i++)
1286		ClearPageForeign(pages[i]);
1287
1288	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1289}
1290EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
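/*
 * Usage sketch (editorial addition): mapping a single foreign grant into a
 * locally owned page and unmapping it again. "ref", "otherend_id" and "page"
 * are hypothetical; GNTMAP_host_map and the op layouts are assumed from the
 * Xen grant table interface headers. NULL kmap_ops are passed since no
 * separate kernel mapping is tracked here.
 *
 *	struct gnttab_map_grant_ref map = {
 *		.host_addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page)),
 *		.flags = GNTMAP_host_map,
 *		.ref = ref,
 *		.dom = otherend_id,
 *	};
 *	struct gnttab_unmap_grant_ref unmap;
 *
 *	if (gnttab_map_refs(&map, NULL, &page, 1) || map.status != GNTST_okay)
 *		return -EFAULT;
 *	... use the mapping ...
 *	unmap.host_addr = map.host_addr;
 *	unmap.handle = map.handle;
 *	unmap.dev_bus_addr = 0;
 *	gnttab_unmap_refs(&unmap, NULL, &page, 1);
 */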
1291
1292#define GNTTAB_UNMAP_REFS_DELAY 5
1293
1294static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
1295
1296static void gnttab_unmap_work(struct work_struct *work)
1297{
1298	struct gntab_unmap_queue_data
1299		*unmap_data = container_of(work, 
1300					   struct gntab_unmap_queue_data,
1301					   gnttab_work.work);
1302	if (unmap_data->age != UINT_MAX)
1303		unmap_data->age++;
1304	__gnttab_unmap_refs_async(unmap_data);
1305}
1306
1307static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1308{
1309	int ret;
1310	int pc;
1311
1312	for (pc = 0; pc < item->count; pc++) {
1313		if (page_count(item->pages[pc]) > 1) {
1314			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1315			schedule_delayed_work(&item->gnttab_work,
1316					      msecs_to_jiffies(delay));
1317			return;
1318		}
1319	}
1320
1321	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1322				item->pages, item->count);
1323	item->done(ret, item);
1324}
1325
1326void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1327{
1328	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1329	item->age = 0;
1330
1331	__gnttab_unmap_refs_async(item);
1332}
1333EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1334
1335static void unmap_refs_callback(int result,
1336		struct gntab_unmap_queue_data *data)
1337{
1338	struct unmap_refs_callback_data *d = data->data;
1339
1340	d->result = result;
1341	complete(&d->completion);
1342}
1343
1344int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1345{
1346	struct unmap_refs_callback_data data;
1347
1348	init_completion(&data.completion);
1349	item->data = &data;
1350	item->done = &unmap_refs_callback;
1351	gnttab_unmap_refs_async(item);
1352	wait_for_completion(&data.completion);
1353
1354	return data.result;
1355}
1356EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
1357
1358static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1359{
1360	return gnttab_frames(nr_grant_frames, SPP);
1361}
1362
1363static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1364{
1365	int rc;
1366
1367	rc = arch_gnttab_map_shared(frames, nr_gframes,
1368				    gnttab_max_grant_frames(),
1369				    &gnttab_shared.addr);
1370	BUG_ON(rc);
1371
1372	return 0;
1373}
1374
1375static void gnttab_unmap_frames_v1(void)
1376{
1377	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1378}
1379
1380static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1381{
1382	uint64_t *sframes;
1383	unsigned int nr_sframes;
1384	struct gnttab_get_status_frames getframes;
1385	int rc;
1386
1387	nr_sframes = nr_status_frames(nr_gframes);
1388
 1389	/* No need for kzalloc as it is initialized by the following
 1390	 * GNTTABOP_get_status_frames hypercall.
1391	 */
1392	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1393	if (!sframes)
1394		return -ENOMEM;
1395
1396	getframes.dom        = DOMID_SELF;
1397	getframes.nr_frames  = nr_sframes;
1398	set_xen_guest_handle(getframes.frame_list, sframes);
1399
1400	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1401				       &getframes, 1);
1402	if (rc == -ENOSYS) {
1403		kfree(sframes);
1404		return -ENOSYS;
1405	}
1406
1407	BUG_ON(rc || getframes.status);
1408
1409	rc = arch_gnttab_map_status(sframes, nr_sframes,
1410				    nr_status_frames(gnttab_max_grant_frames()),
1411				    &grstatus);
1412	BUG_ON(rc);
1413	kfree(sframes);
1414
1415	rc = arch_gnttab_map_shared(frames, nr_gframes,
1416				    gnttab_max_grant_frames(),
1417				    &gnttab_shared.addr);
1418	BUG_ON(rc);
1419
1420	return 0;
1421}
1422
1423static void gnttab_unmap_frames_v2(void)
1424{
1425	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1426	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1427}
1428
1429static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1430{
1431	struct gnttab_setup_table setup;
1432	xen_pfn_t *frames;
1433	unsigned int nr_gframes = end_idx + 1;
1434	int rc;
1435
1436	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1437		struct xen_add_to_physmap xatp;
1438		unsigned int i = end_idx;
1439		rc = 0;
1440		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1441		/*
1442		 * Loop backwards, so that the first hypercall has the largest
1443		 * index, ensuring that the table will grow only once.
1444		 */
1445		do {
1446			xatp.domid = DOMID_SELF;
1447			xatp.idx = i;
1448			xatp.space = XENMAPSPACE_grant_table;
1449			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1450			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1451			if (rc != 0) {
1452				pr_warn("grant table add_to_physmap failed, err=%d\n",
1453					rc);
1454				break;
1455			}
1456		} while (i-- > start_idx);
1457
1458		return rc;
1459	}
1460
 1461	/* No need for kzalloc as it is initialized by the following
 1462	 * GNTTABOP_setup_table hypercall.
1463	 */
1464	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
1465	if (!frames)
1466		return -ENOMEM;
1467
1468	setup.dom        = DOMID_SELF;
1469	setup.nr_frames  = nr_gframes;
1470	set_xen_guest_handle(setup.frame_list, frames);
1471
1472	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1473	if (rc == -ENOSYS) {
1474		kfree(frames);
1475		return -ENOSYS;
1476	}
1477
1478	BUG_ON(rc || setup.status);
1479
1480	rc = gnttab_interface->map_frames(frames, nr_gframes);
1481
1482	kfree(frames);
1483
1484	return rc;
1485}
1486
1487static const struct gnttab_ops gnttab_v1_ops = {
1488	.version			= 1,
1489	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1490					  sizeof(struct grant_entry_v1),
1491	.map_frames			= gnttab_map_frames_v1,
1492	.unmap_frames			= gnttab_unmap_frames_v1,
1493	.update_entry			= gnttab_update_entry_v1,
1494	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1495	.read_frame			= gnttab_read_frame_v1,
1496};
1497
1498static const struct gnttab_ops gnttab_v2_ops = {
1499	.version			= 2,
1500	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1501					  sizeof(union grant_entry_v2),
1502	.map_frames			= gnttab_map_frames_v2,
1503	.unmap_frames			= gnttab_unmap_frames_v2,
1504	.update_entry			= gnttab_update_entry_v2,
1505	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1506	.read_frame			= gnttab_read_frame_v2,
1507};
1508
1509static bool gnttab_need_v2(void)
1510{
1511#ifdef CONFIG_X86
1512	uint32_t base, width;
1513
1514	if (xen_pv_domain()) {
1515		base = xen_cpuid_base();
1516		if (cpuid_eax(base) < 5)
1517			return false;	/* Information not available, use V1. */
1518		width = cpuid_ebx(base + 5) &
1519			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1520		return width > 32 + PAGE_SHIFT;
1521	}
1522#endif
1523	return !!(max_possible_pfn >> 32);
1524}
1525
1526static void gnttab_request_version(void)
1527{
1528	long rc;
1529	struct gnttab_set_version gsv;
1530
1531	if (gnttab_need_v2())
1532		gsv.version = 2;
1533	else
1534		gsv.version = 1;
1535
1536	/* Boot parameter overrides automatic selection. */
1537	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1538		gsv.version = xen_gnttab_version;
1539
1540	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1541	if (rc == 0 && gsv.version == 2)
1542		gnttab_interface = &gnttab_v2_ops;
1543	else
1544		gnttab_interface = &gnttab_v1_ops;
1545	pr_info("Grant tables using version %d layout\n",
1546		gnttab_interface->version);
1547}
1548
1549static int gnttab_setup(void)
1550{
1551	unsigned int max_nr_gframes;
1552
1553	max_nr_gframes = gnttab_max_grant_frames();
1554	if (max_nr_gframes < nr_grant_frames)
1555		return -ENOSYS;
1556
1557	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1558		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1559		if (gnttab_shared.addr == NULL) {
1560			pr_warn("gnttab share frames is not mapped!\n");
1561			return -ENOMEM;
1562		}
1563	}
1564	return gnttab_map(0, nr_grant_frames - 1);
1565}
1566
1567int gnttab_resume(void)
1568{
1569	gnttab_request_version();
1570	return gnttab_setup();
1571}
1572
1573int gnttab_suspend(void)
1574{
1575	if (!xen_feature(XENFEAT_auto_translated_physmap))
1576		gnttab_interface->unmap_frames();
1577	return 0;
1578}
1579
1580static int gnttab_expand(unsigned int req_entries)
1581{
1582	int rc;
1583	unsigned int cur, extra;
1584
1585	cur = nr_grant_frames;
1586	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1587		 gnttab_interface->grefs_per_grant_frame);
1588	if (cur + extra > gnttab_max_grant_frames()) {
1589		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1590				    " cur=%u extra=%u limit=%u"
1591				    " gnttab_free_count=%u req_entries=%u\n",
1592				    cur, extra, gnttab_max_grant_frames(),
1593				    gnttab_free_count, req_entries);
1594		return -ENOSPC;
1595	}
1596
1597	rc = gnttab_map(cur, cur + extra - 1);
1598	if (rc == 0)
1599		rc = grow_gnttab_list(extra);
1600
1601	return rc;
1602}
1603
1604int gnttab_init(void)
1605{
1606	int i;
1607	unsigned long max_nr_grant_frames, max_nr_grefs;
1608	unsigned int max_nr_glist_frames, nr_glist_frames;
1609	int ret;
1610
1611	gnttab_request_version();
1612	max_nr_grant_frames = gnttab_max_grant_frames();
1613	max_nr_grefs = max_nr_grant_frames *
1614			gnttab_interface->grefs_per_grant_frame;
1615	nr_grant_frames = 1;
1616
1617	/* Determine the maximum number of frames required for the
1618	 * grant reference free list on the current hypervisor.
1619	 */
1620	max_nr_glist_frames = max_nr_grefs / RPP;
1621
1622	gnttab_list = kmalloc_array(max_nr_glist_frames,
1623				    sizeof(grant_ref_t *),
1624				    GFP_KERNEL);
1625	if (gnttab_list == NULL)
1626		return -ENOMEM;
1627
1628	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1629	for (i = 0; i < nr_glist_frames; i++) {
1630		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1631		if (gnttab_list[i] == NULL) {
1632			ret = -ENOMEM;
1633			goto ini_nomem;
1634		}
1635	}
1636
1637	gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
1638	if (!gnttab_free_bitmap) {
1639		ret = -ENOMEM;
1640		goto ini_nomem;
1641	}
1642
1643	ret = arch_gnttab_init(max_nr_grant_frames,
1644			       nr_status_frames(max_nr_grant_frames));
1645	if (ret < 0)
1646		goto ini_nomem;
1647
1648	if (gnttab_setup() < 0) {
1649		ret = -ENODEV;
1650		goto ini_nomem;
1651	}
1652
1653	gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;
1654
1655	gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
1656			gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);
1657
1658	printk("Grant table initialized\n");
1659	return 0;
1660
1661 ini_nomem:
1662	for (i--; i >= 0; i--)
1663		free_page((unsigned long)gnttab_list[i]);
1664	kfree(gnttab_list);
1665	bitmap_free(gnttab_free_bitmap);
1666	return ret;
1667}
1668EXPORT_SYMBOL_GPL(gnttab_init);
1669
1670static int __gnttab_init(void)
1671{
1672	if (!xen_domain())
1673		return -ENODEV;
1674
1675	/* Delay grant-table initialization in the PV on HVM case */
1676	if (xen_hvm_domain() && !xen_pvh_domain())
1677		return 0;
1678
1679	return gnttab_init();
1680}
1681/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1682 * beforehand to initialize xen_auto_xlat_grant_frames. */
1683core_initcall_sync(__gnttab_init);
v4.17
   1/******************************************************************************
   2 * grant_table.c
   3 *
   4 * Granting foreign access to our memory reservation.
   5 *
   6 * Copyright (c) 2005-2006, Christopher Clark
   7 * Copyright (c) 2004-2005, K A Fraser
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License version 2
  11 * as published by the Free Software Foundation; or, when distributed
  12 * separately from the Linux kernel or incorporated into other
  13 * software packages, subject to the following license:
  14 *
  15 * Permission is hereby granted, free of charge, to any person obtaining a copy
  16 * of this source file (the "Software"), to deal in the Software without
  17 * restriction, including without limitation the rights to use, copy, modify,
  18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  19 * and to permit persons to whom the Software is furnished to do so, subject to
  20 * the following conditions:
  21 *
  22 * The above copyright notice and this permission notice shall be included in
  23 * all copies or substantial portions of the Software.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  31 * IN THE SOFTWARE.
  32 */
  33
  34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  35
  36#include <linux/bootmem.h>
 
  37#include <linux/sched.h>
  38#include <linux/mm.h>
  39#include <linux/slab.h>
  40#include <linux/vmalloc.h>
  41#include <linux/uaccess.h>
  42#include <linux/io.h>
  43#include <linux/delay.h>
  44#include <linux/hardirq.h>
  45#include <linux/workqueue.h>
  46#include <linux/ratelimit.h>
  47#include <linux/moduleparam.h>
 
 
 
  48
  49#include <xen/xen.h>
  50#include <xen/interface/xen.h>
  51#include <xen/page.h>
  52#include <xen/grant_table.h>
  53#include <xen/interface/memory.h>
  54#include <xen/hvc-console.h>
  55#include <xen/swiotlb-xen.h>
  56#include <xen/balloon.h>
  57#ifdef CONFIG_X86
  58#include <asm/xen/cpuid.h>
  59#endif
 
  60#include <asm/xen/hypercall.h>
  61#include <asm/xen/interface.h>
  62
  63#include <asm/pgtable.h>
  64#include <asm/sync_bitops.h>
  65
  66/* External tools reserve first few grant table entries. */
  67#define NR_RESERVED_ENTRIES 8
  68#define GNTTAB_LIST_END 0xffffffff
  69
  70static grant_ref_t **gnttab_list;
  71static unsigned int nr_grant_frames;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  72static int gnttab_free_count;
  73static grant_ref_t gnttab_free_head;
 
 
 
 
  74static DEFINE_SPINLOCK(gnttab_list_lock);
 
  75struct grant_frames xen_auto_xlat_grant_frames;
  76static unsigned int xen_gnttab_version;
  77module_param_named(version, xen_gnttab_version, uint, 0);
  78
  79static union {
  80	struct grant_entry_v1 *v1;
  81	union grant_entry_v2 *v2;
  82	void *addr;
  83} gnttab_shared;
  84
  85/*This is a structure of function pointers for grant table*/
  86struct gnttab_ops {
  87	/*
  88	 * Version of the grant interface.
  89	 */
  90	unsigned int version;
  91	/*
  92	 * Grant refs per grant frame.
  93	 */
  94	unsigned int grefs_per_grant_frame;
  95	/*
  96	 * Mapping a list of frames for storing grant entries. Frames parameter
  97	 * is used to store grant table address when grant table being setup,
  98	 * nr_gframes is the number of frames to map grant table. Returning
  99	 * GNTST_okay means success and negative value means failure.
 100	 */
 101	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
 102	/*
 103	 * Release a list of frames which are mapped in map_frames for grant
 104	 * entry status.
 105	 */
 106	void (*unmap_frames)(void);
 107	/*
 108	 * Introducing a valid entry into the grant table, granting the frame of
 109	 * this grant entry to domain for accessing or transfering. Ref
 110	 * parameter is reference of this introduced grant entry, domid is id of
 111	 * granted domain, frame is the page frame to be granted, and flags is
 112	 * status of the grant entry to be updated.
 113	 */
 114	void (*update_entry)(grant_ref_t ref, domid_t domid,
 115			     unsigned long frame, unsigned flags);
 116	/*
 117	 * Stop granting a grant entry to domain for accessing. Ref parameter is
 118	 * reference of a grant entry whose grant access will be stopped,
 119	 * readonly is not in use in this function. If the grant entry is
 120	 * currently mapped for reading or writing, just return failure(==0)
 121	 * directly and don't tear down the grant access. Otherwise, stop grant
 122	 * access for this entry and return success(==1).
 123	 */
 124	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
 125	/*
 126	 * Stop granting a grant entry to domain for transfer. Ref parameter is
 127	 * reference of a grant entry whose grant transfer will be stopped. If
 128	 * tranfer has not started, just reclaim the grant entry and return
 129	 * failure(==0). Otherwise, wait for the transfer to complete and then
 130	 * return the frame.
 131	 */
 132	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
 133	/*
 134	 * Query the status of a grant entry. Ref parameter is reference of
 135	 * queried grant entry, return value is the status of queried entry.
 136	 * Detailed status(writing/reading) can be gotten from the return value
 137	 * by bit operations.
 138	 */
 139	int (*query_foreign_access)(grant_ref_t ref);
 140};
 141
 142struct unmap_refs_callback_data {
 143	struct completion completion;
 144	int result;
 145};
 146
 147static const struct gnttab_ops *gnttab_interface;
 148
 149/* This reflects status of grant entries, so act as a global value. */
 150static grant_status_t *grstatus;
 151
 152static struct gnttab_free_callback *gnttab_free_callback_list;
 153
 154static int gnttab_expand(unsigned int req_entries);
 155
 156#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
 157#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 158
 159static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 160{
 161	return &gnttab_list[(entry) / RPP][(entry) % RPP];
 162}
 163/* This can be used as an l-value */
 164#define gnttab_entry(entry) (*__gnttab_entry(entry))
 165
 166static int get_free_entries(unsigned count)
 167{
 168	unsigned long flags;
 169	int ref, rc = 0;
 170	grant_ref_t head;
 171
 172	spin_lock_irqsave(&gnttab_list_lock, flags);
 173
 174	if ((gnttab_free_count < count) &&
 175	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
 176		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 177		return rc;
 178	}
 179
 180	ref = head = gnttab_free_head;
 181	gnttab_free_count -= count;
 182	while (count-- > 1)
 183		head = gnttab_entry(head);
 
 
 
 
 
 184	gnttab_free_head = gnttab_entry(head);
 185	gnttab_entry(head) = GNTTAB_LIST_END;
 186
 187	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 188
 189	return ref;
 190}
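/*
 * Worked example (illustrative only, numbers are made up): suppose the free
 * list currently is 7 -> 12 -> 30 -> ... and gnttab_free_count is large
 * enough.  get_free_entries(2) returns 7, walks one link to 12, points
 * gnttab_free_head at gnttab_entry(12) (i.e. 30) and terminates the
 * allocated chain by writing GNTTAB_LIST_END into gnttab_entry(12), so the
 * caller can traverse refs 7 and 12 via gnttab_entry().
 */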
 191
 192static void do_free_callbacks(void)
 193{
 194	struct gnttab_free_callback *callback, *next;
 195
 196	callback = gnttab_free_callback_list;
 197	gnttab_free_callback_list = NULL;
 198
 199	while (callback != NULL) {
 200		next = callback->next;
 201		if (gnttab_free_count >= callback->count) {
 202			callback->next = NULL;
 203			callback->fn(callback->arg);
 204		} else {
 205			callback->next = gnttab_free_callback_list;
 206			gnttab_free_callback_list = callback;
 207		}
 208		callback = next;
 209	}
 210}
 211
 212static inline void check_free_callbacks(void)
 213{
 214	if (unlikely(gnttab_free_callback_list))
 215		do_free_callbacks();
 216}
 217
 218static void put_free_entry(grant_ref_t ref)
 219{
 220	unsigned long flags;
 221	spin_lock_irqsave(&gnttab_list_lock, flags);
 222	gnttab_entry(ref) = gnttab_free_head;
 223	gnttab_free_head = ref;
 224	gnttab_free_count++;
 225	check_free_callbacks();
 226	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 227}
 228
 229/*
 230 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 231 * Introducing a valid entry into the grant table:
 232 *  1. Write ent->domid.
 233 *  2. Write ent->frame:
 234 *      GTF_permit_access:   Frame to which access is permitted.
 235 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 236 *                           frame, or zero if none.
 237 *  3. Write memory barrier (WMB).
 238 *  4. Write ent->flags, inc. valid type.
 239 */
 240static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
 241				   unsigned long frame, unsigned flags)
 242{
 243	gnttab_shared.v1[ref].domid = domid;
 244	gnttab_shared.v1[ref].frame = frame;
 245	wmb();
 246	gnttab_shared.v1[ref].flags = flags;
 247}
 248
 249static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
 250				   unsigned long frame, unsigned int flags)
 251{
 252	gnttab_shared.v2[ref].hdr.domid = domid;
 253	gnttab_shared.v2[ref].full_page.frame = frame;
 254	wmb();	/* Hypervisor concurrent accesses. */
 255	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
 256}
 257
 258/*
 259 * Public grant-issuing interface functions
 260 */
 261void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
 262				     unsigned long frame, int readonly)
 263{
 264	gnttab_interface->update_entry(ref, domid, frame,
 265			   GTF_permit_access | (readonly ? GTF_readonly : 0));
 266}
 267EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
 268
 269int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 270				int readonly)
 271{
 272	int ref;
 273
 274	ref = get_free_entries(1);
 275	if (unlikely(ref < 0))
 276		return -ENOSPC;
 277
 278	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
 279
 280	return ref;
 281}
 282EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
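/*
 * Illustrative usage sketch (not part of this file): a frontend that wants
 * to share one of its pages read-only with its backend could do roughly the
 * following.  "backend_id" and "page" are assumed to be provided by the
 * driver, and xen_page_to_gfn() is assumed for the page-to-gfn conversion;
 * the grant is torn down with gnttab_end_foreign_access() further below.
 *
 *	grant_ref_t ref;
 *	int err;
 *
 *	err = gnttab_grant_foreign_access(backend_id,
 *					  xen_page_to_gfn(page), 1);
 *	if (err < 0)
 *		return err;
 *	ref = err;
 *	... advertise "ref" to the backend, e.g. via xenstore ...
 *	gnttab_end_foreign_access(ref, 1, 0);
 */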
 283
 284static int gnttab_query_foreign_access_v1(grant_ref_t ref)
 285{
 286	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
 287}
 288
 289static int gnttab_query_foreign_access_v2(grant_ref_t ref)
 290{
 291	return grstatus[ref] & (GTF_reading|GTF_writing);
 292}
 293
 294int gnttab_query_foreign_access(grant_ref_t ref)
 295{
 296	return gnttab_interface->query_foreign_access(ref);
 297}
 298EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
 299
 300static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 301{
 302	u16 flags, nflags;
 303	u16 *pflags;
 304
 305	pflags = &gnttab_shared.v1[ref].flags;
 306	nflags = *pflags;
 307	do {
 308		flags = nflags;
 309		if (flags & (GTF_reading|GTF_writing))
 310			return 0;
 311	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
 312
 313	return 1;
 314}
 315
 316static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
 317{
 318	gnttab_shared.v2[ref].hdr.flags = 0;
 319	mb();	/* Concurrent access by hypervisor. */
 320	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
 321		return 0;
 322	} else {
 323		/*
 324		 * The read of grstatus needs to have acquire semantics.
 325		 *  On x86, reads already have that, and we just need to
 326		 * protect against compiler reorderings.
 327		 * On other architectures we may need a full barrier.
 328		 */
 329#ifdef CONFIG_X86
 330		barrier();
 331#else
 332		mb();
 333#endif
 334	}
 335
 336	return 1;
 337}
 338
 339static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 340{
 341	return gnttab_interface->end_foreign_access_ref(ref, readonly);
 342}
 343
 344int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 345{
 346	if (_gnttab_end_foreign_access_ref(ref, readonly))
 347		return 1;
 348	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
 349	return 0;
 350}
 351EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 352
 353struct deferred_entry {
 354	struct list_head list;
 355	grant_ref_t ref;
 356	bool ro;
 357	uint16_t warn_delay;
 358	struct page *page;
 359};
 360static LIST_HEAD(deferred_list);
 361static void gnttab_handle_deferred(struct timer_list *);
 362static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
 363
 364static void gnttab_handle_deferred(struct timer_list *unused)
 365{
 366	unsigned int nr = 10;
 367	struct deferred_entry *first = NULL;
 368	unsigned long flags;
 369
 370	spin_lock_irqsave(&gnttab_list_lock, flags);
 371	while (nr--) {
 372		struct deferred_entry *entry
 373			= list_first_entry(&deferred_list,
 374					   struct deferred_entry, list);
 375
 376		if (entry == first)
 377			break;
 378		list_del(&entry->list);
 379		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 380		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
 381			put_free_entry(entry->ref);
 382			if (entry->page) {
 383				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
 384					 entry->ref, page_to_pfn(entry->page));
 385				put_page(entry->page);
 386			} else
 387				pr_info("freeing g.e. %#x\n", entry->ref);
 388			kfree(entry);
 389			entry = NULL;
 390		} else {
 391			if (!--entry->warn_delay)
 392				pr_info("g.e. %#x still pending\n", entry->ref);
 393			if (!first)
 394				first = entry;
 395		}
 396		spin_lock_irqsave(&gnttab_list_lock, flags);
 397		if (entry)
 398			list_add_tail(&entry->list, &deferred_list);
 399		else if (list_empty(&deferred_list))
 400			break;
 401	}
 402	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
 403		deferred_timer.expires = jiffies + HZ;
 404		add_timer(&deferred_timer);
 405	}
 406	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 407}
 408
 409static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
 410				struct page *page)
 411{
 412	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 413	const char *what = KERN_WARNING "leaking";
 414
 415	if (entry) {
 416		unsigned long flags;
 417
 418		entry->ref = ref;
 419		entry->ro = readonly;
 420		entry->page = page;
 421		entry->warn_delay = 60;
 422		spin_lock_irqsave(&gnttab_list_lock, flags);
 423		list_add_tail(&entry->list, &deferred_list);
 424		if (!timer_pending(&deferred_timer)) {
 425			deferred_timer.expires = jiffies + HZ;
 426			add_timer(&deferred_timer);
 427		}
 428		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 429		what = KERN_DEBUG "deferring";
 430	}
 431	printk("%s g.e. %#x (pfn %#lx)\n",
 432	       what, ref, page ? page_to_pfn(page) : -1);
 433}
 434
 435void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 436			       unsigned long page)
 437{
 438	if (gnttab_end_foreign_access_ref(ref, readonly)) {
 439		put_free_entry(ref);
 440		if (page != 0)
 441			put_page(virt_to_page(page));
 442	} else
 443		gnttab_add_deferred(ref, readonly,
 444				    page ? virt_to_page(page) : NULL);
 445}
 446EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
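/*
 * Illustrative sketch: when the granted page was allocated only for sharing
 * and should be returned to the allocator once the peer has really stopped
 * using it, its address can be handed to gnttab_end_foreign_access() so the
 * deferred-free path above takes ownership of it ("ref" is assumed to come
 * from an earlier gnttab_grant_foreign_access() call):
 *
 *	unsigned long shared = __get_free_page(GFP_KERNEL);
 *	...grant and use the page...
 *	gnttab_end_foreign_access(ref, 0, shared);
 */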
 447
 448int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
 449{
 450	int ref;
 451
 452	ref = get_free_entries(1);
 453	if (unlikely(ref < 0))
 454		return -ENOSPC;
 455	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
 456
 457	return ref;
 458}
 459EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
 460
 461void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
 462				       unsigned long pfn)
 463{
 464	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
 465}
 466EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
 467
 468static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
 469{
 470	unsigned long frame;
 471	u16           flags;
 472	u16          *pflags;
 473
 474	pflags = &gnttab_shared.v1[ref].flags;
 475
 476	/*
  477	 * If the transfer has not even started yet, try to reclaim the grant
 478	 * reference and return failure (== 0).
 479	 */
 480	while (!((flags = *pflags) & GTF_transfer_committed)) {
 481		if (sync_cmpxchg(pflags, flags, 0) == flags)
 482			return 0;
 483		cpu_relax();
 484	}
 485
 486	/* If a transfer is in progress then wait until it is completed. */
 487	while (!(flags & GTF_transfer_completed)) {
 488		flags = *pflags;
 489		cpu_relax();
 490	}
 491
 492	rmb();	/* Read the frame number /after/ reading completion status. */
 493	frame = gnttab_shared.v1[ref].frame;
 494	BUG_ON(frame == 0);
 495
 496	return frame;
 497}
 498
 499static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
 500{
 501	unsigned long frame;
 502	u16           flags;
 503	u16          *pflags;
 504
 505	pflags = &gnttab_shared.v2[ref].hdr.flags;
 506
 507	/*
  508	 * If the transfer has not even started yet, try to reclaim the grant
 509	 * reference and return failure (== 0).
 510	 */
 511	while (!((flags = *pflags) & GTF_transfer_committed)) {
 512		if (sync_cmpxchg(pflags, flags, 0) == flags)
 513			return 0;
 514		cpu_relax();
 515	}
 516
 517	/* If a transfer is in progress then wait until it is completed. */
 518	while (!(flags & GTF_transfer_completed)) {
 519		flags = *pflags;
 520		cpu_relax();
 521	}
 522
 523	rmb();  /* Read the frame number /after/ reading completion status. */
 524	frame = gnttab_shared.v2[ref].full_page.frame;
 525	BUG_ON(frame == 0);
 526
 527	return frame;
 528}
 529
 530unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
 531{
 532	return gnttab_interface->end_foreign_transfer_ref(ref);
 533}
 534EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
 535
 536unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
 537{
 538	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
 539	put_free_entry(ref);
 540	return frame;
 541}
 542EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
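/*
 * Illustrative sketch ("domid" and "pfn" are assumptions of this sketch):
 * offering a pseudo-physical frame slot for transfer and later collecting
 * the frame the peer transferred into it.
 *
 *	int ref = gnttab_grant_foreign_transfer(domid, pfn);
 *	if (ref < 0)
 *		return ref;
 *	...wait for the peer to perform the transfer...
 *	frame = gnttab_end_foreign_transfer(ref);
 *	if (frame == 0)
 *		...the transfer was never started...
 */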
 543
 544void gnttab_free_grant_reference(grant_ref_t ref)
 545{
 546	put_free_entry(ref);
 547}
 548EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
 549
 550void gnttab_free_grant_references(grant_ref_t head)
 551{
 552	grant_ref_t ref;
 553	unsigned long flags;
 554	int count = 1;
 555	if (head == GNTTAB_LIST_END)
 556		return;
 557	spin_lock_irqsave(&gnttab_list_lock, flags);
 558	ref = head;
 559	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
 560		ref = gnttab_entry(ref);
 561		count++;
 562	}
 563	gnttab_entry(ref) = gnttab_free_head;
 564	gnttab_free_head = head;
 565	gnttab_free_count += count;
 566	check_free_callbacks();
 567	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 568}
 569EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 570
 571int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
 572{
 573	int h = get_free_entries(count);
 574
 575	if (h < 0)
 576		return -ENOSPC;
 577
 578	*head = h;
 579
 580	return 0;
 581}
 582EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
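/*
 * Illustrative sketch (assumptions: "n", "domid" and "gfn" come from the
 * driver): a driver can pre-allocate a block of references up front and
 * later claim them one at a time without touching gnttab_list_lock;
 * references that were never claimed are handed back as a batch.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(n, &head) < 0)
 *		return -ENOSPC;
 *	...
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref >= 0)
 *		gnttab_grant_foreign_access_ref(ref, domid, gfn, 0);
 *	...
 *	gnttab_free_grant_references(head);
 */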
 583
 584int gnttab_empty_grant_references(const grant_ref_t *private_head)
 585{
 586	return (*private_head == GNTTAB_LIST_END);
 587}
 588EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
 589
 590int gnttab_claim_grant_reference(grant_ref_t *private_head)
 591{
 592	grant_ref_t g = *private_head;
 593	if (unlikely(g == GNTTAB_LIST_END))
 594		return -ENOSPC;
 595	*private_head = gnttab_entry(g);
 596	return g;
 597}
 598EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
 599
 600void gnttab_release_grant_reference(grant_ref_t *private_head,
 601				    grant_ref_t release)
 602{
 603	gnttab_entry(release) = *private_head;
 604	*private_head = release;
 605}
 606EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
 607
 608void gnttab_request_free_callback(struct gnttab_free_callback *callback,
 609				  void (*fn)(void *), void *arg, u16 count)
 610{
 611	unsigned long flags;
 612	struct gnttab_free_callback *cb;
 613
 614	spin_lock_irqsave(&gnttab_list_lock, flags);
 615
 616	/* Check if the callback is already on the list */
 617	cb = gnttab_free_callback_list;
 618	while (cb) {
 619		if (cb == callback)
 620			goto out;
 621		cb = cb->next;
 622	}
 623
 624	callback->fn = fn;
 625	callback->arg = arg;
 626	callback->count = count;
 627	callback->next = gnttab_free_callback_list;
 628	gnttab_free_callback_list = callback;
 629	check_free_callbacks();
 630out:
 631	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 632}
 633EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
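/*
 * Illustrative sketch: a driver that ran out of grant references can ask to
 * be notified once at least "count" references are free again.  Note that
 * the callback is invoked with gnttab_list_lock held and interrupts
 * disabled, so it usually just schedules further work.  "my_callback",
 * "my_kick" and "my_dev" are assumptions of this sketch.
 *
 *	static struct gnttab_free_callback my_callback;
 *
 *	gnttab_request_free_callback(&my_callback, my_kick, my_dev, 16);
 */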
 634
 635void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
 636{
 637	struct gnttab_free_callback **pcb;
 638	unsigned long flags;
 639
 640	spin_lock_irqsave(&gnttab_list_lock, flags);
 641	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
 642		if (*pcb == callback) {
 643			*pcb = callback->next;
 644			break;
 645		}
 646	}
 647	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 648}
 649EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
 650
 651static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
 652{
 653	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
 654	       align;
 655}
 656
 657static int grow_gnttab_list(unsigned int more_frames)
 658{
 659	unsigned int new_nr_grant_frames, extra_entries, i;
 660	unsigned int nr_glist_frames, new_nr_glist_frames;
 661	unsigned int grefs_per_frame;
 662
 663	BUG_ON(gnttab_interface == NULL);
 664	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 665
 666	new_nr_grant_frames = nr_grant_frames + more_frames;
 667	extra_entries = more_frames * grefs_per_frame;
 668
 669	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
 670	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
 671	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
 672		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
 673		if (!gnttab_list[i])
 674			goto grow_nomem;
 675	}
 676
 677
 678	for (i = grefs_per_frame * nr_grant_frames;
 679	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
 680		gnttab_entry(i) = i + 1;
 681
 682	gnttab_entry(i) = gnttab_free_head;
 683	gnttab_free_head = grefs_per_frame * nr_grant_frames;
 684	gnttab_free_count += extra_entries;
 685
 686	nr_grant_frames = new_nr_grant_frames;
 687
 688	check_free_callbacks();
 689
 690	return 0;
 691
 692grow_nomem:
 693	while (i-- > nr_glist_frames)
 694		free_page((unsigned long) gnttab_list[i]);
 695	return -ENOMEM;
 696}
 697
 698static unsigned int __max_nr_grant_frames(void)
 699{
 700	struct gnttab_query_size query;
 701	int rc;
 702
 703	query.dom = DOMID_SELF;
 704
 705	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
 706	if ((rc < 0) || (query.status != GNTST_okay))
 707		return 4; /* Legacy max supported number of frames */
 708
 709	return query.max_nr_frames;
 710}
 711
 712unsigned int gnttab_max_grant_frames(void)
 713{
 714	unsigned int xen_max = __max_nr_grant_frames();
 715	static unsigned int boot_max_nr_grant_frames;
 716
 717	/* First time, initialize it properly. */
 718	if (!boot_max_nr_grant_frames)
 719		boot_max_nr_grant_frames = __max_nr_grant_frames();
 720
 721	if (xen_max > boot_max_nr_grant_frames)
 722		return boot_max_nr_grant_frames;
 723	return xen_max;
 724}
 725EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 726
 727int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 728{
 729	xen_pfn_t *pfn;
 730	unsigned int max_nr_gframes = __max_nr_grant_frames();
 731	unsigned int i;
 732	void *vaddr;
 733
 734	if (xen_auto_xlat_grant_frames.count)
 735		return -EINVAL;
 736
 737	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
 738	if (vaddr == NULL) {
 739		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
 740			&addr);
 741		return -ENOMEM;
 742	}
 743	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
 744	if (!pfn) {
 745		xen_unmap(vaddr);
 746		return -ENOMEM;
 747	}
 748	for (i = 0; i < max_nr_gframes; i++)
 749		pfn[i] = XEN_PFN_DOWN(addr) + i;
 750
 751	xen_auto_xlat_grant_frames.vaddr = vaddr;
 752	xen_auto_xlat_grant_frames.pfn = pfn;
 753	xen_auto_xlat_grant_frames.count = max_nr_gframes;
 754
 755	return 0;
 756}
 757EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
 758
 759void gnttab_free_auto_xlat_frames(void)
 760{
 761	if (!xen_auto_xlat_grant_frames.count)
 762		return;
 763	kfree(xen_auto_xlat_grant_frames.pfn);
 764	xen_unmap(xen_auto_xlat_grant_frames.vaddr);
 765
 766	xen_auto_xlat_grant_frames.pfn = NULL;
 767	xen_auto_xlat_grant_frames.count = 0;
 768	xen_auto_xlat_grant_frames.vaddr = NULL;
 769}
 770EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 771
 772/**
  773 * gnttab_alloc_pages - alloc pages suitable for mapping foreign grants into
 774 * @nr_pages: number of pages to alloc
 775 * @pages: returns the pages
 776 */
 777int gnttab_alloc_pages(int nr_pages, struct page **pages)
 778{
 779	int i;
 780	int ret;
 781
 782	ret = alloc_xenballooned_pages(nr_pages, pages);
 783	if (ret < 0)
 784		return ret;
 785
 786	for (i = 0; i < nr_pages; i++) {
 787#if BITS_PER_LONG < 64
 788		struct xen_page_foreign *foreign;
 789
 790		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
 791		if (!foreign) {
 792			gnttab_free_pages(nr_pages, pages);
 793			return -ENOMEM;
 794		}
 795		set_page_private(pages[i], (unsigned long)foreign);
 796#endif
 797		SetPagePrivate(pages[i]);
 798	}
 799
 800	return 0;
 801}
 802EXPORT_SYMBOL(gnttab_alloc_pages);
 803
 804/**
 805 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
  806 * @nr_pages: number of pages to free
 807 * @pages: the pages
 808 */
 809void gnttab_free_pages(int nr_pages, struct page **pages)
 810{
 811	int i;
 812
 813	for (i = 0; i < nr_pages; i++) {
 814		if (PagePrivate(pages[i])) {
 815#if BITS_PER_LONG < 64
 816			kfree((void *)page_private(pages[i]));
 817#endif
 818			ClearPagePrivate(pages[i]);
 819		}
 820	}
 821	free_xenballooned_pages(nr_pages, pages);
 822}
 823EXPORT_SYMBOL(gnttab_free_pages);
 824
 825/* Handling of paged out grant targets (GNTST_eagain) */
 826#define MAX_DELAY 256
 827static inline void
 828gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
 829						const char *func)
 830{
 831	unsigned delay = 1;
 832
 833	do {
 834		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
 835		if (*status == GNTST_eagain)
 836			msleep(delay++);
 837	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
 838
 839	if (delay >= MAX_DELAY) {
 840		pr_err("%s: %s eagain grant\n", func, current->comm);
 841		*status = GNTST_bad_page;
 842	}
 843}
 844
 845void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
 846{
 847	struct gnttab_map_grant_ref *op;
 848
 849	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
 850		BUG();
 851	for (op = batch; op < batch + count; op++)
 852		if (op->status == GNTST_eagain)
 853			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
 854						&op->status, __func__);
 855}
 856EXPORT_SYMBOL_GPL(gnttab_batch_map);
 857
 858void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
 859{
 860	struct gnttab_copy *op;
 861
 862	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
 863		BUG();
 864	for (op = batch; op < batch + count; op++)
 865		if (op->status == GNTST_eagain)
 866			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
 867						&op->status, __func__);
 868}
 869EXPORT_SYMBOL_GPL(gnttab_batch_copy);
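/*
 * Illustrative sketch (field names as in struct gnttab_copy from the Xen
 * public headers; "ref", "otherend_id" and "page" are assumptions): copy
 * the first 1024 bytes out of a granted page into a local page and let
 * gnttab_batch_copy() handle GNTST_eagain retries.
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref	= ref,
 *		.source.domid	= otherend_id,
 *		.dest.u.gmfn	= xen_page_to_gfn(page),
 *		.dest.domid	= DOMID_SELF,
 *		.len		= 1024,
 *		.flags		= GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		...handle the error...
 */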
 870
 871void gnttab_foreach_grant_in_range(struct page *page,
 872				   unsigned int offset,
 873				   unsigned int len,
 874				   xen_grant_fn_t fn,
 875				   void *data)
 876{
 877	unsigned int goffset;
 878	unsigned int glen;
 879	unsigned long xen_pfn;
 880
 881	len = min_t(unsigned int, PAGE_SIZE - offset, len);
 882	goffset = xen_offset_in_page(offset);
 883
 884	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
 885
 886	while (len) {
 887		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
 888		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
 889
 890		goffset = 0;
 891		xen_pfn++;
 892		len -= glen;
 893	}
 894}
 895EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
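/*
 * Illustrative sketch of an xen_grant_fn_t callback as consumed by
 * gnttab_foreach_grant_in_range(): the helper splits a (possibly larger
 * than XEN_PAGE_SIZE) kernel page into grant-sized chunks and invokes the
 * callback once per chunk.  "setup_one_grant" and "info" are assumptions of
 * this sketch.
 *
 *	static void setup_one_grant(unsigned long gfn, unsigned int offset,
 *				    unsigned int len, void *data)
 *	{
 *		struct my_setup_info *info = data;
 *
 *		...grant gfn and record (offset, len) for this chunk...
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, setup_one_grant, info);
 */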
 896
 897void gnttab_foreach_grant(struct page **pages,
 898			  unsigned int nr_grefs,
 899			  xen_grant_fn_t fn,
 900			  void *data)
 901{
 902	unsigned int goffset = 0;
 903	unsigned long xen_pfn = 0;
 904	unsigned int i;
 905
 906	for (i = 0; i < nr_grefs; i++) {
 907		if ((i % XEN_PFN_PER_PAGE) == 0) {
 908			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
 909			goffset = 0;
 910		}
 911
 912		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
 913
 914		goffset += XEN_PAGE_SIZE;
 915		xen_pfn++;
 916	}
 917}
 918
 919int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
 920		    struct gnttab_map_grant_ref *kmap_ops,
 921		    struct page **pages, unsigned int count)
 922{
 923	int i, ret;
 924
 925	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
 926	if (ret)
 927		return ret;
 928
 929	for (i = 0; i < count; i++) {
 930		/* Retry eagain maps */
 931		if (map_ops[i].status == GNTST_eagain)
 932			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, map_ops + i,
 933						&map_ops[i].status, __func__);
 934
 935		if (map_ops[i].status == GNTST_okay) {
 936			struct xen_page_foreign *foreign;
 937
 938			SetPageForeign(pages[i]);
 939			foreign = xen_page_foreign(pages[i]);
 940			foreign->domid = map_ops[i].dom;
 941			foreign->gref = map_ops[i].ref;
 942		}
 943	}
 944
 945	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
 946}
 947EXPORT_SYMBOL_GPL(gnttab_map_refs);
 948
 949int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
 950		      struct gnttab_unmap_grant_ref *kunmap_ops,
 951		      struct page **pages, unsigned int count)
 952{
 953	unsigned int i;
 954	int ret;
 955
 956	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
 957	if (ret)
 958		return ret;
 959
 960	for (i = 0; i < count; i++)
 961		ClearPageForeign(pages[i]);
 962
 963	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
 964}
 965EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
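/*
 * Illustrative lifecycle sketch for a backend mapping a single foreign
 * grant.  It assumes the gnttab_set_map_op()/gnttab_set_unmap_op() helpers
 * from include/xen/grant_table.h; "ref" and "otherend_id" come from the
 * frontend and error handling is omitted.
 *
 *	struct gnttab_map_grant_ref map;
 *	struct gnttab_unmap_grant_ref unmap;
 *	struct page *page;
 *	unsigned long addr;
 *
 *	gnttab_alloc_pages(1, &page);
 *	addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 *	gnttab_set_map_op(&map, addr, GNTMAP_host_map, ref, otherend_id);
 *	gnttab_map_refs(&map, NULL, &page, 1);
 *	...use the mapping, keep map.handle around...
 *	gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, map.handle);
 *	gnttab_unmap_refs(&unmap, NULL, &page, 1);
 *	gnttab_free_pages(1, &page);
 */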
 966
 967#define GNTTAB_UNMAP_REFS_DELAY 5
 968
 969static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
 970
 971static void gnttab_unmap_work(struct work_struct *work)
 972{
 973	struct gntab_unmap_queue_data
 974		*unmap_data = container_of(work, 
 975					   struct gntab_unmap_queue_data,
 976					   gnttab_work.work);
 977	if (unmap_data->age != UINT_MAX)
 978		unmap_data->age++;
 979	__gnttab_unmap_refs_async(unmap_data);
 980}
 981
 982static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
 983{
 984	int ret;
 985	int pc;
 986
 987	for (pc = 0; pc < item->count; pc++) {
 988		if (page_count(item->pages[pc]) > 1) {
 989			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
 990			schedule_delayed_work(&item->gnttab_work,
 991					      msecs_to_jiffies(delay));
 992			return;
 993		}
 994	}
 995
 996	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
 997				item->pages, item->count);
 998	item->done(ret, item);
 999}
1000
1001void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1002{
1003	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1004	item->age = 0;
1005
1006	__gnttab_unmap_refs_async(item);
1007}
1008EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1009
1010static void unmap_refs_callback(int result,
1011		struct gntab_unmap_queue_data *data)
1012{
1013	struct unmap_refs_callback_data *d = data->data;
1014
1015	d->result = result;
1016	complete(&d->completion);
1017}
1018
1019int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1020{
1021	struct unmap_refs_callback_data data;
1022
1023	init_completion(&data.completion);
1024	item->data = &data;
1025	item->done = &unmap_refs_callback;
1026	gnttab_unmap_refs_async(item);
1027	wait_for_completion(&data.completion);
1028
1029	return data.result;
1030}
1031EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
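/*
 * Illustrative sketch: a caller that must not unmap while the pages might
 * still be referenced elsewhere fills a gntab_unmap_queue_data and lets the
 * async/sync helpers above retry until the extra page references are gone
 * ("unmap", "pages" and "segs" are assumptions of this sketch):
 *
 *	struct gntab_unmap_queue_data unmap_data;
 *
 *	unmap_data.unmap_ops = unmap;
 *	unmap_data.kunmap_ops = NULL;
 *	unmap_data.pages = pages;
 *	unmap_data.count = segs;
 *
 *	ret = gnttab_unmap_refs_sync(&unmap_data);
 */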
1032
1033static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1034{
1035	BUG_ON(gnttab_interface == NULL);
1036	return gnttab_frames(nr_grant_frames, SPP);
1037}
1038
1039static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1040{
1041	int rc;
1042
1043	rc = arch_gnttab_map_shared(frames, nr_gframes,
1044				    gnttab_max_grant_frames(),
1045				    &gnttab_shared.addr);
1046	BUG_ON(rc);
1047
1048	return 0;
1049}
1050
1051static void gnttab_unmap_frames_v1(void)
1052{
1053	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1054}
1055
1056static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1057{
1058	uint64_t *sframes;
1059	unsigned int nr_sframes;
1060	struct gnttab_get_status_frames getframes;
1061	int rc;
1062
1063	nr_sframes = nr_status_frames(nr_gframes);
1064
 1065	/* No need for kzalloc as it is initialized by the following hypercall
1066	 * GNTTABOP_get_status_frames.
1067	 */
1068	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1069	if (!sframes)
1070		return -ENOMEM;
1071
1072	getframes.dom        = DOMID_SELF;
1073	getframes.nr_frames  = nr_sframes;
1074	set_xen_guest_handle(getframes.frame_list, sframes);
1075
1076	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1077				       &getframes, 1);
1078	if (rc == -ENOSYS) {
1079		kfree(sframes);
1080		return -ENOSYS;
1081	}
1082
1083	BUG_ON(rc || getframes.status);
1084
1085	rc = arch_gnttab_map_status(sframes, nr_sframes,
1086				    nr_status_frames(gnttab_max_grant_frames()),
1087				    &grstatus);
1088	BUG_ON(rc);
1089	kfree(sframes);
1090
1091	rc = arch_gnttab_map_shared(frames, nr_gframes,
1092				    gnttab_max_grant_frames(),
1093				    &gnttab_shared.addr);
1094	BUG_ON(rc);
1095
1096	return 0;
1097}
1098
1099static void gnttab_unmap_frames_v2(void)
1100{
1101	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1102	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1103}
1104
1105static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1106{
1107	struct gnttab_setup_table setup;
1108	xen_pfn_t *frames;
1109	unsigned int nr_gframes = end_idx + 1;
1110	int rc;
1111
1112	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1113		struct xen_add_to_physmap xatp;
1114		unsigned int i = end_idx;
1115		rc = 0;
1116		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1117		/*
1118		 * Loop backwards, so that the first hypercall has the largest
1119		 * index, ensuring that the table will grow only once.
1120		 */
1121		do {
1122			xatp.domid = DOMID_SELF;
1123			xatp.idx = i;
1124			xatp.space = XENMAPSPACE_grant_table;
1125			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1126			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1127			if (rc != 0) {
1128				pr_warn("grant table add_to_physmap failed, err=%d\n",
1129					rc);
1130				break;
1131			}
1132		} while (i-- > start_idx);
1133
1134		return rc;
1135	}
1136
 1137	/* No need for kzalloc as it is initialized by the following hypercall
1138	 * GNTTABOP_setup_table.
1139	 */
1140	frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
1141	if (!frames)
1142		return -ENOMEM;
1143
1144	setup.dom        = DOMID_SELF;
1145	setup.nr_frames  = nr_gframes;
1146	set_xen_guest_handle(setup.frame_list, frames);
1147
1148	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1149	if (rc == -ENOSYS) {
1150		kfree(frames);
1151		return -ENOSYS;
1152	}
1153
1154	BUG_ON(rc || setup.status);
1155
1156	rc = gnttab_interface->map_frames(frames, nr_gframes);
1157
1158	kfree(frames);
1159
1160	return rc;
1161}
1162
1163static const struct gnttab_ops gnttab_v1_ops = {
1164	.version			= 1,
1165	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1166					  sizeof(struct grant_entry_v1),
1167	.map_frames			= gnttab_map_frames_v1,
1168	.unmap_frames			= gnttab_unmap_frames_v1,
1169	.update_entry			= gnttab_update_entry_v1,
1170	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1171	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
1172	.query_foreign_access		= gnttab_query_foreign_access_v1,
1173};
1174
1175static const struct gnttab_ops gnttab_v2_ops = {
1176	.version			= 2,
1177	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1178					  sizeof(union grant_entry_v2),
1179	.map_frames			= gnttab_map_frames_v2,
1180	.unmap_frames			= gnttab_unmap_frames_v2,
1181	.update_entry			= gnttab_update_entry_v2,
1182	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1183	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
1184	.query_foreign_access		= gnttab_query_foreign_access_v2,
1185};
1186
1187static bool gnttab_need_v2(void)
1188{
1189#ifdef CONFIG_X86
1190	uint32_t base, width;
1191
1192	if (xen_pv_domain()) {
1193		base = xen_cpuid_base();
1194		if (cpuid_eax(base) < 5)
1195			return false;	/* Information not available, use V1. */
1196		width = cpuid_ebx(base + 5) &
1197			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1198		return width > 32 + PAGE_SHIFT;
1199	}
1200#endif
1201	return !!(max_possible_pfn >> 32);
1202}
1203
1204static void gnttab_request_version(void)
1205{
1206	long rc;
1207	struct gnttab_set_version gsv;
1208
1209	if (gnttab_need_v2())
1210		gsv.version = 2;
1211	else
1212		gsv.version = 1;
1213
1214	/* Boot parameter overrides automatic selection. */
1215	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1216		gsv.version = xen_gnttab_version;
1217
1218	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1219	if (rc == 0 && gsv.version == 2)
1220		gnttab_interface = &gnttab_v2_ops;
1221	else
1222		gnttab_interface = &gnttab_v1_ops;
1223	pr_info("Grant tables using version %d layout\n",
1224		gnttab_interface->version);
1225}
1226
1227static int gnttab_setup(void)
1228{
1229	unsigned int max_nr_gframes;
1230
1231	max_nr_gframes = gnttab_max_grant_frames();
1232	if (max_nr_gframes < nr_grant_frames)
1233		return -ENOSYS;
1234
1235	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1236		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1237		if (gnttab_shared.addr == NULL) {
1238			pr_warn("gnttab share frames (addr=0x%08lx) is not mapped!\n",
1239				(unsigned long)xen_auto_xlat_grant_frames.vaddr);
1240			return -ENOMEM;
1241		}
1242	}
1243	return gnttab_map(0, nr_grant_frames - 1);
1244}
1245
1246int gnttab_resume(void)
1247{
1248	gnttab_request_version();
1249	return gnttab_setup();
1250}
1251
1252int gnttab_suspend(void)
1253{
1254	if (!xen_feature(XENFEAT_auto_translated_physmap))
1255		gnttab_interface->unmap_frames();
1256	return 0;
1257}
1258
1259static int gnttab_expand(unsigned int req_entries)
1260{
1261	int rc;
1262	unsigned int cur, extra;
1263
1264	BUG_ON(gnttab_interface == NULL);
1265	cur = nr_grant_frames;
1266	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1267		 gnttab_interface->grefs_per_grant_frame);
1268	if (cur + extra > gnttab_max_grant_frames()) {
1269		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1270				    " cur=%u extra=%u limit=%u"
1271				    " gnttab_free_count=%u req_entries=%u\n",
1272				    cur, extra, gnttab_max_grant_frames(),
1273				    gnttab_free_count, req_entries);
1274		return -ENOSPC;
1275	}
1276
1277	rc = gnttab_map(cur, cur + extra - 1);
1278	if (rc == 0)
1279		rc = grow_gnttab_list(extra);
1280
1281	return rc;
1282}
1283
1284int gnttab_init(void)
1285{
1286	int i;
1287	unsigned long max_nr_grant_frames;
1288	unsigned int max_nr_glist_frames, nr_glist_frames;
1289	unsigned int nr_init_grefs;
1290	int ret;
1291
1292	gnttab_request_version();
1293	max_nr_grant_frames = gnttab_max_grant_frames();
1294	nr_grant_frames = 1;
1295
1296	/* Determine the maximum number of frames required for the
1297	 * grant reference free list on the current hypervisor.
1298	 */
1299	BUG_ON(gnttab_interface == NULL);
1300	max_nr_glist_frames = (max_nr_grant_frames *
1301			       gnttab_interface->grefs_per_grant_frame / RPP);
1302
1303	gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
1304			      GFP_KERNEL);
1305	if (gnttab_list == NULL)
1306		return -ENOMEM;
1307
1308	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1309	for (i = 0; i < nr_glist_frames; i++) {
1310		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1311		if (gnttab_list[i] == NULL) {
1312			ret = -ENOMEM;
1313			goto ini_nomem;
1314		}
1315	}
1316
1317	ret = arch_gnttab_init(max_nr_grant_frames,
1318			       nr_status_frames(max_nr_grant_frames));
1319	if (ret < 0)
1320		goto ini_nomem;
1321
1322	if (gnttab_setup() < 0) {
1323		ret = -ENODEV;
1324		goto ini_nomem;
1325	}
1326
1327	nr_init_grefs = nr_grant_frames *
1328			gnttab_interface->grefs_per_grant_frame;
1329
1330	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1331		gnttab_entry(i) = i + 1;
1332
1333	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1334	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1335	gnttab_free_head  = NR_RESERVED_ENTRIES;
1336
1337	printk("Grant table initialized\n");
1338	return 0;
1339
1340 ini_nomem:
1341	for (i--; i >= 0; i--)
1342		free_page((unsigned long)gnttab_list[i]);
1343	kfree(gnttab_list);
1344	return ret;
1345}
1346EXPORT_SYMBOL_GPL(gnttab_init);
1347
1348static int __gnttab_init(void)
1349{
1350	if (!xen_domain())
1351		return -ENODEV;
1352
1353	/* Delay grant-table initialization in the PV on HVM case */
1354	if (xen_hvm_domain() && !xen_pvh_domain())
1355		return 0;
1356
1357	return gnttab_init();
1358}
1359/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1360 * beforehand to initialize xen_auto_xlat_grant_frames. */
1361core_initcall_sync(__gnttab_init);