   1/******************************************************************************
   2 * grant_table.c
   3 *
   4 * Granting foreign access to our memory reservation.
   5 *
   6 * Copyright (c) 2005-2006, Christopher Clark
   7 * Copyright (c) 2004-2005, K A Fraser
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License version 2
  11 * as published by the Free Software Foundation; or, when distributed
  12 * separately from the Linux kernel or incorporated into other
  13 * software packages, subject to the following license:
  14 *
  15 * Permission is hereby granted, free of charge, to any person obtaining a copy
  16 * of this source file (the "Software"), to deal in the Software without
  17 * restriction, including without limitation the rights to use, copy, modify,
  18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  19 * and to permit persons to whom the Software is furnished to do so, subject to
  20 * the following conditions:
  21 *
  22 * The above copyright notice and this permission notice shall be included in
  23 * all copies or substantial portions of the Software.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  31 * IN THE SOFTWARE.
  32 */
  33
  34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  35
  36#include <linux/memblock.h>
  37#include <linux/sched.h>
  38#include <linux/mm.h>
  39#include <linux/slab.h>
  40#include <linux/vmalloc.h>
  41#include <linux/uaccess.h>
  42#include <linux/io.h>
  43#include <linux/delay.h>
  44#include <linux/hardirq.h>
  45#include <linux/workqueue.h>
  46#include <linux/ratelimit.h>
  47#include <linux/moduleparam.h>
  48#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
  49#include <linux/dma-mapping.h>
  50#endif
  51
  52#include <xen/xen.h>
  53#include <xen/interface/xen.h>
  54#include <xen/page.h>
  55#include <xen/grant_table.h>
  56#include <xen/interface/memory.h>
  57#include <xen/hvc-console.h>
  58#include <xen/swiotlb-xen.h>
  59#include <xen/balloon.h>
  60#ifdef CONFIG_X86
  61#include <asm/xen/cpuid.h>
  62#endif
  63#include <xen/mem-reservation.h>
  64#include <asm/xen/hypercall.h>
  65#include <asm/xen/interface.h>
   66
   67#include <asm/sync_bitops.h>
  68
  69/* External tools reserve first few grant table entries. */
  70#define NR_RESERVED_ENTRIES 8
  71#define GNTTAB_LIST_END 0xffffffff
  72
  73static grant_ref_t **gnttab_list;
  74static unsigned int nr_grant_frames;
  75static int gnttab_free_count;
  76static grant_ref_t gnttab_free_head;
  77static DEFINE_SPINLOCK(gnttab_list_lock);
  78struct grant_frames xen_auto_xlat_grant_frames;
  79static unsigned int xen_gnttab_version;
  80module_param_named(version, xen_gnttab_version, uint, 0);
  81
  82static union {
  83	struct grant_entry_v1 *v1;
  84	union grant_entry_v2 *v2;
  85	void *addr;
  86} gnttab_shared;
  87
   88/* This is a structure of function pointers for the grant table. */
  89struct gnttab_ops {
  90	/*
  91	 * Version of the grant interface.
  92	 */
  93	unsigned int version;
  94	/*
  95	 * Grant refs per grant frame.
  96	 */
  97	unsigned int grefs_per_grant_frame;
  98	/*
   99	 * Map a list of frames for storing grant entries. The frames
  100	 * parameter holds the grant table frame addresses while the grant
  101	 * table is being set up; nr_gframes is the number of frames to map.
  102	 * Returning GNTST_okay means success; a negative value means failure.
 103	 */
 104	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
 105	/*
 106	 * Release a list of frames which are mapped in map_frames for grant
 107	 * entry status.
 108	 */
 109	void (*unmap_frames)(void);
 110	/*
  111	 * Introduce a valid entry into the grant table, granting the frame of
  112	 * this grant entry to a domain for access or transfer. The ref
  113	 * parameter is the reference of the introduced grant entry, domid is
  114	 * the id of the granted domain, frame is the page frame to be granted,
  115	 * and flags is the status of the grant entry to be updated.
 116	 */
 117	void (*update_entry)(grant_ref_t ref, domid_t domid,
 118			     unsigned long frame, unsigned flags);
 119	/*
  120	 * Stop granting a grant entry to a domain for access. The ref
  121	 * parameter is the reference of a grant entry whose grant access will
  122	 * be stopped; readonly is not used by this function. If the grant
  123	 * entry is currently mapped for reading or writing, return failure
  124	 * (== 0) directly and don't tear down the grant access. Otherwise,
  125	 * stop grant access for this entry and return success (== 1).
 126	 */
 127	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
 128	/*
  129	 * Stop granting a grant entry to a domain for transfer. The ref
  130	 * parameter is the reference of a grant entry whose grant transfer
  131	 * will be stopped. If the transfer has not started, just reclaim the
  132	 * grant entry and return failure (== 0). Otherwise, wait for the
  133	 * transfer to complete and then return the frame.
 134	 */
 135	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
 136	/*
  137	 * Query the status of a grant entry. The ref parameter is the
  138	 * reference of the queried grant entry; the return value is the status
  139	 * of the queried entry. The detailed status (writing/reading) can be
  140	 * obtained from the return value with bit operations.
 141	 */
 142	int (*query_foreign_access)(grant_ref_t ref);
 143};
 144
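/*
 * Worked example for grefs_per_grant_frame: with XEN_PAGE_SIZE == 4096,
 * a v1 grant frame holds 4096 / sizeof(struct grant_entry_v1) ==
 * 4096 / 8 == 512 references, while a v2 frame holds
 * 4096 / sizeof(union grant_entry_v2) == 4096 / 16 == 256 (entry sizes
 * as defined by the Xen ABI headers); see gnttab_v1_ops and
 * gnttab_v2_ops below.
 */
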
 145struct unmap_refs_callback_data {
 146	struct completion completion;
 147	int result;
 148};
 149
 150static const struct gnttab_ops *gnttab_interface;
 151
  153/* This reflects the status of grant entries, so it acts as a global value. */
 153static grant_status_t *grstatus;
 154
 155static struct gnttab_free_callback *gnttab_free_callback_list;
 156
 157static int gnttab_expand(unsigned int req_entries);
 158
 159#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
 160#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 161
 162static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 163{
 164	return &gnttab_list[(entry) / RPP][(entry) % RPP];
 165}
 166/* This can be used as an l-value */
 167#define gnttab_entry(entry) (*__gnttab_entry(entry))
 168
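/*
 * Worked example: assuming PAGE_SIZE == 4096 and sizeof(grant_ref_t) == 4,
 * RPP == 1024, so grant reference 2500 lives at gnttab_list[2][452]:
 * 2500 / 1024 == 2 selects the page of the list, and 2500 % 1024 == 452
 * the slot within that page.
 */
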
 169static int get_free_entries(unsigned count)
 170{
 171	unsigned long flags;
 172	int ref, rc = 0;
 173	grant_ref_t head;
 174
 175	spin_lock_irqsave(&gnttab_list_lock, flags);
 176
 177	if ((gnttab_free_count < count) &&
 178	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
 179		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 180		return rc;
 181	}
 182
 183	ref = head = gnttab_free_head;
 184	gnttab_free_count -= count;
 185	while (count-- > 1)
 186		head = gnttab_entry(head);
 187	gnttab_free_head = gnttab_entry(head);
 188	gnttab_entry(head) = GNTTAB_LIST_END;
 189
 190	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 191
 192	return ref;
 193}
 194
 195static void do_free_callbacks(void)
 196{
 197	struct gnttab_free_callback *callback, *next;
 198
 199	callback = gnttab_free_callback_list;
 200	gnttab_free_callback_list = NULL;
 201
 202	while (callback != NULL) {
 203		next = callback->next;
 204		if (gnttab_free_count >= callback->count) {
 205			callback->next = NULL;
 206			callback->fn(callback->arg);
 207		} else {
 208			callback->next = gnttab_free_callback_list;
 209			gnttab_free_callback_list = callback;
 210		}
 211		callback = next;
 212	}
 213}
 214
 215static inline void check_free_callbacks(void)
 216{
 217	if (unlikely(gnttab_free_callback_list))
 218		do_free_callbacks();
 219}
 220
 221static void put_free_entry(grant_ref_t ref)
 222{
 223	unsigned long flags;
 224	spin_lock_irqsave(&gnttab_list_lock, flags);
 225	gnttab_entry(ref) = gnttab_free_head;
 226	gnttab_free_head = ref;
 227	gnttab_free_count++;
 228	check_free_callbacks();
 229	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 230}
 231
 232/*
 233 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 234 * Introducing a valid entry into the grant table:
 235 *  1. Write ent->domid.
 236 *  2. Write ent->frame:
 237 *      GTF_permit_access:   Frame to which access is permitted.
 238 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 239 *                           frame, or zero if none.
 240 *  3. Write memory barrier (WMB).
 241 *  4. Write ent->flags, inc. valid type.
 242 */
 243static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
 244				   unsigned long frame, unsigned flags)
 245{
 246	gnttab_shared.v1[ref].domid = domid;
 247	gnttab_shared.v1[ref].frame = frame;
 248	wmb();
 249	gnttab_shared.v1[ref].flags = flags;
 250}
 251
 252static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
 253				   unsigned long frame, unsigned int flags)
 254{
 255	gnttab_shared.v2[ref].hdr.domid = domid;
 256	gnttab_shared.v2[ref].full_page.frame = frame;
 257	wmb();	/* Hypervisor concurrent accesses. */
 258	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
 259}
 260
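/*
 * A minimal sketch of why the wmb() above matters: the hypervisor can
 * read the entry at any time, and the flags write is what marks it
 * valid. Without the barrier, the store to flags could become visible
 * before domid/frame, letting the remote end act on a half-written
 * entry:
 *
 *	ent->domid = domid;	write the entry body first,
 *	ent->frame = frame;
 *	wmb();			then make it globally visible,
 *	ent->flags = flags;	then mark the entry live.
 */
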
 261/*
 262 * Public grant-issuing interface functions
 263 */
 264void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
 265				     unsigned long frame, int readonly)
 266{
 267	gnttab_interface->update_entry(ref, domid, frame,
 268			   GTF_permit_access | (readonly ? GTF_readonly : 0));
 269}
 270EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
 271
 272int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 273				int readonly)
 274{
 275	int ref;
 276
 277	ref = get_free_entries(1);
 278	if (unlikely(ref < 0))
 279		return -ENOSPC;
 280
 281	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
 282
 283	return ref;
 284}
 285EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
 286
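/*
 * Usage sketch (hypothetical caller, error paths trimmed): grant a
 * freshly allocated page read-only to a peer domain, then revoke the
 * grant when done. virt_to_gfn() is the per-arch Xen page helper; the
 * peer's domid would typically come from xenstore.
 */
static int example_share_page(domid_t peer)
{
	unsigned long vaddr = get_zeroed_page(GFP_KERNEL);
	int ref;

	if (!vaddr)
		return -ENOMEM;

	ref = gnttab_grant_foreign_access(peer, virt_to_gfn((void *)vaddr),
					  1 /* readonly */);
	if (ref < 0) {
		free_page(vaddr);
		return ref;
	}

	/* ... advertise ref to the peer so it can map the page ... */

	/* Revoke; the page is freed now, or deferred if still mapped. */
	gnttab_end_foreign_access(ref, 1, vaddr);
	return 0;
}
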
 287static int gnttab_query_foreign_access_v1(grant_ref_t ref)
 288{
 289	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
 290}
 291
 292static int gnttab_query_foreign_access_v2(grant_ref_t ref)
 293{
 294	return grstatus[ref] & (GTF_reading|GTF_writing);
 295}
 296
 297int gnttab_query_foreign_access(grant_ref_t ref)
 298{
 299	return gnttab_interface->query_foreign_access(ref);
 300}
 301EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
 302
 303static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 304{
 305	u16 flags, nflags;
 306	u16 *pflags;
 307
 308	pflags = &gnttab_shared.v1[ref].flags;
 309	nflags = *pflags;
 310	do {
 311		flags = nflags;
 312		if (flags & (GTF_reading|GTF_writing))
 313			return 0;
 314	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
 315
 316	return 1;
 317}
 318
 319static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
 320{
 321	gnttab_shared.v2[ref].hdr.flags = 0;
 322	mb();	/* Concurrent access by hypervisor. */
 323	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
 324		return 0;
 325	} else {
 326		/*
 327		 * The read of grstatus needs to have acquire semantics.
 328		 *  On x86, reads already have that, and we just need to
 329		 * protect against compiler reorderings.
 330		 * On other architectures we may need a full barrier.
 331		 */
 332#ifdef CONFIG_X86
 333		barrier();
 334#else
 335		mb();
 336#endif
 337	}
 338
 339	return 1;
 340}
 341
 342static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 343{
 344	return gnttab_interface->end_foreign_access_ref(ref, readonly);
 345}
 346
 347int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 348{
 349	if (_gnttab_end_foreign_access_ref(ref, readonly))
 350		return 1;
 351	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
 352	return 0;
 353}
 354EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 355
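/*
 * Deferred freeing of grant references: if the remote domain still has
 * the grant mapped when access is ended, the reference (and its page)
 * cannot be reclaimed safely. Such entries are queued on deferred_list
 * and retried from a timer roughly once per second; after 60 failed
 * attempts (warn_delay below) a message is logged and retrying
 * continues.
 */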
 356struct deferred_entry {
 357	struct list_head list;
 358	grant_ref_t ref;
 359	bool ro;
 360	uint16_t warn_delay;
 361	struct page *page;
 362};
 363static LIST_HEAD(deferred_list);
 364static void gnttab_handle_deferred(struct timer_list *);
 365static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
 366
 367static void gnttab_handle_deferred(struct timer_list *unused)
 368{
 369	unsigned int nr = 10;
 370	struct deferred_entry *first = NULL;
 371	unsigned long flags;
 372
 373	spin_lock_irqsave(&gnttab_list_lock, flags);
 374	while (nr--) {
 375		struct deferred_entry *entry
 376			= list_first_entry(&deferred_list,
 377					   struct deferred_entry, list);
 378
 379		if (entry == first)
 380			break;
 381		list_del(&entry->list);
 382		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 383		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
 384			put_free_entry(entry->ref);
 385			if (entry->page) {
 386				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
 387					 entry->ref, page_to_pfn(entry->page));
 388				put_page(entry->page);
 389			} else
 390				pr_info("freeing g.e. %#x\n", entry->ref);
 391			kfree(entry);
 392			entry = NULL;
 393		} else {
 394			if (!--entry->warn_delay)
 395				pr_info("g.e. %#x still pending\n", entry->ref);
 396			if (!first)
 397				first = entry;
 398		}
 399		spin_lock_irqsave(&gnttab_list_lock, flags);
 400		if (entry)
 401			list_add_tail(&entry->list, &deferred_list);
 402		else if (list_empty(&deferred_list))
 403			break;
 404	}
 405	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
 406		deferred_timer.expires = jiffies + HZ;
 407		add_timer(&deferred_timer);
 408	}
 409	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 410}
 411
 412static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
 413				struct page *page)
 414{
 415	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 416	const char *what = KERN_WARNING "leaking";
 417
 418	if (entry) {
 419		unsigned long flags;
 420
 421		entry->ref = ref;
 422		entry->ro = readonly;
 423		entry->page = page;
 424		entry->warn_delay = 60;
 425		spin_lock_irqsave(&gnttab_list_lock, flags);
 426		list_add_tail(&entry->list, &deferred_list);
 427		if (!timer_pending(&deferred_timer)) {
 428			deferred_timer.expires = jiffies + HZ;
 429			add_timer(&deferred_timer);
 430		}
 431		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 432		what = KERN_DEBUG "deferring";
 433	}
 434	printk("%s g.e. %#x (pfn %#lx)\n",
 435	       what, ref, page ? page_to_pfn(page) : -1);
 436}
 437
 438void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 439			       unsigned long page)
 440{
 441	if (gnttab_end_foreign_access_ref(ref, readonly)) {
 442		put_free_entry(ref);
 443		if (page != 0)
 444			put_page(virt_to_page(page));
 445	} else
 446		gnttab_add_deferred(ref, readonly,
 447				    page ? virt_to_page(page) : NULL);
 448}
 449EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
 450
 451int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
 452{
 453	int ref;
 454
 455	ref = get_free_entries(1);
 456	if (unlikely(ref < 0))
 457		return -ENOSPC;
 458	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
 459
 460	return ref;
 461}
 462EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
 463
 464void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
 465				       unsigned long pfn)
 466{
 467	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
 468}
 469EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
 470
 471static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
 472{
 473	unsigned long frame;
 474	u16           flags;
 475	u16          *pflags;
 476
 477	pflags = &gnttab_shared.v1[ref].flags;
 478
 479	/*
  480	 * If the transfer has not even started yet, try to reclaim the grant
 481	 * reference and return failure (== 0).
 482	 */
 483	while (!((flags = *pflags) & GTF_transfer_committed)) {
 484		if (sync_cmpxchg(pflags, flags, 0) == flags)
 485			return 0;
 486		cpu_relax();
 487	}
 488
 489	/* If a transfer is in progress then wait until it is completed. */
 490	while (!(flags & GTF_transfer_completed)) {
 491		flags = *pflags;
 492		cpu_relax();
 493	}
 494
 495	rmb();	/* Read the frame number /after/ reading completion status. */
 496	frame = gnttab_shared.v1[ref].frame;
 497	BUG_ON(frame == 0);
 498
 499	return frame;
 500}
 501
 502static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
 503{
 504	unsigned long frame;
 505	u16           flags;
 506	u16          *pflags;
 507
 508	pflags = &gnttab_shared.v2[ref].hdr.flags;
 509
 510	/*
  511	 * If the transfer has not even started yet, try to reclaim the grant
 512	 * reference and return failure (== 0).
 513	 */
 514	while (!((flags = *pflags) & GTF_transfer_committed)) {
 515		if (sync_cmpxchg(pflags, flags, 0) == flags)
 516			return 0;
 517		cpu_relax();
 518	}
 519
 520	/* If a transfer is in progress then wait until it is completed. */
 521	while (!(flags & GTF_transfer_completed)) {
 522		flags = *pflags;
 523		cpu_relax();
 524	}
 525
 526	rmb();  /* Read the frame number /after/ reading completion status. */
 527	frame = gnttab_shared.v2[ref].full_page.frame;
 528	BUG_ON(frame == 0);
 529
 530	return frame;
 531}
 532
 533unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
 534{
 535	return gnttab_interface->end_foreign_transfer_ref(ref);
 536}
 537EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
 538
 539unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
 540{
 541	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
 542	put_free_entry(ref);
 543	return frame;
 544}
 545EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
 546
 547void gnttab_free_grant_reference(grant_ref_t ref)
 548{
 549	put_free_entry(ref);
 550}
 551EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
 552
 553void gnttab_free_grant_references(grant_ref_t head)
 554{
 555	grant_ref_t ref;
 556	unsigned long flags;
 557	int count = 1;
 558	if (head == GNTTAB_LIST_END)
 559		return;
 560	spin_lock_irqsave(&gnttab_list_lock, flags);
 561	ref = head;
 562	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
 563		ref = gnttab_entry(ref);
 564		count++;
 565	}
 566	gnttab_entry(ref) = gnttab_free_head;
 567	gnttab_free_head = head;
 568	gnttab_free_count += count;
 569	check_free_callbacks();
 570	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 571}
 572EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 573
 574int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
 575{
 576	int h = get_free_entries(count);
 577
 578	if (h < 0)
 579		return -ENOSPC;
 580
 581	*head = h;
 582
 583	return 0;
 584}
 585EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
 586
 587int gnttab_empty_grant_references(const grant_ref_t *private_head)
 588{
 589	return (*private_head == GNTTAB_LIST_END);
 590}
 591EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
 592
 593int gnttab_claim_grant_reference(grant_ref_t *private_head)
 594{
 595	grant_ref_t g = *private_head;
 596	if (unlikely(g == GNTTAB_LIST_END))
 597		return -ENOSPC;
 598	*private_head = gnttab_entry(g);
 599	return g;
 600}
 601EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
 602
 603void gnttab_release_grant_reference(grant_ref_t *private_head,
 604				    grant_ref_t release)
 605{
 606	gnttab_entry(release) = *private_head;
 607	*private_head = release;
 608}
 609EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
 610
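/*
 * Usage sketch (hypothetical caller): the claim/release pair lets a
 * driver reserve a pool of references up front and hand them out
 * without taking gnttab_list_lock per request, as the block and net
 * frontends do.
 */
static int example_use_pool(domid_t peer, struct page *page)
{
	grant_ref_t pool;
	int ref;

	if (gnttab_alloc_grant_references(16, &pool))
		return -ENOSPC;

	ref = gnttab_claim_grant_reference(&pool);	/* take one ref */
	if (ref < 0) {
		gnttab_free_grant_references(pool);
		return ref;
	}
	gnttab_grant_foreign_access_ref(ref, peer, xen_page_to_gfn(page), 0);

	/* ... later, once the grant has been ended ... */
	gnttab_release_grant_reference(&pool, ref);	/* put it back */

	gnttab_free_grant_references(pool);	/* return the whole pool */
	return 0;
}
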
 611void gnttab_request_free_callback(struct gnttab_free_callback *callback,
 612				  void (*fn)(void *), void *arg, u16 count)
 613{
 614	unsigned long flags;
 615	struct gnttab_free_callback *cb;
 616
 617	spin_lock_irqsave(&gnttab_list_lock, flags);
 618
 619	/* Check if the callback is already on the list */
 620	cb = gnttab_free_callback_list;
 621	while (cb) {
 622		if (cb == callback)
 623			goto out;
 624		cb = cb->next;
 625	}
 626
 627	callback->fn = fn;
 628	callback->arg = arg;
 629	callback->count = count;
 630	callback->next = gnttab_free_callback_list;
 631	gnttab_free_callback_list = callback;
 632	check_free_callbacks();
 633out:
 634	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 635}
 636EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
 637
 638void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
 639{
 640	struct gnttab_free_callback **pcb;
 641	unsigned long flags;
 642
 643	spin_lock_irqsave(&gnttab_list_lock, flags);
 644	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
 645		if (*pcb == callback) {
 646			*pcb = callback->next;
 647			break;
 648		}
 649	}
 650	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 651}
 652EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
 653
 654static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
 655{
 656	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
 657	       align;
 658}
 659
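/*
 * Worked example: gnttab_frames() is a ceiling division. With the v1
 * layout (512 grefs per grant frame, assuming 4K pages) and RPP == 1024,
 * three grant frames hold 1536 references, so
 * gnttab_frames(3, RPP) == (1536 + 1023) / 1024 == 2 gnttab_list pages.
 */
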
 660static int grow_gnttab_list(unsigned int more_frames)
 661{
 662	unsigned int new_nr_grant_frames, extra_entries, i;
 663	unsigned int nr_glist_frames, new_nr_glist_frames;
 664	unsigned int grefs_per_frame;
  665
  666	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 667
 668	new_nr_grant_frames = nr_grant_frames + more_frames;
 669	extra_entries = more_frames * grefs_per_frame;
 670
 671	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
 672	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
 673	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
 674		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
 675		if (!gnttab_list[i])
 676			goto grow_nomem;
 677	}
 678
 679
 680	for (i = grefs_per_frame * nr_grant_frames;
 681	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
 682		gnttab_entry(i) = i + 1;
 683
 684	gnttab_entry(i) = gnttab_free_head;
 685	gnttab_free_head = grefs_per_frame * nr_grant_frames;
 686	gnttab_free_count += extra_entries;
 687
 688	nr_grant_frames = new_nr_grant_frames;
 689
 690	check_free_callbacks();
 691
 692	return 0;
 693
 694grow_nomem:
 695	while (i-- > nr_glist_frames)
 696		free_page((unsigned long) gnttab_list[i]);
 697	return -ENOMEM;
 698}
 699
 700static unsigned int __max_nr_grant_frames(void)
 701{
 702	struct gnttab_query_size query;
 703	int rc;
 704
 705	query.dom = DOMID_SELF;
 706
 707	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
 708	if ((rc < 0) || (query.status != GNTST_okay))
 709		return 4; /* Legacy max supported number of frames */
 710
 711	return query.max_nr_frames;
 712}
 713
 714unsigned int gnttab_max_grant_frames(void)
 715{
 716	unsigned int xen_max = __max_nr_grant_frames();
 717	static unsigned int boot_max_nr_grant_frames;
 718
 719	/* First time, initialize it properly. */
 720	if (!boot_max_nr_grant_frames)
 721		boot_max_nr_grant_frames = __max_nr_grant_frames();
 722
 723	if (xen_max > boot_max_nr_grant_frames)
 724		return boot_max_nr_grant_frames;
 725	return xen_max;
 726}
 727EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 728
 729int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 730{
 731	xen_pfn_t *pfn;
 732	unsigned int max_nr_gframes = __max_nr_grant_frames();
 733	unsigned int i;
 734	void *vaddr;
 735
 736	if (xen_auto_xlat_grant_frames.count)
 737		return -EINVAL;
 738
 739	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
 740	if (vaddr == NULL) {
 741		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
 742			&addr);
 743		return -ENOMEM;
 744	}
 745	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
 746	if (!pfn) {
 747		xen_unmap(vaddr);
 748		return -ENOMEM;
 749	}
 750	for (i = 0; i < max_nr_gframes; i++)
 751		pfn[i] = XEN_PFN_DOWN(addr) + i;
 752
 753	xen_auto_xlat_grant_frames.vaddr = vaddr;
 754	xen_auto_xlat_grant_frames.pfn = pfn;
 755	xen_auto_xlat_grant_frames.count = max_nr_gframes;
 756
 757	return 0;
 758}
 759EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
 760
 761void gnttab_free_auto_xlat_frames(void)
 762{
 763	if (!xen_auto_xlat_grant_frames.count)
 764		return;
 765	kfree(xen_auto_xlat_grant_frames.pfn);
 766	xen_unmap(xen_auto_xlat_grant_frames.vaddr);
 767
 768	xen_auto_xlat_grant_frames.pfn = NULL;
 769	xen_auto_xlat_grant_frames.count = 0;
 770	xen_auto_xlat_grant_frames.vaddr = NULL;
 771}
 772EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 773
 774int gnttab_pages_set_private(int nr_pages, struct page **pages)
 775{
 776	int i;
 777
 778	for (i = 0; i < nr_pages; i++) {
 779#if BITS_PER_LONG < 64
 780		struct xen_page_foreign *foreign;
 781
 782		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
 783		if (!foreign)
 784			return -ENOMEM;
 785
 786		set_page_private(pages[i], (unsigned long)foreign);
 787#endif
 788		SetPagePrivate(pages[i]);
 789	}
 790
 791	return 0;
 792}
 793EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
 794
 795/**
 796 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 797 * @nr_pages: number of pages to alloc
 798 * @pages: returns the pages
 799 */
 800int gnttab_alloc_pages(int nr_pages, struct page **pages)
 801{
 802	int ret;
 803
 804	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
 805	if (ret < 0)
 806		return ret;
 807
 808	ret = gnttab_pages_set_private(nr_pages, pages);
 809	if (ret < 0)
 810		gnttab_free_pages(nr_pages, pages);
 811
 812	return ret;
 813}
 814EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 815
 816void gnttab_pages_clear_private(int nr_pages, struct page **pages)
 817{
 818	int i;
 819
 820	for (i = 0; i < nr_pages; i++) {
 821		if (PagePrivate(pages[i])) {
 822#if BITS_PER_LONG < 64
 823			kfree((void *)page_private(pages[i]));
 824#endif
 825			ClearPagePrivate(pages[i]);
 826		}
 827	}
 828}
 829EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
 830
 831/**
 832 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
  833 * @nr_pages: number of pages to free
 834 * @pages: the pages
 835 */
 836void gnttab_free_pages(int nr_pages, struct page **pages)
 837{
 838	gnttab_pages_clear_private(nr_pages, pages);
 839	xen_free_unpopulated_pages(nr_pages, pages);
 840}
 841EXPORT_SYMBOL_GPL(gnttab_free_pages);
 842
 843#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 844/**
 845 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 846 * @args: arguments to the function
 847 */
 848int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
 849{
 850	unsigned long pfn, start_pfn;
 851	size_t size;
 852	int i, ret;
 853
 854	size = args->nr_pages << PAGE_SHIFT;
 855	if (args->coherent)
 856		args->vaddr = dma_alloc_coherent(args->dev, size,
 857						 &args->dev_bus_addr,
 858						 GFP_KERNEL | __GFP_NOWARN);
 859	else
 860		args->vaddr = dma_alloc_wc(args->dev, size,
 861					   &args->dev_bus_addr,
 862					   GFP_KERNEL | __GFP_NOWARN);
 863	if (!args->vaddr) {
 864		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
 865		return -ENOMEM;
 866	}
 867
 868	start_pfn = __phys_to_pfn(args->dev_bus_addr);
 869	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
 870			pfn++, i++) {
 871		struct page *page = pfn_to_page(pfn);
 872
 873		args->pages[i] = page;
 874		args->frames[i] = xen_page_to_gfn(page);
 875		xenmem_reservation_scrub_page(page);
 876	}
 877
 878	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
 879
 880	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
 881	if (ret != args->nr_pages) {
 882		pr_debug("Failed to decrease reservation for DMA buffer\n");
 883		ret = -EFAULT;
 884		goto fail;
 885	}
 886
 887	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
 888	if (ret < 0)
 889		goto fail;
 890
 891	return 0;
 892
 893fail:
 894	gnttab_dma_free_pages(args);
 895	return ret;
 896}
 897EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
 898
 899/**
 900 * gnttab_dma_free_pages - free DMAable pages
 901 * @args: arguments to the function
 902 */
 903int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
 904{
 905	size_t size;
 906	int i, ret;
 907
 908	gnttab_pages_clear_private(args->nr_pages, args->pages);
 909
 910	for (i = 0; i < args->nr_pages; i++)
 911		args->frames[i] = page_to_xen_pfn(args->pages[i]);
 912
 913	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
 914	if (ret != args->nr_pages) {
 915		pr_debug("Failed to increase reservation for DMA buffer\n");
 916		ret = -EFAULT;
 917	} else {
 918		ret = 0;
 919	}
 920
 921	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
 922					     args->frames);
 923
 924	size = args->nr_pages << PAGE_SHIFT;
 925	if (args->coherent)
 926		dma_free_coherent(args->dev, size,
 927				  args->vaddr, args->dev_bus_addr);
 928	else
 929		dma_free_wc(args->dev, size,
 930			    args->vaddr, args->dev_bus_addr);
 931	return ret;
 932}
 933EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
 934#endif
 935
 936/* Handling of paged out grant targets (GNTST_eagain) */
 937#define MAX_DELAY 256
 938static inline void
 939gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
 940						const char *func)
 941{
 942	unsigned delay = 1;
 943
 944	do {
 945		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
 946		if (*status == GNTST_eagain)
 947			msleep(delay++);
 948	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
 949
 950	if (delay >= MAX_DELAY) {
 951		pr_err("%s: %s eagain grant\n", func, current->comm);
 952		*status = GNTST_bad_page;
 953	}
 954}
 955
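/*
 * Worked example: the retry loop above sleeps delay = 1, 2, ..., 255 ms
 * between attempts (msleep(delay++) while delay < MAX_DELAY), so a
 * persistently paged-out grant is retried for roughly
 * 1 + 2 + ... + 255 == 32640 ms, about 33 seconds, before being
 * reported failed with GNTST_bad_page.
 */
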
 956void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
 957{
 958	struct gnttab_map_grant_ref *op;
 959
 960	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
 961		BUG();
 962	for (op = batch; op < batch + count; op++)
 963		if (op->status == GNTST_eagain)
 964			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
 965						&op->status, __func__);
 966}
 967EXPORT_SYMBOL_GPL(gnttab_batch_map);
 968
 969void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
 970{
 971	struct gnttab_copy *op;
 972
 973	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
 974		BUG();
 975	for (op = batch; op < batch + count; op++)
 976		if (op->status == GNTST_eagain)
 977			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
 978						&op->status, __func__);
 979}
 980EXPORT_SYMBOL_GPL(gnttab_batch_copy);
 981
 982void gnttab_foreach_grant_in_range(struct page *page,
 983				   unsigned int offset,
 984				   unsigned int len,
 985				   xen_grant_fn_t fn,
 986				   void *data)
 987{
 988	unsigned int goffset;
 989	unsigned int glen;
 990	unsigned long xen_pfn;
 991
 992	len = min_t(unsigned int, PAGE_SIZE - offset, len);
 993	goffset = xen_offset_in_page(offset);
 994
 995	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
 996
 997	while (len) {
 998		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
 999		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1000
1001		goffset = 0;
1002		xen_pfn++;
1003		len -= glen;
1004	}
1005}
1006EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
1007
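/*
 * Worked example: on a kernel with 64K pages and XEN_PAGE_SIZE == 4096,
 * a call with offset == 6000 and len == 10000 starts at xen_pfn + 1
 * (6000 / 4096 == 1) with goffset == 1904 (6000 % 4096), and invokes fn
 * for chunks of 2192, 4096 and 3712 bytes, each confined to a single 4K
 * grant.
 */
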
1008void gnttab_foreach_grant(struct page **pages,
1009			  unsigned int nr_grefs,
1010			  xen_grant_fn_t fn,
1011			  void *data)
1012{
1013	unsigned int goffset = 0;
1014	unsigned long xen_pfn = 0;
1015	unsigned int i;
1016
1017	for (i = 0; i < nr_grefs; i++) {
1018		if ((i % XEN_PFN_PER_PAGE) == 0) {
1019			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1020			goffset = 0;
1021		}
1022
1023		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1024
1025		goffset += XEN_PAGE_SIZE;
1026		xen_pfn++;
1027	}
1028}
1029
1030int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1031		    struct gnttab_map_grant_ref *kmap_ops,
1032		    struct page **pages, unsigned int count)
1033{
1034	int i, ret;
1035
1036	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1037	if (ret)
1038		return ret;
1039
1040	for (i = 0; i < count; i++) {
1041		switch (map_ops[i].status) {
1042		case GNTST_okay:
1043		{
1044			struct xen_page_foreign *foreign;
1045
1046			SetPageForeign(pages[i]);
1047			foreign = xen_page_foreign(pages[i]);
1048			foreign->domid = map_ops[i].dom;
1049			foreign->gref = map_ops[i].ref;
1050			break;
1051		}
1052
1053		case GNTST_no_device_space:
1054			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1055			break;
1056
1057		case GNTST_eagain:
1058			/* Retry eagain maps */
1059			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1060						map_ops + i,
1061						&map_ops[i].status, __func__);
1062			/* Test status in next loop iteration. */
1063			i--;
1064			break;
1065
1066		default:
1067			break;
1068		}
1069	}
1070
1071	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1072}
1073EXPORT_SYMBOL_GPL(gnttab_map_refs);
1074
1075int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1076		      struct gnttab_unmap_grant_ref *kunmap_ops,
1077		      struct page **pages, unsigned int count)
1078{
1079	unsigned int i;
1080	int ret;
1081
1082	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1083	if (ret)
1084		return ret;
1085
1086	for (i = 0; i < count; i++)
1087		ClearPageForeign(pages[i]);
1088
1089	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1090}
1091EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
1092
1093#define GNTTAB_UNMAP_REFS_DELAY 5
1094
1095static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
1096
1097static void gnttab_unmap_work(struct work_struct *work)
1098{
1099	struct gntab_unmap_queue_data
1100		*unmap_data = container_of(work, 
1101					   struct gntab_unmap_queue_data,
1102					   gnttab_work.work);
1103	if (unmap_data->age != UINT_MAX)
1104		unmap_data->age++;
1105	__gnttab_unmap_refs_async(unmap_data);
1106}
1107
1108static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1109{
1110	int ret;
1111	int pc;
1112
1113	for (pc = 0; pc < item->count; pc++) {
1114		if (page_count(item->pages[pc]) > 1) {
1115			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1116			schedule_delayed_work(&item->gnttab_work,
1117					      msecs_to_jiffies(delay));
1118			return;
1119		}
1120	}
1121
1122	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1123				item->pages, item->count);
1124	item->done(ret, item);
1125}
1126
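/*
 * If any page in the batch is still referenced elsewhere
 * (page_count() > 1), the unmap is re-queued with a linearly growing
 * delay: 5 ms on the first retry, 10 ms on the second, and so on
 * (GNTTAB_UNMAP_REFS_DELAY * (item->age + 1)).
 */
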
1127void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1128{
1129	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1130	item->age = 0;
1131
1132	__gnttab_unmap_refs_async(item);
1133}
1134EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1135
1136static void unmap_refs_callback(int result,
1137		struct gntab_unmap_queue_data *data)
1138{
1139	struct unmap_refs_callback_data *d = data->data;
1140
1141	d->result = result;
1142	complete(&d->completion);
1143}
1144
1145int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1146{
1147	struct unmap_refs_callback_data data;
1148
1149	init_completion(&data.completion);
1150	item->data = &data;
1151	item->done = &unmap_refs_callback;
1152	gnttab_unmap_refs_async(item);
1153	wait_for_completion(&data.completion);
1154
1155	return data.result;
1156}
1157EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
1158
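/*
 * gnttab_unmap_refs_sync() above is a completion-based wrapper over the
 * async path: it parks an unmap_refs_callback_data on the stack, points
 * item->done at unmap_refs_callback(), and sleeps on the completion
 * until the (possibly re-queued) work finally performs the unmap.
 */
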
1159static unsigned int nr_status_frames(unsigned int nr_grant_frames)
 1160{
 1161	return gnttab_frames(nr_grant_frames, SPP);
1162}
1163
1164static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1165{
1166	int rc;
1167
1168	rc = arch_gnttab_map_shared(frames, nr_gframes,
1169				    gnttab_max_grant_frames(),
1170				    &gnttab_shared.addr);
1171	BUG_ON(rc);
1172
1173	return 0;
1174}
1175
1176static void gnttab_unmap_frames_v1(void)
1177{
1178	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1179}
1180
1181static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1182{
1183	uint64_t *sframes;
1184	unsigned int nr_sframes;
1185	struct gnttab_get_status_frames getframes;
1186	int rc;
1187
1188	nr_sframes = nr_status_frames(nr_gframes);
1189
 1190	/* No need for kzalloc as it is initialized in the following hypercall
1191	 * GNTTABOP_get_status_frames.
1192	 */
1193	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1194	if (!sframes)
1195		return -ENOMEM;
1196
1197	getframes.dom        = DOMID_SELF;
1198	getframes.nr_frames  = nr_sframes;
1199	set_xen_guest_handle(getframes.frame_list, sframes);
1200
1201	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1202				       &getframes, 1);
1203	if (rc == -ENOSYS) {
1204		kfree(sframes);
1205		return -ENOSYS;
1206	}
1207
1208	BUG_ON(rc || getframes.status);
1209
1210	rc = arch_gnttab_map_status(sframes, nr_sframes,
1211				    nr_status_frames(gnttab_max_grant_frames()),
1212				    &grstatus);
1213	BUG_ON(rc);
1214	kfree(sframes);
1215
1216	rc = arch_gnttab_map_shared(frames, nr_gframes,
1217				    gnttab_max_grant_frames(),
1218				    &gnttab_shared.addr);
1219	BUG_ON(rc);
1220
1221	return 0;
1222}
1223
1224static void gnttab_unmap_frames_v2(void)
1225{
1226	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1227	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1228}
1229
1230static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1231{
1232	struct gnttab_setup_table setup;
1233	xen_pfn_t *frames;
1234	unsigned int nr_gframes = end_idx + 1;
1235	int rc;
1236
1237	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1238		struct xen_add_to_physmap xatp;
1239		unsigned int i = end_idx;
1240		rc = 0;
1241		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1242		/*
1243		 * Loop backwards, so that the first hypercall has the largest
1244		 * index, ensuring that the table will grow only once.
1245		 */
1246		do {
1247			xatp.domid = DOMID_SELF;
1248			xatp.idx = i;
1249			xatp.space = XENMAPSPACE_grant_table;
1250			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1251			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1252			if (rc != 0) {
1253				pr_warn("grant table add_to_physmap failed, err=%d\n",
1254					rc);
1255				break;
1256			}
1257		} while (i-- > start_idx);
1258
1259		return rc;
1260	}
1261
 1262	/* No need for kzalloc as it is initialized in the following hypercall
1263	 * GNTTABOP_setup_table.
1264	 */
1265	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
1266	if (!frames)
1267		return -ENOMEM;
1268
1269	setup.dom        = DOMID_SELF;
1270	setup.nr_frames  = nr_gframes;
1271	set_xen_guest_handle(setup.frame_list, frames);
1272
1273	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1274	if (rc == -ENOSYS) {
1275		kfree(frames);
1276		return -ENOSYS;
1277	}
1278
1279	BUG_ON(rc || setup.status);
1280
1281	rc = gnttab_interface->map_frames(frames, nr_gframes);
1282
1283	kfree(frames);
1284
1285	return rc;
1286}
1287
1288static const struct gnttab_ops gnttab_v1_ops = {
1289	.version			= 1,
1290	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1291					  sizeof(struct grant_entry_v1),
1292	.map_frames			= gnttab_map_frames_v1,
1293	.unmap_frames			= gnttab_unmap_frames_v1,
1294	.update_entry			= gnttab_update_entry_v1,
1295	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1296	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
1297	.query_foreign_access		= gnttab_query_foreign_access_v1,
1298};
1299
1300static const struct gnttab_ops gnttab_v2_ops = {
1301	.version			= 2,
1302	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1303					  sizeof(union grant_entry_v2),
1304	.map_frames			= gnttab_map_frames_v2,
1305	.unmap_frames			= gnttab_unmap_frames_v2,
1306	.update_entry			= gnttab_update_entry_v2,
1307	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1308	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
1309	.query_foreign_access		= gnttab_query_foreign_access_v2,
1310};
1311
1312static bool gnttab_need_v2(void)
1313{
1314#ifdef CONFIG_X86
1315	uint32_t base, width;
1316
1317	if (xen_pv_domain()) {
1318		base = xen_cpuid_base();
1319		if (cpuid_eax(base) < 5)
1320			return false;	/* Information not available, use V1. */
1321		width = cpuid_ebx(base + 5) &
1322			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1323		return width > 32 + PAGE_SHIFT;
1324	}
1325#endif
1326	return !!(max_possible_pfn >> 32);
1327}
1328
1329static void gnttab_request_version(void)
1330{
1331	long rc;
1332	struct gnttab_set_version gsv;
1333
1334	if (gnttab_need_v2())
1335		gsv.version = 2;
1336	else
1337		gsv.version = 1;
1338
1339	/* Boot parameter overrides automatic selection. */
1340	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1341		gsv.version = xen_gnttab_version;
1342
1343	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1344	if (rc == 0 && gsv.version == 2)
1345		gnttab_interface = &gnttab_v2_ops;
1346	else
1347		gnttab_interface = &gnttab_v1_ops;
1348	pr_info("Grant tables using version %d layout\n",
1349		gnttab_interface->version);
1350}
1351
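/*
 * A v1 entry stores the frame number in a 32-bit field, so with 4K
 * pages v1 can address at most 2^(32 + 12) bytes == 16 TB of machine
 * memory; gnttab_need_v2() switches to v2 exactly when machine
 * addresses can exceed 32 + PAGE_SHIFT bits. The "version" module
 * parameter (xen_gnttab_version above) can force either version.
 */
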
1352static int gnttab_setup(void)
1353{
1354	unsigned int max_nr_gframes;
1355
1356	max_nr_gframes = gnttab_max_grant_frames();
1357	if (max_nr_gframes < nr_grant_frames)
1358		return -ENOSYS;
1359
1360	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1361		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1362		if (gnttab_shared.addr == NULL) {
1363			pr_warn("gnttab share frames is not mapped!\n");
1364			return -ENOMEM;
1365		}
1366	}
1367	return gnttab_map(0, nr_grant_frames - 1);
1368}
1369
1370int gnttab_resume(void)
1371{
1372	gnttab_request_version();
1373	return gnttab_setup();
1374}
1375
1376int gnttab_suspend(void)
1377{
1378	if (!xen_feature(XENFEAT_auto_translated_physmap))
1379		gnttab_interface->unmap_frames();
1380	return 0;
1381}
1382
1383static int gnttab_expand(unsigned int req_entries)
1384{
1385	int rc;
1386	unsigned int cur, extra;
 1387
 1388	cur = nr_grant_frames;
1389	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1390		 gnttab_interface->grefs_per_grant_frame);
1391	if (cur + extra > gnttab_max_grant_frames()) {
1392		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1393				    " cur=%u extra=%u limit=%u"
1394				    " gnttab_free_count=%u req_entries=%u\n",
1395				    cur, extra, gnttab_max_grant_frames(),
1396				    gnttab_free_count, req_entries);
1397		return -ENOSPC;
1398	}
1399
1400	rc = gnttab_map(cur, cur + extra - 1);
1401	if (rc == 0)
1402		rc = grow_gnttab_list(extra);
1403
1404	return rc;
1405}
1406
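/*
 * Worked example: with the v1 layout (512 grefs per frame), a request
 * for 600 more entries expands the table by (600 + 511) / 512 == 2
 * frames, provided that stays within gnttab_max_grant_frames().
 */
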
1407int gnttab_init(void)
1408{
1409	int i;
1410	unsigned long max_nr_grant_frames;
1411	unsigned int max_nr_glist_frames, nr_glist_frames;
1412	unsigned int nr_init_grefs;
1413	int ret;
1414
1415	gnttab_request_version();
1416	max_nr_grant_frames = gnttab_max_grant_frames();
1417	nr_grant_frames = 1;
1418
1419	/* Determine the maximum number of frames required for the
1420	 * grant reference free list on the current hypervisor.
 1421	 */
 1422	max_nr_glist_frames = (max_nr_grant_frames *
1423			       gnttab_interface->grefs_per_grant_frame / RPP);
1424
1425	gnttab_list = kmalloc_array(max_nr_glist_frames,
1426				    sizeof(grant_ref_t *),
1427				    GFP_KERNEL);
1428	if (gnttab_list == NULL)
1429		return -ENOMEM;
1430
1431	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1432	for (i = 0; i < nr_glist_frames; i++) {
1433		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1434		if (gnttab_list[i] == NULL) {
1435			ret = -ENOMEM;
1436			goto ini_nomem;
1437		}
1438	}
1439
1440	ret = arch_gnttab_init(max_nr_grant_frames,
1441			       nr_status_frames(max_nr_grant_frames));
1442	if (ret < 0)
1443		goto ini_nomem;
1444
1445	if (gnttab_setup() < 0) {
1446		ret = -ENODEV;
1447		goto ini_nomem;
1448	}
1449
1450	nr_init_grefs = nr_grant_frames *
1451			gnttab_interface->grefs_per_grant_frame;
1452
1453	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1454		gnttab_entry(i) = i + 1;
1455
1456	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1457	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1458	gnttab_free_head  = NR_RESERVED_ENTRIES;
1459
1460	printk("Grant table initialized\n");
1461	return 0;
1462
1463 ini_nomem:
1464	for (i--; i >= 0; i--)
1465		free_page((unsigned long)gnttab_list[i]);
1466	kfree(gnttab_list);
1467	return ret;
1468}
1469EXPORT_SYMBOL_GPL(gnttab_init);
1470
1471static int __gnttab_init(void)
1472{
1473	if (!xen_domain())
1474		return -ENODEV;
1475
1476	/* Delay grant-table initialization in the PV on HVM case */
1477	if (xen_hvm_domain() && !xen_pvh_domain())
1478		return 0;
1479
1480	return gnttab_init();
1481}
1482/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1483 * beforehand to initialize xen_auto_xlat_grant_frames. */
1484core_initcall_sync(__gnttab_init);
v5.4
   1/******************************************************************************
   2 * grant_table.c
   3 *
   4 * Granting foreign access to our memory reservation.
   5 *
   6 * Copyright (c) 2005-2006, Christopher Clark
   7 * Copyright (c) 2004-2005, K A Fraser
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License version 2
  11 * as published by the Free Software Foundation; or, when distributed
  12 * separately from the Linux kernel or incorporated into other
  13 * software packages, subject to the following license:
  14 *
  15 * Permission is hereby granted, free of charge, to any person obtaining a copy
  16 * of this source file (the "Software"), to deal in the Software without
  17 * restriction, including without limitation the rights to use, copy, modify,
  18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  19 * and to permit persons to whom the Software is furnished to do so, subject to
  20 * the following conditions:
  21 *
  22 * The above copyright notice and this permission notice shall be included in
  23 * all copies or substantial portions of the Software.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  31 * IN THE SOFTWARE.
  32 */
  33
  34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  35
  36#include <linux/memblock.h>
  37#include <linux/sched.h>
  38#include <linux/mm.h>
  39#include <linux/slab.h>
  40#include <linux/vmalloc.h>
  41#include <linux/uaccess.h>
  42#include <linux/io.h>
  43#include <linux/delay.h>
  44#include <linux/hardirq.h>
  45#include <linux/workqueue.h>
  46#include <linux/ratelimit.h>
  47#include <linux/moduleparam.h>
  48#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
  49#include <linux/dma-mapping.h>
  50#endif
  51
  52#include <xen/xen.h>
  53#include <xen/interface/xen.h>
  54#include <xen/page.h>
  55#include <xen/grant_table.h>
  56#include <xen/interface/memory.h>
  57#include <xen/hvc-console.h>
  58#include <xen/swiotlb-xen.h>
  59#include <xen/balloon.h>
  60#ifdef CONFIG_X86
  61#include <asm/xen/cpuid.h>
  62#endif
  63#include <xen/mem-reservation.h>
  64#include <asm/xen/hypercall.h>
  65#include <asm/xen/interface.h>
  66
  67#include <asm/pgtable.h>
  68#include <asm/sync_bitops.h>
  69
  70/* External tools reserve first few grant table entries. */
  71#define NR_RESERVED_ENTRIES 8
  72#define GNTTAB_LIST_END 0xffffffff
  73
  74static grant_ref_t **gnttab_list;
  75static unsigned int nr_grant_frames;
  76static int gnttab_free_count;
  77static grant_ref_t gnttab_free_head;
  78static DEFINE_SPINLOCK(gnttab_list_lock);
  79struct grant_frames xen_auto_xlat_grant_frames;
  80static unsigned int xen_gnttab_version;
  81module_param_named(version, xen_gnttab_version, uint, 0);
  82
  83static union {
  84	struct grant_entry_v1 *v1;
  85	union grant_entry_v2 *v2;
  86	void *addr;
  87} gnttab_shared;
  88
  89/*This is a structure of function pointers for grant table*/
  90struct gnttab_ops {
  91	/*
  92	 * Version of the grant interface.
  93	 */
  94	unsigned int version;
  95	/*
  96	 * Grant refs per grant frame.
  97	 */
  98	unsigned int grefs_per_grant_frame;
  99	/*
 100	 * Mapping a list of frames for storing grant entries. Frames parameter
 101	 * is used to store grant table address when grant table being setup,
 102	 * nr_gframes is the number of frames to map grant table. Returning
 103	 * GNTST_okay means success and negative value means failure.
 104	 */
 105	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
 106	/*
 107	 * Release a list of frames which are mapped in map_frames for grant
 108	 * entry status.
 109	 */
 110	void (*unmap_frames)(void);
 111	/*
 112	 * Introducing a valid entry into the grant table, granting the frame of
 113	 * this grant entry to domain for accessing or transfering. Ref
 114	 * parameter is reference of this introduced grant entry, domid is id of
 115	 * granted domain, frame is the page frame to be granted, and flags is
 116	 * status of the grant entry to be updated.
 117	 */
 118	void (*update_entry)(grant_ref_t ref, domid_t domid,
 119			     unsigned long frame, unsigned flags);
 120	/*
 121	 * Stop granting a grant entry to domain for accessing. Ref parameter is
 122	 * reference of a grant entry whose grant access will be stopped,
 123	 * readonly is not in use in this function. If the grant entry is
 124	 * currently mapped for reading or writing, just return failure(==0)
 125	 * directly and don't tear down the grant access. Otherwise, stop grant
 126	 * access for this entry and return success(==1).
 127	 */
 128	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
 129	/*
 130	 * Stop granting a grant entry to domain for transfer. Ref parameter is
 131	 * reference of a grant entry whose grant transfer will be stopped. If
 132	 * tranfer has not started, just reclaim the grant entry and return
 133	 * failure(==0). Otherwise, wait for the transfer to complete and then
 134	 * return the frame.
 135	 */
 136	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
 137	/*
 138	 * Query the status of a grant entry. Ref parameter is reference of
 139	 * queried grant entry, return value is the status of queried entry.
 140	 * Detailed status(writing/reading) can be gotten from the return value
 141	 * by bit operations.
 142	 */
 143	int (*query_foreign_access)(grant_ref_t ref);
 144};
 145
 146struct unmap_refs_callback_data {
 147	struct completion completion;
 148	int result;
 149};
 150
 151static const struct gnttab_ops *gnttab_interface;
 152
 153/* This reflects status of grant entries, so act as a global value. */
 154static grant_status_t *grstatus;
 155
 156static struct gnttab_free_callback *gnttab_free_callback_list;
 157
 158static int gnttab_expand(unsigned int req_entries);
 159
 160#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
 161#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 162
 163static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 164{
 165	return &gnttab_list[(entry) / RPP][(entry) % RPP];
 166}
 167/* This can be used as an l-value */
 168#define gnttab_entry(entry) (*__gnttab_entry(entry))
 169
 170static int get_free_entries(unsigned count)
 171{
 172	unsigned long flags;
 173	int ref, rc = 0;
 174	grant_ref_t head;
 175
 176	spin_lock_irqsave(&gnttab_list_lock, flags);
 177
 178	if ((gnttab_free_count < count) &&
 179	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
 180		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 181		return rc;
 182	}
 183
 184	ref = head = gnttab_free_head;
 185	gnttab_free_count -= count;
 186	while (count-- > 1)
 187		head = gnttab_entry(head);
 188	gnttab_free_head = gnttab_entry(head);
 189	gnttab_entry(head) = GNTTAB_LIST_END;
 190
 191	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 192
 193	return ref;
 194}
 195
 196static void do_free_callbacks(void)
 197{
 198	struct gnttab_free_callback *callback, *next;
 199
 200	callback = gnttab_free_callback_list;
 201	gnttab_free_callback_list = NULL;
 202
 203	while (callback != NULL) {
 204		next = callback->next;
 205		if (gnttab_free_count >= callback->count) {
 206			callback->next = NULL;
 207			callback->fn(callback->arg);
 208		} else {
 209			callback->next = gnttab_free_callback_list;
 210			gnttab_free_callback_list = callback;
 211		}
 212		callback = next;
 213	}
 214}
 215
 216static inline void check_free_callbacks(void)
 217{
 218	if (unlikely(gnttab_free_callback_list))
 219		do_free_callbacks();
 220}
 221
 222static void put_free_entry(grant_ref_t ref)
 223{
 224	unsigned long flags;
 225	spin_lock_irqsave(&gnttab_list_lock, flags);
 226	gnttab_entry(ref) = gnttab_free_head;
 227	gnttab_free_head = ref;
 228	gnttab_free_count++;
 229	check_free_callbacks();
 230	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 231}
 232
 233/*
 234 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 235 * Introducing a valid entry into the grant table:
 236 *  1. Write ent->domid.
 237 *  2. Write ent->frame:
 238 *      GTF_permit_access:   Frame to which access is permitted.
 239 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 240 *                           frame, or zero if none.
 241 *  3. Write memory barrier (WMB).
 242 *  4. Write ent->flags, inc. valid type.
 243 */
 244static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
 245				   unsigned long frame, unsigned flags)
 246{
 247	gnttab_shared.v1[ref].domid = domid;
 248	gnttab_shared.v1[ref].frame = frame;
 249	wmb();
 250	gnttab_shared.v1[ref].flags = flags;
 251}
 252
 253static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
 254				   unsigned long frame, unsigned int flags)
 255{
 256	gnttab_shared.v2[ref].hdr.domid = domid;
 257	gnttab_shared.v2[ref].full_page.frame = frame;
 258	wmb();	/* Hypervisor concurrent accesses. */
 259	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
 260}
 261
 262/*
 263 * Public grant-issuing interface functions
 264 */
 265void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
 266				     unsigned long frame, int readonly)
 267{
 268	gnttab_interface->update_entry(ref, domid, frame,
 269			   GTF_permit_access | (readonly ? GTF_readonly : 0));
 270}
 271EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
 272
 273int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 274				int readonly)
 275{
 276	int ref;
 277
 278	ref = get_free_entries(1);
 279	if (unlikely(ref < 0))
 280		return -ENOSPC;
 281
 282	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
 283
 284	return ref;
 285}
 286EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
 287
 288static int gnttab_query_foreign_access_v1(grant_ref_t ref)
 289{
 290	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
 291}
 292
 293static int gnttab_query_foreign_access_v2(grant_ref_t ref)
 294{
 295	return grstatus[ref] & (GTF_reading|GTF_writing);
 296}
 297
 298int gnttab_query_foreign_access(grant_ref_t ref)
 299{
 300	return gnttab_interface->query_foreign_access(ref);
 301}
 302EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
 303
 304static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 305{
 306	u16 flags, nflags;
 307	u16 *pflags;
 308
 309	pflags = &gnttab_shared.v1[ref].flags;
 310	nflags = *pflags;
 311	do {
 312		flags = nflags;
 313		if (flags & (GTF_reading|GTF_writing))
 314			return 0;
 315	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
 316
 317	return 1;
 318}
 319
 320static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
 321{
 322	gnttab_shared.v2[ref].hdr.flags = 0;
 323	mb();	/* Concurrent access by hypervisor. */
 324	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
 325		return 0;
 326	} else {
 327		/*
 328		 * The read of grstatus needs to have acquire semantics.
 329		 *  On x86, reads already have that, and we just need to
 330		 * protect against compiler reorderings.
 331		 * On other architectures we may need a full barrier.
 332		 */
 333#ifdef CONFIG_X86
 334		barrier();
 335#else
 336		mb();
 337#endif
 338	}
 339
 340	return 1;
 341}
 342
 343static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 344{
 345	return gnttab_interface->end_foreign_access_ref(ref, readonly);
 346}
 347
 348int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 349{
 350	if (_gnttab_end_foreign_access_ref(ref, readonly))
 351		return 1;
 352	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
 353	return 0;
 354}
 355EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 356
 357struct deferred_entry {
 358	struct list_head list;
 359	grant_ref_t ref;
 360	bool ro;
 361	uint16_t warn_delay;
 362	struct page *page;
 363};
 364static LIST_HEAD(deferred_list);
 365static void gnttab_handle_deferred(struct timer_list *);
 366static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
 367
 368static void gnttab_handle_deferred(struct timer_list *unused)
 369{
 370	unsigned int nr = 10;
 371	struct deferred_entry *first = NULL;
 372	unsigned long flags;
 373
 374	spin_lock_irqsave(&gnttab_list_lock, flags);
 375	while (nr--) {
 376		struct deferred_entry *entry
 377			= list_first_entry(&deferred_list,
 378					   struct deferred_entry, list);
 379
 380		if (entry == first)
 381			break;
 382		list_del(&entry->list);
 383		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 384		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
 385			put_free_entry(entry->ref);
 386			if (entry->page) {
 387				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
 388					 entry->ref, page_to_pfn(entry->page));
 389				put_page(entry->page);
			} else {
				pr_info("freeing g.e. %#x\n", entry->ref);
			}
 392			kfree(entry);
 393			entry = NULL;
 394		} else {
 395			if (!--entry->warn_delay)
 396				pr_info("g.e. %#x still pending\n", entry->ref);
 397			if (!first)
 398				first = entry;
 399		}
 400		spin_lock_irqsave(&gnttab_list_lock, flags);
 401		if (entry)
 402			list_add_tail(&entry->list, &deferred_list);
 403		else if (list_empty(&deferred_list))
 404			break;
 405	}
 406	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
 407		deferred_timer.expires = jiffies + HZ;
 408		add_timer(&deferred_timer);
 409	}
 410	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 411}
 412
 413static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
 414				struct page *page)
 415{
 416	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 417	const char *what = KERN_WARNING "leaking";
 418
 419	if (entry) {
 420		unsigned long flags;
 421
 422		entry->ref = ref;
 423		entry->ro = readonly;
 424		entry->page = page;
 425		entry->warn_delay = 60;
 426		spin_lock_irqsave(&gnttab_list_lock, flags);
 427		list_add_tail(&entry->list, &deferred_list);
 428		if (!timer_pending(&deferred_timer)) {
 429			deferred_timer.expires = jiffies + HZ;
 430			add_timer(&deferred_timer);
 431		}
 432		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 433		what = KERN_DEBUG "deferring";
 434	}
 435	printk("%s g.e. %#x (pfn %#lx)\n",
 436	       what, ref, page ? page_to_pfn(page) : -1);
 437}
 438
 439void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 440			       unsigned long page)
 441{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page)
			put_page(virt_to_page(page));
	} else {
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
	}
 449}
 450EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
 451
 452int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
 453{
 454	int ref;
 455
 456	ref = get_free_entries(1);
 457	if (unlikely(ref < 0))
 458		return -ENOSPC;
 459	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
 460
 461	return ref;
 462}
 463EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
 464
 465void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
 466				       unsigned long pfn)
 467{
 468	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
 469}
 470EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
 471
 472static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
 473{
 474	unsigned long frame;
 475	u16           flags;
 476	u16          *pflags;
 477
 478	pflags = &gnttab_shared.v1[ref].flags;
 479
 480	/*
 481	 * If a transfer is not even yet started, try to reclaim the grant
 482	 * reference and return failure (== 0).
 483	 */
 484	while (!((flags = *pflags) & GTF_transfer_committed)) {
 485		if (sync_cmpxchg(pflags, flags, 0) == flags)
 486			return 0;
 487		cpu_relax();
 488	}
 489
 490	/* If a transfer is in progress then wait until it is completed. */
 491	while (!(flags & GTF_transfer_completed)) {
 492		flags = *pflags;
 493		cpu_relax();
 494	}
 495
 496	rmb();	/* Read the frame number /after/ reading completion status. */
 497	frame = gnttab_shared.v1[ref].frame;
 498	BUG_ON(frame == 0);
 499
 500	return frame;
 501}
 502
 503static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
 504{
 505	unsigned long frame;
 506	u16           flags;
 507	u16          *pflags;
 508
 509	pflags = &gnttab_shared.v2[ref].hdr.flags;
 510
 511	/*
 512	 * If a transfer is not even yet started, try to reclaim the grant
 513	 * reference and return failure (== 0).
 514	 */
 515	while (!((flags = *pflags) & GTF_transfer_committed)) {
 516		if (sync_cmpxchg(pflags, flags, 0) == flags)
 517			return 0;
 518		cpu_relax();
 519	}
 520
 521	/* If a transfer is in progress then wait until it is completed. */
 522	while (!(flags & GTF_transfer_completed)) {
 523		flags = *pflags;
 524		cpu_relax();
 525	}
 526
 527	rmb();  /* Read the frame number /after/ reading completion status. */
 528	frame = gnttab_shared.v2[ref].full_page.frame;
 529	BUG_ON(frame == 0);
 530
 531	return frame;
 532}
 533
 534unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
 535{
 536	return gnttab_interface->end_foreign_transfer_ref(ref);
 537}
 538EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
 539
 540unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
 541{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);

	put_free_entry(ref);
 544	return frame;
 545}
 546EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
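
/*
 * Illustrative sketch (not part of this file): the transfer lifecycle as
 * seen by the accepting domain.  "otherend_id" and "pfn" are placeholders
 * assumed to come from the caller:
 *
 *	int ref = gnttab_grant_foreign_transfer(otherend_id, pfn);
 *
 *	if (ref < 0)
 *		return ref;
 *	(the peer now transfers one of its frames into the grant)
 *	frame = gnttab_end_foreign_transfer(ref);
 *
 * A returned frame of 0 means the transfer never started and the
 * reference was reclaimed; otherwise "frame" is now owned by this domain.
 */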
 547
 548void gnttab_free_grant_reference(grant_ref_t ref)
 549{
 550	put_free_entry(ref);
 551}
 552EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
 553
 554void gnttab_free_grant_references(grant_ref_t head)
 555{
 556	grant_ref_t ref;
 557	unsigned long flags;
	int count = 1;

	if (head == GNTTAB_LIST_END)
 560		return;
 561	spin_lock_irqsave(&gnttab_list_lock, flags);
 562	ref = head;
 563	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
 564		ref = gnttab_entry(ref);
 565		count++;
 566	}
 567	gnttab_entry(ref) = gnttab_free_head;
 568	gnttab_free_head = head;
 569	gnttab_free_count += count;
 570	check_free_callbacks();
 571	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 572}
 573EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 574
 575int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
 576{
 577	int h = get_free_entries(count);
 578
 579	if (h < 0)
 580		return -ENOSPC;
 581
 582	*head = h;
 583
 584	return 0;
 585}
 586EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
 587
 588int gnttab_empty_grant_references(const grant_ref_t *private_head)
 589{
 590	return (*private_head == GNTTAB_LIST_END);
 591}
 592EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
 593
 594int gnttab_claim_grant_reference(grant_ref_t *private_head)
 595{
	grant_ref_t g = *private_head;

	if (unlikely(g == GNTTAB_LIST_END))
 598		return -ENOSPC;
 599	*private_head = gnttab_entry(g);
 600	return g;
 601}
 602EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
 603
 604void gnttab_release_grant_reference(grant_ref_t *private_head,
 605				    grant_ref_t release)
 606{
 607	gnttab_entry(release) = *private_head;
 608	*private_head = release;
 609}
 610EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
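
/*
 * Illustrative sketch (not part of this file) of the reserve-then-claim
 * pattern used by ring-based drivers: reserve a batch of references up
 * front, then claim them from the private list without taking the global
 * lock per request.  "N", "otherend_id" and "gfn" are placeholders:
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(N, &head) < 0)
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);
 *	gnttab_grant_foreign_access_ref(ref, otherend_id, gfn, 0);
 *	(once the I/O is done and access has been ended:)
 *	gnttab_release_grant_reference(&head, ref);
 *	gnttab_free_grant_references(head);
 */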
 611
 612void gnttab_request_free_callback(struct gnttab_free_callback *callback,
 613				  void (*fn)(void *), void *arg, u16 count)
 614{
 615	unsigned long flags;
 616	struct gnttab_free_callback *cb;
 617
 618	spin_lock_irqsave(&gnttab_list_lock, flags);
 619
 620	/* Check if the callback is already on the list */
 621	cb = gnttab_free_callback_list;
 622	while (cb) {
 623		if (cb == callback)
 624			goto out;
 625		cb = cb->next;
 626	}
 627
 628	callback->fn = fn;
 629	callback->arg = arg;
 630	callback->count = count;
 631	callback->next = gnttab_free_callback_list;
 632	gnttab_free_callback_list = callback;
 633	check_free_callbacks();
 634out:
 635	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 636}
 637EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
 638
 639void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
 640{
 641	struct gnttab_free_callback **pcb;
 642	unsigned long flags;
 643
 644	spin_lock_irqsave(&gnttab_list_lock, flags);
 645	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
 646		if (*pcb == callback) {
 647			*pcb = callback->next;
 648			break;
 649		}
 650	}
 651	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 652}
 653EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
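
/*
 * Illustrative sketch (not part of this file): a driver that runs out of
 * grant references can ask to be notified once "count" entries are free
 * again.  The callback runs under gnttab_list_lock, so it must only do
 * minimal work such as scheduling; "info" is a placeholder private
 * structure with an embedded struct gnttab_free_callback:
 *
 *	static void restart_queue_callback(void *arg)
 *	{
 *		struct info *info = arg;
 *
 *		schedule_work(&info->work);
 *	}
 *
 *	gnttab_request_free_callback(&info->callback,
 *				     restart_queue_callback, info, count);
 */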
 654
 655static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
 656{
 657	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
 658	       align;
 659}
 660
 661static int grow_gnttab_list(unsigned int more_frames)
 662{
 663	unsigned int new_nr_grant_frames, extra_entries, i;
 664	unsigned int nr_glist_frames, new_nr_glist_frames;
 665	unsigned int grefs_per_frame;
 666
 667	BUG_ON(gnttab_interface == NULL);
 668	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 669
 670	new_nr_grant_frames = nr_grant_frames + more_frames;
 671	extra_entries = more_frames * grefs_per_frame;
 672
 673	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
 674	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
 675	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
 676		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
 677		if (!gnttab_list[i])
 678			goto grow_nomem;
 679	}
 680
 681
 682	for (i = grefs_per_frame * nr_grant_frames;
 683	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
 684		gnttab_entry(i) = i + 1;
 685
 686	gnttab_entry(i) = gnttab_free_head;
 687	gnttab_free_head = grefs_per_frame * nr_grant_frames;
 688	gnttab_free_count += extra_entries;
 689
 690	nr_grant_frames = new_nr_grant_frames;
 691
 692	check_free_callbacks();
 693
 694	return 0;
 695
 696grow_nomem:
 697	while (i-- > nr_glist_frames)
 698		free_page((unsigned long) gnttab_list[i]);
 699	return -ENOMEM;
 700}
 701
 702static unsigned int __max_nr_grant_frames(void)
 703{
 704	struct gnttab_query_size query;
 705	int rc;
 706
 707	query.dom = DOMID_SELF;
 708
 709	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
 710	if ((rc < 0) || (query.status != GNTST_okay))
 711		return 4; /* Legacy max supported number of frames */
 712
 713	return query.max_nr_frames;
 714}
 715
 716unsigned int gnttab_max_grant_frames(void)
 717{
 718	unsigned int xen_max = __max_nr_grant_frames();
 719	static unsigned int boot_max_nr_grant_frames;
 720
 721	/* First time, initialize it properly. */
 722	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = xen_max;
 724
 725	if (xen_max > boot_max_nr_grant_frames)
 726		return boot_max_nr_grant_frames;
 727	return xen_max;
 728}
 729EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 730
 731int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 732{
 733	xen_pfn_t *pfn;
 734	unsigned int max_nr_gframes = __max_nr_grant_frames();
 735	unsigned int i;
 736	void *vaddr;
 737
 738	if (xen_auto_xlat_grant_frames.count)
 739		return -EINVAL;
 740
 741	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
 742	if (vaddr == NULL) {
 743		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
 744			&addr);
 745		return -ENOMEM;
 746	}
 747	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
 748	if (!pfn) {
 749		xen_unmap(vaddr);
 750		return -ENOMEM;
 751	}
 752	for (i = 0; i < max_nr_gframes; i++)
 753		pfn[i] = XEN_PFN_DOWN(addr) + i;
 754
 755	xen_auto_xlat_grant_frames.vaddr = vaddr;
 756	xen_auto_xlat_grant_frames.pfn = pfn;
 757	xen_auto_xlat_grant_frames.count = max_nr_gframes;
 758
 759	return 0;
 760}
 761EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
 762
 763void gnttab_free_auto_xlat_frames(void)
 764{
 765	if (!xen_auto_xlat_grant_frames.count)
 766		return;
 767	kfree(xen_auto_xlat_grant_frames.pfn);
 768	xen_unmap(xen_auto_xlat_grant_frames.vaddr);
 769
 770	xen_auto_xlat_grant_frames.pfn = NULL;
 771	xen_auto_xlat_grant_frames.count = 0;
 772	xen_auto_xlat_grant_frames.vaddr = NULL;
 773}
 774EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 775
 776int gnttab_pages_set_private(int nr_pages, struct page **pages)
 777{
 778	int i;
 779
 780	for (i = 0; i < nr_pages; i++) {
 781#if BITS_PER_LONG < 64
 782		struct xen_page_foreign *foreign;
 783
 784		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
 785		if (!foreign)
 786			return -ENOMEM;
 787
 788		set_page_private(pages[i], (unsigned long)foreign);
 789#endif
 790		SetPagePrivate(pages[i]);
 791	}
 792
 793	return 0;
 794}
 795EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
 796
 797/**
 * gnttab_alloc_pages - alloc pages into which foreign grants can be mapped
 799 * @nr_pages: number of pages to alloc
 800 * @pages: returns the pages
 801 */
 802int gnttab_alloc_pages(int nr_pages, struct page **pages)
 803{
 804	int ret;
 805
 806	ret = alloc_xenballooned_pages(nr_pages, pages);
 807	if (ret < 0)
 808		return ret;
 809
 810	ret = gnttab_pages_set_private(nr_pages, pages);
 811	if (ret < 0)
 812		gnttab_free_pages(nr_pages, pages);
 813
 814	return ret;
 815}
 816EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 817
 818void gnttab_pages_clear_private(int nr_pages, struct page **pages)
 819{
 820	int i;
 821
 822	for (i = 0; i < nr_pages; i++) {
 823		if (PagePrivate(pages[i])) {
 824#if BITS_PER_LONG < 64
 825			kfree((void *)page_private(pages[i]));
 826#endif
 827			ClearPagePrivate(pages[i]);
 828		}
 829	}
 830}
 831EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
 832
 833/**
 834 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 836 * @pages: the pages
 837 */
 838void gnttab_free_pages(int nr_pages, struct page **pages)
 839{
 840	gnttab_pages_clear_private(nr_pages, pages);
 841	free_xenballooned_pages(nr_pages, pages);
 842}
 843EXPORT_SYMBOL_GPL(gnttab_free_pages);
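
/*
 * Illustrative sketch (not part of this file): a backend that wants to
 * map foreign grants allocates ballooned pages first and frees them once
 * the mappings are gone.  "nr" is a placeholder small constant:
 *
 *	struct page *pages[nr];
 *
 *	if (gnttab_alloc_pages(nr, pages) < 0)
 *		return -ENOMEM;
 *	(map foreign grants into the pages, do the I/O, unmap)
 *	gnttab_free_pages(nr, pages);
 */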
 844
 845#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 846/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping
 * @args: arguments describing the allocation; see struct gnttab_dma_alloc_args
 849 */
 850int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
 851{
 852	unsigned long pfn, start_pfn;
 853	size_t size;
 854	int i, ret;
 855
 856	size = args->nr_pages << PAGE_SHIFT;
 857	if (args->coherent)
 858		args->vaddr = dma_alloc_coherent(args->dev, size,
 859						 &args->dev_bus_addr,
 860						 GFP_KERNEL | __GFP_NOWARN);
 861	else
 862		args->vaddr = dma_alloc_wc(args->dev, size,
 863					   &args->dev_bus_addr,
 864					   GFP_KERNEL | __GFP_NOWARN);
 865	if (!args->vaddr) {
 866		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
 867		return -ENOMEM;
 868	}
 869
 870	start_pfn = __phys_to_pfn(args->dev_bus_addr);
 871	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
 872			pfn++, i++) {
 873		struct page *page = pfn_to_page(pfn);
 874
 875		args->pages[i] = page;
 876		args->frames[i] = xen_page_to_gfn(page);
 877		xenmem_reservation_scrub_page(page);
 878	}
 879
 880	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
 881
 882	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
 883	if (ret != args->nr_pages) {
 884		pr_debug("Failed to decrease reservation for DMA buffer\n");
 885		ret = -EFAULT;
 886		goto fail;
 887	}
 888
 889	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
 890	if (ret < 0)
 891		goto fail;
 892
 893	return 0;
 894
 895fail:
 896	gnttab_dma_free_pages(args);
 897	return ret;
 898}
 899EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
 900
 901/**
 902 * gnttab_dma_free_pages - free DMAable pages
 * @args: the arguments previously passed to gnttab_dma_alloc_pages()
 904 */
 905int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
 906{
 907	size_t size;
 908	int i, ret;
 909
 910	gnttab_pages_clear_private(args->nr_pages, args->pages);
 911
 912	for (i = 0; i < args->nr_pages; i++)
 913		args->frames[i] = page_to_xen_pfn(args->pages[i]);
 914
 915	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
 916	if (ret != args->nr_pages) {
 917		pr_debug("Failed to increase reservation for DMA buffer\n");
 918		ret = -EFAULT;
 919	} else {
 920		ret = 0;
 921	}
 922
 923	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
 924					     args->frames);
 925
 926	size = args->nr_pages << PAGE_SHIFT;
 927	if (args->coherent)
 928		dma_free_coherent(args->dev, size,
 929				  args->vaddr, args->dev_bus_addr);
 930	else
 931		dma_free_wc(args->dev, size,
 932			    args->vaddr, args->dev_bus_addr);
 933	return ret;
 934}
 935EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
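
/*
 * Illustrative sketch (not part of this file): filling in struct
 * gnttab_dma_alloc_args for a device needing a coherent, grantable
 * buffer.  "dev", "nr", "pages" and "frames" are placeholders provided
 * by the caller:
 *
 *	struct gnttab_dma_alloc_args args = {
 *		.dev      = dev,
 *		.coherent = true,
 *		.nr_pages = nr,
 *		.pages    = pages,
 *		.frames   = frames,
 *	};
 *
 *	ret = gnttab_dma_alloc_pages(&args);
 *	(on success args.vaddr and args.dev_bus_addr describe the buffer)
 *	gnttab_dma_free_pages(&args);
 */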
 936#endif
 937
 938/* Handling of paged out grant targets (GNTST_eagain) */
 939#define MAX_DELAY 256
 940static inline void
 941gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
 942						const char *func)
 943{
	unsigned int delay = 1;
 945
 946	do {
 947		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
 948		if (*status == GNTST_eagain)
 949			msleep(delay++);
 950	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
 951
 952	if (delay >= MAX_DELAY) {
 953		pr_err("%s: %s eagain grant\n", func, current->comm);
 954		*status = GNTST_bad_page;
 955	}
 956}
 957
void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned int count)
 959{
 960	struct gnttab_map_grant_ref *op;
 961
 962	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
 963		BUG();
 964	for (op = batch; op < batch + count; op++)
 965		if (op->status == GNTST_eagain)
 966			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
 967						&op->status, __func__);
 968}
 969EXPORT_SYMBOL_GPL(gnttab_batch_map);
 970
void gnttab_batch_copy(struct gnttab_copy *batch, unsigned int count)
 972{
 973	struct gnttab_copy *op;
 974
 975	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
 976		BUG();
 977	for (op = batch; op < batch + count; op++)
 978		if (op->status == GNTST_eagain)
 979			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
 980						&op->status, __func__);
 981}
 982EXPORT_SYMBOL_GPL(gnttab_batch_copy);
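
/*
 * Illustrative sketch (not part of this file): copying "len" bytes out of
 * a foreign grant into a local frame with a single copy operation.
 * "src_ref", "otherend_id", "gfn" and "len" are placeholders:
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref = src_ref,
 *		.source.domid = otherend_id,
 *		.dest.u.gmfn  = gfn,
 *		.dest.domid   = DOMID_SELF,
 *		.len          = len,
 *		.flags        = GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		(handle the failure)
 */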
 983
 984void gnttab_foreach_grant_in_range(struct page *page,
 985				   unsigned int offset,
 986				   unsigned int len,
 987				   xen_grant_fn_t fn,
 988				   void *data)
 989{
 990	unsigned int goffset;
 991	unsigned int glen;
 992	unsigned long xen_pfn;
 993
 994	len = min_t(unsigned int, PAGE_SIZE - offset, len);
 995	goffset = xen_offset_in_page(offset);
 996
 997	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
 998
 999	while (len) {
1000		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
1001		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1002
1003		goffset = 0;
1004		xen_pfn++;
1005		len -= glen;
1006	}
1007}
1008EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
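
/*
 * Illustrative sketch (not part of this file): a callback matching
 * xen_grant_fn_t that fills one ring segment per Xen page, which keeps a
 * driver correct when PAGE_SIZE > XEN_PAGE_SIZE (e.g. 64K pages on
 * arm64).  "struct setup_ctx" and "grant_for_gfn()" are hypothetical:
 *
 *	static void setup_seg(unsigned long gfn, unsigned int offset,
 *			      unsigned int len, void *data)
 *	{
 *		struct setup_ctx *ctx = data;
 *
 *		ctx->seg[ctx->i].gref   = grant_for_gfn(ctx, gfn);
 *		ctx->seg[ctx->i].offset = offset;
 *		ctx->seg[ctx->i].len    = len;
 *		ctx->i++;
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, setup_seg, ctx);
 */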
1009
1010void gnttab_foreach_grant(struct page **pages,
1011			  unsigned int nr_grefs,
1012			  xen_grant_fn_t fn,
1013			  void *data)
1014{
1015	unsigned int goffset = 0;
1016	unsigned long xen_pfn = 0;
1017	unsigned int i;
1018
1019	for (i = 0; i < nr_grefs; i++) {
1020		if ((i % XEN_PFN_PER_PAGE) == 0) {
1021			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1022			goffset = 0;
1023		}
1024
1025		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1026
1027		goffset += XEN_PAGE_SIZE;
1028		xen_pfn++;
1029	}
1030}
1031
1032int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1033		    struct gnttab_map_grant_ref *kmap_ops,
1034		    struct page **pages, unsigned int count)
1035{
1036	int i, ret;
1037
1038	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1039	if (ret)
1040		return ret;
1041
1042	for (i = 0; i < count; i++) {
1043		switch (map_ops[i].status) {
1044		case GNTST_okay:
1045		{
1046			struct xen_page_foreign *foreign;
1047
1048			SetPageForeign(pages[i]);
1049			foreign = xen_page_foreign(pages[i]);
1050			foreign->domid = map_ops[i].dom;
1051			foreign->gref = map_ops[i].ref;
1052			break;
1053		}
1054
1055		case GNTST_no_device_space:
1056			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1057			break;
1058
1059		case GNTST_eagain:
1060			/* Retry eagain maps */
1061			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1062						map_ops + i,
1063						&map_ops[i].status, __func__);
1064			/* Test status in next loop iteration. */
1065			i--;
1066			break;
1067
1068		default:
1069			break;
1070		}
1071	}
1072
1073	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1074}
1075EXPORT_SYMBOL_GPL(gnttab_map_refs);
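
/*
 * Illustrative sketch (not part of this file): mapping a single foreign
 * grant into a ballooned page, using the gnttab_set_map_op() helper from
 * <xen/grant_table.h>.  "page", "ref" and "otherend_id" are placeholders:
 *
 *	struct gnttab_map_grant_ref op;
 *	unsigned long addr = (unsigned long)page_address(page);
 *
 *	gnttab_set_map_op(&op, addr, GNTMAP_host_map, ref, otherend_id);
 *	if (gnttab_map_refs(&op, NULL, &page, 1) || op.status != GNTST_okay)
 *		(handle the failure)
 *	(keep op.handle around; it is needed for the unmap)
 */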
1076
1077int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1078		      struct gnttab_unmap_grant_ref *kunmap_ops,
1079		      struct page **pages, unsigned int count)
1080{
1081	unsigned int i;
1082	int ret;
1083
1084	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1085	if (ret)
1086		return ret;
1087
1088	for (i = 0; i < count; i++)
1089		ClearPageForeign(pages[i]);
1090
1091	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1092}
1093EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
1094
1095#define GNTTAB_UNMAP_REFS_DELAY 5
1096
static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);
1098
1099static void gnttab_unmap_work(struct work_struct *work)
1100{
	struct gntab_unmap_queue_data *unmap_data =
		container_of(work, struct gntab_unmap_queue_data,
			     gnttab_work.work);

	if (unmap_data->age != UINT_MAX)
1106		unmap_data->age++;
1107	__gnttab_unmap_refs_async(unmap_data);
1108}
1109
static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
1111{
1112	int ret;
1113	int pc;
1114
1115	for (pc = 0; pc < item->count; pc++) {
1116		if (page_count(item->pages[pc]) > 1) {
1117			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1118			schedule_delayed_work(&item->gnttab_work,
1119					      msecs_to_jiffies(delay));
1120			return;
1121		}
1122	}
1123
1124	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1125				item->pages, item->count);
1126	item->done(ret, item);
1127}
1128
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
1130{
1131	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1132	item->age = 0;
1133
1134	__gnttab_unmap_refs_async(item);
1135}
1136EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1137
1138static void unmap_refs_callback(int result,
1139		struct gntab_unmap_queue_data *data)
1140{
1141	struct unmap_refs_callback_data *d = data->data;
1142
1143	d->result = result;
1144	complete(&d->completion);
1145}
1146
1147int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1148{
1149	struct unmap_refs_callback_data data;
1150
1151	init_completion(&data.completion);
1152	item->data = &data;
1153	item->done = &unmap_refs_callback;
1154	gnttab_unmap_refs_async(item);
1155	wait_for_completion(&data.completion);
1156
1157	return data.result;
1158}
1159EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
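
/*
 * Illustrative sketch (not part of this file): tearing a mapping down
 * synchronously with the gnttab_set_unmap_op() helper from
 * <xen/grant_table.h>.  "addr", "handle" and "page" are placeholders kept
 * from the map step:
 *
 *	struct gnttab_unmap_grant_ref unmap;
 *	struct gntab_unmap_queue_data unmap_data;
 *
 *	gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, handle);
 *	unmap_data.unmap_ops  = &unmap;
 *	unmap_data.kunmap_ops = NULL;
 *	unmap_data.pages      = &page;
 *	unmap_data.count      = 1;
 *	ret = gnttab_unmap_refs_sync(&unmap_data);
 *
 * The async machinery above retries while somebody still holds an extra
 * reference on one of the pages, which is what the page_count() check in
 * __gnttab_unmap_refs_async() is for.
 */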
1160
1161static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1162{
1163	BUG_ON(gnttab_interface == NULL);
1164	return gnttab_frames(nr_grant_frames, SPP);
1165}
1166
1167static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1168{
1169	int rc;
1170
1171	rc = arch_gnttab_map_shared(frames, nr_gframes,
1172				    gnttab_max_grant_frames(),
1173				    &gnttab_shared.addr);
1174	BUG_ON(rc);
1175
1176	return 0;
1177}
1178
1179static void gnttab_unmap_frames_v1(void)
1180{
1181	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1182}
1183
1184static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1185{
1186	uint64_t *sframes;
1187	unsigned int nr_sframes;
1188	struct gnttab_get_status_frames getframes;
1189	int rc;
1190
1191	nr_sframes = nr_status_frames(nr_gframes);
1192
	/*
	 * No need for kzalloc as the array is fully initialized by the
	 * GNTTABOP_get_status_frames hypercall below.
	 */
1196	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1197	if (!sframes)
1198		return -ENOMEM;
1199
1200	getframes.dom        = DOMID_SELF;
1201	getframes.nr_frames  = nr_sframes;
1202	set_xen_guest_handle(getframes.frame_list, sframes);
1203
1204	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1205				       &getframes, 1);
1206	if (rc == -ENOSYS) {
1207		kfree(sframes);
1208		return -ENOSYS;
1209	}
1210
1211	BUG_ON(rc || getframes.status);
1212
1213	rc = arch_gnttab_map_status(sframes, nr_sframes,
1214				    nr_status_frames(gnttab_max_grant_frames()),
1215				    &grstatus);
1216	BUG_ON(rc);
1217	kfree(sframes);
1218
1219	rc = arch_gnttab_map_shared(frames, nr_gframes,
1220				    gnttab_max_grant_frames(),
1221				    &gnttab_shared.addr);
1222	BUG_ON(rc);
1223
1224	return 0;
1225}
1226
1227static void gnttab_unmap_frames_v2(void)
1228{
1229	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1230	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1231}
1232
1233static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1234{
1235	struct gnttab_setup_table setup;
1236	xen_pfn_t *frames;
1237	unsigned int nr_gframes = end_idx + 1;
1238	int rc;
1239
1240	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1241		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;

		rc = 0;
1244		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1245		/*
1246		 * Loop backwards, so that the first hypercall has the largest
1247		 * index, ensuring that the table will grow only once.
1248		 */
1249		do {
1250			xatp.domid = DOMID_SELF;
1251			xatp.idx = i;
1252			xatp.space = XENMAPSPACE_grant_table;
1253			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1254			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1255			if (rc != 0) {
1256				pr_warn("grant table add_to_physmap failed, err=%d\n",
1257					rc);
1258				break;
1259			}
1260		} while (i-- > start_idx);
1261
1262		return rc;
1263	}
1264
	/*
	 * No need for kzalloc as the array is fully initialized by the
	 * GNTTABOP_setup_table hypercall below.
	 */
1268	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
1269	if (!frames)
1270		return -ENOMEM;
1271
1272	setup.dom        = DOMID_SELF;
1273	setup.nr_frames  = nr_gframes;
1274	set_xen_guest_handle(setup.frame_list, frames);
1275
1276	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1277	if (rc == -ENOSYS) {
1278		kfree(frames);
1279		return -ENOSYS;
1280	}
1281
1282	BUG_ON(rc || setup.status);
1283
1284	rc = gnttab_interface->map_frames(frames, nr_gframes);
1285
1286	kfree(frames);
1287
1288	return rc;
1289}
1290
1291static const struct gnttab_ops gnttab_v1_ops = {
1292	.version			= 1,
1293	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1294					  sizeof(struct grant_entry_v1),
1295	.map_frames			= gnttab_map_frames_v1,
1296	.unmap_frames			= gnttab_unmap_frames_v1,
1297	.update_entry			= gnttab_update_entry_v1,
1298	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1299	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
1300	.query_foreign_access		= gnttab_query_foreign_access_v1,
1301};
1302
1303static const struct gnttab_ops gnttab_v2_ops = {
1304	.version			= 2,
1305	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1306					  sizeof(union grant_entry_v2),
1307	.map_frames			= gnttab_map_frames_v2,
1308	.unmap_frames			= gnttab_unmap_frames_v2,
1309	.update_entry			= gnttab_update_entry_v2,
1310	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1311	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
1312	.query_foreign_access		= gnttab_query_foreign_access_v2,
1313};
1314
1315static bool gnttab_need_v2(void)
1316{
1317#ifdef CONFIG_X86
1318	uint32_t base, width;
1319
1320	if (xen_pv_domain()) {
1321		base = xen_cpuid_base();
1322		if (cpuid_eax(base) < 5)
1323			return false;	/* Information not available, use V1. */
1324		width = cpuid_ebx(base + 5) &
1325			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1326		return width > 32 + PAGE_SHIFT;
1327	}
1328#endif
1329	return !!(max_possible_pfn >> 32);
1330}
1331
1332static void gnttab_request_version(void)
1333{
1334	long rc;
1335	struct gnttab_set_version gsv;
1336
1337	if (gnttab_need_v2())
1338		gsv.version = 2;
1339	else
1340		gsv.version = 1;
1341
1342	/* Boot parameter overrides automatic selection. */
1343	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1344		gsv.version = xen_gnttab_version;
1345
1346	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1347	if (rc == 0 && gsv.version == 2)
1348		gnttab_interface = &gnttab_v2_ops;
1349	else
1350		gnttab_interface = &gnttab_v1_ops;
1351	pr_info("Grant tables using version %d layout\n",
1352		gnttab_interface->version);
1353}
1354
1355static int gnttab_setup(void)
1356{
1357	unsigned int max_nr_gframes;
1358
1359	max_nr_gframes = gnttab_max_grant_frames();
1360	if (max_nr_gframes < nr_grant_frames)
1361		return -ENOSYS;
1362
	if (xen_feature(XENFEAT_auto_translated_physmap) &&
	    gnttab_shared.addr == NULL) {
1364		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1365		if (gnttab_shared.addr == NULL) {
1366			pr_warn("gnttab share frames is not mapped!\n");
1367			return -ENOMEM;
1368		}
1369	}
1370	return gnttab_map(0, nr_grant_frames - 1);
1371}
1372
1373int gnttab_resume(void)
1374{
1375	gnttab_request_version();
1376	return gnttab_setup();
1377}
1378
1379int gnttab_suspend(void)
1380{
1381	if (!xen_feature(XENFEAT_auto_translated_physmap))
1382		gnttab_interface->unmap_frames();
1383	return 0;
1384}
1385
1386static int gnttab_expand(unsigned int req_entries)
1387{
1388	int rc;
1389	unsigned int cur, extra;
1390
1391	BUG_ON(gnttab_interface == NULL);
1392	cur = nr_grant_frames;
1393	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1394		 gnttab_interface->grefs_per_grant_frame);
1395	if (cur + extra > gnttab_max_grant_frames()) {
1396		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1397				    " cur=%u extra=%u limit=%u"
1398				    " gnttab_free_count=%u req_entries=%u\n",
1399				    cur, extra, gnttab_max_grant_frames(),
1400				    gnttab_free_count, req_entries);
1401		return -ENOSPC;
1402	}
1403
1404	rc = gnttab_map(cur, cur + extra - 1);
1405	if (rc == 0)
1406		rc = grow_gnttab_list(extra);
1407
1408	return rc;
1409}
1410
1411int gnttab_init(void)
1412{
1413	int i;
1414	unsigned long max_nr_grant_frames;
1415	unsigned int max_nr_glist_frames, nr_glist_frames;
1416	unsigned int nr_init_grefs;
1417	int ret;
1418
1419	gnttab_request_version();
1420	max_nr_grant_frames = gnttab_max_grant_frames();
1421	nr_grant_frames = 1;
1422
1423	/* Determine the maximum number of frames required for the
1424	 * grant reference free list on the current hypervisor.
1425	 */
1426	BUG_ON(gnttab_interface == NULL);
1427	max_nr_glist_frames = (max_nr_grant_frames *
1428			       gnttab_interface->grefs_per_grant_frame / RPP);
1429
1430	gnttab_list = kmalloc_array(max_nr_glist_frames,
1431				    sizeof(grant_ref_t *),
1432				    GFP_KERNEL);
1433	if (gnttab_list == NULL)
1434		return -ENOMEM;
1435
1436	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1437	for (i = 0; i < nr_glist_frames; i++) {
1438		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1439		if (gnttab_list[i] == NULL) {
1440			ret = -ENOMEM;
1441			goto ini_nomem;
1442		}
1443	}
1444
1445	ret = arch_gnttab_init(max_nr_grant_frames,
1446			       nr_status_frames(max_nr_grant_frames));
1447	if (ret < 0)
1448		goto ini_nomem;
1449
1450	if (gnttab_setup() < 0) {
1451		ret = -ENODEV;
1452		goto ini_nomem;
1453	}
1454
1455	nr_init_grefs = nr_grant_frames *
1456			gnttab_interface->grefs_per_grant_frame;
1457
1458	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1459		gnttab_entry(i) = i + 1;
1460
1461	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1462	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1463	gnttab_free_head  = NR_RESERVED_ENTRIES;
1464
1465	printk("Grant table initialized\n");
1466	return 0;
1467
1468 ini_nomem:
1469	for (i--; i >= 0; i--)
1470		free_page((unsigned long)gnttab_list[i]);
1471	kfree(gnttab_list);
1472	return ret;
1473}
1474EXPORT_SYMBOL_GPL(gnttab_init);
1475
1476static int __gnttab_init(void)
1477{
1478	if (!xen_domain())
1479		return -ENODEV;
1480
1481	/* Delay grant-table initialization in the PV on HVM case */
1482	if (xen_hvm_domain() && !xen_pvh_domain())
1483		return 0;
1484
1485	return gnttab_init();
1486}
/*
 * Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames.
 */
1489core_initcall_sync(__gnttab_init);