/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

/* External tools reserve first few grant table entries. */
#define NR_RESERVED_ENTRIES 8
#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;
static int gnttab_free_count;
static grant_ref_t gnttab_free_head;
static DEFINE_SPINLOCK(gnttab_list_lock);
struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter stores the grant table addresses when the grant table is
	 * being set up, and nr_gframes is the number of frames to map.
	 * Returning GNTST_okay means success; a negative value means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames that was mapped in map_frames for the
	 * grant entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame of
	 * this grant entry to a domain for access or transfer. The ref
	 * parameter is the reference of the introduced grant entry, domid is
	 * the id of the granted domain, frame is the page frame to be
	 * granted, and flags is the status of the grant entry to be updated.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for access. The ref
	 * parameter is the reference of the grant entry whose access will be
	 * stopped; readonly is not used in this function. If the grant entry
	 * is currently mapped for reading or writing, just return failure
	 * (== 0) directly and don't tear down the grant access. Otherwise,
	 * stop the grant access for this entry and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
	/*
	 * Stop granting a grant entry to a domain for transfer. The ref
	 * parameter is the reference of the grant entry whose transfer will
	 * be stopped. If the transfer has not started, just reclaim the grant
	 * entry and return failure (== 0). Otherwise, wait for the transfer
	 * to complete and then return the frame.
	 */
	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
	/*
	 * Query the status of a grant entry. The ref parameter is the
	 * reference of the queried grant entry; the return value is the
	 * status of that entry. The detailed status (writing/reading) can be
	 * obtained from the return value by bit operations.
	 */
	int (*query_foreign_access)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count-- > 1)
		head = gnttab_entry(head);
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	gnttab_free_count++;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

/*
 * The following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame:
 *      GTF_permit_access:   Frame to which access is permitted.
 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 *                           frame, or zero if none.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
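
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * the typical grant/revoke cycle a frontend driver performs.  The names
 * "example_share_page" and "backend_domid" are invented for illustration;
 * error handling is abbreviated.
 */
#if 0
static int example_share_page(domid_t backend_domid, struct page *page)
{
	int ref;

	/* Grant the backend read-write access to this frame. */
	ref = gnttab_grant_foreign_access(backend_domid,
					  xen_page_to_gfn(page), 0);
	if (ref < 0)
		return ref;

	/* ... advertise "ref" to the backend, e.g. via xenstore ... */

	/* Revoke: frees the entry, or defers teardown if still mapped. */
	gnttab_end_foreign_access(ref, 0,
				  (unsigned long)page_address(page));
	return 0;
}
#endif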

static int gnttab_query_foreign_access_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
}

static int gnttab_query_foreign_access_v2(grant_ref_t ref)
{
	return grstatus[ref] & (GTF_reading|GTF_writing);
}

int gnttab_query_foreign_access(grant_ref_t ref)
{
	return gnttab_interface->query_foreign_access(ref);
}
EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
{
	u16 flags, nflags;
	u16 *pflags;

	pflags = &gnttab_shared.v1[ref].flags;
	nflags = *pflags;
	do {
		flags = nflags;
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	return gnttab_interface->end_foreign_access_ref(ref, readonly);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
{
	if (_gnttab_end_foreign_access_ref(ref, readonly))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	bool ro;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = 10;
	struct deferred_entry *first = NULL;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (nr--) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
			put_free_entry(entry->ref);
			if (entry->page) {
				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
					 entry->ref, page_to_pfn(entry->page));
				put_page(entry->page);
			} else
				pr_info("freeing g.e. %#x\n", entry->ref);
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
		else if (list_empty(&deferred_list))
			break;
	}
	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
				struct page *page)
{
	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	const char *what = KERN_WARNING "leaking";

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->ro = readonly;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		what = KERN_DEBUG "deferring";
	}
	printk("%s g.e. %#x (pfn %#lx)\n",
	       what, ref, page ? page_to_pfn(page) : -1);
}

void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
			       unsigned long page)
{
	if (gnttab_end_foreign_access_ref(ref, readonly)) {
		put_free_entry(ref);
		if (page != 0)
			put_page(virt_to_page(page));
	} else
		gnttab_add_deferred(ref, readonly,
				    page ? virt_to_page(page) : NULL);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
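
/*
 * Editorial note: if the remote domain still has the grant mapped when
 * gnttab_end_foreign_access() is called, the entry cannot be torn down
 * immediately.  It is queued on deferred_list above and retried roughly
 * once per second, and a page passed by the caller is only put_page()d
 * once the peer has actually let go.
 */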

int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;
	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);

void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
				       unsigned long pfn)
{
	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);

static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v1[ref].flags;

	/*
	 * If the transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();	/* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v1[ref].frame;
	BUG_ON(frame == 0);

	return frame;
}

static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
{
	unsigned long frame;
	u16           flags;
	u16          *pflags;

	pflags = &gnttab_shared.v2[ref].hdr.flags;

	/*
	 * If the transfer has not yet started, try to reclaim the grant
	 * reference and return failure (== 0).
	 */
	while (!((flags = *pflags) & GTF_transfer_committed)) {
		if (sync_cmpxchg(pflags, flags, 0) == flags)
			return 0;
		cpu_relax();
	}

	/* If a transfer is in progress then wait until it is completed. */
	while (!(flags & GTF_transfer_completed)) {
		flags = *pflags;
		cpu_relax();
	}

	rmb();  /* Read the frame number /after/ reading completion status. */
	frame = gnttab_shared.v2[ref].full_page.frame;
	BUG_ON(frame == 0);

	return frame;
}

unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_transfer_ref(ref);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);

unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
{
	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
	put_free_entry(ref);
	return frame;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;
	int count = 1;
	if (head == GNTTAB_LIST_END)
		return;
	spin_lock_irqsave(&gnttab_list_lock, flags);
	ref = head;
	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
		ref = gnttab_entry(ref);
		count++;
	}
	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = head;
	gnttab_free_count += count;
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
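
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * the batch-allocation pattern built on the helpers above.  Names are
 * invented for illustration; with nr references pre-allocated, the
 * claims below cannot fail.
 */
#if 0
static int example_grant_many(domid_t domid, struct page **pages,
			      unsigned int nr, grant_ref_t *refs)
{
	grant_ref_t head;
	unsigned int i;
	int err;

	err = gnttab_alloc_grant_references(nr, &head);
	if (err < 0)
		return err;

	for (i = 0; i < nr; i++) {
		refs[i] = gnttab_claim_grant_reference(&head);
		gnttab_grant_foreign_access_ref(refs[i], domid,
						xen_page_to_gfn(pages[i]), 0);
	}

	/* Nothing left unclaimed, so this returns immediately. */
	gnttab_free_grant_references(head);
	return 0;
}
#endif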

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);

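/*
 * Editorial example (not part of the original file): a minimal sketch of
 * registering a callback that fires once enough grant references have
 * been freed, e.g. after get_free_entries() returned -ENOSPC.  Names are
 * invented for illustration.
 */
#if 0
static struct gnttab_free_callback example_callback;

static void example_refs_available(void *arg)
{
	/* e.g. re-kick a request queue that was blocked on -ENOSPC */
}

static void example_wait_for_refs(u16 needed)
{
	gnttab_request_free_callback(&example_callback,
				     example_refs_available, NULL, needed);
}
#endif
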
static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}
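
/*
 * Editorial example: with 4 KiB pages, a v1 grant frame holds
 * XEN_PAGE_SIZE / sizeof(struct grant_entry_v1) = 4096 / 8 = 512 entries
 * and RPP = PAGE_SIZE / sizeof(grant_ref_t) = 4096 / 4 = 1024, so
 * gnttab_frames(3, RPP) = (3 * 512 + 1023) / 1024 = 2 gnttab_list pages.
 */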

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	for (i = grefs_per_frame * nr_grant_frames;
	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = gnttab_free_head;
	gnttab_free_head = grefs_per_frame * nr_grant_frames;
	gnttab_free_count += extra_entries;

	nr_grant_frames = new_nr_grant_frames;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
	if (vaddr == NULL) {
		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		xen_unmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	xen_unmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
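
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * a backend obtaining pages that foreign grants can be mapped into and
 * releasing them again.  Names are invented for illustration.
 */
#if 0
static int example_with_map_pages(int nr, struct page **pages)
{
	int err = gnttab_alloc_pages(nr, pages);

	if (err)
		return err;

	/* ... map foreign grants into these pages and use them ... */

	gnttab_free_pages(nr, pages);
	return 0;
}
#endif
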
void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
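
/*
 * Editorial example (not part of the original file): a sketch of filling
 * in struct gnttab_dma_alloc_args for a coherent buffer, assuming the
 * field names from <xen/grant_table.h>; "dev", "pages" and "frames" are
 * supplied by the caller.
 */
#if 0
static int example_dma_alloc(struct device *dev, int nr,
			     struct page **pages, xen_pfn_t *frames)
{
	struct gnttab_dma_alloc_args args = {
		.dev      = dev,
		.coherent = true,	/* use dma_alloc_coherent() */
		.nr_pages = nr,
		.pages    = pages,
		.frames   = frames,
	};

	return gnttab_dma_alloc_pages(&args);
}
#endif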

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
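
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * copying bytes out of a foreign granted page into a local frame with a
 * single batched copy operation.  Names are invented for illustration.
 */
#if 0
static int16_t example_copy_from_gref(domid_t domid, grant_ref_t gref,
				      xen_pfn_t local_gfn, uint16_t len)
{
	struct gnttab_copy op = {
		.source.u.ref = gref,
		.source.domid = domid,
		.dest.u.gmfn  = local_gfn,
		.dest.domid   = DOMID_SELF,
		.len          = len,
		.flags        = GNTCOPY_source_gref,
	};

	gnttab_batch_copy(&op, 1);	/* retries GNTST_eagain internally */
	return op.status;
}
#endif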

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
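
/*
 * Editorial example (not part of the original file): a sketch of a
 * xen_grant_fn_t callback counting the Xen-page-sized chunks of a
 * buffer; mainly interesting when PAGE_SIZE > XEN_PAGE_SIZE (e.g.
 * 64 KiB pages on arm64).  Names are invented for illustration.
 */
#if 0
static void example_count_chunk(unsigned long gfn, unsigned int offset,
				unsigned int len, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int example_nr_chunks(struct page *page,
				      unsigned int offset, unsigned int len)
{
	unsigned int n = 0;

	gnttab_foreach_grant_in_range(page, offset, len,
				      example_count_chunk, &n);
	return n;
}
#endif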

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
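
/*
 * Editorial example (not part of the original file): a minimal sketch of
 * mapping one foreign grant into a ballooned page and unmapping it
 * again, using the gnttab_set_map_op()/gnttab_set_unmap_op() helpers
 * from <xen/grant_table.h>.  Error handling is abbreviated and names are
 * invented for illustration.
 */
#if 0
static int example_map_one(domid_t domid, grant_ref_t gref,
			   struct page *page)
{
	struct gnttab_map_grant_ref map;
	struct gnttab_unmap_grant_ref unmap;
	phys_addr_t addr = (phys_addr_t)(unsigned long)page_address(page);
	int err;

	gnttab_set_map_op(&map, addr, GNTMAP_host_map, gref, domid);
	err = gnttab_map_refs(&map, NULL, &page, 1);
	if (err || map.status != GNTST_okay)
		return err ? err : -EINVAL;

	/* ... access the foreign data through "page" ... */

	gnttab_set_unmap_op(&unmap, addr, GNTMAP_host_map, map.handle);
	return gnttab_unmap_refs(&unmap, NULL, &page, 1);
}
#endif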

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
		struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized in the following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized in the following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
	.query_foreign_access		= gnttab_query_foreign_access_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
	.query_foreign_access		= gnttab_query_foreign_access_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}
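
/*
 * Editorial note: grant_entry_v1.frame is a 32-bit field, so the width
 * test above selects v2 only when machine frame numbers can exceed
 * 32 bits, i.e. when physical addresses go beyond 32 + PAGE_SHIFT bits
 * (16 TiB with 4 KiB pages).
 */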

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames are not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	unsigned int nr_init_grefs;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = (max_nr_grant_frames *
			       gnttab_interface->grefs_per_grant_frame / RPP);

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	nr_init_grefs = nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;

	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
	gnttab_free_head  = NR_RESERVED_ENTRIES;

	printk("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);
v5.14.15
   1/******************************************************************************
   2 * grant_table.c
   3 *
   4 * Granting foreign access to our memory reservation.
   5 *
   6 * Copyright (c) 2005-2006, Christopher Clark
   7 * Copyright (c) 2004-2005, K A Fraser
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License version 2
  11 * as published by the Free Software Foundation; or, when distributed
  12 * separately from the Linux kernel or incorporated into other
  13 * software packages, subject to the following license:
  14 *
  15 * Permission is hereby granted, free of charge, to any person obtaining a copy
  16 * of this source file (the "Software"), to deal in the Software without
  17 * restriction, including without limitation the rights to use, copy, modify,
  18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  19 * and to permit persons to whom the Software is furnished to do so, subject to
  20 * the following conditions:
  21 *
  22 * The above copyright notice and this permission notice shall be included in
  23 * all copies or substantial portions of the Software.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  31 * IN THE SOFTWARE.
  32 */
  33
  34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  35
  36#include <linux/memblock.h>
  37#include <linux/sched.h>
  38#include <linux/mm.h>
  39#include <linux/slab.h>
  40#include <linux/vmalloc.h>
  41#include <linux/uaccess.h>
  42#include <linux/io.h>
  43#include <linux/delay.h>
  44#include <linux/hardirq.h>
  45#include <linux/workqueue.h>
  46#include <linux/ratelimit.h>
  47#include <linux/moduleparam.h>
  48#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
  49#include <linux/dma-mapping.h>
  50#endif
  51
  52#include <xen/xen.h>
  53#include <xen/interface/xen.h>
  54#include <xen/page.h>
  55#include <xen/grant_table.h>
  56#include <xen/interface/memory.h>
  57#include <xen/hvc-console.h>
  58#include <xen/swiotlb-xen.h>
  59#include <xen/balloon.h>
  60#ifdef CONFIG_X86
  61#include <asm/xen/cpuid.h>
  62#endif
  63#include <xen/mem-reservation.h>
  64#include <asm/xen/hypercall.h>
  65#include <asm/xen/interface.h>
  66
  67#include <asm/sync_bitops.h>
  68
  69/* External tools reserve first few grant table entries. */
  70#define NR_RESERVED_ENTRIES 8
  71#define GNTTAB_LIST_END 0xffffffff
  72
  73static grant_ref_t **gnttab_list;
  74static unsigned int nr_grant_frames;
  75static int gnttab_free_count;
  76static grant_ref_t gnttab_free_head;
  77static DEFINE_SPINLOCK(gnttab_list_lock);
  78struct grant_frames xen_auto_xlat_grant_frames;
  79static unsigned int xen_gnttab_version;
  80module_param_named(version, xen_gnttab_version, uint, 0);
  81
  82static union {
  83	struct grant_entry_v1 *v1;
  84	union grant_entry_v2 *v2;
  85	void *addr;
  86} gnttab_shared;
  87
  88/*This is a structure of function pointers for grant table*/
  89struct gnttab_ops {
  90	/*
  91	 * Version of the grant interface.
  92	 */
  93	unsigned int version;
  94	/*
  95	 * Grant refs per grant frame.
  96	 */
  97	unsigned int grefs_per_grant_frame;
  98	/*
  99	 * Mapping a list of frames for storing grant entries. Frames parameter
 100	 * is used to store grant table address when grant table being setup,
 101	 * nr_gframes is the number of frames to map grant table. Returning
 102	 * GNTST_okay means success and negative value means failure.
 103	 */
 104	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
 105	/*
 106	 * Release a list of frames which are mapped in map_frames for grant
 107	 * entry status.
 108	 */
 109	void (*unmap_frames)(void);
 110	/*
 111	 * Introducing a valid entry into the grant table, granting the frame of
 112	 * this grant entry to domain for accessing or transfering. Ref
 113	 * parameter is reference of this introduced grant entry, domid is id of
 114	 * granted domain, frame is the page frame to be granted, and flags is
 115	 * status of the grant entry to be updated.
 116	 */
 117	void (*update_entry)(grant_ref_t ref, domid_t domid,
 118			     unsigned long frame, unsigned flags);
 119	/*
 120	 * Stop granting a grant entry to domain for accessing. Ref parameter is
 121	 * reference of a grant entry whose grant access will be stopped,
 122	 * readonly is not in use in this function. If the grant entry is
 123	 * currently mapped for reading or writing, just return failure(==0)
 124	 * directly and don't tear down the grant access. Otherwise, stop grant
 125	 * access for this entry and return success(==1).
 126	 */
 127	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
 128	/*
 129	 * Stop granting a grant entry to domain for transfer. Ref parameter is
 130	 * reference of a grant entry whose grant transfer will be stopped. If
 131	 * tranfer has not started, just reclaim the grant entry and return
 132	 * failure(==0). Otherwise, wait for the transfer to complete and then
 133	 * return the frame.
 134	 */
 135	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
 136	/*
 137	 * Query the status of a grant entry. Ref parameter is reference of
 138	 * queried grant entry, return value is the status of queried entry.
 139	 * Detailed status(writing/reading) can be gotten from the return value
 140	 * by bit operations.
 141	 */
 142	int (*query_foreign_access)(grant_ref_t ref);
 143};
 144
 145struct unmap_refs_callback_data {
 146	struct completion completion;
 147	int result;
 148};
 149
 150static const struct gnttab_ops *gnttab_interface;
 151
 152/* This reflects status of grant entries, so act as a global value. */
 153static grant_status_t *grstatus;
 154
 155static struct gnttab_free_callback *gnttab_free_callback_list;
 156
 157static int gnttab_expand(unsigned int req_entries);
 158
 159#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
 160#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 161
 162static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 163{
 164	return &gnttab_list[(entry) / RPP][(entry) % RPP];
 165}
 166/* This can be used as an l-value */
 167#define gnttab_entry(entry) (*__gnttab_entry(entry))
 168
 169static int get_free_entries(unsigned count)
 170{
 171	unsigned long flags;
 172	int ref, rc = 0;
 173	grant_ref_t head;
 174
 175	spin_lock_irqsave(&gnttab_list_lock, flags);
 176
 177	if ((gnttab_free_count < count) &&
 178	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
 179		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 180		return rc;
 181	}
 182
 183	ref = head = gnttab_free_head;
 184	gnttab_free_count -= count;
 185	while (count-- > 1)
 186		head = gnttab_entry(head);
 187	gnttab_free_head = gnttab_entry(head);
 188	gnttab_entry(head) = GNTTAB_LIST_END;
 189
 190	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 191
 192	return ref;
 193}
 194
 195static void do_free_callbacks(void)
 196{
 197	struct gnttab_free_callback *callback, *next;
 198
 199	callback = gnttab_free_callback_list;
 200	gnttab_free_callback_list = NULL;
 201
 202	while (callback != NULL) {
 203		next = callback->next;
 204		if (gnttab_free_count >= callback->count) {
 205			callback->next = NULL;
 206			callback->fn(callback->arg);
 207		} else {
 208			callback->next = gnttab_free_callback_list;
 209			gnttab_free_callback_list = callback;
 210		}
 211		callback = next;
 212	}
 213}
 214
 215static inline void check_free_callbacks(void)
 216{
 217	if (unlikely(gnttab_free_callback_list))
 218		do_free_callbacks();
 219}
 220
 221static void put_free_entry(grant_ref_t ref)
 222{
 223	unsigned long flags;
 224	spin_lock_irqsave(&gnttab_list_lock, flags);
 225	gnttab_entry(ref) = gnttab_free_head;
 226	gnttab_free_head = ref;
 227	gnttab_free_count++;
 228	check_free_callbacks();
 229	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 230}
 231
 232/*
 233 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 234 * Introducing a valid entry into the grant table:
 235 *  1. Write ent->domid.
 236 *  2. Write ent->frame:
 237 *      GTF_permit_access:   Frame to which access is permitted.
 238 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 239 *                           frame, or zero if none.
 240 *  3. Write memory barrier (WMB).
 241 *  4. Write ent->flags, inc. valid type.
 242 */
 243static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
 244				   unsigned long frame, unsigned flags)
 245{
 246	gnttab_shared.v1[ref].domid = domid;
 247	gnttab_shared.v1[ref].frame = frame;
 248	wmb();
 249	gnttab_shared.v1[ref].flags = flags;
 250}
 251
 252static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
 253				   unsigned long frame, unsigned int flags)
 254{
 255	gnttab_shared.v2[ref].hdr.domid = domid;
 256	gnttab_shared.v2[ref].full_page.frame = frame;
 257	wmb();	/* Hypervisor concurrent accesses. */
 258	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
 259}
 260
 261/*
 262 * Public grant-issuing interface functions
 263 */
 264void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
 265				     unsigned long frame, int readonly)
 266{
 267	gnttab_interface->update_entry(ref, domid, frame,
 268			   GTF_permit_access | (readonly ? GTF_readonly : 0));
 269}
 270EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
 271
 272int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 273				int readonly)
 274{
 275	int ref;
 276
 277	ref = get_free_entries(1);
 278	if (unlikely(ref < 0))
 279		return -ENOSPC;
 280
 281	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
 282
 283	return ref;
 284}
 285EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
 286
 287static int gnttab_query_foreign_access_v1(grant_ref_t ref)
 288{
 289	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
 290}
 291
 292static int gnttab_query_foreign_access_v2(grant_ref_t ref)
 293{
 294	return grstatus[ref] & (GTF_reading|GTF_writing);
 295}
 296
 297int gnttab_query_foreign_access(grant_ref_t ref)
 298{
 299	return gnttab_interface->query_foreign_access(ref);
 300}
 301EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
 302
 303static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 304{
 305	u16 flags, nflags;
 306	u16 *pflags;
 307
 308	pflags = &gnttab_shared.v1[ref].flags;
 309	nflags = *pflags;
 310	do {
 311		flags = nflags;
 312		if (flags & (GTF_reading|GTF_writing))
 313			return 0;
 314	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
 315
 316	return 1;
 317}
 318
 319static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
 320{
 321	gnttab_shared.v2[ref].hdr.flags = 0;
 322	mb();	/* Concurrent access by hypervisor. */
 323	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
 324		return 0;
 325	} else {
 326		/*
 327		 * The read of grstatus needs to have acquire semantics.
 328		 *  On x86, reads already have that, and we just need to
 329		 * protect against compiler reorderings.
 330		 * On other architectures we may need a full barrier.
 331		 */
 332#ifdef CONFIG_X86
 333		barrier();
 334#else
 335		mb();
 336#endif
 337	}
 338
 339	return 1;
 340}
 341
 342static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 343{
 344	return gnttab_interface->end_foreign_access_ref(ref, readonly);
 345}
 346
 347int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 348{
 349	if (_gnttab_end_foreign_access_ref(ref, readonly))
 350		return 1;
 351	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
 352	return 0;
 353}
 354EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 355
 356struct deferred_entry {
 357	struct list_head list;
 358	grant_ref_t ref;
 359	bool ro;
 360	uint16_t warn_delay;
 361	struct page *page;
 362};
 363static LIST_HEAD(deferred_list);
 364static void gnttab_handle_deferred(struct timer_list *);
 365static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
 366
 367static void gnttab_handle_deferred(struct timer_list *unused)
 368{
 369	unsigned int nr = 10;
 370	struct deferred_entry *first = NULL;
 371	unsigned long flags;
 372
 373	spin_lock_irqsave(&gnttab_list_lock, flags);
 374	while (nr--) {
 375		struct deferred_entry *entry
 376			= list_first_entry(&deferred_list,
 377					   struct deferred_entry, list);
 378
 379		if (entry == first)
 380			break;
 381		list_del(&entry->list);
 382		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 383		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
 384			put_free_entry(entry->ref);
 385			if (entry->page) {
 386				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
 387					 entry->ref, page_to_pfn(entry->page));
 388				put_page(entry->page);
 389			} else
 390				pr_info("freeing g.e. %#x\n", entry->ref);
 391			kfree(entry);
 392			entry = NULL;
 393		} else {
 394			if (!--entry->warn_delay)
 395				pr_info("g.e. %#x still pending\n", entry->ref);
 396			if (!first)
 397				first = entry;
 398		}
 399		spin_lock_irqsave(&gnttab_list_lock, flags);
 400		if (entry)
 401			list_add_tail(&entry->list, &deferred_list);
 402		else if (list_empty(&deferred_list))
 403			break;
 404	}
 405	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
 406		deferred_timer.expires = jiffies + HZ;
 407		add_timer(&deferred_timer);
 408	}
 409	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 410}
 411
 412static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
 413				struct page *page)
 414{
 415	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 416	const char *what = KERN_WARNING "leaking";
 417
 418	if (entry) {
 419		unsigned long flags;
 420
 421		entry->ref = ref;
 422		entry->ro = readonly;
 423		entry->page = page;
 424		entry->warn_delay = 60;
 425		spin_lock_irqsave(&gnttab_list_lock, flags);
 426		list_add_tail(&entry->list, &deferred_list);
 427		if (!timer_pending(&deferred_timer)) {
 428			deferred_timer.expires = jiffies + HZ;
 429			add_timer(&deferred_timer);
 430		}
 431		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 432		what = KERN_DEBUG "deferring";
 433	}
 434	printk("%s g.e. %#x (pfn %#lx)\n",
 435	       what, ref, page ? page_to_pfn(page) : -1);
 436}
 437
 438void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 439			       unsigned long page)
 440{
 441	if (gnttab_end_foreign_access_ref(ref, readonly)) {
 442		put_free_entry(ref);
 443		if (page != 0)
 444			put_page(virt_to_page(page));
 445	} else
 446		gnttab_add_deferred(ref, readonly,
 447				    page ? virt_to_page(page) : NULL);
 448}
 449EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
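
/*
 * Illustrative sketch, not part of this file: revoking the grant made in
 * the earlier example_share_page() sketch.  Passing the page's kernel
 * virtual address lets gnttab_end_foreign_access() free the page itself,
 * or defer both the grant entry and the page if the remote domain still
 * holds a mapping.
 */
static void example_unshare_page(grant_ref_t gref, struct page *page)
{
	/* readonly == 0 must match the access mode originally granted. */
	gnttab_end_foreign_access(gref, 0, (unsigned long)page_address(page));
}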
 450
 451int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
 452{
 453	int ref;
 454
 455	ref = get_free_entries(1);
 456	if (unlikely(ref < 0))
 457		return -ENOSPC;
 458	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
 459
 460	return ref;
 461}
 462EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
 463
 464void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
 465				       unsigned long pfn)
 466{
 467	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
 468}
 469EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
 470
 471static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
 472{
 473	unsigned long frame;
 474	u16           flags;
 475	u16          *pflags;
 476
 477	pflags = &gnttab_shared.v1[ref].flags;
 478
 479	/*
  480	 * If the transfer has not yet started, try to reclaim the grant
  481	 * reference and return failure (== 0).
 482	 */
 483	while (!((flags = *pflags) & GTF_transfer_committed)) {
 484		if (sync_cmpxchg(pflags, flags, 0) == flags)
 485			return 0;
 486		cpu_relax();
 487	}
 488
 489	/* If a transfer is in progress then wait until it is completed. */
 490	while (!(flags & GTF_transfer_completed)) {
 491		flags = *pflags;
 492		cpu_relax();
 493	}
 494
 495	rmb();	/* Read the frame number /after/ reading completion status. */
 496	frame = gnttab_shared.v1[ref].frame;
 497	BUG_ON(frame == 0);
 498
 499	return frame;
 500}
 501
 502static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
 503{
 504	unsigned long frame;
 505	u16           flags;
 506	u16          *pflags;
 507
 508	pflags = &gnttab_shared.v2[ref].hdr.flags;
 509
 510	/*
  511	 * If the transfer has not yet started, try to reclaim the grant
  512	 * reference and return failure (== 0).
 513	 */
 514	while (!((flags = *pflags) & GTF_transfer_committed)) {
 515		if (sync_cmpxchg(pflags, flags, 0) == flags)
 516			return 0;
 517		cpu_relax();
 518	}
 519
 520	/* If a transfer is in progress then wait until it is completed. */
 521	while (!(flags & GTF_transfer_completed)) {
 522		flags = *pflags;
 523		cpu_relax();
 524	}
 525
 526	rmb();  /* Read the frame number /after/ reading completion status. */
 527	frame = gnttab_shared.v2[ref].full_page.frame;
 528	BUG_ON(frame == 0);
 529
 530	return frame;
 531}
 532
 533unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
 534{
 535	return gnttab_interface->end_foreign_transfer_ref(ref);
 536}
 537EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
 538
 539unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
 540{
 541	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
 542	put_free_entry(ref);
 543	return frame;
 544}
 545EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
 546
 547void gnttab_free_grant_reference(grant_ref_t ref)
 548{
 549	put_free_entry(ref);
 550}
 551EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
 552
 553void gnttab_free_grant_references(grant_ref_t head)
 554{
 555	grant_ref_t ref;
 556	unsigned long flags;
 557	int count = 1;
 558	if (head == GNTTAB_LIST_END)
 559		return;
 560	spin_lock_irqsave(&gnttab_list_lock, flags);
 561	ref = head;
 562	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
 563		ref = gnttab_entry(ref);
 564		count++;
 565	}
 566	gnttab_entry(ref) = gnttab_free_head;
 567	gnttab_free_head = head;
 568	gnttab_free_count += count;
 569	check_free_callbacks();
 570	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 571}
 572EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 573
 574int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
 575{
 576	int h = get_free_entries(count);
 577
 578	if (h < 0)
 579		return -ENOSPC;
 580
 581	*head = h;
 582
 583	return 0;
 584}
 585EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
 586
 587int gnttab_empty_grant_references(const grant_ref_t *private_head)
 588{
 589	return (*private_head == GNTTAB_LIST_END);
 590}
 591EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
 592
 593int gnttab_claim_grant_reference(grant_ref_t *private_head)
 594{
 595	grant_ref_t g = *private_head;
 596	if (unlikely(g == GNTTAB_LIST_END))
 597		return -ENOSPC;
 598	*private_head = gnttab_entry(g);
 599	return g;
 600}
 601EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
 602
 603void gnttab_release_grant_reference(grant_ref_t *private_head,
 604				    grant_ref_t release)
 605{
 606	gnttab_entry(release) = *private_head;
 607	*private_head = release;
 608}
 609EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
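
/*
 * Illustrative sketch, not part of this file: ring-based frontends usually
 * reserve a private pool of references up front and then claim/release
 * them as requests come and go.  All example_* names and the pool size
 * are hypothetical.
 */
#define EXAMPLE_POOL_SIZE 16

static grant_ref_t example_gref_head;

static int example_reserve_pool(void)
{
	/* May return -ENOSPC if the grant table cannot grow far enough. */
	return gnttab_alloc_grant_references(EXAMPLE_POOL_SIZE,
					     &example_gref_head);
}

static int example_grant_from_pool(domid_t domid, unsigned long gfn)
{
	int ref = gnttab_claim_grant_reference(&example_gref_head);

	if (ref < 0)
		return ref;	/* private pool exhausted */
	gnttab_grant_foreign_access_ref(ref, domid, gfn, 0);
	return ref;
}

static void example_drop_pool(void)
{
	/* Returns every still-unclaimed reference to the global free list. */
	gnttab_free_grant_references(example_gref_head);
}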
 610
 611void gnttab_request_free_callback(struct gnttab_free_callback *callback,
 612				  void (*fn)(void *), void *arg, u16 count)
 613{
 614	unsigned long flags;
 615	struct gnttab_free_callback *cb;
 616
 617	spin_lock_irqsave(&gnttab_list_lock, flags);
 618
 619	/* Check if the callback is already on the list */
 620	cb = gnttab_free_callback_list;
 621	while (cb) {
 622		if (cb == callback)
 623			goto out;
 624		cb = cb->next;
 625	}
 626
 627	callback->fn = fn;
 628	callback->arg = arg;
 629	callback->count = count;
 630	callback->next = gnttab_free_callback_list;
 631	gnttab_free_callback_list = callback;
 632	check_free_callbacks();
 633out:
 634	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 635}
 636EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
 637
 638void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
 639{
 640	struct gnttab_free_callback **pcb;
 641	unsigned long flags;
 642
 643	spin_lock_irqsave(&gnttab_list_lock, flags);
 644	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
 645		if (*pcb == callback) {
 646			*pcb = callback->next;
 647			break;
 648		}
 649	}
 650	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 651}
 652EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
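
/*
 * Illustrative sketch, not part of this file: after an -ENOSPC, a driver
 * can ask to be notified once enough references are free again.  The
 * callback is invoked with gnttab_list_lock held, so it must not sleep;
 * real users typically just wake a worker here.  The example_* names are
 * hypothetical.
 */
static struct gnttab_free_callback example_callback;

static void example_grefs_available(void *arg)
{
	/* Runs under gnttab_list_lock: e.g. schedule_work(arg). */
}

static void example_wait_for_grefs(void *arg)
{
	gnttab_request_free_callback(&example_callback,
				     example_grefs_available, arg, 8);
}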
 653
 654static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
 655{
 656	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
 657	       align;
 658}
 659
 660static int grow_gnttab_list(unsigned int more_frames)
 661{
 662	unsigned int new_nr_grant_frames, extra_entries, i;
 663	unsigned int nr_glist_frames, new_nr_glist_frames;
 664	unsigned int grefs_per_frame;
 665
 666	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 667
 668	new_nr_grant_frames = nr_grant_frames + more_frames;
 669	extra_entries = more_frames * grefs_per_frame;
 670
 671	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
 672	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
 673	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
 674		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
 675		if (!gnttab_list[i])
 676			goto grow_nomem;
 677	}
  678
 680	for (i = grefs_per_frame * nr_grant_frames;
 681	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
 682		gnttab_entry(i) = i + 1;
 683
 684	gnttab_entry(i) = gnttab_free_head;
 685	gnttab_free_head = grefs_per_frame * nr_grant_frames;
 686	gnttab_free_count += extra_entries;
 687
 688	nr_grant_frames = new_nr_grant_frames;
 689
 690	check_free_callbacks();
 691
 692	return 0;
 693
 694grow_nomem:
 695	while (i-- > nr_glist_frames)
 696		free_page((unsigned long) gnttab_list[i]);
 697	return -ENOMEM;
 698}
 699
 700static unsigned int __max_nr_grant_frames(void)
 701{
 702	struct gnttab_query_size query;
 703	int rc;
 704
 705	query.dom = DOMID_SELF;
 706
 707	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
 708	if ((rc < 0) || (query.status != GNTST_okay))
 709		return 4; /* Legacy max supported number of frames */
 710
 711	return query.max_nr_frames;
 712}
 713
 714unsigned int gnttab_max_grant_frames(void)
 715{
 716	unsigned int xen_max = __max_nr_grant_frames();
 717	static unsigned int boot_max_nr_grant_frames;
 718
 719	/* First time, initialize it properly. */
 720	if (!boot_max_nr_grant_frames)
 721		boot_max_nr_grant_frames = __max_nr_grant_frames();
 722
 723	if (xen_max > boot_max_nr_grant_frames)
 724		return boot_max_nr_grant_frames;
 725	return xen_max;
 726}
 727EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 728
 729int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 730{
 731	xen_pfn_t *pfn;
 732	unsigned int max_nr_gframes = __max_nr_grant_frames();
 733	unsigned int i;
 734	void *vaddr;
 735
 736	if (xen_auto_xlat_grant_frames.count)
 737		return -EINVAL;
 738
 739	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
 740	if (vaddr == NULL) {
 741		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
 742			&addr);
 743		return -ENOMEM;
 744	}
 745	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
 746	if (!pfn) {
 747		xen_unmap(vaddr);
 748		return -ENOMEM;
 749	}
 750	for (i = 0; i < max_nr_gframes; i++)
 751		pfn[i] = XEN_PFN_DOWN(addr) + i;
 752
 753	xen_auto_xlat_grant_frames.vaddr = vaddr;
 754	xen_auto_xlat_grant_frames.pfn = pfn;
 755	xen_auto_xlat_grant_frames.count = max_nr_gframes;
 756
 757	return 0;
 758}
 759EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
 760
 761void gnttab_free_auto_xlat_frames(void)
 762{
 763	if (!xen_auto_xlat_grant_frames.count)
 764		return;
 765	kfree(xen_auto_xlat_grant_frames.pfn);
 766	xen_unmap(xen_auto_xlat_grant_frames.vaddr);
 767
 768	xen_auto_xlat_grant_frames.pfn = NULL;
 769	xen_auto_xlat_grant_frames.count = 0;
 770	xen_auto_xlat_grant_frames.vaddr = NULL;
 771}
 772EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 773
 774int gnttab_pages_set_private(int nr_pages, struct page **pages)
 775{
 776	int i;
 777
 778	for (i = 0; i < nr_pages; i++) {
 779#if BITS_PER_LONG < 64
 780		struct xen_page_foreign *foreign;
 781
 782		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
 783		if (!foreign)
 784			return -ENOMEM;
 785
 786		set_page_private(pages[i], (unsigned long)foreign);
 787#endif
 788		SetPagePrivate(pages[i]);
 789	}
 790
 791	return 0;
 792}
 793EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
 794
 795/**
  796 * gnttab_alloc_pages - allocate pages suitable for mapping foreign grants into
  797 * @nr_pages: number of pages to allocate
  798 * @pages: returns the allocated pages
 799 */
 800int gnttab_alloc_pages(int nr_pages, struct page **pages)
 801{
 802	int ret;
 803
 804	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
 805	if (ret < 0)
 806		return ret;
 807
 808	ret = gnttab_pages_set_private(nr_pages, pages);
 809	if (ret < 0)
 810		gnttab_free_pages(nr_pages, pages);
 811
 812	return ret;
 813}
 814EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
 815
 816#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
 817static inline void cache_init(struct gnttab_page_cache *cache)
 818{
 819	cache->pages = NULL;
 820}
 821
 822static inline bool cache_empty(struct gnttab_page_cache *cache)
 823{
 824	return !cache->pages;
 825}
 826
 827static inline struct page *cache_deq(struct gnttab_page_cache *cache)
 828{
 829	struct page *page;
 830
 831	page = cache->pages;
 832	cache->pages = page->zone_device_data;
 833
 834	return page;
 835}
 836
 837static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
 838{
 839	page->zone_device_data = cache->pages;
 840	cache->pages = page;
 841}
 842#else
 843static inline void cache_init(struct gnttab_page_cache *cache)
 844{
 845	INIT_LIST_HEAD(&cache->pages);
 846}
 847
 848static inline bool cache_empty(struct gnttab_page_cache *cache)
 849{
 850	return list_empty(&cache->pages);
 851}
 852
 853static inline struct page *cache_deq(struct gnttab_page_cache *cache)
 854{
 855	struct page *page;
 856
 857	page = list_first_entry(&cache->pages, struct page, lru);
 858	list_del(&page->lru);
 859
 860	return page;
 861}
 862
 863static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
 864{
 865	list_add(&page->lru, &cache->pages);
 866}
 867#endif
 868
 869void gnttab_page_cache_init(struct gnttab_page_cache *cache)
 870{
 871	spin_lock_init(&cache->lock);
 872	cache_init(cache);
 873	cache->num_pages = 0;
 874}
 875EXPORT_SYMBOL_GPL(gnttab_page_cache_init);
 876
 877int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
 878{
 879	unsigned long flags;
 880
 881	spin_lock_irqsave(&cache->lock, flags);
 882
 883	if (cache_empty(cache)) {
 884		spin_unlock_irqrestore(&cache->lock, flags);
 885		return gnttab_alloc_pages(1, page);
 886	}
 887
 888	page[0] = cache_deq(cache);
 889	cache->num_pages--;
 890
 891	spin_unlock_irqrestore(&cache->lock, flags);
 892
 893	return 0;
 894}
 895EXPORT_SYMBOL_GPL(gnttab_page_cache_get);
 896
 897void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
 898			   unsigned int num)
 899{
 900	unsigned long flags;
 901	unsigned int i;
 902
 903	spin_lock_irqsave(&cache->lock, flags);
 904
 905	for (i = 0; i < num; i++)
 906		cache_enq(cache, page[i]);
 907	cache->num_pages += num;
 908
 909	spin_unlock_irqrestore(&cache->lock, flags);
 910}
 911EXPORT_SYMBOL_GPL(gnttab_page_cache_put);
 912
 913void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
 914{
 915	struct page *page[10];
 916	unsigned int i = 0;
 917	unsigned long flags;
 918
 919	spin_lock_irqsave(&cache->lock, flags);
 920
 921	while (cache->num_pages > num) {
 922		page[i] = cache_deq(cache);
 923		cache->num_pages--;
 924		if (++i == ARRAY_SIZE(page)) {
 925			spin_unlock_irqrestore(&cache->lock, flags);
 926			gnttab_free_pages(i, page);
 927			i = 0;
 928			spin_lock_irqsave(&cache->lock, flags);
 929		}
 930	}
 931
 932	spin_unlock_irqrestore(&cache->lock, flags);
 933
 934	if (i != 0)
 935		gnttab_free_pages(i, page);
 936}
 937EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
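
/*
 * Illustrative sketch, not part of this file: a backend keeping a small
 * cache of grant-mappable pages to avoid hitting the allocator on every
 * request.  Assumes gnttab_page_cache_init(&example_cache) ran at probe
 * time; example_cache and the watermark of 32 pages are hypothetical.
 */
static struct gnttab_page_cache example_cache;

static int example_get_page(struct page **page)
{
	/* Reuses a cached page, or falls back to gnttab_alloc_pages(). */
	return gnttab_page_cache_get(&example_cache, page);
}

static void example_put_page(struct page *page)
{
	gnttab_page_cache_put(&example_cache, &page, 1);
	/* Trim the cache back down to the chosen watermark. */
	gnttab_page_cache_shrink(&example_cache, 32);
}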
 938
 939void gnttab_pages_clear_private(int nr_pages, struct page **pages)
 940{
 941	int i;
 942
 943	for (i = 0; i < nr_pages; i++) {
 944		if (PagePrivate(pages[i])) {
 945#if BITS_PER_LONG < 64
 946			kfree((void *)page_private(pages[i]));
 947#endif
 948			ClearPagePrivate(pages[i]);
 949		}
 950	}
 951}
 952EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
 953
 954/**
 955 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
  956 * @nr_pages: number of pages to free
 957 * @pages: the pages
 958 */
 959void gnttab_free_pages(int nr_pages, struct page **pages)
 960{
 961	gnttab_pages_clear_private(nr_pages, pages);
 962	xen_free_unpopulated_pages(nr_pages, pages);
 963}
 964EXPORT_SYMBOL_GPL(gnttab_free_pages);
 965
 966#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 967/**
  968 * gnttab_dma_alloc_pages - allocate DMA-able pages suitable for mapping foreign grants into
 969 * @args: arguments to the function
 970 */
 971int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
 972{
 973	unsigned long pfn, start_pfn;
 974	size_t size;
 975	int i, ret;
 976
 977	size = args->nr_pages << PAGE_SHIFT;
 978	if (args->coherent)
 979		args->vaddr = dma_alloc_coherent(args->dev, size,
 980						 &args->dev_bus_addr,
 981						 GFP_KERNEL | __GFP_NOWARN);
 982	else
 983		args->vaddr = dma_alloc_wc(args->dev, size,
 984					   &args->dev_bus_addr,
 985					   GFP_KERNEL | __GFP_NOWARN);
 986	if (!args->vaddr) {
 987		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
 988		return -ENOMEM;
 989	}
 990
 991	start_pfn = __phys_to_pfn(args->dev_bus_addr);
 992	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
 993			pfn++, i++) {
 994		struct page *page = pfn_to_page(pfn);
 995
 996		args->pages[i] = page;
 997		args->frames[i] = xen_page_to_gfn(page);
 998		xenmem_reservation_scrub_page(page);
 999	}
1000
1001	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
1002
1003	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
1004	if (ret != args->nr_pages) {
1005		pr_debug("Failed to decrease reservation for DMA buffer\n");
1006		ret = -EFAULT;
1007		goto fail;
1008	}
1009
1010	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
1011	if (ret < 0)
1012		goto fail;
1013
1014	return 0;
1015
1016fail:
1017	gnttab_dma_free_pages(args);
1018	return ret;
1019}
1020EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
1021
1022/**
1023 * gnttab_dma_free_pages - free DMAable pages
1024 * @args: arguments to the function
1025 */
1026int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
1027{
1028	size_t size;
1029	int i, ret;
1030
1031	gnttab_pages_clear_private(args->nr_pages, args->pages);
1032
1033	for (i = 0; i < args->nr_pages; i++)
1034		args->frames[i] = page_to_xen_pfn(args->pages[i]);
1035
1036	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
1037	if (ret != args->nr_pages) {
1038		pr_debug("Failed to increase reservation for DMA buffer\n");
1039		ret = -EFAULT;
1040	} else {
1041		ret = 0;
1042	}
1043
1044	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
1045					     args->frames);
1046
1047	size = args->nr_pages << PAGE_SHIFT;
1048	if (args->coherent)
1049		dma_free_coherent(args->dev, size,
1050				  args->vaddr, args->dev_bus_addr);
1051	else
1052		dma_free_wc(args->dev, size,
1053			    args->vaddr, args->dev_bus_addr);
1054	return ret;
1055}
1056EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
1057#endif
1058
1059/* Handling of paged out grant targets (GNTST_eagain) */
1060#define MAX_DELAY 256
1061static inline void
1062gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
1063						const char *func)
1064{
 1065	unsigned int delay = 1;
1066
1067	do {
1068		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
1069		if (*status == GNTST_eagain)
1070			msleep(delay++);
1071	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
1072
1073	if (delay >= MAX_DELAY) {
1074		pr_err("%s: %s eagain grant\n", func, current->comm);
1075		*status = GNTST_bad_page;
1076	}
1077}
1078
 1079void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned int count)
1080{
1081	struct gnttab_map_grant_ref *op;
1082
1083	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
1084		BUG();
1085	for (op = batch; op < batch + count; op++)
1086		if (op->status == GNTST_eagain)
1087			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
1088						&op->status, __func__);
1089}
1090EXPORT_SYMBOL_GPL(gnttab_batch_map);
1091
 1092void gnttab_batch_copy(struct gnttab_copy *batch, unsigned int count)
1093{
1094	struct gnttab_copy *op;
1095
1096	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
1097		BUG();
1098	for (op = batch; op < batch + count; op++)
1099		if (op->status == GNTST_eagain)
1100			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
1101						&op->status, __func__);
1102}
1103EXPORT_SYMBOL_GPL(gnttab_batch_copy);
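
/*
 * Illustrative sketch, not part of this file: copying len bytes out of a
 * foreign grant into a local page with a single GNTTABOP_copy element;
 * gnttab_batch_copy() transparently retries GNTST_eagain for us.  The
 * function name and parameters are hypothetical.
 */
static int example_copy_from_gref(domid_t domid, grant_ref_t gref,
				  struct page *dst, uint16_t len)
{
	struct gnttab_copy op = {
		.source.u.ref	= gref,
		.source.domid	= domid,
		.dest.u.gmfn	= xen_page_to_gfn(dst),
		.dest.domid	= DOMID_SELF,
		.len		= len,
		.flags		= GNTCOPY_source_gref,
	};

	gnttab_batch_copy(&op, 1);

	return op.status == GNTST_okay ? 0 : -EIO;
}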
1104
1105void gnttab_foreach_grant_in_range(struct page *page,
1106				   unsigned int offset,
1107				   unsigned int len,
1108				   xen_grant_fn_t fn,
1109				   void *data)
1110{
1111	unsigned int goffset;
1112	unsigned int glen;
1113	unsigned long xen_pfn;
1114
1115	len = min_t(unsigned int, PAGE_SIZE - offset, len);
1116	goffset = xen_offset_in_page(offset);
1117
1118	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
1119
1120	while (len) {
1121		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
1122		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1123
1124		goffset = 0;
1125		xen_pfn++;
1126		len -= glen;
1127	}
1128}
1129EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
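
/*
 * Illustrative sketch, not part of this file: counting the grant-sized
 * (XEN_PAGE_SIZE) chunks a buffer spans, e.g. to size a ring request up
 * front.  On arm64 with 64 KiB kernel pages one page covers 16 grants.
 * The example_* names are hypothetical.
 */
static void example_count_chunk(unsigned long gfn, unsigned int offset,
				unsigned int len, void *data)
{
	(*(unsigned int *)data)++;
}

static unsigned int example_grants_for_buffer(struct page *page,
					      unsigned int offset,
					      unsigned int len)
{
	unsigned int chunks = 0;

	gnttab_foreach_grant_in_range(page, offset, len,
				      example_count_chunk, &chunks);
	return chunks;
}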
1130
1131void gnttab_foreach_grant(struct page **pages,
1132			  unsigned int nr_grefs,
1133			  xen_grant_fn_t fn,
1134			  void *data)
1135{
1136	unsigned int goffset = 0;
1137	unsigned long xen_pfn = 0;
1138	unsigned int i;
1139
1140	for (i = 0; i < nr_grefs; i++) {
1141		if ((i % XEN_PFN_PER_PAGE) == 0) {
1142			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1143			goffset = 0;
1144		}
1145
1146		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1147
1148		goffset += XEN_PAGE_SIZE;
1149		xen_pfn++;
1150	}
1151}
1152
1153int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1154		    struct gnttab_map_grant_ref *kmap_ops,
1155		    struct page **pages, unsigned int count)
1156{
1157	int i, ret;
1158
1159	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1160	if (ret)
1161		return ret;
1162
1163	for (i = 0; i < count; i++) {
1164		switch (map_ops[i].status) {
1165		case GNTST_okay:
1166		{
1167			struct xen_page_foreign *foreign;
1168
1169			SetPageForeign(pages[i]);
1170			foreign = xen_page_foreign(pages[i]);
1171			foreign->domid = map_ops[i].dom;
1172			foreign->gref = map_ops[i].ref;
1173			break;
1174		}
1175
1176		case GNTST_no_device_space:
1177			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1178			break;
1179
1180		case GNTST_eagain:
1181			/* Retry eagain maps */
1182			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1183						map_ops + i,
1184						&map_ops[i].status, __func__);
1185			/* Test status in next loop iteration. */
1186			i--;
1187			break;
1188
1189		default:
1190			break;
1191		}
1192	}
1193
1194	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1195}
1196EXPORT_SYMBOL_GPL(gnttab_map_refs);
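
/*
 * Illustrative sketch, not part of this file: a backend mapping a single
 * foreign grant into a freshly allocated local page.  Real callers batch
 * many operations; kmap_ops is only needed when a PV kernel mapping must
 * be updated as well.  example_map_one() is a hypothetical name.
 */
static int example_map_one(domid_t domid, grant_ref_t gref,
			   struct page **page, grant_handle_t *handle)
{
	struct gnttab_map_grant_ref op;
	int err;

	err = gnttab_alloc_pages(1, page);
	if (err)
		return err;

	gnttab_set_map_op(&op, (unsigned long)pfn_to_kaddr(page_to_pfn(*page)),
			  GNTMAP_host_map, gref, domid);
	err = gnttab_map_refs(&op, NULL, page, 1);
	if (err || op.status != GNTST_okay) {
		gnttab_free_pages(1, page);
		return err ?: -EIO;
	}

	*handle = op.handle;	/* needed later for the unmap operation */
	return 0;
}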
1197
1198int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1199		      struct gnttab_unmap_grant_ref *kunmap_ops,
1200		      struct page **pages, unsigned int count)
1201{
1202	unsigned int i;
1203	int ret;
1204
1205	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1206	if (ret)
1207		return ret;
1208
1209	for (i = 0; i < count; i++)
1210		ClearPageForeign(pages[i]);
1211
1212	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1213}
1214EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
1215
1216#define GNTTAB_UNMAP_REFS_DELAY 5
1217
 1218static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item);
1219
1220static void gnttab_unmap_work(struct work_struct *work)
1221{
1222	struct gntab_unmap_queue_data
 1223		*unmap_data = container_of(work,
1224					   struct gntab_unmap_queue_data,
1225					   gnttab_work.work);
1226	if (unmap_data->age != UINT_MAX)
1227		unmap_data->age++;
1228	__gnttab_unmap_refs_async(unmap_data);
1229}
1230
 1231static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
1232{
1233	int ret;
1234	int pc;
1235
1236	for (pc = 0; pc < item->count; pc++) {
1237		if (page_count(item->pages[pc]) > 1) {
1238			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1239			schedule_delayed_work(&item->gnttab_work,
1240					      msecs_to_jiffies(delay));
1241			return;
1242		}
1243	}
1244
1245	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1246				item->pages, item->count);
1247	item->done(ret, item);
1248}
1249
 1250void gnttab_unmap_refs_async(struct gntab_unmap_queue_data *item)
1251{
1252	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1253	item->age = 0;
1254
1255	__gnttab_unmap_refs_async(item);
1256}
1257EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1258
1259static void unmap_refs_callback(int result,
1260		struct gntab_unmap_queue_data *data)
1261{
1262	struct unmap_refs_callback_data *d = data->data;
1263
1264	d->result = result;
1265	complete(&d->completion);
1266}
1267
1268int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1269{
1270	struct unmap_refs_callback_data data;
1271
1272	init_completion(&data.completion);
1273	item->data = &data;
1274	item->done = &unmap_refs_callback;
1275	gnttab_unmap_refs_async(item);
1276	wait_for_completion(&data.completion);
1277
1278	return data.result;
1279}
1280EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
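
/*
 * Illustrative sketch, not part of this file: tearing down previously
 * mapped grants synchronously.  The caller provides unmap ops prepared
 * with gnttab_set_unmap_op(); gnttab_unmap_refs_sync() keeps retrying
 * while extra page references remain, then returns the batch status.
 * example_unmap_sync() is a hypothetical name.
 */
static int example_unmap_sync(struct gnttab_unmap_grant_ref *unmap_ops,
			      struct page **pages, unsigned int count)
{
	struct gntab_unmap_queue_data q = {
		.unmap_ops	= unmap_ops,
		.kunmap_ops	= NULL,
		.pages		= pages,
		.count		= count,
	};

	return gnttab_unmap_refs_sync(&q);
}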
1281
1282static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1283{
1284	return gnttab_frames(nr_grant_frames, SPP);
1285}
1286
1287static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1288{
1289	int rc;
1290
1291	rc = arch_gnttab_map_shared(frames, nr_gframes,
1292				    gnttab_max_grant_frames(),
1293				    &gnttab_shared.addr);
1294	BUG_ON(rc);
1295
1296	return 0;
1297}
1298
1299static void gnttab_unmap_frames_v1(void)
1300{
1301	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1302}
1303
1304static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1305{
1306	uint64_t *sframes;
1307	unsigned int nr_sframes;
1308	struct gnttab_get_status_frames getframes;
1309	int rc;
1310
1311	nr_sframes = nr_status_frames(nr_gframes);
1312
 1313	/* No need for kzalloc as the following GNTTABOP_get_status_frames
 1314	 * hypercall initializes it.
 1315	 */
1316	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1317	if (!sframes)
1318		return -ENOMEM;
1319
1320	getframes.dom        = DOMID_SELF;
1321	getframes.nr_frames  = nr_sframes;
1322	set_xen_guest_handle(getframes.frame_list, sframes);
1323
1324	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1325				       &getframes, 1);
1326	if (rc == -ENOSYS) {
1327		kfree(sframes);
1328		return -ENOSYS;
1329	}
1330
1331	BUG_ON(rc || getframes.status);
1332
1333	rc = arch_gnttab_map_status(sframes, nr_sframes,
1334				    nr_status_frames(gnttab_max_grant_frames()),
1335				    &grstatus);
1336	BUG_ON(rc);
1337	kfree(sframes);
1338
1339	rc = arch_gnttab_map_shared(frames, nr_gframes,
1340				    gnttab_max_grant_frames(),
1341				    &gnttab_shared.addr);
1342	BUG_ON(rc);
1343
1344	return 0;
1345}
1346
1347static void gnttab_unmap_frames_v2(void)
1348{
1349	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1350	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1351}
1352
1353static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1354{
1355	struct gnttab_setup_table setup;
1356	xen_pfn_t *frames;
1357	unsigned int nr_gframes = end_idx + 1;
1358	int rc;
1359
1360	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1361		struct xen_add_to_physmap xatp;
1362		unsigned int i = end_idx;
1363		rc = 0;
1364		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1365		/*
1366		 * Loop backwards, so that the first hypercall has the largest
1367		 * index, ensuring that the table will grow only once.
1368		 */
1369		do {
1370			xatp.domid = DOMID_SELF;
1371			xatp.idx = i;
1372			xatp.space = XENMAPSPACE_grant_table;
1373			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1374			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1375			if (rc != 0) {
1376				pr_warn("grant table add_to_physmap failed, err=%d\n",
1377					rc);
1378				break;
1379			}
1380		} while (i-- > start_idx);
1381
1382		return rc;
1383	}
1384
 1385	/* No need for kzalloc as the following GNTTABOP_setup_table
 1386	 * hypercall initializes it.
 1387	 */
 1388	frames = kmalloc_array(nr_gframes, sizeof(*frames), GFP_ATOMIC);
1389	if (!frames)
1390		return -ENOMEM;
1391
1392	setup.dom        = DOMID_SELF;
1393	setup.nr_frames  = nr_gframes;
1394	set_xen_guest_handle(setup.frame_list, frames);
1395
1396	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1397	if (rc == -ENOSYS) {
1398		kfree(frames);
1399		return -ENOSYS;
1400	}
1401
1402	BUG_ON(rc || setup.status);
1403
1404	rc = gnttab_interface->map_frames(frames, nr_gframes);
1405
1406	kfree(frames);
1407
1408	return rc;
1409}
1410
1411static const struct gnttab_ops gnttab_v1_ops = {
1412	.version			= 1,
1413	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1414					  sizeof(struct grant_entry_v1),
1415	.map_frames			= gnttab_map_frames_v1,
1416	.unmap_frames			= gnttab_unmap_frames_v1,
1417	.update_entry			= gnttab_update_entry_v1,
1418	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1419	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
1420	.query_foreign_access		= gnttab_query_foreign_access_v1,
1421};
1422
1423static const struct gnttab_ops gnttab_v2_ops = {
1424	.version			= 2,
1425	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1426					  sizeof(union grant_entry_v2),
1427	.map_frames			= gnttab_map_frames_v2,
1428	.unmap_frames			= gnttab_unmap_frames_v2,
1429	.update_entry			= gnttab_update_entry_v2,
1430	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1431	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
1432	.query_foreign_access		= gnttab_query_foreign_access_v2,
1433};
1434
1435static bool gnttab_need_v2(void)
1436{
1437#ifdef CONFIG_X86
1438	uint32_t base, width;
1439
1440	if (xen_pv_domain()) {
1441		base = xen_cpuid_base();
1442		if (cpuid_eax(base) < 5)
1443			return false;	/* Information not available, use V1. */
1444		width = cpuid_ebx(base + 5) &
1445			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1446		return width > 32 + PAGE_SHIFT;
1447	}
1448#endif
1449	return !!(max_possible_pfn >> 32);
1450}
1451
1452static void gnttab_request_version(void)
1453{
1454	long rc;
1455	struct gnttab_set_version gsv;
1456
1457	if (gnttab_need_v2())
1458		gsv.version = 2;
1459	else
1460		gsv.version = 1;
1461
1462	/* Boot parameter overrides automatic selection. */
1463	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1464		gsv.version = xen_gnttab_version;
1465
1466	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1467	if (rc == 0 && gsv.version == 2)
1468		gnttab_interface = &gnttab_v2_ops;
1469	else
1470		gnttab_interface = &gnttab_v1_ops;
1471	pr_info("Grant tables using version %d layout\n",
1472		gnttab_interface->version);
1473}
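
/*
 * Usage note, not part of this file: the automatic choice above can be
 * overridden from the kernel command line via the module parameter
 * declared near the top of this file, e.g. "grant_table.version=1" to
 * force the v1 layout on a host where v2 would otherwise be selected.
 */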
1474
1475static int gnttab_setup(void)
1476{
1477	unsigned int max_nr_gframes;
1478
1479	max_nr_gframes = gnttab_max_grant_frames();
1480	if (max_nr_gframes < nr_grant_frames)
1481		return -ENOSYS;
1482
1483	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1484		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1485		if (gnttab_shared.addr == NULL) {
1486			pr_warn("gnttab share frames is not mapped!\n");
1487			return -ENOMEM;
1488		}
1489	}
1490	return gnttab_map(0, nr_grant_frames - 1);
1491}
1492
1493int gnttab_resume(void)
1494{
1495	gnttab_request_version();
1496	return gnttab_setup();
1497}
1498
1499int gnttab_suspend(void)
1500{
1501	if (!xen_feature(XENFEAT_auto_translated_physmap))
1502		gnttab_interface->unmap_frames();
1503	return 0;
1504}
1505
1506static int gnttab_expand(unsigned int req_entries)
1507{
1508	int rc;
1509	unsigned int cur, extra;
1510
1511	cur = nr_grant_frames;
1512	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1513		 gnttab_interface->grefs_per_grant_frame);
1514	if (cur + extra > gnttab_max_grant_frames()) {
 1515		pr_warn_ratelimited("max_grant_frames reached: cur=%u extra=%u limit=%u gnttab_free_count=%u req_entries=%u\n",
 1516				    cur, extra, gnttab_max_grant_frames(),
 1517				    gnttab_free_count, req_entries);
1520		return -ENOSPC;
1521	}
1522
1523	rc = gnttab_map(cur, cur + extra - 1);
1524	if (rc == 0)
1525		rc = grow_gnttab_list(extra);
1526
1527	return rc;
1528}
1529
1530int gnttab_init(void)
1531{
1532	int i;
1533	unsigned long max_nr_grant_frames;
1534	unsigned int max_nr_glist_frames, nr_glist_frames;
1535	unsigned int nr_init_grefs;
1536	int ret;
1537
1538	gnttab_request_version();
1539	max_nr_grant_frames = gnttab_max_grant_frames();
1540	nr_grant_frames = 1;
1541
1542	/* Determine the maximum number of frames required for the
1543	 * grant reference free list on the current hypervisor.
1544	 */
1545	max_nr_glist_frames = (max_nr_grant_frames *
1546			       gnttab_interface->grefs_per_grant_frame / RPP);
1547
1548	gnttab_list = kmalloc_array(max_nr_glist_frames,
1549				    sizeof(grant_ref_t *),
1550				    GFP_KERNEL);
1551	if (gnttab_list == NULL)
1552		return -ENOMEM;
1553
1554	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1555	for (i = 0; i < nr_glist_frames; i++) {
1556		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1557		if (gnttab_list[i] == NULL) {
1558			ret = -ENOMEM;
1559			goto ini_nomem;
1560		}
1561	}
1562
1563	ret = arch_gnttab_init(max_nr_grant_frames,
1564			       nr_status_frames(max_nr_grant_frames));
1565	if (ret < 0)
1566		goto ini_nomem;
1567
1568	if (gnttab_setup() < 0) {
1569		ret = -ENODEV;
1570		goto ini_nomem;
1571	}
1572
1573	nr_init_grefs = nr_grant_frames *
1574			gnttab_interface->grefs_per_grant_frame;
1575
1576	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1577		gnttab_entry(i) = i + 1;
1578
1579	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1580	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1581	gnttab_free_head  = NR_RESERVED_ENTRIES;
1582
 1583	pr_info("Grant table initialized\n");
1584	return 0;
1585
1586 ini_nomem:
1587	for (i--; i >= 0; i--)
1588		free_page((unsigned long)gnttab_list[i]);
1589	kfree(gnttab_list);
1590	return ret;
1591}
1592EXPORT_SYMBOL_GPL(gnttab_init);
1593
1594static int __gnttab_init(void)
1595{
1596	if (!xen_domain())
1597		return -ENODEV;
1598
1599	/* Delay grant-table initialization in the PV on HVM case */
1600	if (xen_hvm_domain() && !xen_pvh_domain())
1601		return 0;
1602
1603	return gnttab_init();
1604}
1605/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1606 * beforehand to initialize xen_auto_xlat_grant_frames. */
1607core_initcall_sync(__gnttab_init);