/******************************************************************************
 * grant_table.c
 *
 * Granting foreign access to our memory reservation.
 *
 * Copyright (c) 2005-2006, Christopher Clark
 * Copyright (c) 2004-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/workqueue.h>
#include <linux/ratelimit.h>
#include <linux/moduleparam.h>
#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
#include <linux/dma-mapping.h>
#endif

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>
#include <xen/swiotlb-xen.h>
#include <xen/balloon.h>
#ifdef CONFIG_X86
#include <asm/xen/cpuid.h>
#endif
#include <xen/mem-reservation.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>

#include <asm/sync_bitops.h>

#define GNTTAB_LIST_END 0xffffffff

static grant_ref_t **gnttab_list;
static unsigned int nr_grant_frames;

/*
 * Handling of free grants:
 *
 * Free grants are in a simple list anchored in gnttab_free_head. They are
 * linked by grant ref, the last element contains GNTTAB_LIST_END. The number
 * of free entries is stored in gnttab_free_count.
 * Additionally there is a bitmap of free entries anchored in
 * gnttab_free_bitmap. This is being used for simplifying allocation of
 * multiple consecutive grants, which is needed e.g. for support of virtio.
 * gnttab_last_free is used to add free entries of new frames at the end of
 * the free list.
 * gnttab_free_tail_ptr specifies the variable which references the start
 * of consecutive free grants ending with gnttab_last_free. This pointer is
 * updated in a rather defensive way, in order to avoid performance hits in
 * hot paths.
 * All those variables are protected by gnttab_list_lock.
 */
static int gnttab_free_count;
static unsigned int gnttab_size;
static grant_ref_t gnttab_free_head = GNTTAB_LIST_END;
static grant_ref_t gnttab_last_free = GNTTAB_LIST_END;
static grant_ref_t *gnttab_free_tail_ptr;
static unsigned long *gnttab_free_bitmap;
static DEFINE_SPINLOCK(gnttab_list_lock);

struct grant_frames xen_auto_xlat_grant_frames;
static unsigned int xen_gnttab_version;
module_param_named(version, xen_gnttab_version, uint, 0);

static union {
	struct grant_entry_v1 *v1;
	union grant_entry_v2 *v2;
	void *addr;
} gnttab_shared;

/* This is a structure of function pointers for the grant table. */
struct gnttab_ops {
	/*
	 * Version of the grant interface.
	 */
	unsigned int version;
	/*
	 * Grant refs per grant frame.
	 */
	unsigned int grefs_per_grant_frame;
	/*
	 * Map a list of frames for storing grant entries. The frames
	 * parameter stores the grant table frame addresses when the grant
	 * table is set up; nr_gframes is the number of grant table frames
	 * to map. Returning GNTST_okay means success, a negative value
	 * means failure.
	 */
	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
	/*
	 * Release the list of frames which was mapped in map_frames for
	 * grant entry status.
	 */
	void (*unmap_frames)(void);
	/*
	 * Introduce a valid entry into the grant table, granting the frame
	 * of this grant entry to a domain for access. The ref parameter is
	 * the reference of the introduced grant entry, domid is the id of
	 * the granted domain, frame is the page frame to be granted, and
	 * flags is the status the grant entry is to be updated with.
	 */
	void (*update_entry)(grant_ref_t ref, domid_t domid,
			     unsigned long frame, unsigned flags);
	/*
	 * Stop granting a grant entry to a domain for access. The ref
	 * parameter is the reference of the grant entry whose access is to
	 * be stopped. If the grant entry is currently mapped for reading or
	 * writing, just return failure (== 0) directly and don't tear down
	 * the grant access. Otherwise, stop the grant access for this entry
	 * and return success (== 1).
	 */
	int (*end_foreign_access_ref)(grant_ref_t ref);
	/*
	 * Read the frame number related to a given grant reference.
	 */
	unsigned long (*read_frame)(grant_ref_t ref);
};

struct unmap_refs_callback_data {
	struct completion completion;
	int result;
};

static const struct gnttab_ops *gnttab_interface;

/* This reflects the status of grant entries, so it acts as a global value. */
static grant_status_t *grstatus;

static struct gnttab_free_callback *gnttab_free_callback_list;

static int gnttab_expand(unsigned int req_entries);

#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
#define SPP (PAGE_SIZE / sizeof(grant_status_t))

static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
{
	return &gnttab_list[(entry) / RPP][(entry) % RPP];
}
/* This can be used as an l-value */
#define gnttab_entry(entry) (*__gnttab_entry(entry))

static int get_free_entries(unsigned count)
{
	unsigned long flags;
	int ref, rc = 0;
	grant_ref_t head;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if ((gnttab_free_count < count) &&
	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		return rc;
	}

	ref = head = gnttab_free_head;
	gnttab_free_count -= count;
	while (count--) {
		bitmap_clear(gnttab_free_bitmap, head, 1);
		if (gnttab_free_tail_ptr == __gnttab_entry(head))
			gnttab_free_tail_ptr = &gnttab_free_head;
		if (count)
			head = gnttab_entry(head);
	}
	gnttab_free_head = gnttab_entry(head);
	gnttab_entry(head) = GNTTAB_LIST_END;

	if (!gnttab_free_count) {
		gnttab_last_free = GNTTAB_LIST_END;
		gnttab_free_tail_ptr = NULL;
	}

	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ref;
}

static int get_seq_entry_count(void)
{
	if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr ||
	    *gnttab_free_tail_ptr == GNTTAB_LIST_END)
		return 0;

	return gnttab_last_free - *gnttab_free_tail_ptr + 1;
}

/* Rebuilds the free grant list and tries to find count consecutive entries. */
static int get_free_seq(unsigned int count)
{
	int ret = -ENOSPC;
	unsigned int from, to;
	grant_ref_t *last;

	gnttab_free_tail_ptr = &gnttab_free_head;
	last = &gnttab_free_head;

	for (from = find_first_bit(gnttab_free_bitmap, gnttab_size);
	     from < gnttab_size;
	     from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) {
		to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size,
					from + 1);
		if (ret < 0 && to - from >= count) {
			ret = from;
			bitmap_clear(gnttab_free_bitmap, ret, count);
			from += count;
			gnttab_free_count -= count;
			if (from == to)
				continue;
		}

		/*
		 * Recreate the free list in order to have it properly sorted.
		 * This is needed to make sure that the free tail has the
		 * maximum possible size.
		 */
		while (from < to) {
			*last = from;
			last = __gnttab_entry(from);
			gnttab_last_free = from;
			from++;
		}
		if (to < gnttab_size)
			gnttab_free_tail_ptr = __gnttab_entry(to - 1);
	}

	*last = GNTTAB_LIST_END;
	if (gnttab_last_free != gnttab_size - 1)
		gnttab_free_tail_ptr = NULL;

	return ret;
}

static int get_free_entries_seq(unsigned int count)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	if (gnttab_free_count < count) {
		ret = gnttab_expand(count - gnttab_free_count);
		if (ret < 0)
			goto out;
	}

	if (get_seq_entry_count() < count) {
		ret = get_free_seq(count);
		if (ret >= 0)
			goto out;
		ret = gnttab_expand(count - get_seq_entry_count());
		if (ret < 0)
			goto out;
	}

	ret = *gnttab_free_tail_ptr;
	*gnttab_free_tail_ptr = gnttab_entry(ret + count - 1);
	gnttab_free_count -= count;
	if (!gnttab_free_count)
		gnttab_free_tail_ptr = NULL;
	bitmap_clear(gnttab_free_bitmap, ret, count);

 out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);

	return ret;
}

static void do_free_callbacks(void)
{
	struct gnttab_free_callback *callback, *next;

	callback = gnttab_free_callback_list;
	gnttab_free_callback_list = NULL;

	while (callback != NULL) {
		next = callback->next;
		if (gnttab_free_count >= callback->count) {
			callback->next = NULL;
			callback->fn(callback->arg);
		} else {
			callback->next = gnttab_free_callback_list;
			gnttab_free_callback_list = callback;
		}
		callback = next;
	}
}

static inline void check_free_callbacks(void)
{
	if (unlikely(gnttab_free_callback_list))
		do_free_callbacks();
}

static void put_free_entry_locked(grant_ref_t ref)
{
	if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES))
		return;

	gnttab_entry(ref) = gnttab_free_head;
	gnttab_free_head = ref;
	if (!gnttab_free_count)
		gnttab_last_free = ref;
	if (gnttab_free_tail_ptr == &gnttab_free_head)
		gnttab_free_tail_ptr = __gnttab_entry(ref);
	gnttab_free_count++;
	bitmap_set(gnttab_free_bitmap, ref, 1);
}

static void put_free_entry(grant_ref_t ref)
{
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	put_free_entry_locked(ref);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}

static void gnttab_set_free(unsigned int start, unsigned int n)
{
	unsigned int i;

	for (i = start; i < start + n - 1; i++)
		gnttab_entry(i) = i + 1;

	gnttab_entry(i) = GNTTAB_LIST_END;
	if (!gnttab_free_count) {
		gnttab_free_head = start;
		gnttab_free_tail_ptr = &gnttab_free_head;
	} else {
		gnttab_entry(gnttab_last_free) = start;
	}
	gnttab_free_count += n;
	gnttab_last_free = i;

	bitmap_set(gnttab_free_bitmap, start, n);
}

/*
 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 * Introducing a valid entry into the grant table:
 *  1. Write ent->domid.
 *  2. Write ent->frame: Frame to which access is permitted.
 *  3. Write memory barrier (WMB).
 *  4. Write ent->flags, inc. valid type.
 */
static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned flags)
{
	gnttab_shared.v1[ref].domid = domid;
	gnttab_shared.v1[ref].frame = frame;
	wmb();
	gnttab_shared.v1[ref].flags = flags;
}

static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
				   unsigned long frame, unsigned int flags)
{
	gnttab_shared.v2[ref].hdr.domid = domid;
	gnttab_shared.v2[ref].full_page.frame = frame;
	wmb();	/* Hypervisor concurrent accesses. */
	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
}

/*
 * Public grant-issuing interface functions
 */
void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
				     unsigned long frame, int readonly)
{
	gnttab_interface->update_entry(ref, domid, frame,
			   GTF_permit_access | (readonly ? GTF_readonly : 0));
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);

int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
				int readonly)
{
	int ref;

	ref = get_free_entries(1);
	if (unlikely(ref < 0))
		return -ENOSPC;

	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);

	return ref;
}
EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
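
/*
 * Example (editorial sketch, not part of the kernel sources): a typical
 * frontend grants one of its pages to its backend and later revokes the
 * grant again. "otherend_id" and the page are hypothetical here; real
 * drivers get them from xenbus, and error handling is trimmed.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	int ref;
 *
 *	ref = gnttab_grant_foreign_access(otherend_id,
 *					  xen_page_to_gfn(page), 0);
 *	if (ref < 0)
 *		return ref;	// -ENOSPC: no free grant entries
 *	// ... hand "ref" to the backend, e.g. via xenstore or a ring ...
 *	gnttab_end_foreign_access(ref, page);	// also puts the page
 */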

static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
{
	u16 *pflags = &gnttab_shared.v1[ref].flags;
	u16 flags;

	flags = *pflags;
	do {
		if (flags & (GTF_reading|GTF_writing))
			return 0;
	} while (!sync_try_cmpxchg(pflags, &flags, 0));

	return 1;
}

static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref)
{
	gnttab_shared.v2[ref].hdr.flags = 0;
	mb();	/* Concurrent access by hypervisor. */
	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
		return 0;
	} else {
		/*
		 * The read of grstatus needs to have acquire semantics.
		 * On x86, reads already have that, and we just need to
		 * protect against compiler reorderings.
		 * On other architectures we may need a full barrier.
		 */
#ifdef CONFIG_X86
		barrier();
#else
		mb();
#endif
	}

	return 1;
}

static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	return gnttab_interface->end_foreign_access_ref(ref);
}

int gnttab_end_foreign_access_ref(grant_ref_t ref)
{
	if (_gnttab_end_foreign_access_ref(ref))
		return 1;
	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);

static unsigned long gnttab_read_frame_v1(grant_ref_t ref)
{
	return gnttab_shared.v1[ref].frame;
}

static unsigned long gnttab_read_frame_v2(grant_ref_t ref)
{
	return gnttab_shared.v2[ref].full_page.frame;
}

struct deferred_entry {
	struct list_head list;
	grant_ref_t ref;
	uint16_t warn_delay;
	struct page *page;
};
static LIST_HEAD(deferred_list);
static void gnttab_handle_deferred(struct timer_list *);
static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);

static atomic64_t deferred_count;
static atomic64_t leaked_count;
static unsigned int free_per_iteration = 10;
module_param(free_per_iteration, uint, 0600);

static void gnttab_handle_deferred(struct timer_list *unused)
{
	unsigned int nr = READ_ONCE(free_per_iteration);
	const bool ignore_limit = nr == 0;
	struct deferred_entry *first = NULL;
	unsigned long flags;
	size_t freed = 0;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while ((ignore_limit || nr--) && !list_empty(&deferred_list)) {
		struct deferred_entry *entry
			= list_first_entry(&deferred_list,
					   struct deferred_entry, list);

		if (entry == first)
			break;
		list_del(&entry->list);
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		if (_gnttab_end_foreign_access_ref(entry->ref)) {
			uint64_t ret = atomic64_dec_return(&deferred_count);

			put_free_entry(entry->ref);
			pr_debug("freeing g.e. %#x (pfn %#lx), %llu remaining\n",
				 entry->ref, page_to_pfn(entry->page),
				 (unsigned long long)ret);
			put_page(entry->page);
			freed++;
			kfree(entry);
			entry = NULL;
		} else {
			if (!--entry->warn_delay)
				pr_info("g.e. %#x still pending\n", entry->ref);
			if (!first)
				first = entry;
		}
		spin_lock_irqsave(&gnttab_list_lock, flags);
		if (entry)
			list_add_tail(&entry->list, &deferred_list);
	}
	if (list_empty(&deferred_list))
		WARN_ON(atomic64_read(&deferred_count));
	else if (!timer_pending(&deferred_timer)) {
		deferred_timer.expires = jiffies + HZ;
		add_timer(&deferred_timer);
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
	pr_debug("Freed %zu references\n", freed);
}

static void gnttab_add_deferred(grant_ref_t ref, struct page *page)
{
	struct deferred_entry *entry;
	gfp_t gfp = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;
	uint64_t leaked, deferred;

	entry = kmalloc(sizeof(*entry), gfp);
	if (!page) {
		unsigned long gfn = gnttab_interface->read_frame(ref);

		page = pfn_to_page(gfn_to_pfn(gfn));
		get_page(page);
	}

	if (entry) {
		unsigned long flags;

		entry->ref = ref;
		entry->page = page;
		entry->warn_delay = 60;
		spin_lock_irqsave(&gnttab_list_lock, flags);
		list_add_tail(&entry->list, &deferred_list);
		if (!timer_pending(&deferred_timer)) {
			deferred_timer.expires = jiffies + HZ;
			add_timer(&deferred_timer);
		}
		spin_unlock_irqrestore(&gnttab_list_lock, flags);
		deferred = atomic64_inc_return(&deferred_count);
		leaked = atomic64_read(&leaked_count);
		pr_debug("deferring g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
			 ref, page ? page_to_pfn(page) : -1, deferred, leaked);
	} else {
		deferred = atomic64_read(&deferred_count);
		leaked = atomic64_inc_return(&leaked_count);
		pr_warn("leaking g.e. %#x (pfn %#lx) (total deferred %llu, total leaked %llu)\n",
			ref, page ? page_to_pfn(page) : -1, deferred, leaked);
	}
}

int gnttab_try_end_foreign_access(grant_ref_t ref)
{
	int ret = _gnttab_end_foreign_access_ref(ref);

	if (ret)
		put_free_entry(ref);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_try_end_foreign_access);

void gnttab_end_foreign_access(grant_ref_t ref, struct page *page)
{
	if (gnttab_try_end_foreign_access(ref)) {
		if (page)
			put_page(page);
	} else
		gnttab_add_deferred(ref, page);
}
EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);

void gnttab_free_grant_reference(grant_ref_t ref)
{
	put_free_entry(ref);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);

void gnttab_free_grant_references(grant_ref_t head)
{
	grant_ref_t ref;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	while (head != GNTTAB_LIST_END) {
		ref = gnttab_entry(head);
		put_free_entry_locked(head);
		head = ref;
	}
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_references);

void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (i = count; i > 0; i--)
		put_free_entry_locked(head + i - 1);
	check_free_callbacks();
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq);

int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
{
	int h = get_free_entries(count);

	if (h < 0)
		return -ENOSPC;

	*head = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);

int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first)
{
	int h;

	if (count == 1)
		h = get_free_entries(1);
	else
		h = get_free_entries_seq(count);

	if (h < 0)
		return -ENOSPC;

	*first = h;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq);

int gnttab_empty_grant_references(const grant_ref_t *private_head)
{
	return (*private_head == GNTTAB_LIST_END);
}
EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);

int gnttab_claim_grant_reference(grant_ref_t *private_head)
{
	grant_ref_t g = *private_head;
	if (unlikely(g == GNTTAB_LIST_END))
		return -ENOSPC;
	*private_head = gnttab_entry(g);
	return g;
}
EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);

void gnttab_release_grant_reference(grant_ref_t *private_head,
				    grant_ref_t release)
{
	gnttab_entry(release) = *private_head;
	*private_head = release;
}
EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
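
/*
 * Example (editorial sketch, not part of the kernel sources): ring-based
 * drivers typically pre-allocate a private pool of references and then
 * claim/release them without taking the global lock for every grant.
 * "N_REFS", "otherend_id" and "page" are hypothetical placeholders.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(N_REFS, &head))
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref >= 0)
 *		gnttab_grant_foreign_access_ref(ref, otherend_id,
 *						xen_page_to_gfn(page), 0);
 *	// ... later, after gnttab_end_foreign_access_ref(ref) succeeded:
 *	gnttab_release_grant_reference(&head, ref);
 *	gnttab_free_grant_references(head);	// return the whole pool
 */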

void gnttab_request_free_callback(struct gnttab_free_callback *callback,
				  void (*fn)(void *), void *arg, u16 count)
{
	unsigned long flags;
	struct gnttab_free_callback *cb;

	spin_lock_irqsave(&gnttab_list_lock, flags);

	/* Check if the callback is already on the list */
	cb = gnttab_free_callback_list;
	while (cb) {
		if (cb == callback)
			goto out;
		cb = cb->next;
	}

	callback->fn = fn;
	callback->arg = arg;
	callback->count = count;
	callback->next = gnttab_free_callback_list;
	gnttab_free_callback_list = callback;
	check_free_callbacks();
out:
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_request_free_callback);

void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
{
	struct gnttab_free_callback **pcb;
	unsigned long flags;

	spin_lock_irqsave(&gnttab_list_lock, flags);
	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
		if (*pcb == callback) {
			*pcb = callback->next;
			break;
		}
	}
	spin_unlock_irqrestore(&gnttab_list_lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
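
/*
 * Example (editorial sketch, not part of the kernel sources): when
 * gnttab_alloc_grant_references() fails with -ENOSPC, a driver can ask to
 * be notified once enough entries are free again. The callback struct is
 * typically embedded in the driver's private data; "my_info" and
 * "my_refill" are hypothetical names. Note the callback runs under
 * gnttab_list_lock with interrupts off, so it should only kick a work
 * item or similar.
 *
 *	static void my_refill(void *arg)
 *	{
 *		struct my_info *info = arg;
 *
 *		schedule_work(&info->retry_work);	// retry allocation
 *	}
 *
 *	if (gnttab_alloc_grant_references(16, &info->head) < 0)
 *		gnttab_request_free_callback(&info->callback, my_refill,
 *					     info, 16);
 */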

static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
{
	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
	       align;
}

static int grow_gnttab_list(unsigned int more_frames)
{
	unsigned int new_nr_grant_frames, extra_entries, i;
	unsigned int nr_glist_frames, new_nr_glist_frames;
	unsigned int grefs_per_frame;

	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;

	new_nr_grant_frames = nr_grant_frames + more_frames;
	extra_entries = more_frames * grefs_per_frame;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
		if (!gnttab_list[i])
			goto grow_nomem;
	}

	gnttab_set_free(gnttab_size, extra_entries);

	if (!gnttab_free_tail_ptr)
		gnttab_free_tail_ptr = __gnttab_entry(gnttab_size);

	nr_grant_frames = new_nr_grant_frames;
	gnttab_size += extra_entries;

	check_free_callbacks();

	return 0;

grow_nomem:
	while (i-- > nr_glist_frames)
		free_page((unsigned long) gnttab_list[i]);
	return -ENOMEM;
}

static unsigned int __max_nr_grant_frames(void)
{
	struct gnttab_query_size query;
	int rc;

	query.dom = DOMID_SELF;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
	if ((rc < 0) || (query.status != GNTST_okay))
		return 4; /* Legacy max supported number of frames */

	return query.max_nr_frames;
}

unsigned int gnttab_max_grant_frames(void)
{
	unsigned int xen_max = __max_nr_grant_frames();
	static unsigned int boot_max_nr_grant_frames;

	/* First time, initialize it properly. */
	if (!boot_max_nr_grant_frames)
		boot_max_nr_grant_frames = __max_nr_grant_frames();

	if (xen_max > boot_max_nr_grant_frames)
		return boot_max_nr_grant_frames;
	return xen_max;
}
EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);

int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
{
	xen_pfn_t *pfn;
	unsigned int max_nr_gframes = __max_nr_grant_frames();
	unsigned int i;
	void *vaddr;

	if (xen_auto_xlat_grant_frames.count)
		return -EINVAL;

	vaddr = memremap(addr, XEN_PAGE_SIZE * max_nr_gframes, MEMREMAP_WB);
	if (vaddr == NULL) {
		pr_warn("Failed to memremap gnttab share frames (addr=%pa)!\n",
			&addr);
		return -ENOMEM;
	}
	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
	if (!pfn) {
		memunmap(vaddr);
		return -ENOMEM;
	}
	for (i = 0; i < max_nr_gframes; i++)
		pfn[i] = XEN_PFN_DOWN(addr) + i;

	xen_auto_xlat_grant_frames.vaddr = vaddr;
	xen_auto_xlat_grant_frames.pfn = pfn;
	xen_auto_xlat_grant_frames.count = max_nr_gframes;

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);

void gnttab_free_auto_xlat_frames(void)
{
	if (!xen_auto_xlat_grant_frames.count)
		return;
	kfree(xen_auto_xlat_grant_frames.pfn);
	memunmap(xen_auto_xlat_grant_frames.vaddr);

	xen_auto_xlat_grant_frames.pfn = NULL;
	xen_auto_xlat_grant_frames.count = 0;
	xen_auto_xlat_grant_frames.vaddr = NULL;
}
EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);

int gnttab_pages_set_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
#if BITS_PER_LONG < 64
		struct xen_page_foreign *foreign;

		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
		if (!foreign)
			return -ENOMEM;

		set_page_private(pages[i], (unsigned long)foreign);
#endif
		SetPagePrivate(pages[i]);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_pages_set_private);

/**
 * gnttab_alloc_pages - alloc pages suitable for grant mapping into
 * @nr_pages: number of pages to alloc
 * @pages: returns the pages
 */
int gnttab_alloc_pages(int nr_pages, struct page **pages)
{
	int ret;

	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
	if (ret < 0)
		return ret;

	ret = gnttab_pages_set_private(nr_pages, pages);
	if (ret < 0)
		gnttab_free_pages(nr_pages, pages);

	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_alloc_pages);

#ifdef CONFIG_XEN_UNPOPULATED_ALLOC
static inline void cache_init(struct gnttab_page_cache *cache)
{
	cache->pages = NULL;
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return !cache->pages;
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = cache->pages;
	cache->pages = page->zone_device_data;

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	page->zone_device_data = cache->pages;
	cache->pages = page;
}
#else
static inline void cache_init(struct gnttab_page_cache *cache)
{
	INIT_LIST_HEAD(&cache->pages);
}

static inline bool cache_empty(struct gnttab_page_cache *cache)
{
	return list_empty(&cache->pages);
}

static inline struct page *cache_deq(struct gnttab_page_cache *cache)
{
	struct page *page;

	page = list_first_entry(&cache->pages, struct page, lru);
	list_del(&page->lru);

	return page;
}

static inline void cache_enq(struct gnttab_page_cache *cache, struct page *page)
{
	list_add(&page->lru, &cache->pages);
}
#endif

void gnttab_page_cache_init(struct gnttab_page_cache *cache)
{
	spin_lock_init(&cache->lock);
	cache_init(cache);
	cache->num_pages = 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_init);

int gnttab_page_cache_get(struct gnttab_page_cache *cache, struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	if (cache_empty(cache)) {
		spin_unlock_irqrestore(&cache->lock, flags);
		return gnttab_alloc_pages(1, page);
	}

	page[0] = cache_deq(cache);
	cache->num_pages--;

	spin_unlock_irqrestore(&cache->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_get);

void gnttab_page_cache_put(struct gnttab_page_cache *cache, struct page **page,
			   unsigned int num)
{
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&cache->lock, flags);

	for (i = 0; i < num; i++)
		cache_enq(cache, page[i]);
	cache->num_pages += num;

	spin_unlock_irqrestore(&cache->lock, flags);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_put);

void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int num)
{
	struct page *page[10];
	unsigned int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);

	while (cache->num_pages > num) {
		page[i] = cache_deq(cache);
		cache->num_pages--;
		if (++i == ARRAY_SIZE(page)) {
			spin_unlock_irqrestore(&cache->lock, flags);
			gnttab_free_pages(i, page);
			i = 0;
			spin_lock_irqsave(&cache->lock, flags);
		}
	}

	spin_unlock_irqrestore(&cache->lock, flags);

	if (i != 0)
		gnttab_free_pages(i, page);
}
EXPORT_SYMBOL_GPL(gnttab_page_cache_shrink);
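
/*
 * Example (editorial sketch, not part of the kernel sources): backends
 * that map and unmap grants at a high rate keep pages in a
 * gnttab_page_cache instead of hitting the unpopulated-page allocator
 * every time.
 *
 *	static struct gnttab_page_cache cache;
 *	struct page *page;
 *
 *	gnttab_page_cache_init(&cache);
 *	if (gnttab_page_cache_get(&cache, &page))
 *		return -ENOMEM;
 *	// ... map a grant onto "page", use it, unmap it ...
 *	gnttab_page_cache_put(&cache, &page, 1);
 *	gnttab_page_cache_shrink(&cache, 16);	// keep at most 16 spares
 */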

void gnttab_pages_clear_private(int nr_pages, struct page **pages)
{
	int i;

	for (i = 0; i < nr_pages; i++) {
		if (PagePrivate(pages[i])) {
#if BITS_PER_LONG < 64
			kfree((void *)page_private(pages[i]));
#endif
			ClearPagePrivate(pages[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);

/**
 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 * @nr_pages: number of pages to free
 * @pages: the pages
 */
void gnttab_free_pages(int nr_pages, struct page **pages)
{
	gnttab_pages_clear_private(nr_pages, pages);
	xen_free_unpopulated_pages(nr_pages, pages);
}
EXPORT_SYMBOL_GPL(gnttab_free_pages);

#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
/**
 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for grant mapping into
 * @args: arguments to the function
 */
int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
{
	unsigned long pfn, start_pfn;
	size_t size;
	int i, ret;

	if (args->nr_pages < 0 || args->nr_pages > (INT_MAX >> PAGE_SHIFT))
		return -ENOMEM;

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		args->vaddr = dma_alloc_coherent(args->dev, size,
						 &args->dev_bus_addr,
						 GFP_KERNEL | __GFP_NOWARN);
	else
		args->vaddr = dma_alloc_wc(args->dev, size,
					   &args->dev_bus_addr,
					   GFP_KERNEL | __GFP_NOWARN);
	if (!args->vaddr) {
		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
		return -ENOMEM;
	}

	start_pfn = __phys_to_pfn(args->dev_bus_addr);
	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
			pfn++, i++) {
		struct page *page = pfn_to_page(pfn);

		args->pages[i] = page;
		args->frames[i] = xen_page_to_gfn(page);
		xenmem_reservation_scrub_page(page);
	}

	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);

	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to decrease reservation for DMA buffer\n");
		ret = -EFAULT;
		goto fail;
	}

	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
	if (ret < 0)
		goto fail;

	return 0;

fail:
	gnttab_dma_free_pages(args);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);

/**
 * gnttab_dma_free_pages - free DMAable pages
 * @args: arguments to the function
 */
int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
{
	size_t size;
	int i, ret;

	gnttab_pages_clear_private(args->nr_pages, args->pages);

	for (i = 0; i < args->nr_pages; i++)
		args->frames[i] = page_to_xen_pfn(args->pages[i]);

	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
	if (ret != args->nr_pages) {
		pr_debug("Failed to increase reservation for DMA buffer\n");
		ret = -EFAULT;
	} else {
		ret = 0;
	}

	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
					     args->frames);

	size = args->nr_pages << PAGE_SHIFT;
	if (args->coherent)
		dma_free_coherent(args->dev, size,
				  args->vaddr, args->dev_bus_addr);
	else
		dma_free_wc(args->dev, size,
			    args->vaddr, args->dev_bus_addr);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
#endif

/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
static inline void
gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
						const char *func)
{
	unsigned delay = 1;

	do {
		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
		if (*status == GNTST_eagain)
			msleep(delay++);
	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));

	if (delay >= MAX_DELAY) {
		pr_err("%s: %s eagain grant\n", func, current->comm);
		*status = GNTST_bad_page;
	}
}

void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
{
	struct gnttab_map_grant_ref *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_map);

void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
{
	struct gnttab_copy *op;

	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
		BUG();
	for (op = batch; op < batch + count; op++)
		if (op->status == GNTST_eagain)
			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
						&op->status, __func__);
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
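
/*
 * Example (editorial sketch, not part of the kernel sources): copy data
 * out of a guest-granted page into a local page without mapping it, as
 * network/block backends do. "ref", "otherend_id" and "local_page" are
 * hypothetical.
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref	= ref,
 *		.source.domid	= otherend_id,
 *		.source.offset	= 0,
 *		.dest.u.gmfn	= xen_page_to_gfn(local_page),
 *		.dest.domid	= DOMID_SELF,
 *		.dest.offset	= 0,
 *		.len		= XEN_PAGE_SIZE,
 *		.flags		= GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);	// retries GNTST_eagain internally
 *	if (op.status != GNTST_okay)
 *		return -EIO;
 */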

void gnttab_foreach_grant_in_range(struct page *page,
				   unsigned int offset,
				   unsigned int len,
				   xen_grant_fn_t fn,
				   void *data)
{
	unsigned int goffset;
	unsigned int glen;
	unsigned long xen_pfn;

	len = min_t(unsigned int, PAGE_SIZE - offset, len);
	goffset = xen_offset_in_page(offset);

	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);

	while (len) {
		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);

		goffset = 0;
		xen_pfn++;
		len -= glen;
	}
}
EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
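
/*
 * Example (editorial sketch, not part of the kernel sources): when
 * PAGE_SIZE > XEN_PAGE_SIZE (e.g. 64 KiB pages on arm64), one Linux page
 * spans several Xen grants, and the helper above invokes a callback once
 * per grant-sized chunk. "my_fill_segment" and "struct my_req" are
 * hypothetical.
 *
 *	static void my_fill_segment(unsigned long gfn, unsigned int offset,
 *				    unsigned int len, void *data)
 *	{
 *		struct my_req *req = data;
 *
 *		req->segs[req->nr].gfn = gfn;
 *		req->segs[req->nr].offset = offset;
 *		req->segs[req->nr].len = len;
 *		req->nr++;
 *	}
 *
 *	gnttab_foreach_grant_in_range(page, 0, PAGE_SIZE,
 *				      my_fill_segment, &req);
 */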

void gnttab_foreach_grant(struct page **pages,
			  unsigned int nr_grefs,
			  xen_grant_fn_t fn,
			  void *data)
{
	unsigned int goffset = 0;
	unsigned long xen_pfn = 0;
	unsigned int i;

	for (i = 0; i < nr_grefs; i++) {
		if ((i % XEN_PFN_PER_PAGE) == 0) {
			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
			goffset = 0;
		}

		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);

		goffset += XEN_PAGE_SIZE;
		xen_pfn++;
	}
}

int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
		    struct gnttab_map_grant_ref *kmap_ops,
		    struct page **pages, unsigned int count)
{
	int i, ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++) {
		switch (map_ops[i].status) {
		case GNTST_okay:
		{
			struct xen_page_foreign *foreign;

			SetPageForeign(pages[i]);
			foreign = xen_page_foreign(pages[i]);
			foreign->domid = map_ops[i].dom;
			foreign->gref = map_ops[i].ref;
			break;
		}

		case GNTST_no_device_space:
			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
			break;

		case GNTST_eagain:
			/* Retry eagain maps */
			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
						map_ops + i,
						&map_ops[i].status, __func__);
			/* Test status in next loop iteration. */
			i--;
			break;

		default:
			break;
		}
	}

	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_map_refs);

int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
		      struct gnttab_unmap_grant_ref *kunmap_ops,
		      struct page **pages, unsigned int count)
{
	unsigned int i;
	int ret;

	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
	if (ret)
		return ret;

	for (i = 0; i < count; i++)
		ClearPageForeign(pages[i]);

	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
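
/*
 * Example (editorial sketch, not part of the kernel sources): a backend
 * maps a frontend's grant onto a locally allocated page and unmaps it
 * again. "ref" and "otherend_id" are hypothetical; error handling is
 * trimmed.
 *
 *	struct gnttab_map_grant_ref map_op;
 *	struct gnttab_unmap_grant_ref unmap_op;
 *	struct page *page;
 *	unsigned long addr;
 *
 *	if (gnttab_alloc_pages(1, &page))
 *		return -ENOMEM;
 *	addr = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 *	gnttab_set_map_op(&map_op, addr, GNTMAP_host_map, ref, otherend_id);
 *	if (gnttab_map_refs(&map_op, NULL, &page, 1) ||
 *	    map_op.status != GNTST_okay)
 *		goto err;
 *	// ... access the frontend's data through "addr" ...
 *	gnttab_set_unmap_op(&unmap_op, addr, GNTMAP_host_map, map_op.handle);
 *	gnttab_unmap_refs(&unmap_op, NULL, &page, 1);
 *	gnttab_free_pages(1, &page);
 */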

#define GNTTAB_UNMAP_REFS_DELAY 5

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);

static void gnttab_unmap_work(struct work_struct *work)
{
	struct gntab_unmap_queue_data
		*unmap_data = container_of(work,
					   struct gntab_unmap_queue_data,
					   gnttab_work.work);
	if (unmap_data->age != UINT_MAX)
		unmap_data->age++;
	__gnttab_unmap_refs_async(unmap_data);
}

static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	int ret;
	int pc;

	for (pc = 0; pc < item->count; pc++) {
		if (page_count(item->pages[pc]) > 1) {
			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
			schedule_delayed_work(&item->gnttab_work,
					      msecs_to_jiffies(delay));
			return;
		}
	}

	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
				item->pages, item->count);
	item->done(ret, item);
}

void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
{
	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
	item->age = 0;

	__gnttab_unmap_refs_async(item);
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);

static void unmap_refs_callback(int result,
		struct gntab_unmap_queue_data *data)
{
	struct unmap_refs_callback_data *d = data->data;

	d->result = result;
	complete(&d->completion);
}

int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
{
	struct unmap_refs_callback_data data;

	init_completion(&data.completion);
	item->data = &data;
	item->done = &unmap_refs_callback;
	gnttab_unmap_refs_async(item);
	wait_for_completion(&data.completion);

	return data.result;
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);

static unsigned int nr_status_frames(unsigned int nr_grant_frames)
{
	return gnttab_frames(nr_grant_frames, SPP);
}

static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
	int rc;

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v1(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
}

static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
{
	uint64_t *sframes;
	unsigned int nr_sframes;
	struct gnttab_get_status_frames getframes;
	int rc;

	nr_sframes = nr_status_frames(nr_gframes);

	/* No need for kzalloc as it is initialized by the following hypercall
	 * GNTTABOP_get_status_frames.
	 */
	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
	if (!sframes)
		return -ENOMEM;

	getframes.dom        = DOMID_SELF;
	getframes.nr_frames  = nr_sframes;
	set_xen_guest_handle(getframes.frame_list, sframes);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
				       &getframes, 1);
	if (rc == -ENOSYS) {
		kfree(sframes);
		return -ENOSYS;
	}

	BUG_ON(rc || getframes.status);

	rc = arch_gnttab_map_status(sframes, nr_sframes,
				    nr_status_frames(gnttab_max_grant_frames()),
				    &grstatus);
	BUG_ON(rc);
	kfree(sframes);

	rc = arch_gnttab_map_shared(frames, nr_gframes,
				    gnttab_max_grant_frames(),
				    &gnttab_shared.addr);
	BUG_ON(rc);

	return 0;
}

static void gnttab_unmap_frames_v2(void)
{
	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
}

static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
{
	struct gnttab_setup_table setup;
	xen_pfn_t *frames;
	unsigned int nr_gframes = end_idx + 1;
	int rc;

	if (xen_feature(XENFEAT_auto_translated_physmap)) {
		struct xen_add_to_physmap xatp;
		unsigned int i = end_idx;
		rc = 0;
		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
		/*
		 * Loop backwards, so that the first hypercall has the largest
		 * index, ensuring that the table will grow only once.
		 */
		do {
			xatp.domid = DOMID_SELF;
			xatp.idx = i;
			xatp.space = XENMAPSPACE_grant_table;
			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
			if (rc != 0) {
				pr_warn("grant table add_to_physmap failed, err=%d\n",
					rc);
				break;
			}
		} while (i-- > start_idx);

		return rc;
	}

	/* No need for kzalloc as it is initialized by the following hypercall
	 * GNTTABOP_setup_table.
	 */
	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
	if (!frames)
		return -ENOMEM;

	setup.dom        = DOMID_SELF;
	setup.nr_frames  = nr_gframes;
	set_xen_guest_handle(setup.frame_list, frames);

	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
	if (rc == -ENOSYS) {
		kfree(frames);
		return -ENOSYS;
	}

	BUG_ON(rc || setup.status);

	rc = gnttab_interface->map_frames(frames, nr_gframes);

	kfree(frames);

	return rc;
}

static const struct gnttab_ops gnttab_v1_ops = {
	.version			= 1,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(struct grant_entry_v1),
	.map_frames			= gnttab_map_frames_v1,
	.unmap_frames			= gnttab_unmap_frames_v1,
	.update_entry			= gnttab_update_entry_v1,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
	.read_frame			= gnttab_read_frame_v1,
};

static const struct gnttab_ops gnttab_v2_ops = {
	.version			= 2,
	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
					  sizeof(union grant_entry_v2),
	.map_frames			= gnttab_map_frames_v2,
	.unmap_frames			= gnttab_unmap_frames_v2,
	.update_entry			= gnttab_update_entry_v2,
	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
	.read_frame			= gnttab_read_frame_v2,
};

static bool gnttab_need_v2(void)
{
#ifdef CONFIG_X86
	uint32_t base, width;

	if (xen_pv_domain()) {
		base = xen_cpuid_base();
		if (cpuid_eax(base) < 5)
			return false;	/* Information not available, use V1. */
		width = cpuid_ebx(base + 5) &
			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
		return width > 32 + PAGE_SHIFT;
	}
#endif
	return !!(max_possible_pfn >> 32);
}

static void gnttab_request_version(void)
{
	long rc;
	struct gnttab_set_version gsv;

	if (gnttab_need_v2())
		gsv.version = 2;
	else
		gsv.version = 1;

	/* Boot parameter overrides automatic selection. */
	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
		gsv.version = xen_gnttab_version;

	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
	if (rc == 0 && gsv.version == 2)
		gnttab_interface = &gnttab_v2_ops;
	else
		gnttab_interface = &gnttab_v1_ops;
	pr_info("Grant tables using version %d layout\n",
		gnttab_interface->version);
}

static int gnttab_setup(void)
{
	unsigned int max_nr_gframes;

	max_nr_gframes = gnttab_max_grant_frames();
	if (max_nr_gframes < nr_grant_frames)
		return -ENOSYS;

	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
		if (gnttab_shared.addr == NULL) {
			pr_warn("gnttab share frames are not mapped!\n");
			return -ENOMEM;
		}
	}
	return gnttab_map(0, nr_grant_frames - 1);
}

int gnttab_resume(void)
{
	gnttab_request_version();
	return gnttab_setup();
}

int gnttab_suspend(void)
{
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		gnttab_interface->unmap_frames();
	return 0;
}

static int gnttab_expand(unsigned int req_entries)
{
	int rc;
	unsigned int cur, extra;

	cur = nr_grant_frames;
	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
		 gnttab_interface->grefs_per_grant_frame);
	if (cur + extra > gnttab_max_grant_frames()) {
		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
				    " cur=%u extra=%u limit=%u"
				    " gnttab_free_count=%u req_entries=%u\n",
				    cur, extra, gnttab_max_grant_frames(),
				    gnttab_free_count, req_entries);
		return -ENOSPC;
	}

	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
		rc = grow_gnttab_list(extra);

	return rc;
}

int gnttab_init(void)
{
	int i;
	unsigned long max_nr_grant_frames, max_nr_grefs;
	unsigned int max_nr_glist_frames, nr_glist_frames;
	int ret;

	gnttab_request_version();
	max_nr_grant_frames = gnttab_max_grant_frames();
	max_nr_grefs = max_nr_grant_frames *
			gnttab_interface->grefs_per_grant_frame;
	nr_grant_frames = 1;

	/* Determine the maximum number of frames required for the
	 * grant reference free list on the current hypervisor.
	 */
	max_nr_glist_frames = max_nr_grefs / RPP;

	gnttab_list = kmalloc_array(max_nr_glist_frames,
				    sizeof(grant_ref_t *),
				    GFP_KERNEL);
	if (gnttab_list == NULL)
		return -ENOMEM;

	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
	for (i = 0; i < nr_glist_frames; i++) {
		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
		if (gnttab_list[i] == NULL) {
			ret = -ENOMEM;
			goto ini_nomem;
		}
	}

	gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL);
	if (!gnttab_free_bitmap) {
		ret = -ENOMEM;
		goto ini_nomem;
	}

	ret = arch_gnttab_init(max_nr_grant_frames,
			       nr_status_frames(max_nr_grant_frames));
	if (ret < 0)
		goto ini_nomem;

	if (gnttab_setup() < 0) {
		ret = -ENODEV;
		goto ini_nomem;
	}

	gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame;

	gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES,
			gnttab_size - GNTTAB_NR_RESERVED_ENTRIES);

	pr_info("Grant table initialized\n");
	return 0;

 ini_nomem:
	for (i--; i >= 0; i--)
		free_page((unsigned long)gnttab_list[i]);
	kfree(gnttab_list);
	bitmap_free(gnttab_free_bitmap);
	return ret;
}
EXPORT_SYMBOL_GPL(gnttab_init);

static int __gnttab_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	/* Delay grant-table initialization in the PV on HVM case */
	if (xen_hvm_domain() && !xen_pvh_domain())
		return 0;

	return gnttab_init();
}
/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
 * beforehand to initialize xen_auto_xlat_grant_frames. */
core_initcall_sync(__gnttab_init);
v5.9
   1/******************************************************************************
   2 * grant_table.c
   3 *
   4 * Granting foreign access to our memory reservation.
   5 *
   6 * Copyright (c) 2005-2006, Christopher Clark
   7 * Copyright (c) 2004-2005, K A Fraser
   8 *
   9 * This program is free software; you can redistribute it and/or
  10 * modify it under the terms of the GNU General Public License version 2
  11 * as published by the Free Software Foundation; or, when distributed
  12 * separately from the Linux kernel or incorporated into other
  13 * software packages, subject to the following license:
  14 *
  15 * Permission is hereby granted, free of charge, to any person obtaining a copy
  16 * of this source file (the "Software"), to deal in the Software without
  17 * restriction, including without limitation the rights to use, copy, modify,
  18 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
  19 * and to permit persons to whom the Software is furnished to do so, subject to
  20 * the following conditions:
  21 *
  22 * The above copyright notice and this permission notice shall be included in
  23 * all copies or substantial portions of the Software.
  24 *
  25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  26 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  27 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
  28 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  29 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  30 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  31 * IN THE SOFTWARE.
  32 */
  33
  34#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
  35
 
  36#include <linux/memblock.h>
  37#include <linux/sched.h>
  38#include <linux/mm.h>
  39#include <linux/slab.h>
  40#include <linux/vmalloc.h>
  41#include <linux/uaccess.h>
  42#include <linux/io.h>
  43#include <linux/delay.h>
  44#include <linux/hardirq.h>
  45#include <linux/workqueue.h>
  46#include <linux/ratelimit.h>
  47#include <linux/moduleparam.h>
  48#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
  49#include <linux/dma-mapping.h>
  50#endif
  51
  52#include <xen/xen.h>
  53#include <xen/interface/xen.h>
  54#include <xen/page.h>
  55#include <xen/grant_table.h>
  56#include <xen/interface/memory.h>
  57#include <xen/hvc-console.h>
  58#include <xen/swiotlb-xen.h>
  59#include <xen/balloon.h>
  60#ifdef CONFIG_X86
  61#include <asm/xen/cpuid.h>
  62#endif
  63#include <xen/mem-reservation.h>
  64#include <asm/xen/hypercall.h>
  65#include <asm/xen/interface.h>
  66
  67#include <asm/sync_bitops.h>
  68
  69/* External tools reserve first few grant table entries. */
  70#define NR_RESERVED_ENTRIES 8
  71#define GNTTAB_LIST_END 0xffffffff
  72
  73static grant_ref_t **gnttab_list;
  74static unsigned int nr_grant_frames;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  75static int gnttab_free_count;
  76static grant_ref_t gnttab_free_head;
 
 
 
 
  77static DEFINE_SPINLOCK(gnttab_list_lock);
 
  78struct grant_frames xen_auto_xlat_grant_frames;
  79static unsigned int xen_gnttab_version;
  80module_param_named(version, xen_gnttab_version, uint, 0);
  81
  82static union {
  83	struct grant_entry_v1 *v1;
  84	union grant_entry_v2 *v2;
  85	void *addr;
  86} gnttab_shared;
  87
  88/*This is a structure of function pointers for grant table*/
  89struct gnttab_ops {
  90	/*
  91	 * Version of the grant interface.
  92	 */
  93	unsigned int version;
  94	/*
  95	 * Grant refs per grant frame.
  96	 */
  97	unsigned int grefs_per_grant_frame;
  98	/*
  99	 * Mapping a list of frames for storing grant entries. Frames parameter
 100	 * is used to store grant table address when grant table being setup,
 101	 * nr_gframes is the number of frames to map grant table. Returning
 102	 * GNTST_okay means success and negative value means failure.
 103	 */
 104	int (*map_frames)(xen_pfn_t *frames, unsigned int nr_gframes);
 105	/*
 106	 * Release a list of frames which are mapped in map_frames for grant
 107	 * entry status.
 108	 */
 109	void (*unmap_frames)(void);
 110	/*
 111	 * Introducing a valid entry into the grant table, granting the frame of
 112	 * this grant entry to domain for accessing or transfering. Ref
 113	 * parameter is reference of this introduced grant entry, domid is id of
 114	 * granted domain, frame is the page frame to be granted, and flags is
 115	 * status of the grant entry to be updated.
 116	 */
 117	void (*update_entry)(grant_ref_t ref, domid_t domid,
 118			     unsigned long frame, unsigned flags);
 119	/*
 120	 * Stop granting a grant entry to domain for accessing. Ref parameter is
 121	 * reference of a grant entry whose grant access will be stopped,
 122	 * readonly is not in use in this function. If the grant entry is
 123	 * currently mapped for reading or writing, just return failure(==0)
 124	 * directly and don't tear down the grant access. Otherwise, stop grant
 125	 * access for this entry and return success(==1).
 126	 */
 127	int (*end_foreign_access_ref)(grant_ref_t ref, int readonly);
 128	/*
 129	 * Stop granting a grant entry to domain for transfer. Ref parameter is
 130	 * reference of a grant entry whose grant transfer will be stopped. If
 131	 * tranfer has not started, just reclaim the grant entry and return
 132	 * failure(==0). Otherwise, wait for the transfer to complete and then
 133	 * return the frame.
 134	 */
 135	unsigned long (*end_foreign_transfer_ref)(grant_ref_t ref);
 136	/*
 137	 * Query the status of a grant entry. Ref parameter is reference of
 138	 * queried grant entry, return value is the status of queried entry.
 139	 * Detailed status(writing/reading) can be gotten from the return value
 140	 * by bit operations.
 141	 */
 142	int (*query_foreign_access)(grant_ref_t ref);
 143};
 144
 145struct unmap_refs_callback_data {
 146	struct completion completion;
 147	int result;
 148};
 149
 150static const struct gnttab_ops *gnttab_interface;
 151
 152/* This reflects status of grant entries, so act as a global value. */
 153static grant_status_t *grstatus;
 154
 155static struct gnttab_free_callback *gnttab_free_callback_list;
 156
 157static int gnttab_expand(unsigned int req_entries);
 158
 159#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
 160#define SPP (PAGE_SIZE / sizeof(grant_status_t))
 161
 162static inline grant_ref_t *__gnttab_entry(grant_ref_t entry)
 163{
 164	return &gnttab_list[(entry) / RPP][(entry) % RPP];
 165}
 166/* This can be used as an l-value */
 167#define gnttab_entry(entry) (*__gnttab_entry(entry))
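/*
 * Worked example (editorial note): with 4 KiB pages and a 4-byte
 * grant_ref_t, RPP == 1024, so free-list entry 2500 lives at
 * gnttab_list[2][452] (2500 / 1024 == 2, 2500 % 1024 == 452).
 */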
 168
 169static int get_free_entries(unsigned count)
 170{
 171	unsigned long flags;
 172	int ref, rc = 0;
 173	grant_ref_t head;
 174
 175	spin_lock_irqsave(&gnttab_list_lock, flags);
 176
 177	if ((gnttab_free_count < count) &&
 178	    ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
 179		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 180		return rc;
 181	}
 182
 183	ref = head = gnttab_free_head;
 184	gnttab_free_count -= count;
 185	while (count-- > 1)
 186		head = gnttab_entry(head);
 187	gnttab_free_head = gnttab_entry(head);
 188	gnttab_entry(head) = GNTTAB_LIST_END;
 189
 190	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 191
 192	return ref;
 193}
 194
 195static void do_free_callbacks(void)
 196{
 197	struct gnttab_free_callback *callback, *next;
 198
 199	callback = gnttab_free_callback_list;
 200	gnttab_free_callback_list = NULL;
 201
 202	while (callback != NULL) {
 203		next = callback->next;
 204		if (gnttab_free_count >= callback->count) {
 205			callback->next = NULL;
 206			callback->fn(callback->arg);
 207		} else {
 208			callback->next = gnttab_free_callback_list;
 209			gnttab_free_callback_list = callback;
 210		}
 211		callback = next;
 212	}
 213}
 214
 215static inline void check_free_callbacks(void)
 216{
 217	if (unlikely(gnttab_free_callback_list))
 218		do_free_callbacks();
 219}
 220
 221static void put_free_entry(grant_ref_t ref)
 222{
 223	unsigned long flags;
 224	spin_lock_irqsave(&gnttab_list_lock, flags);
 225	gnttab_entry(ref) = gnttab_free_head;
 226	gnttab_free_head = ref;
 227	gnttab_free_count++;
 228	check_free_callbacks();
 229	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 230}
 231
 232/*
 233 * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2.
 234 * Introducing a valid entry into the grant table:
 235 *  1. Write ent->domid.
 236 *  2. Write ent->frame:
 237 *      GTF_permit_access:   Frame to which access is permitted.
 238 *      GTF_accept_transfer: Pseudo-phys frame slot being filled by new
 239 *                           frame, or zero if none.
 240 *  3. Write memory barrier (WMB).
 241 *  4. Write ent->flags, inc. valid type.
 242 */
 243static void gnttab_update_entry_v1(grant_ref_t ref, domid_t domid,
 244				   unsigned long frame, unsigned flags)
 245{
 246	gnttab_shared.v1[ref].domid = domid;
 247	gnttab_shared.v1[ref].frame = frame;
 248	wmb();	/* Hypervisor concurrent accesses. */
 249	gnttab_shared.v1[ref].flags = flags;
 250}
 251
 252static void gnttab_update_entry_v2(grant_ref_t ref, domid_t domid,
 253				   unsigned long frame, unsigned int flags)
 254{
 255	gnttab_shared.v2[ref].hdr.domid = domid;
 256	gnttab_shared.v2[ref].full_page.frame = frame;
 257	wmb();	/* Hypervisor concurrent accesses. */
 258	gnttab_shared.v2[ref].hdr.flags = GTF_permit_access | flags;
 259}
 260
 261/*
 262 * Public grant-issuing interface functions
 263 */
 264void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
 265				     unsigned long frame, int readonly)
 266{
 267	gnttab_interface->update_entry(ref, domid, frame,
 268			   GTF_permit_access | (readonly ? GTF_readonly : 0));
 269}
 270EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
 271
 272int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
 273				int readonly)
 274{
 275	int ref;
 276
 277	ref = get_free_entries(1);
 278	if (unlikely(ref < 0))
 279		return -ENOSPC;
 280
 281	gnttab_grant_foreign_access_ref(ref, domid, frame, readonly);
 282
 283	return ref;
 284}
 285EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
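/*
 * Example (editorial sketch, not part of the original file): a frontend
 * granting one of its pages to its backend. 'otherend_id' and 'page' are
 * assumed to come from the driver; error handling is abbreviated.
 *
 *	grant_ref_t ref;
 *	int err;
 *
 *	err = gnttab_grant_foreign_access(otherend_id,
 *					  xen_page_to_gfn(page), 0);
 *	if (err < 0)
 *		return err;
 *	ref = err;
 *	(advertise 'ref' to the backend, e.g. via xenstore)
 */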
 286
 287static int gnttab_query_foreign_access_v1(grant_ref_t ref)
 288{
 289	return gnttab_shared.v1[ref].flags & (GTF_reading|GTF_writing);
 290}
 291
 292static int gnttab_query_foreign_access_v2(grant_ref_t ref)
 293{
 294	return grstatus[ref] & (GTF_reading|GTF_writing);
 295}
 296
 297int gnttab_query_foreign_access(grant_ref_t ref)
 298{
 299	return gnttab_interface->query_foreign_access(ref);
 300}
 301EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
 302
 303static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
 304{
 305	u16 flags, nflags;
 306	u16 *pflags;
 307
 308	pflags = &gnttab_shared.v1[ref].flags;
 309	nflags = *pflags;
 310	do {
 311		flags = nflags;
 312		if (flags & (GTF_reading|GTF_writing))
 313			return 0;
 314	} while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
 315
 316	return 1;
 317}
 318
 319static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
 320{
 321	gnttab_shared.v2[ref].hdr.flags = 0;
 322	mb();	/* Concurrent access by hypervisor. */
 323	if (grstatus[ref] & (GTF_reading|GTF_writing)) {
 324		return 0;
 325	} else {
 326		/*
 327		 * The read of grstatus needs to have acquire semantics.
 328		 * On x86, reads already have that, and we just need to
 329		 * protect against compiler reorderings.
 330		 * On other architectures we may need a full barrier.
 331		 */
 332#ifdef CONFIG_X86
 333		barrier();
 334#else
 335		mb();
 336#endif
 337	}
 338
 339	return 1;
 340}
 341
 342static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 343{
 344	return gnttab_interface->end_foreign_access_ref(ref, readonly);
 345}
 346
 347int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
 348{
 349	if (_gnttab_end_foreign_access_ref(ref, readonly))
 350		return 1;
 351	pr_warn("WARNING: g.e. %#x still in use!\n", ref);
 352	return 0;
 353}
 354EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
 355
 356struct deferred_entry {
 357	struct list_head list;
 358	grant_ref_t ref;
 359	bool ro;
 360	uint16_t warn_delay;
 361	struct page *page;
 362};
 363static LIST_HEAD(deferred_list);
 364static void gnttab_handle_deferred(struct timer_list *);
 365static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred);
 366
 367static void gnttab_handle_deferred(struct timer_list *unused)
 368{
 369	unsigned int nr = 10;
 370	struct deferred_entry *first = NULL;
 371	unsigned long flags;
 372
 373	spin_lock_irqsave(&gnttab_list_lock, flags);
 374	while (nr--) {
 375		struct deferred_entry *entry
 376			= list_first_entry(&deferred_list,
 377					   struct deferred_entry, list);
 378
 379		if (entry == first)
 380			break;
 381		list_del(&entry->list);
 382		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 383		if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
 384			put_free_entry(entry->ref);
 385			if (entry->page) {
 386				pr_debug("freeing g.e. %#x (pfn %#lx)\n",
 387					 entry->ref, page_to_pfn(entry->page));
 388				put_page(entry->page);
 389			} else
 390				pr_info("freeing g.e. %#x\n", entry->ref);
 391			kfree(entry);
 392			entry = NULL;
 393		} else {
 394			if (!--entry->warn_delay)
 395				pr_info("g.e. %#x still pending\n", entry->ref);
 396			if (!first)
 397				first = entry;
 398		}
 399		spin_lock_irqsave(&gnttab_list_lock, flags);
 400		if (entry)
 401			list_add_tail(&entry->list, &deferred_list);
 402		else if (list_empty(&deferred_list))
 403			break;
 404	}
 405	if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
 406		deferred_timer.expires = jiffies + HZ;
 407		add_timer(&deferred_timer);
 408	}
 409	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 410}
 411
 412static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
 413				struct page *page)
 414{
 415	struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 416	const char *what = KERN_WARNING "leaking";
 417
 418	if (entry) {
 419		unsigned long flags;
 420
 421		entry->ref = ref;
 422		entry->ro = readonly;
 423		entry->page = page;
 424		entry->warn_delay = 60;
 425		spin_lock_irqsave(&gnttab_list_lock, flags);
 426		list_add_tail(&entry->list, &deferred_list);
 427		if (!timer_pending(&deferred_timer)) {
 428			deferred_timer.expires = jiffies + HZ;
 429			add_timer(&deferred_timer);
 430		}
 431		spin_unlock_irqrestore(&gnttab_list_lock, flags);
 432		what = KERN_DEBUG "deferring";
 433	}
 434	printk("%s g.e. %#x (pfn %#lx)\n",
 435	       what, ref, page ? page_to_pfn(page) : -1);
 436}
 437
 438void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
 439			       unsigned long page)
 440{
 441	if (gnttab_end_foreign_access_ref(ref, readonly)) {
 442		put_free_entry(ref);
 443		if (page != 0)
 444			put_page(virt_to_page(page));
 445	} else
 446		gnttab_add_deferred(ref, readonly,
 447				    page ? virt_to_page(page) : NULL);
 448}
 449EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
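/*
 * Example (editorial): tearing down the grant from the sketch above. If
 * the backend still has the page mapped, the ref and page are queued on
 * deferred_list and reclaimed later by gnttab_handle_deferred().
 *
 *	gnttab_end_foreign_access(ref, 0,
 *				  (unsigned long)page_address(page));
 */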
 450
 451int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
 452{
 453	int ref;
 454
 455	ref = get_free_entries(1);
 456	if (unlikely(ref < 0))
 457		return -ENOSPC;
 458	gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
 459
 460	return ref;
 461}
 462EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
 463
 464void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
 465				       unsigned long pfn)
 466{
 467	gnttab_interface->update_entry(ref, domid, pfn, GTF_accept_transfer);
 468}
 469EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
 470
 471static unsigned long gnttab_end_foreign_transfer_ref_v1(grant_ref_t ref)
 472{
 473	unsigned long frame;
 474	u16           flags;
 475	u16          *pflags;
 476
 477	pflags = &gnttab_shared.v1[ref].flags;
 478
 479	/*
 480	 * If a transfer is not even yet started, try to reclaim the grant
 481	 * reference and return failure (== 0).
 482	 */
 483	while (!((flags = *pflags) & GTF_transfer_committed)) {
 484		if (sync_cmpxchg(pflags, flags, 0) == flags)
 485			return 0;
 486		cpu_relax();
 487	}
 488
 489	/* If a transfer is in progress then wait until it is completed. */
 490	while (!(flags & GTF_transfer_completed)) {
 491		flags = *pflags;
 492		cpu_relax();
 493	}
 494
 495	rmb();	/* Read the frame number /after/ reading completion status. */
 496	frame = gnttab_shared.v1[ref].frame;
 497	BUG_ON(frame == 0);
 498
 499	return frame;
 500}
 501
 502static unsigned long gnttab_end_foreign_transfer_ref_v2(grant_ref_t ref)
 503{
 504	unsigned long frame;
 505	u16           flags;
 506	u16          *pflags;
 507
 508	pflags = &gnttab_shared.v2[ref].hdr.flags;
 509
 510	/*
 511	 * If a transfer is not even yet started, try to reclaim the grant
 512	 * reference and return failure (== 0).
 513	 */
 514	while (!((flags = *pflags) & GTF_transfer_committed)) {
 515		if (sync_cmpxchg(pflags, flags, 0) == flags)
 516			return 0;
 517		cpu_relax();
 518	}
 519
 520	/* If a transfer is in progress then wait until it is completed. */
 521	while (!(flags & GTF_transfer_completed)) {
 522		flags = *pflags;
 523		cpu_relax();
 524	}
 525
 526	rmb();  /* Read the frame number /after/ reading completion status. */
 527	frame = gnttab_shared.v2[ref].full_page.frame;
 528	BUG_ON(frame == 0);
 529
 530	return frame;
 531}
 532
 533unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
 534{
 535	return gnttab_interface->end_foreign_transfer_ref(ref);
 536}
 537EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
 538
 539unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
 540{
 541	unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
 542	put_free_entry(ref);
 543	return frame;
 544}
 545EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
 546
 547void gnttab_free_grant_reference(grant_ref_t ref)
 548{
 549	put_free_entry(ref);
 550}
 551EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
 552
 553void gnttab_free_grant_references(grant_ref_t head)
 554{
 555	grant_ref_t ref;
 556	unsigned long flags;
 557	int count = 1;
 558	if (head == GNTTAB_LIST_END)
 559		return;
 560	spin_lock_irqsave(&gnttab_list_lock, flags);
 561	ref = head;
 562	while (gnttab_entry(ref) != GNTTAB_LIST_END) {
 563		ref = gnttab_entry(ref);
 564		count++;
 565	}
 566	gnttab_entry(ref) = gnttab_free_head;
 567	gnttab_free_head = head;
 568	gnttab_free_count += count;
 569	check_free_callbacks();
 570	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 571}
 572EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
 573
 574int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
 575{
 576	int h = get_free_entries(count);
 577
 578	if (h < 0)
 579		return -ENOSPC;
 580
 581	*head = h;
 582
 583	return 0;
 584}
 585EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
 586
 587int gnttab_empty_grant_references(const grant_ref_t *private_head)
 588{
 589	return (*private_head == GNTTAB_LIST_END);
 590}
 591EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
 592
 593int gnttab_claim_grant_reference(grant_ref_t *private_head)
 594{
 595	grant_ref_t g = *private_head;
 596	if (unlikely(g == GNTTAB_LIST_END))
 597		return -ENOSPC;
 598	*private_head = gnttab_entry(g);
 599	return g;
 600}
 601EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
 602
 603void gnttab_release_grant_reference(grant_ref_t *private_head,
 604				    grant_ref_t release)
 605{
 606	gnttab_entry(release) = *private_head;
 607	*private_head = release;
 608}
 609EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
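/*
 * Example (editorial sketch): reserving a private pool of references up
 * front, as the block and net frontends do, so individual requests don't
 * have to take gnttab_list_lock.
 *
 *	grant_ref_t head;
 *	int ref;
 *
 *	if (gnttab_alloc_grant_references(16, &head) < 0)
 *		return -ENOSPC;
 *	ref = gnttab_claim_grant_reference(&head);
 *	if (ref < 0)
 *		return ref;	(pool exhausted)
 *	...
 *	gnttab_release_grant_reference(&head, ref);
 *	gnttab_free_grant_references(head);
 */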
 610
 611void gnttab_request_free_callback(struct gnttab_free_callback *callback,
 612				  void (*fn)(void *), void *arg, u16 count)
 613{
 614	unsigned long flags;
 615	struct gnttab_free_callback *cb;
 616
 617	spin_lock_irqsave(&gnttab_list_lock, flags);
 618
 619	/* Check if the callback is already on the list */
 620	cb = gnttab_free_callback_list;
 621	while (cb) {
 622		if (cb == callback)
 623			goto out;
 624		cb = cb->next;
 625	}
 626
 627	callback->fn = fn;
 628	callback->arg = arg;
 629	callback->count = count;
 630	callback->next = gnttab_free_callback_list;
 631	gnttab_free_callback_list = callback;
 632	check_free_callbacks();
 633out:
 634	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 635}
 636EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
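/*
 * Example (editorial): requesting notification once 32 entries are free.
 * Note that do_free_callbacks() runs with gnttab_list_lock held, so the
 * callback must not sleep or allocate grant references itself.
 *
 *	static struct gnttab_free_callback cb;
 *
 *	static void refs_available(void *arg)
 *	{
 *		(kick the driver to retry its allocation)
 *	}
 *
 *	gnttab_request_free_callback(&cb, refs_available, drv, 32);
 */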
 637
 638void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
 639{
 640	struct gnttab_free_callback **pcb;
 641	unsigned long flags;
 642
 643	spin_lock_irqsave(&gnttab_list_lock, flags);
 644	for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
 645		if (*pcb == callback) {
 646			*pcb = callback->next;
 647			break;
 648		}
 649	}
 650	spin_unlock_irqrestore(&gnttab_list_lock, flags);
 651}
 652EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
 653
 654static unsigned int gnttab_frames(unsigned int frames, unsigned int align)
 655{
 656	return (frames * gnttab_interface->grefs_per_grant_frame + align - 1) /
 657	       align;
 658}
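/*
 * Worked example (editorial note): with the v1 ABI,
 * grefs_per_grant_frame == XEN_PAGE_SIZE / sizeof(struct grant_entry_v1)
 * == 4096 / 8 == 512, so gnttab_frames(2, RPP) == (2 * 512 + 1023) / 1024
 * == 1: the refs of two grant frames fit in a single free-list page.
 */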
 659
 660static int grow_gnttab_list(unsigned int more_frames)
 661{
 662	unsigned int new_nr_grant_frames, extra_entries, i;
 663	unsigned int nr_glist_frames, new_nr_glist_frames;
 664	unsigned int grefs_per_frame;
 665
 666	grefs_per_frame = gnttab_interface->grefs_per_grant_frame;
 667
 668	new_nr_grant_frames = nr_grant_frames + more_frames;
 669	extra_entries = more_frames * grefs_per_frame;
 670
 671	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
 672	new_nr_glist_frames = gnttab_frames(new_nr_grant_frames, RPP);
 673	for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
 674		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
 675		if (!gnttab_list[i])
 676			goto grow_nomem;
 677	}
 678
 679
 680	for (i = grefs_per_frame * nr_grant_frames;
 681	     i < grefs_per_frame * new_nr_grant_frames - 1; i++)
 682		gnttab_entry(i) = i + 1;
 683
 684	gnttab_entry(i) = gnttab_free_head;
 685	gnttab_free_head = grefs_per_frame * nr_grant_frames;
 686	gnttab_free_count += extra_entries;
 687
 688	nr_grant_frames = new_nr_grant_frames;
 689
 690	check_free_callbacks();
 691
 692	return 0;
 693
 694grow_nomem:
 695	while (i-- > nr_glist_frames)
 696		free_page((unsigned long) gnttab_list[i]);
 697	return -ENOMEM;
 698}
 699
 700static unsigned int __max_nr_grant_frames(void)
 701{
 702	struct gnttab_query_size query;
 703	int rc;
 704
 705	query.dom = DOMID_SELF;
 706
 707	rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
 708	if ((rc < 0) || (query.status != GNTST_okay))
 709		return 4; /* Legacy max supported number of frames */
 710
 711	return query.max_nr_frames;
 712}
 713
 714unsigned int gnttab_max_grant_frames(void)
 715{
 716	unsigned int xen_max = __max_nr_grant_frames();
 717	static unsigned int boot_max_nr_grant_frames;
 718
 719	/* First time, initialize it properly. */
 720	if (!boot_max_nr_grant_frames)
 721		boot_max_nr_grant_frames = __max_nr_grant_frames();
 722
 723	if (xen_max > boot_max_nr_grant_frames)
 724		return boot_max_nr_grant_frames;
 725	return xen_max;
 726}
 727EXPORT_SYMBOL_GPL(gnttab_max_grant_frames);
 728
 729int gnttab_setup_auto_xlat_frames(phys_addr_t addr)
 730{
 731	xen_pfn_t *pfn;
 732	unsigned int max_nr_gframes = __max_nr_grant_frames();
 733	unsigned int i;
 734	void *vaddr;
 735
 736	if (xen_auto_xlat_grant_frames.count)
 737		return -EINVAL;
 738
 739	vaddr = xen_remap(addr, XEN_PAGE_SIZE * max_nr_gframes);
 740	if (vaddr == NULL) {
 741		pr_warn("Failed to ioremap gnttab share frames (addr=%pa)!\n",
 742			&addr);
 743		return -ENOMEM;
 744	}
 745	pfn = kcalloc(max_nr_gframes, sizeof(pfn[0]), GFP_KERNEL);
 746	if (!pfn) {
 747		xen_unmap(vaddr);
 748		return -ENOMEM;
 749	}
 750	for (i = 0; i < max_nr_gframes; i++)
 751		pfn[i] = XEN_PFN_DOWN(addr) + i;
 752
 753	xen_auto_xlat_grant_frames.vaddr = vaddr;
 754	xen_auto_xlat_grant_frames.pfn = pfn;
 755	xen_auto_xlat_grant_frames.count = max_nr_gframes;
 756
 757	return 0;
 758}
 759EXPORT_SYMBOL_GPL(gnttab_setup_auto_xlat_frames);
 760
 761void gnttab_free_auto_xlat_frames(void)
 762{
 763	if (!xen_auto_xlat_grant_frames.count)
 764		return;
 765	kfree(xen_auto_xlat_grant_frames.pfn);
 766	xen_unmap(xen_auto_xlat_grant_frames.vaddr);
 767
 768	xen_auto_xlat_grant_frames.pfn = NULL;
 769	xen_auto_xlat_grant_frames.count = 0;
 770	xen_auto_xlat_grant_frames.vaddr = NULL;
 771}
 772EXPORT_SYMBOL_GPL(gnttab_free_auto_xlat_frames);
 773
 774int gnttab_pages_set_private(int nr_pages, struct page **pages)
 775{
 776	int i;
 777
 778	for (i = 0; i < nr_pages; i++) {
 779#if BITS_PER_LONG < 64
 780		struct xen_page_foreign *foreign;
 781
 782		foreign = kzalloc(sizeof(*foreign), GFP_KERNEL);
 783		if (!foreign)
 784			return -ENOMEM;
 785
 786		set_page_private(pages[i], (unsigned long)foreign);
 787#endif
 788		SetPagePrivate(pages[i]);
 789	}
 790
 791	return 0;
 792}
 793EXPORT_SYMBOL_GPL(gnttab_pages_set_private);
 794
 795/**
 796 * gnttab_alloc_pages - alloc pages suitable for mapping grants into
 797 * @nr_pages: number of pages to alloc
 798 * @pages: returns the pages
 799 */
 800int gnttab_alloc_pages(int nr_pages, struct page **pages)
 801{
 802	int ret;
 803
 804	ret = xen_alloc_unpopulated_pages(nr_pages, pages);
 805	if (ret < 0)
 806		return ret;
 807
 808	ret = gnttab_pages_set_private(nr_pages, pages);
 809	if (ret < 0)
 810		gnttab_free_pages(nr_pages, pages);
 811
 812	return ret;
 813}
 814EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
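/*
 * Example (editorial sketch): a backend obtaining pages to map foreign
 * grants into, and releasing them again when done.
 *
 *	struct page *pages[8];
 *
 *	if (gnttab_alloc_pages(8, pages))
 *		return -ENOMEM;
 *	(map foreign grants into 'pages' via gnttab_map_refs())
 *	gnttab_free_pages(8, pages);
 */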
 815
 816void gnttab_pages_clear_private(int nr_pages, struct page **pages)
 817{
 818	int i;
 819
 820	for (i = 0; i < nr_pages; i++) {
 821		if (PagePrivate(pages[i])) {
 822#if BITS_PER_LONG < 64
 823			kfree((void *)page_private(pages[i]));
 824#endif
 825			ClearPagePrivate(pages[i]);
 826		}
 827	}
 828}
 829EXPORT_SYMBOL_GPL(gnttab_pages_clear_private);
 830
 831/**
 832 * gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
 833 * @nr_pages: number of pages to free
 834 * @pages: the pages
 835 */
 836void gnttab_free_pages(int nr_pages, struct page **pages)
 837{
 838	gnttab_pages_clear_private(nr_pages, pages);
 839	xen_free_unpopulated_pages(nr_pages, pages);
 840}
 841EXPORT_SYMBOL_GPL(gnttab_free_pages);
 842
 843#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
 844/**
 845 * gnttab_dma_alloc_pages - alloc DMAable pages suitable for mapping grants into
 846 * @args: arguments to the function
 847 */
 848int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args)
 849{
 850	unsigned long pfn, start_pfn;
 851	size_t size;
 852	int i, ret;
 853
 854	size = args->nr_pages << PAGE_SHIFT;
 855	if (args->coherent)
 856		args->vaddr = dma_alloc_coherent(args->dev, size,
 857						 &args->dev_bus_addr,
 858						 GFP_KERNEL | __GFP_NOWARN);
 859	else
 860		args->vaddr = dma_alloc_wc(args->dev, size,
 861					   &args->dev_bus_addr,
 862					   GFP_KERNEL | __GFP_NOWARN);
 863	if (!args->vaddr) {
 864		pr_debug("Failed to allocate DMA buffer of size %zu\n", size);
 865		return -ENOMEM;
 866	}
 867
 868	start_pfn = __phys_to_pfn(args->dev_bus_addr);
 869	for (pfn = start_pfn, i = 0; pfn < start_pfn + args->nr_pages;
 870			pfn++, i++) {
 871		struct page *page = pfn_to_page(pfn);
 872
 873		args->pages[i] = page;
 874		args->frames[i] = xen_page_to_gfn(page);
 875		xenmem_reservation_scrub_page(page);
 876	}
 877
 878	xenmem_reservation_va_mapping_reset(args->nr_pages, args->pages);
 879
 880	ret = xenmem_reservation_decrease(args->nr_pages, args->frames);
 881	if (ret != args->nr_pages) {
 882		pr_debug("Failed to decrease reservation for DMA buffer\n");
 883		ret = -EFAULT;
 884		goto fail;
 885	}
 886
 887	ret = gnttab_pages_set_private(args->nr_pages, args->pages);
 888	if (ret < 0)
 889		goto fail;
 890
 891	return 0;
 892
 893fail:
 894	gnttab_dma_free_pages(args);
 895	return ret;
 896}
 897EXPORT_SYMBOL_GPL(gnttab_dma_alloc_pages);
 898
 899/**
 900 * gnttab_dma_free_pages - free DMAable pages
 901 * @args: arguments to the function
 902 */
 903int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args)
 904{
 905	size_t size;
 906	int i, ret;
 907
 908	gnttab_pages_clear_private(args->nr_pages, args->pages);
 909
 910	for (i = 0; i < args->nr_pages; i++)
 911		args->frames[i] = page_to_xen_pfn(args->pages[i]);
 912
 913	ret = xenmem_reservation_increase(args->nr_pages, args->frames);
 914	if (ret != args->nr_pages) {
 915		pr_debug("Failed to increase reservation for DMA buffer\n");
 916		ret = -EFAULT;
 917	} else {
 918		ret = 0;
 919	}
 920
 921	xenmem_reservation_va_mapping_update(args->nr_pages, args->pages,
 922					     args->frames);
 923
 924	size = args->nr_pages << PAGE_SHIFT;
 925	if (args->coherent)
 926		dma_free_coherent(args->dev, size,
 927				  args->vaddr, args->dev_bus_addr);
 928	else
 929		dma_free_wc(args->dev, size,
 930			    args->vaddr, args->dev_bus_addr);
 931	return ret;
 932}
 933EXPORT_SYMBOL_GPL(gnttab_dma_free_pages);
 934#endif
 935
 936/* Handling of paged out grant targets (GNTST_eagain) */
 937#define MAX_DELAY 256
 938static inline void
 939gnttab_retry_eagain_gop(unsigned int cmd, void *gop, int16_t *status,
 940						const char *func)
 941{
 942	unsigned delay = 1;
 943
 944	do {
 945		BUG_ON(HYPERVISOR_grant_table_op(cmd, gop, 1));
 946		if (*status == GNTST_eagain)
 947			msleep(delay++);
 948	} while ((*status == GNTST_eagain) && (delay < MAX_DELAY));
 949
 950	if (delay >= MAX_DELAY) {
 951		pr_err("%s: %s eagain grant\n", func, current->comm);
 952		*status = GNTST_bad_page;
 953	}
 954}
 955
 956void gnttab_batch_map(struct gnttab_map_grant_ref *batch, unsigned count)
 957{
 958	struct gnttab_map_grant_ref *op;
 959
 960	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, batch, count))
 961		BUG();
 962	for (op = batch; op < batch + count; op++)
 963		if (op->status == GNTST_eagain)
 964			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref, op,
 965						&op->status, __func__);
 966}
 967EXPORT_SYMBOL_GPL(gnttab_batch_map);
 968
 969void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
 970{
 971	struct gnttab_copy *op;
 972
 973	if (HYPERVISOR_grant_table_op(GNTTABOP_copy, batch, count))
 974		BUG();
 975	for (op = batch; op < batch + count; op++)
 976		if (op->status == GNTST_eagain)
 977			gnttab_retry_eagain_gop(GNTTABOP_copy, op,
 978						&op->status, __func__);
 979}
 980EXPORT_SYMBOL_GPL(gnttab_batch_copy);
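/*
 * Example (editorial sketch): copying 'len' bytes out of a foreign grant
 * into a local frame; gnttab_batch_copy() retries GNTST_eagain
 * transparently. Field names follow Xen's public grant_table.h.
 *
 *	struct gnttab_copy op = {
 *		.source.u.ref	= gref,
 *		.source.domid	= otherend_id,
 *		.dest.u.gmfn	= local_gfn,
 *		.dest.domid	= DOMID_SELF,
 *		.len		= len,
 *		.flags		= GNTCOPY_source_gref,
 *	};
 *
 *	gnttab_batch_copy(&op, 1);
 *	if (op.status != GNTST_okay)
 *		(handle the error)
 */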
 981
 982void gnttab_foreach_grant_in_range(struct page *page,
 983				   unsigned int offset,
 984				   unsigned int len,
 985				   xen_grant_fn_t fn,
 986				   void *data)
 987{
 988	unsigned int goffset;
 989	unsigned int glen;
 990	unsigned long xen_pfn;
 991
 992	len = min_t(unsigned int, PAGE_SIZE - offset, len);
 993	goffset = xen_offset_in_page(offset);
 994
 995	xen_pfn = page_to_xen_pfn(page) + XEN_PFN_DOWN(offset);
 996
 997	while (len) {
 998		glen = min_t(unsigned int, XEN_PAGE_SIZE - goffset, len);
 999		fn(pfn_to_gfn(xen_pfn), goffset, glen, data);
1000
1001		goffset = 0;
1002		xen_pfn++;
1003		len -= glen;
1004	}
1005}
1006EXPORT_SYMBOL_GPL(gnttab_foreach_grant_in_range);
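/*
 * Example (editorial): counting the grant-sized chunks a buffer spans;
 * the callback signature matches xen_grant_fn_t.
 *
 *	static void count_chunk(unsigned long gfn, unsigned int offset,
 *				unsigned int len, void *data)
 *	{
 *		(*(unsigned int *)data)++;
 *	}
 *
 *	unsigned int chunks = 0;
 *
 *	gnttab_foreach_grant_in_range(page, offset, len, count_chunk,
 *				      &chunks);
 */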
1007
1008void gnttab_foreach_grant(struct page **pages,
1009			  unsigned int nr_grefs,
1010			  xen_grant_fn_t fn,
1011			  void *data)
1012{
1013	unsigned int goffset = 0;
1014	unsigned long xen_pfn = 0;
1015	unsigned int i;
1016
1017	for (i = 0; i < nr_grefs; i++) {
1018		if ((i % XEN_PFN_PER_PAGE) == 0) {
1019			xen_pfn = page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
1020			goffset = 0;
1021		}
1022
1023		fn(pfn_to_gfn(xen_pfn), goffset, XEN_PAGE_SIZE, data);
1024
1025		goffset += XEN_PAGE_SIZE;
1026		xen_pfn++;
1027	}
1028}
1029
1030int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
1031		    struct gnttab_map_grant_ref *kmap_ops,
1032		    struct page **pages, unsigned int count)
1033{
1034	int i, ret;
1035
1036	ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
1037	if (ret)
1038		return ret;
1039
1040	for (i = 0; i < count; i++) {
1041		switch (map_ops[i].status) {
1042		case GNTST_okay:
1043		{
1044			struct xen_page_foreign *foreign;
1045
1046			SetPageForeign(pages[i]);
1047			foreign = xen_page_foreign(pages[i]);
1048			foreign->domid = map_ops[i].dom;
1049			foreign->gref = map_ops[i].ref;
1050			break;
1051		}
1052
1053		case GNTST_no_device_space:
1054			pr_warn_ratelimited("maptrack limit reached, can't map all guest pages\n");
1055			break;
1056
1057		case GNTST_eagain:
1058			/* Retry eagain maps */
1059			gnttab_retry_eagain_gop(GNTTABOP_map_grant_ref,
1060						map_ops + i,
1061						&map_ops[i].status, __func__);
1062			/* Test status in next loop iteration. */
1063			i--;
1064			break;
1065
1066		default:
1067			break;
1068		}
1069	}
1070
1071	return set_foreign_p2m_mapping(map_ops, kmap_ops, pages, count);
1072}
1073EXPORT_SYMBOL_GPL(gnttab_map_refs);
1074
1075int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
1076		      struct gnttab_unmap_grant_ref *kunmap_ops,
1077		      struct page **pages, unsigned int count)
1078{
1079	unsigned int i;
1080	int ret;
1081
1082	ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
1083	if (ret)
1084		return ret;
1085
1086	for (i = 0; i < count; i++)
1087		ClearPageForeign(pages[i]);
1088
1089	return clear_foreign_p2m_mapping(unmap_ops, kunmap_ops, pages, count);
1090}
1091EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
1092
1093#define GNTTAB_UNMAP_REFS_DELAY 5
1094
1095static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
1096
1097static void gnttab_unmap_work(struct work_struct *work)
1098{
1099	struct gntab_unmap_queue_data
1100		*unmap_data = container_of(work, 
1101					   struct gntab_unmap_queue_data,
1102					   gnttab_work.work);
1103	if (unmap_data->age != UINT_MAX)
1104		unmap_data->age++;
1105	__gnttab_unmap_refs_async(unmap_data);
1106}
1107
1108static void __gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1109{
1110	int ret;
1111	int pc;
1112
1113	for (pc = 0; pc < item->count; pc++) {
1114		if (page_count(item->pages[pc]) > 1) {
1115			unsigned long delay = GNTTAB_UNMAP_REFS_DELAY * (item->age + 1);
1116			schedule_delayed_work(&item->gnttab_work,
1117					      msecs_to_jiffies(delay));
1118			return;
1119		}
1120	}
1121
1122	ret = gnttab_unmap_refs(item->unmap_ops, item->kunmap_ops,
1123				item->pages, item->count);
1124	item->done(ret, item);
1125}
1126
1127void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
1128{
1129	INIT_DELAYED_WORK(&item->gnttab_work, gnttab_unmap_work);
1130	item->age = 0;
1131
1132	__gnttab_unmap_refs_async(item);
1133}
1134EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
1135
1136static void unmap_refs_callback(int result,
1137		struct gntab_unmap_queue_data *data)
1138{
1139	struct unmap_refs_callback_data *d = data->data;
1140
1141	d->result = result;
1142	complete(&d->completion);
1143}
1144
1145int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
1146{
1147	struct unmap_refs_callback_data data;
1148
1149	init_completion(&data.completion);
1150	item->data = &data;
1151	item->done = &unmap_refs_callback;
1152	gnttab_unmap_refs_async(item);
1153	wait_for_completion(&data.completion);
1154
1155	return data.result;
1156}
1157EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
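/*
 * Example (editorial sketch, assuming the gntab_unmap_queue_data layout
 * from xen/grant_table.h): synchronously unmapping a batch once all
 * extra page references have been dropped.
 *
 *	struct gntab_unmap_queue_data unmap_data = {
 *		.unmap_ops	= unmap_ops,
 *		.kunmap_ops	= NULL,
 *		.pages		= pages,
 *		.count		= count,
 *	};
 *
 *	ret = gnttab_unmap_refs_sync(&unmap_data);
 */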
1158
1159static unsigned int nr_status_frames(unsigned int nr_grant_frames)
1160{
1161	return gnttab_frames(nr_grant_frames, SPP);
1162}
1163
1164static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
1165{
1166	int rc;
1167
1168	rc = arch_gnttab_map_shared(frames, nr_gframes,
1169				    gnttab_max_grant_frames(),
1170				    &gnttab_shared.addr);
1171	BUG_ON(rc);
1172
1173	return 0;
1174}
1175
1176static void gnttab_unmap_frames_v1(void)
1177{
1178	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1179}
1180
1181static int gnttab_map_frames_v2(xen_pfn_t *frames, unsigned int nr_gframes)
1182{
1183	uint64_t *sframes;
1184	unsigned int nr_sframes;
1185	struct gnttab_get_status_frames getframes;
1186	int rc;
1187
1188	nr_sframes = nr_status_frames(nr_gframes);
1189
1190	/* No need for kzalloc as the array is initialized by the following
1191	 * GNTTABOP_get_status_frames hypercall.
1192	 */
1193	sframes = kmalloc_array(nr_sframes, sizeof(uint64_t), GFP_ATOMIC);
1194	if (!sframes)
1195		return -ENOMEM;
1196
1197	getframes.dom        = DOMID_SELF;
1198	getframes.nr_frames  = nr_sframes;
1199	set_xen_guest_handle(getframes.frame_list, sframes);
1200
1201	rc = HYPERVISOR_grant_table_op(GNTTABOP_get_status_frames,
1202				       &getframes, 1);
1203	if (rc == -ENOSYS) {
1204		kfree(sframes);
1205		return -ENOSYS;
1206	}
1207
1208	BUG_ON(rc || getframes.status);
1209
1210	rc = arch_gnttab_map_status(sframes, nr_sframes,
1211				    nr_status_frames(gnttab_max_grant_frames()),
1212				    &grstatus);
1213	BUG_ON(rc);
1214	kfree(sframes);
1215
1216	rc = arch_gnttab_map_shared(frames, nr_gframes,
1217				    gnttab_max_grant_frames(),
1218				    &gnttab_shared.addr);
1219	BUG_ON(rc);
1220
1221	return 0;
1222}
1223
1224static void gnttab_unmap_frames_v2(void)
1225{
1226	arch_gnttab_unmap(gnttab_shared.addr, nr_grant_frames);
1227	arch_gnttab_unmap(grstatus, nr_status_frames(nr_grant_frames));
1228}
1229
1230static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
1231{
1232	struct gnttab_setup_table setup;
1233	xen_pfn_t *frames;
1234	unsigned int nr_gframes = end_idx + 1;
1235	int rc;
1236
1237	if (xen_feature(XENFEAT_auto_translated_physmap)) {
1238		struct xen_add_to_physmap xatp;
1239		unsigned int i = end_idx;
1240		rc = 0;
1241		BUG_ON(xen_auto_xlat_grant_frames.count < nr_gframes);
1242		/*
1243		 * Loop backwards, so that the first hypercall has the largest
1244		 * index, ensuring that the table will grow only once.
1245		 */
1246		do {
1247			xatp.domid = DOMID_SELF;
1248			xatp.idx = i;
1249			xatp.space = XENMAPSPACE_grant_table;
1250			xatp.gpfn = xen_auto_xlat_grant_frames.pfn[i];
1251			rc = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp);
1252			if (rc != 0) {
1253				pr_warn("grant table add_to_physmap failed, err=%d\n",
1254					rc);
1255				break;
1256			}
1257		} while (i-- > start_idx);
1258
1259		return rc;
1260	}
1261
1262	/* No need for kzalloc as the array is initialized by the following
1263	 * GNTTABOP_setup_table hypercall.
1264	 */
1265	frames = kmalloc_array(nr_gframes, sizeof(unsigned long), GFP_ATOMIC);
1266	if (!frames)
1267		return -ENOMEM;
1268
1269	setup.dom        = DOMID_SELF;
1270	setup.nr_frames  = nr_gframes;
1271	set_xen_guest_handle(setup.frame_list, frames);
1272
1273	rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
1274	if (rc == -ENOSYS) {
1275		kfree(frames);
1276		return -ENOSYS;
1277	}
1278
1279	BUG_ON(rc || setup.status);
1280
1281	rc = gnttab_interface->map_frames(frames, nr_gframes);
1282
1283	kfree(frames);
1284
1285	return rc;
1286}
1287
1288static const struct gnttab_ops gnttab_v1_ops = {
1289	.version			= 1,
1290	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1291					  sizeof(struct grant_entry_v1),
1292	.map_frames			= gnttab_map_frames_v1,
1293	.unmap_frames			= gnttab_unmap_frames_v1,
1294	.update_entry			= gnttab_update_entry_v1,
1295	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v1,
1296	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v1,
1297	.query_foreign_access		= gnttab_query_foreign_access_v1,
1298};
1299
1300static const struct gnttab_ops gnttab_v2_ops = {
1301	.version			= 2,
1302	.grefs_per_grant_frame		= XEN_PAGE_SIZE /
1303					  sizeof(union grant_entry_v2),
1304	.map_frames			= gnttab_map_frames_v2,
1305	.unmap_frames			= gnttab_unmap_frames_v2,
1306	.update_entry			= gnttab_update_entry_v2,
1307	.end_foreign_access_ref		= gnttab_end_foreign_access_ref_v2,
1308	.end_foreign_transfer_ref	= gnttab_end_foreign_transfer_ref_v2,
1309	.query_foreign_access		= gnttab_query_foreign_access_v2,
1310};
1311
1312static bool gnttab_need_v2(void)
1313{
1314#ifdef CONFIG_X86
1315	uint32_t base, width;
1316
1317	if (xen_pv_domain()) {
1318		base = xen_cpuid_base();
1319		if (cpuid_eax(base) < 5)
1320			return false;	/* Information not available, use V1. */
1321		width = cpuid_ebx(base + 5) &
1322			XEN_CPUID_MACHINE_ADDRESS_WIDTH_MASK;
1323		return width > 32 + PAGE_SHIFT;
1324	}
1325#endif
1326	return !!(max_possible_pfn >> 32);
1327}
1328
1329static void gnttab_request_version(void)
1330{
1331	long rc;
1332	struct gnttab_set_version gsv;
1333
1334	if (gnttab_need_v2())
1335		gsv.version = 2;
1336	else
1337		gsv.version = 1;
1338
1339	/* Boot parameter overrides automatic selection. */
1340	if (xen_gnttab_version >= 1 && xen_gnttab_version <= 2)
1341		gsv.version = xen_gnttab_version;
1342
1343	rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
1344	if (rc == 0 && gsv.version == 2)
1345		gnttab_interface = &gnttab_v2_ops;
1346	else
1347		gnttab_interface = &gnttab_v1_ops;
1348	pr_info("Grant tables using version %d layout\n",
1349		gnttab_interface->version);
1350}
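/*
 * Note (editorial): the module parameter declared near the top of this
 * file lets the automatic choice be overridden from the kernel command
 * line, e.g. "grant_table.version=1" (assuming the built-in object's
 * KBUILD_MODNAME is "grant_table").
 */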
1351
1352static int gnttab_setup(void)
1353{
1354	unsigned int max_nr_gframes;
1355
1356	max_nr_gframes = gnttab_max_grant_frames();
1357	if (max_nr_gframes < nr_grant_frames)
1358		return -ENOSYS;
1359
1360	if (xen_feature(XENFEAT_auto_translated_physmap) && gnttab_shared.addr == NULL) {
1361		gnttab_shared.addr = xen_auto_xlat_grant_frames.vaddr;
1362		if (gnttab_shared.addr == NULL) {
1363			pr_warn("gnttab share frames is not mapped!\n");
1364			return -ENOMEM;
1365		}
1366	}
1367	return gnttab_map(0, nr_grant_frames - 1);
1368}
1369
1370int gnttab_resume(void)
1371{
1372	gnttab_request_version();
1373	return gnttab_setup();
1374}
1375
1376int gnttab_suspend(void)
1377{
1378	if (!xen_feature(XENFEAT_auto_translated_physmap))
1379		gnttab_interface->unmap_frames();
1380	return 0;
1381}
1382
1383static int gnttab_expand(unsigned int req_entries)
1384{
1385	int rc;
1386	unsigned int cur, extra;
1387
1388	cur = nr_grant_frames;
1389	extra = ((req_entries + gnttab_interface->grefs_per_grant_frame - 1) /
1390		 gnttab_interface->grefs_per_grant_frame);
1391	if (cur + extra > gnttab_max_grant_frames()) {
1392		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached"
1393				    " cur=%u extra=%u limit=%u"
1394				    " gnttab_free_count=%u req_entries=%u\n",
1395				    cur, extra, gnttab_max_grant_frames(),
1396				    gnttab_free_count, req_entries);
1397		return -ENOSPC;
1398	}
1399
1400	rc = gnttab_map(cur, cur + extra - 1);
1401	if (rc == 0)
1402		rc = grow_gnttab_list(extra);
1403
1404	return rc;
1405}
1406
1407int gnttab_init(void)
1408{
1409	int i;
1410	unsigned long max_nr_grant_frames;
1411	unsigned int max_nr_glist_frames, nr_glist_frames;
1412	unsigned int nr_init_grefs;
1413	int ret;
1414
1415	gnttab_request_version();
1416	max_nr_grant_frames = gnttab_max_grant_frames();
1417	nr_grant_frames = 1;
1418
1419	/* Determine the maximum number of frames required for the
1420	 * grant reference free list on the current hypervisor.
1421	 */
1422	max_nr_glist_frames = (max_nr_grant_frames *
1423			       gnttab_interface->grefs_per_grant_frame / RPP);
1424
1425	gnttab_list = kmalloc_array(max_nr_glist_frames,
1426				    sizeof(grant_ref_t *),
1427				    GFP_KERNEL);
1428	if (gnttab_list == NULL)
1429		return -ENOMEM;
1430
1431	nr_glist_frames = gnttab_frames(nr_grant_frames, RPP);
1432	for (i = 0; i < nr_glist_frames; i++) {
1433		gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
1434		if (gnttab_list[i] == NULL) {
1435			ret = -ENOMEM;
1436			goto ini_nomem;
1437		}
1438	}
1439
1440	ret = arch_gnttab_init(max_nr_grant_frames,
1441			       nr_status_frames(max_nr_grant_frames));
1442	if (ret < 0)
1443		goto ini_nomem;
1444
1445	if (gnttab_setup() < 0) {
1446		ret = -ENODEV;
1447		goto ini_nomem;
1448	}
1449
1450	nr_init_grefs = nr_grant_frames *
1451			gnttab_interface->grefs_per_grant_frame;
1452
1453	for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
1454		gnttab_entry(i) = i + 1;
1455
1456	gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
1457	gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
1458	gnttab_free_head  = NR_RESERVED_ENTRIES;
1459
1460	printk("Grant table initialized\n");
1461	return 0;
1462
1463 ini_nomem:
1464	for (i--; i >= 0; i--)
1465		free_page((unsigned long)gnttab_list[i]);
1466	kfree(gnttab_list);
1467	return ret;
1468}
1469EXPORT_SYMBOL_GPL(gnttab_init);
1470
1471static int __gnttab_init(void)
1472{
1473	if (!xen_domain())
1474		return -ENODEV;
1475
1476	/* Delay grant-table initialization in the PV on HVM case */
1477	if (xen_hvm_domain() && !xen_pvh_domain())
1478		return 0;
1479
1480	return gnttab_init();
1481}
1482/* Starts after core_initcall so that xen_pvh_gnttab_setup can be called
1483 * beforehand to initialize xen_auto_xlat_grant_frames. */
1484core_initcall_sync(__gnttab_init);