arch/powerpc/kernel/iommu.c (v6.2)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
   4 * 
   5 * Rewrite, cleanup, new allocation schemes, virtual merging: 
   6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
   7 *               and  Ben. Herrenschmidt, IBM Corporation
   8 *
   9 * Dynamic DMA mapping support, bus-independent parts.
  10 */
  11
  12
  13#include <linux/init.h>
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <linux/mm.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/bitmap.h>
  21#include <linux/iommu-helper.h>
  22#include <linux/crash_dump.h>
  23#include <linux/hash.h>
  24#include <linux/fault-inject.h>
  25#include <linux/pci.h>
  26#include <linux/iommu.h>
  27#include <linux/sched.h>
  28#include <linux/debugfs.h>
  29#include <asm/io.h>
  30#include <asm/iommu.h>
  31#include <asm/pci-bridge.h>
  32#include <asm/machdep.h>
  33#include <asm/kdump.h>
  34#include <asm/fadump.h>
  35#include <asm/vio.h>
  36#include <asm/tce.h>
  37#include <asm/mmu_context.h>
  38
  39#define DBG(...)
  40
  41#ifdef CONFIG_IOMMU_DEBUGFS
  42static int iommu_debugfs_weight_get(void *data, u64 *val)
  43{
  44	struct iommu_table *tbl = data;
  45	*val = bitmap_weight(tbl->it_map, tbl->it_size);
  46	return 0;
  47}
  48DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
  49
  50static void iommu_debugfs_add(struct iommu_table *tbl)
  51{
  52	char name[10];
  53	struct dentry *liobn_entry;
  54
  55	sprintf(name, "%08lx", tbl->it_index);
  56	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
  57
  58	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
  59	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
  60	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
  61	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
  62	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
  63	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
  64	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
  65}
  66
  67static void iommu_debugfs_del(struct iommu_table *tbl)
  68{
  69	char name[10];
  70	struct dentry *liobn_entry;
  71
  72	sprintf(name, "%08lx", tbl->it_index);
  73	liobn_entry = debugfs_lookup(name, iommu_debugfs_dir);
  74	debugfs_remove(liobn_entry);
  75}
  76#else
  77static void iommu_debugfs_add(struct iommu_table *tbl){}
  78static void iommu_debugfs_del(struct iommu_table *tbl){}
  79#endif
  80
  81static int novmerge;
  82
  83static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
  84
  85static int __init setup_iommu(char *str)
  86{
  87	if (!strcmp(str, "novmerge"))
  88		novmerge = 1;
  89	else if (!strcmp(str, "vmerge"))
  90		novmerge = 0;
  91	return 1;
  92}
  93
  94__setup("iommu=", setup_iommu);
  95
  96static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
  97
  98/*
  99 * We precalculate the hash to avoid doing it on every allocation.
 100 *
 101 * The hash is important to spread CPUs across all the pools. For example,
 102 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 103 * with 4 pools all primary threads would map to the same pool.
 104 */
 105static int __init setup_iommu_pool_hash(void)
 106{
 107	unsigned int i;
 108
 109	for_each_possible_cpu(i)
 110		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
 111
 112	return 0;
 113}
 114subsys_initcall(setup_iommu_pool_hash);
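/*
 * Editorial note, not part of the original file: with four pools, a plain
 * "cpu & (nr_pools - 1)" would send the primary threads of a 4-way SMT
 * machine (CPUs 0, 4, 8, ...) all to pool 0, since 0 & 3, 4 & 3 and 8 & 3
 * are all zero.  Hashing the CPU number first spreads those threads across
 * the pools.
 */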
 115
 116#ifdef CONFIG_FAIL_IOMMU
 117
 118static DECLARE_FAULT_ATTR(fail_iommu);
 119
 120static int __init setup_fail_iommu(char *str)
 121{
 122	return setup_fault_attr(&fail_iommu, str);
 123}
 124__setup("fail_iommu=", setup_fail_iommu);
 125
 126static bool should_fail_iommu(struct device *dev)
 127{
 128	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
 129}
 130
 131static int __init fail_iommu_debugfs(void)
 132{
 133	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
 134						       NULL, &fail_iommu);
 135
 136	return PTR_ERR_OR_ZERO(dir);
 137}
 138late_initcall(fail_iommu_debugfs);
 139
 140static ssize_t fail_iommu_show(struct device *dev,
 141			       struct device_attribute *attr, char *buf)
 142{
 143	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
 144}
 145
 146static ssize_t fail_iommu_store(struct device *dev,
 147				struct device_attribute *attr, const char *buf,
 148				size_t count)
 149{
 150	int i;
 151
 152	if (count > 0 && sscanf(buf, "%d", &i) > 0)
 153		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
 154
 155	return count;
 156}
 157
 158static DEVICE_ATTR_RW(fail_iommu);
 159
 160static int fail_iommu_bus_notify(struct notifier_block *nb,
 161				 unsigned long action, void *data)
 162{
 163	struct device *dev = data;
 164
 165	if (action == BUS_NOTIFY_ADD_DEVICE) {
 166		if (device_create_file(dev, &dev_attr_fail_iommu))
 167			pr_warn("Unable to create IOMMU fault injection sysfs "
 168				"entries\n");
 169	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
 170		device_remove_file(dev, &dev_attr_fail_iommu);
 171	}
 172
 173	return 0;
 174}
 175
 176static struct notifier_block fail_iommu_bus_notifier = {
 177	.notifier_call = fail_iommu_bus_notify
 178};
 179
 180static int __init fail_iommu_setup(void)
 181{
 182#ifdef CONFIG_PCI
 183	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
 184#endif
 185#ifdef CONFIG_IBMVIO
 186	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
 187#endif
 188
 189	return 0;
 190}
 191/*
 192 * Must execute after PCI and VIO subsystem have initialised but before
 193 * devices are probed.
 194 */
 195arch_initcall(fail_iommu_setup);
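/*
 * Editorial note, not part of the original file: with CONFIG_FAIL_IOMMU the
 * injection is driven from user space.  A sketch, assuming the standard
 * fault-injection debugfs knobs and the per-device sysfs attribute created
 * by the bus notifier above (device path and values are examples only):
 *
 *   echo 1   > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 *   echo 100 > /sys/kernel/debug/fail_iommu/probability
 *   echo -1  > /sys/kernel/debug/fail_iommu/times
 */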
 196#else
 197static inline bool should_fail_iommu(struct device *dev)
 198{
 199	return false;
 200}
 201#endif
 202
 203static unsigned long iommu_range_alloc(struct device *dev,
 204				       struct iommu_table *tbl,
 205                                       unsigned long npages,
 206                                       unsigned long *handle,
 207                                       unsigned long mask,
 208                                       unsigned int align_order)
 209{ 
 210	unsigned long n, end, start;
 211	unsigned long limit;
 212	int largealloc = npages > 15;
 213	int pass = 0;
 214	unsigned long align_mask;
 215	unsigned long flags;
 216	unsigned int pool_nr;
 217	struct iommu_pool *pool;
 218
 219	align_mask = (1ull << align_order) - 1;
 220
 221	/* This allocator was derived from x86_64's bit string search */
 222
 223	/* Sanity check */
 224	if (unlikely(npages == 0)) {
 225		if (printk_ratelimit())
 226			WARN_ON(1);
 227		return DMA_MAPPING_ERROR;
 228	}
 229
 230	if (should_fail_iommu(dev))
 231		return DMA_MAPPING_ERROR;
 232
 233	/*
 234	 * We don't need to disable preemption here because any CPU can
 235	 * safely use any IOMMU pool.
 236	 */
 237	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 238
 239	if (largealloc)
 240		pool = &(tbl->large_pool);
 241	else
 242		pool = &(tbl->pools[pool_nr]);
 243
 244	spin_lock_irqsave(&(pool->lock), flags);
 245
 246again:
 247	if ((pass == 0) && handle && *handle &&
 248	    (*handle >= pool->start) && (*handle < pool->end))
 249		start = *handle;
 250	else
 251		start = pool->hint;
 252
 253	limit = pool->end;
 254
 255	/* The case below can happen if we have a small segment appended
 256	 * to a large, or when the previous alloc was at the very end of
 257	 * the available space. If so, go back to the initial start.
 258	 */
 259	if (start >= limit)
 260		start = pool->start;
 261
 262	if (limit + tbl->it_offset > mask) {
 263		limit = mask - tbl->it_offset + 1;
 264		/* If we're constrained on address range, first try
 265		 * at the masked hint to avoid O(n) search complexity,
 266		 * but on second pass, start at 0 in pool 0.
 267		 */
 268		if ((start & mask) >= limit || pass > 0) {
 269			spin_unlock(&(pool->lock));
 270			pool = &(tbl->pools[0]);
 271			spin_lock(&(pool->lock));
 272			start = pool->start;
 273		} else {
 274			start &= mask;
 275		}
 276	}
 277
 278	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
 279			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
 280			align_mask);
 281	if (n == -1) {
 282		if (likely(pass == 0)) {
 283			/* First try the pool from the start */
 284			pool->hint = pool->start;
 285			pass++;
 286			goto again;
 287
 288		} else if (pass <= tbl->nr_pools) {
 289			/* Now try scanning all the other pools */
 290			spin_unlock(&(pool->lock));
 291			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
 292			pool = &tbl->pools[pool_nr];
 293			spin_lock(&(pool->lock));
 294			pool->hint = pool->start;
 295			pass++;
 296			goto again;
 297
 298		} else if (pass == tbl->nr_pools + 1) {
 299			/* Last resort: try largepool */
 300			spin_unlock(&pool->lock);
 301			pool = &tbl->large_pool;
 302			spin_lock(&pool->lock);
 303			pool->hint = pool->start;
 304			pass++;
 305			goto again;
 306
 307		} else {
 308			/* Give up */
 309			spin_unlock_irqrestore(&(pool->lock), flags);
 310			return DMA_MAPPING_ERROR;
 311		}
 312	}
 313
 314	end = n + npages;
 315
 316	/* Bump the hint to a new block for small allocs. */
 317	if (largealloc) {
 318		/* Don't bump to new block to avoid fragmentation */
 319		pool->hint = end;
 320	} else {
 321		/* Overflow will be taken care of at the next allocation */
 322		pool->hint = (end + tbl->it_blocksize - 1) &
 323		                ~(tbl->it_blocksize - 1);
 324	}
 325
 326	/* Update handle for SG allocations */
 327	if (handle)
 328		*handle = end;
 329
 330	spin_unlock_irqrestore(&(pool->lock), flags);
 331
 332	return n;
 333}
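/*
 * Editorial note, not part of the original file: the search above proceeds
 * in passes.  Pass 0 starts from the caller's handle or the pool hint;
 * pass 1 rescans the same pool from its start; the following passes walk
 * the remaining pools; one final pass tries the large pool before the
 * function gives up and returns DMA_MAPPING_ERROR.
 */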
 334
 335static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 336			      void *page, unsigned int npages,
 337			      enum dma_data_direction direction,
 338			      unsigned long mask, unsigned int align_order,
 339			      unsigned long attrs)
 340{
 341	unsigned long entry;
 342	dma_addr_t ret = DMA_MAPPING_ERROR;
 343	int build_fail;
 344
 345	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 346
 347	if (unlikely(entry == DMA_MAPPING_ERROR))
 348		return DMA_MAPPING_ERROR;
 349
 350	entry += tbl->it_offset;	/* Offset into real TCE table */
 351	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 352
 353	/* Put the TCEs in the HW table */
 354	build_fail = tbl->it_ops->set(tbl, entry, npages,
 355				      (unsigned long)page &
 356				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 357
 358	/* tbl->it_ops->set() only returns non-zero for transient errors.
 359	 * Clean up the table bitmap in this case and return
 360	 * DMA_MAPPING_ERROR. For all other errors the functionality is
 361	 * not altered.
 362	 */
 363	if (unlikely(build_fail)) {
 364		__iommu_free(tbl, ret, npages);
 365		return DMA_MAPPING_ERROR;
 366	}
 367
 368	/* Flush/invalidate TLB caches if necessary */
 369	if (tbl->it_ops->flush)
 370		tbl->it_ops->flush(tbl);
 371
 372	/* Make sure updates are seen by hardware */
 373	mb();
 374
 375	return ret;
 376}
 377
 378static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 379			     unsigned int npages)
 380{
 381	unsigned long entry, free_entry;
 382
 383	entry = dma_addr >> tbl->it_page_shift;
 384	free_entry = entry - tbl->it_offset;
 385
 386	if (((free_entry + npages) > tbl->it_size) ||
 387	    (entry < tbl->it_offset)) {
 388		if (printk_ratelimit()) {
 389			printk(KERN_INFO "iommu_free: invalid entry\n");
 390			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
 391			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
 392			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
 393			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
 394			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
 395			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
 396			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
 397			WARN_ON(1);
 398		}
 399
 400		return false;
 401	}
 402
 403	return true;
 404}
 405
 406static struct iommu_pool *get_pool(struct iommu_table *tbl,
 407				   unsigned long entry)
 408{
 409	struct iommu_pool *p;
 410	unsigned long largepool_start = tbl->large_pool.start;
 411
 412	/* The large pool is the last pool at the top of the table */
 413	if (entry >= largepool_start) {
 414		p = &tbl->large_pool;
 415	} else {
 416		unsigned int pool_nr = entry / tbl->poolsize;
 417
 418		BUG_ON(pool_nr > tbl->nr_pools);
 419		p = &tbl->pools[pool_nr];
 420	}
 421
 422	return p;
 423}
 424
 425static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 426			 unsigned int npages)
 427{
 428	unsigned long entry, free_entry;
 429	unsigned long flags;
 430	struct iommu_pool *pool;
 431
 432	entry = dma_addr >> tbl->it_page_shift;
 433	free_entry = entry - tbl->it_offset;
 434
 435	pool = get_pool(tbl, free_entry);
 436
 437	if (!iommu_free_check(tbl, dma_addr, npages))
 438		return;
 439
 440	tbl->it_ops->clear(tbl, entry, npages);
 441
 442	spin_lock_irqsave(&(pool->lock), flags);
 443	bitmap_clear(tbl->it_map, free_entry, npages);
 444	spin_unlock_irqrestore(&(pool->lock), flags);
 445}
 446
 447static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 448		unsigned int npages)
 449{
 450	__iommu_free(tbl, dma_addr, npages);
 451
 452	/* Make sure TLB cache is flushed if the HW needs it. We do
 453	 * not do an mb() here on purpose, it is not needed on any of
 454	 * the current platforms.
 455	 */
 456	if (tbl->it_ops->flush)
 457		tbl->it_ops->flush(tbl);
 458}
 459
 460int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 461		     struct scatterlist *sglist, int nelems,
 462		     unsigned long mask, enum dma_data_direction direction,
 463		     unsigned long attrs)
 464{
 465	dma_addr_t dma_next = 0, dma_addr;
 466	struct scatterlist *s, *outs, *segstart;
 467	int outcount, incount, i, build_fail = 0;
 468	unsigned int align;
 469	unsigned long handle;
 470	unsigned int max_seg_size;
 471
 472	BUG_ON(direction == DMA_NONE);
 473
 474	if ((nelems == 0) || !tbl)
 475		return -EINVAL;
 476
 477	outs = s = segstart = &sglist[0];
 478	outcount = 1;
 479	incount = nelems;
 480	handle = 0;
 481
 482	/* Init first segment length for backout at failure */
 483	outs->dma_length = 0;
 484
 485	DBG("sg mapping %d elements:\n", nelems);
 486
 487	max_seg_size = dma_get_max_seg_size(dev);
 488	for_each_sg(sglist, s, nelems, i) {
 489		unsigned long vaddr, npages, entry, slen;
 490
 491		slen = s->length;
 492		/* Sanity check */
 493		if (slen == 0) {
 494			dma_next = 0;
 495			continue;
 496		}
 497		/* Allocate iommu entries for that segment */
 498		vaddr = (unsigned long) sg_virt(s);
 499		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 500		align = 0;
 501		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 502		    (vaddr & ~PAGE_MASK) == 0)
 503			align = PAGE_SHIFT - tbl->it_page_shift;
 504		entry = iommu_range_alloc(dev, tbl, npages, &handle,
 505					  mask >> tbl->it_page_shift, align);
 506
 507		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 508
 509		/* Handle failure */
 510		if (unlikely(entry == DMA_MAPPING_ERROR)) {
 511			if (!(attrs & DMA_ATTR_NO_WARN) &&
 512			    printk_ratelimit())
 513				dev_info(dev, "iommu_alloc failed, tbl %p "
 514					 "vaddr %lx npages %lu\n", tbl, vaddr,
 515					 npages);
 516			goto failure;
 517		}
 518
 519		/* Convert entry to a dma_addr_t */
 520		entry += tbl->it_offset;
 521		dma_addr = entry << tbl->it_page_shift;
 522		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 523
 524		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 525			    npages, entry, dma_addr);
 526
 527		/* Insert into HW table */
 528		build_fail = tbl->it_ops->set(tbl, entry, npages,
 529					      vaddr & IOMMU_PAGE_MASK(tbl),
 530					      direction, attrs);
 531		if(unlikely(build_fail))
 532			goto failure;
 533
 534		/* If we are in an open segment, try merging */
 535		if (segstart != s) {
 536			DBG("  - trying merge...\n");
 537			/* We cannot merge if:
 538			 * - allocated dma_addr isn't contiguous to previous allocation
 539			 */
 540			if (novmerge || (dma_addr != dma_next) ||
 541			    (outs->dma_length + s->length > max_seg_size)) {
 542				/* Can't merge: create a new segment */
 543				segstart = s;
 544				outcount++;
 545				outs = sg_next(outs);
 546				DBG("    can't merge, new segment.\n");
 547			} else {
 548				outs->dma_length += s->length;
 549				DBG("    merged, new len: %ux\n", outs->dma_length);
 550			}
 551		}
 552
 553		if (segstart == s) {
 554			/* This is a new segment, fill entries */
 555			DBG("  - filling new segment.\n");
 556			outs->dma_address = dma_addr;
 557			outs->dma_length = slen;
 558		}
 559
 560		/* Calculate next page pointer for contiguous check */
 561		dma_next = dma_addr + slen;
 562
 563		DBG("  - dma next is: %lx\n", dma_next);
 564	}
 565
 566	/* Flush/invalidate TLB caches if necessary */
 567	if (tbl->it_ops->flush)
 568		tbl->it_ops->flush(tbl);
 569
 570	DBG("mapped %d elements:\n", outcount);
 571
 572	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
 573	 * next entry of the sglist if we didn't fill the list completely
 574	 */
 575	if (outcount < incount) {
 576		outs = sg_next(outs);
 577		outs->dma_length = 0;
 578	}
 579
 580	/* Make sure updates are seen by hardware */
 581	mb();
 582
 583	return outcount;
 584
 585 failure:
 586	for_each_sg(sglist, s, nelems, i) {
 587		if (s->dma_length != 0) {
 588			unsigned long vaddr, npages;
 589
 590			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 591			npages = iommu_num_pages(s->dma_address, s->dma_length,
 592						 IOMMU_PAGE_SIZE(tbl));
 593			__iommu_free(tbl, vaddr, npages);
 594			s->dma_length = 0;
 595		}
 596		if (s == outs)
 597			break;
 598	}
 599	return -EIO;
 600}
 601
 602
 603void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 604			int nelems, enum dma_data_direction direction,
 605			unsigned long attrs)
 606{
 607	struct scatterlist *sg;
 608
 609	BUG_ON(direction == DMA_NONE);
 610
 611	if (!tbl)
 612		return;
 613
 614	sg = sglist;
 615	while (nelems--) {
 616		unsigned int npages;
 617		dma_addr_t dma_handle = sg->dma_address;
 618
 619		if (sg->dma_length == 0)
 620			break;
 621		npages = iommu_num_pages(dma_handle, sg->dma_length,
 622					 IOMMU_PAGE_SIZE(tbl));
 623		__iommu_free(tbl, dma_handle, npages);
 624		sg = sg_next(sg);
 625	}
 626
 627	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
 628	 * do not do an mb() here, the affected platforms do not need it
 629	 * when freeing.
 630	 */
 631	if (tbl->it_ops->flush)
 632		tbl->it_ops->flush(tbl);
 633}
 634
 635static void iommu_table_clear(struct iommu_table *tbl)
 636{
 637	/*
 638	 * In case of firmware assisted dump system goes through clean
 639	 * reboot process at the time of system crash. Hence it's safe to
 640	 * clear the TCE entries if firmware assisted dump is active.
 641	 */
 642	if (!is_kdump_kernel() || is_fadump_active()) {
 643		/* Clear the table in case firmware left allocations in it */
 644		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 645		return;
 646	}
 647
 648#ifdef CONFIG_CRASH_DUMP
 649	if (tbl->it_ops->get) {
 650		unsigned long index, tceval, tcecount = 0;
 651
 652		/* Reserve the existing mappings left by the first kernel. */
 653		for (index = 0; index < tbl->it_size; index++) {
 654			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 655			/*
 656			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
 657			 */
 658			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
 659				__set_bit(index, tbl->it_map);
 660				tcecount++;
 661			}
 662		}
 663
 664		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
 665			printk(KERN_WARNING "TCE table is full; freeing ");
 666			printk(KERN_WARNING "%d entries for the kdump boot\n",
 667				KDUMP_MIN_TCE_ENTRIES);
 668			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
 669				index < tbl->it_size; index++)
 670				__clear_bit(index, tbl->it_map);
 671		}
 672	}
 673#endif
 674}
 675
 676static void iommu_table_reserve_pages(struct iommu_table *tbl,
 677		unsigned long res_start, unsigned long res_end)
 678{
 679	int i;
 680
 681	WARN_ON_ONCE(res_end < res_start);
 682	/*
 683	 * Reserve page 0 so it will not be used for any mappings.
 684	 * This avoids buggy drivers that consider page 0 to be invalid
 685	 * to crash the machine or even lose data.
 686	 */
 687	if (tbl->it_offset == 0)
 688		set_bit(0, tbl->it_map);
 689
 690	if (res_start < tbl->it_offset)
 691		res_start = tbl->it_offset;
 692
 693	if (res_end > (tbl->it_offset + tbl->it_size))
 694		res_end = tbl->it_offset + tbl->it_size;
 695
 696	/* Check if res_start..res_end is a valid range in the table */
 697	if (res_start >= res_end) {
 698		tbl->it_reserved_start = tbl->it_offset;
 699		tbl->it_reserved_end = tbl->it_offset;
 700		return;
 701	}
 702
 703	tbl->it_reserved_start = res_start;
 704	tbl->it_reserved_end = res_end;
 705
 706	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
 707		set_bit(i - tbl->it_offset, tbl->it_map);
 708}
 709
 710/*
 711 * Build a iommu_table structure.  This contains a bit map which
 712 * is used to manage allocation of the tce space.
 713 */
 714struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
 715		unsigned long res_start, unsigned long res_end)
 716{
 717	unsigned long sz;
 718	static int welcomed = 0;
 719	unsigned int i;
 720	struct iommu_pool *p;
 721
 722	BUG_ON(!tbl->it_ops);
 723
 724	/* number of bytes needed for the bitmap */
 725	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 726
 727	tbl->it_map = vzalloc_node(sz, nid);
 728	if (!tbl->it_map) {
 729		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
 730		return NULL;
 731	}
 732
 733	iommu_table_reserve_pages(tbl, res_start, res_end);
 734
 735	/* We only split the IOMMU table if we have 1GB or more of space */
 736	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 737		tbl->nr_pools = IOMMU_NR_POOLS;
 738	else
 739		tbl->nr_pools = 1;
 740
 741	/* We reserve the top 1/4 of the table for large allocations */
 742	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
 743
 744	for (i = 0; i < tbl->nr_pools; i++) {
 745		p = &tbl->pools[i];
 746		spin_lock_init(&(p->lock));
 747		p->start = tbl->poolsize * i;
 748		p->hint = p->start;
 749		p->end = p->start + tbl->poolsize;
 750	}
 751
 752	p = &tbl->large_pool;
 753	spin_lock_init(&(p->lock));
 754	p->start = tbl->poolsize * i;
 755	p->hint = p->start;
 756	p->end = tbl->it_size;
 757
 758	iommu_table_clear(tbl);
 759
 760	if (!welcomed) {
 761		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 762		       novmerge ? "disabled" : "enabled");
 763		welcomed = 1;
 764	}
 765
 766	iommu_debugfs_add(tbl);
 767
 768	return tbl;
 769}
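/*
 * Editorial sketch, not part of the original file: a minimal example of how
 * platform setup code might fill an iommu_table before handing it to
 * iommu_init_table().  The field values and the "example_tce_ops" structure
 * are assumptions for illustration only; real platforms derive them from
 * firmware/device-tree properties.
 */
static struct iommu_table_ops example_tce_ops;	/* assumed to provide .set/.clear/.flush */

static struct iommu_table *example_setup_table(int nid)
{
	struct iommu_table *tbl = kzalloc_node(sizeof(*tbl), GFP_KERNEL, nid);

	if (!tbl)
		return NULL;

	tbl->it_index = 0x80000000;		/* LIOBN, example value */
	tbl->it_offset = 0;			/* first usable TCE entry */
	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->it_size = (1UL << 30) >> tbl->it_page_shift;	/* 1GB DMA window */
	tbl->it_blocksize = 16;			/* hint rounding granularity, example value */
	tbl->it_ops = &example_tce_ops;
	kref_init(&tbl->it_kref);

	/* no reserved MMIO32 range in this example */
	return iommu_init_table(tbl, nid, 0, 0);
}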
 770
 771bool iommu_table_in_use(struct iommu_table *tbl)
 772{
 773	unsigned long start = 0, end;
 774
 775	/* ignore reserved bit0 */
 776	if (tbl->it_offset == 0)
 777		start = 1;
 778
 779	/* Simple case with no reserved MMIO32 region */
 780	if (!tbl->it_reserved_start && !tbl->it_reserved_end)
 781		return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
 782
 783	end = tbl->it_reserved_start - tbl->it_offset;
 784	if (find_next_bit(tbl->it_map, end, start) != end)
 785		return true;
 786
 787	start = tbl->it_reserved_end - tbl->it_offset;
 788	end = tbl->it_size;
 789	return find_next_bit(tbl->it_map, end, start) != end;
 790}
 791
 792static void iommu_table_free(struct kref *kref)
 793{
 794	struct iommu_table *tbl;
 795
 796	tbl = container_of(kref, struct iommu_table, it_kref);
 797
 798	if (tbl->it_ops->free)
 799		tbl->it_ops->free(tbl);
 800
 801	if (!tbl->it_map) {
 802		kfree(tbl);
 803		return;
 804	}
 805
 806	iommu_debugfs_del(tbl);
 807
 808	/* verify that table contains no entries */
 809	if (iommu_table_in_use(tbl))
 810		pr_warn("%s: Unexpected TCEs\n", __func__);
 811
 812	/* free bitmap */
 813	vfree(tbl->it_map);
 814
 815	/* free table */
 816	kfree(tbl);
 817}
 818
 819struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
 820{
 821	if (kref_get_unless_zero(&tbl->it_kref))
 822		return tbl;
 823
 824	return NULL;
 825}
 826EXPORT_SYMBOL_GPL(iommu_tce_table_get);
 827
 828int iommu_tce_table_put(struct iommu_table *tbl)
 829{
 830	if (WARN_ON(!tbl))
 831		return 0;
 832
 833	return kref_put(&tbl->it_kref, iommu_table_free);
 834}
 835EXPORT_SYMBOL_GPL(iommu_tce_table_put);
 836
 837/* Creates TCEs for a user provided buffer.  The user buffer must be
 838 * contiguous real kernel storage (not vmalloc).  The address passed here
 839 * comprises a page address and offset into that page. The dma_addr_t
 840 * returned will point to the same byte within the page as was passed in.
 841 */
 842dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 843			  struct page *page, unsigned long offset, size_t size,
 844			  unsigned long mask, enum dma_data_direction direction,
 845			  unsigned long attrs)
 846{
 847	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
 848	void *vaddr;
 849	unsigned long uaddr;
 850	unsigned int npages, align;
 851
 852	BUG_ON(direction == DMA_NONE);
 853
 854	vaddr = page_address(page) + offset;
 855	uaddr = (unsigned long)vaddr;
 856
 857	if (tbl) {
 858		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 859		align = 0;
 860		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 861		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 862			align = PAGE_SHIFT - tbl->it_page_shift;
 863
 864		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 865					 mask >> tbl->it_page_shift, align,
 866					 attrs);
 867		if (dma_handle == DMA_MAPPING_ERROR) {
 868			if (!(attrs & DMA_ATTR_NO_WARN) &&
 869			    printk_ratelimit())  {
 870				dev_info(dev, "iommu_alloc failed, tbl %p "
 871					 "vaddr %p npages %d\n", tbl, vaddr,
 872					 npages);
 873			}
 874		} else
 875			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 876	}
 877
 878	return dma_handle;
 879}
 880
 881void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 882		      size_t size, enum dma_data_direction direction,
 883		      unsigned long attrs)
 884{
 885	unsigned int npages;
 886
 887	BUG_ON(direction == DMA_NONE);
 888
 889	if (tbl) {
 890		npages = iommu_num_pages(dma_handle, size,
 891					 IOMMU_PAGE_SIZE(tbl));
 892		iommu_free(tbl, dma_handle, npages);
 893	}
 894}
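/*
 * Editorial sketch, not part of the original file: a hypothetical caller of
 * iommu_map_page()/iommu_unmap_page().  The helper name and the way the
 * table pointer is passed in are assumptions; in-tree callers go through the
 * powerpc DMA ops layer.  "buf" must be contiguous lowmem (not vmalloc), as
 * the comment above iommu_map_page() requires.
 */
static int example_dma_one_buffer(struct device *dev, struct iommu_table *tbl,
				  void *buf, size_t len)
{
	dma_addr_t addr;

	addr = iommu_map_page(dev, tbl, virt_to_page(buf), offset_in_page(buf),
			      len, dma_get_mask(dev), DMA_TO_DEVICE, 0);
	if (addr == DMA_MAPPING_ERROR)
		return -ENOMEM;

	/* ... program the device to DMA from "addr" here ... */

	iommu_unmap_page(tbl, addr, len, DMA_TO_DEVICE, 0);
	return 0;
}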
 895
 896/* Allocates a contiguous real buffer and creates mappings over it.
 897 * Returns the virtual address of the buffer and sets dma_handle
 898 * to the dma address (mapping) of the first page.
 899 */
 900void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 901			   size_t size,	dma_addr_t *dma_handle,
 902			   unsigned long mask, gfp_t flag, int node)
 903{
 904	void *ret = NULL;
 905	dma_addr_t mapping;
 906	unsigned int order;
 907	unsigned int nio_pages, io_order;
 908	struct page *page;
 909
 910	size = PAGE_ALIGN(size);
 911	order = get_order(size);
 912
 913 	/*
 914	 * Client asked for way too much space.  This is checked later
 915	 * anyway.  It is easier to debug here for the drivers than in
 916	 * the tce tables.
 917	 */
 918	if (order >= IOMAP_MAX_ORDER) {
 919		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
 920			 size);
 921		return NULL;
 922	}
 923
 924	if (!tbl)
 925		return NULL;
 926
 927	/* Alloc enough pages (and possibly more) */
 928	page = alloc_pages_node(node, flag, order);
 929	if (!page)
 930		return NULL;
 931	ret = page_address(page);
 932	memset(ret, 0, size);
 933
 934	/* Set up tces to cover the allocated range */
 935	nio_pages = size >> tbl->it_page_shift;
 936	io_order = get_iommu_order(size, tbl);
 937	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 938			      mask >> tbl->it_page_shift, io_order, 0);
 939	if (mapping == DMA_MAPPING_ERROR) {
 940		free_pages((unsigned long)ret, order);
 941		return NULL;
 942	}
 943	*dma_handle = mapping;
 944	return ret;
 945}
 946
 947void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 948			 void *vaddr, dma_addr_t dma_handle)
 949{
 950	if (tbl) {
 951		unsigned int nio_pages;
 952
 953		size = PAGE_ALIGN(size);
 954		nio_pages = size >> tbl->it_page_shift;
 955		iommu_free(tbl, dma_handle, nio_pages);
 956		size = PAGE_ALIGN(size);
 957		free_pages((unsigned long)vaddr, get_order(size));
 958	}
 959}
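/*
 * Editorial sketch, not part of the original file: hypothetical use of the
 * coherent helpers above; "tbl" is assumed to be the device's TCE table.
 */
static void *example_alloc_ring(struct device *dev, struct iommu_table *tbl,
				size_t size, dma_addr_t *dma)
{
	return iommu_alloc_coherent(dev, tbl, size, dma, dma_get_mask(dev),
				    GFP_KERNEL, dev_to_node(dev));
}

static void example_free_ring(struct iommu_table *tbl, size_t size,
			      void *vaddr, dma_addr_t dma)
{
	iommu_free_coherent(tbl, size, vaddr, dma);
}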
 960
 961unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
 962{
 963	switch (dir) {
 964	case DMA_BIDIRECTIONAL:
 965		return TCE_PCI_READ | TCE_PCI_WRITE;
 966	case DMA_FROM_DEVICE:
 967		return TCE_PCI_WRITE;
 968	case DMA_TO_DEVICE:
 969		return TCE_PCI_READ;
 970	default:
 971		return 0;
 972	}
 973}
 974EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
 975
 976#ifdef CONFIG_IOMMU_API
 977/*
 978 * SPAPR TCE API
 979 */
 980static void group_release(void *iommu_data)
 981{
 982	struct iommu_table_group *table_group = iommu_data;
 983
 984	table_group->group = NULL;
 985}
 986
 987void iommu_register_group(struct iommu_table_group *table_group,
 988		int pci_domain_number, unsigned long pe_num)
 989{
 990	struct iommu_group *grp;
 991	char *name;
 992
 993	grp = iommu_group_alloc();
 994	if (IS_ERR(grp)) {
 995		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
 996				PTR_ERR(grp));
 997		return;
 998	}
 999	table_group->group = grp;
1000	iommu_group_set_iommudata(grp, table_group, group_release);
1001	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
1002			pci_domain_number, pe_num);
1003	if (!name)
1004		return;
1005	iommu_group_set_name(grp, name);
1006	kfree(name);
1007}
1008
1009enum dma_data_direction iommu_tce_direction(unsigned long tce)
1010{
1011	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
1012		return DMA_BIDIRECTIONAL;
1013	else if (tce & TCE_PCI_READ)
1014		return DMA_TO_DEVICE;
1015	else if (tce & TCE_PCI_WRITE)
1016		return DMA_FROM_DEVICE;
1017	else
1018		return DMA_NONE;
1019}
1020EXPORT_SYMBOL_GPL(iommu_tce_direction);
1021
1022void iommu_flush_tce(struct iommu_table *tbl)
1023{
1024	/* Flush/invalidate TLB caches if necessary */
1025	if (tbl->it_ops->flush)
1026		tbl->it_ops->flush(tbl);
1027
1028	/* Make sure updates are seen by hardware */
1029	mb();
1030}
1031EXPORT_SYMBOL_GPL(iommu_flush_tce);
1032
1033int iommu_tce_check_ioba(unsigned long page_shift,
1034		unsigned long offset, unsigned long size,
1035		unsigned long ioba, unsigned long npages)
1036{
1037	unsigned long mask = (1UL << page_shift) - 1;
1038
1039	if (ioba & mask)
1040		return -EINVAL;
1041
1042	ioba >>= page_shift;
1043	if (ioba < offset)
1044		return -EINVAL;
1045
1046	if ((ioba + 1) > (offset + size))
1047		return -EINVAL;
1048
1049	return 0;
1050}
1051EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
1052
1053int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1054{
1055	unsigned long mask = (1UL << page_shift) - 1;
1056
1057	if (gpa & mask)
1058		return -EINVAL;
1059
1060	return 0;
1061}
1062EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1063
1064extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1065		struct iommu_table *tbl,
1066		unsigned long entry, unsigned long *hpa,
1067		enum dma_data_direction *direction)
1068{
1069	long ret;
1070	unsigned long size = 0;
1071
1072	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
1073	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1074			(*direction == DMA_BIDIRECTIONAL)) &&
1075			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1076					&size))
1077		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1078
1079	return ret;
1080}
1081EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1082
1083void iommu_tce_kill(struct iommu_table *tbl,
1084		unsigned long entry, unsigned long pages)
1085{
1086	if (tbl->it_ops->tce_kill)
1087		tbl->it_ops->tce_kill(tbl, entry, pages);
1088}
1089EXPORT_SYMBOL_GPL(iommu_tce_kill);
1090
1091int iommu_take_ownership(struct iommu_table *tbl)
1092{
1093	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1094	int ret = 0;
1095
1096	/*
1097	 * VFIO does not control TCE entries allocation and the guest
1098	 * can write new TCEs on top of existing ones so iommu_tce_build()
1099	 * must be able to release old pages. This functionality
1100	 * requires exchange() callback defined so if it is not
1101	 * implemented, we disallow taking ownership over the table.
1102	 */
1103	if (!tbl->it_ops->xchg_no_kill)
1104		return -EINVAL;
1105
1106	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1107	for (i = 0; i < tbl->nr_pools; i++)
1108		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1109
1110	if (iommu_table_in_use(tbl)) {
1111		pr_err("iommu_tce: it_map is not empty");
1112		ret = -EBUSY;
1113	} else {
1114		memset(tbl->it_map, 0xff, sz);
1115	}
1116
1117	for (i = 0; i < tbl->nr_pools; i++)
1118		spin_unlock(&tbl->pools[i].lock);
1119	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1120
1121	return ret;
1122}
1123EXPORT_SYMBOL_GPL(iommu_take_ownership);
1124
1125void iommu_release_ownership(struct iommu_table *tbl)
1126{
1127	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1128
1129	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1130	for (i = 0; i < tbl->nr_pools; i++)
1131		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1132
1133	memset(tbl->it_map, 0, sz);
1134
1135	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1136			tbl->it_reserved_end);
1137
1138	for (i = 0; i < tbl->nr_pools; i++)
1139		spin_unlock(&tbl->pools[i].lock);
1140	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1141}
1142EXPORT_SYMBOL_GPL(iommu_release_ownership);
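/*
 * Editorial sketch, not part of the original file: the expected calling
 * pattern for a VFIO-style consumer of the ownership API above, with the
 * real VFIO bookkeeping omitted.  It assumes iommu_take_ownership(tbl)
 * already succeeded; iommu_release_ownership(tbl) returns the table to the
 * kernel when the user is done.
 */
static long example_user_update_tce(struct mm_struct *mm, struct iommu_table *tbl,
				    unsigned long entry, unsigned long hpa,
				    enum dma_data_direction dir)
{
	long ret;

	/* exchange the TCE at @entry; hpa/dir come back describing the old mapping */
	ret = iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
	if (!ret)
		iommu_tce_kill(tbl, entry, 1);	/* invalidate the TCE cache for one entry */

	return ret;
}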
1143
1144int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1145{
1146	/*
1147	 * The sysfs entries should be populated before
1148	 * binding IOMMU group. If sysfs entries isn't
1149	 * ready, we simply bail.
1150	 */
1151	if (!device_is_registered(dev))
1152		return -ENOENT;
1153
1154	if (device_iommu_mapped(dev)) {
1155		pr_debug("%s: Skipping device %s with iommu group %d\n",
1156			 __func__, dev_name(dev),
1157			 iommu_group_id(dev->iommu_group));
1158		return -EBUSY;
1159	}
1160
1161	pr_debug("%s: Adding %s to iommu group %d\n",
1162		 __func__, dev_name(dev),  iommu_group_id(table_group->group));
1163
1164	return iommu_group_add_device(table_group->group, dev);
1165}
1166EXPORT_SYMBOL_GPL(iommu_add_device);
1167
1168void iommu_del_device(struct device *dev)
1169{
1170	/*
1171	 * Some devices might not have IOMMU table and group
1172	 * and we needn't detach them from the associated
1173	 * IOMMU groups
1174	 */
1175	if (!device_iommu_mapped(dev)) {
1176		pr_debug("iommu_tce: skipping device %s with no tbl\n",
1177			 dev_name(dev));
1178		return;
1179	}
1180
1181	iommu_group_remove_device(dev);
1182}
1183EXPORT_SYMBOL_GPL(iommu_del_device);
1184#endif /* CONFIG_IOMMU_API */
arch/powerpc/kernel/iommu.c (v5.4)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
   4 * 
   5 * Rewrite, cleanup, new allocation schemes, virtual merging: 
   6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
   7 *               and  Ben. Herrenschmidt, IBM Corporation
   8 *
   9 * Dynamic DMA mapping support, bus-independent parts.
  10 */
  11
  12
  13#include <linux/init.h>
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <linux/mm.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/bitmap.h>
  21#include <linux/iommu-helper.h>
  22#include <linux/crash_dump.h>
  23#include <linux/hash.h>
  24#include <linux/fault-inject.h>
  25#include <linux/pci.h>
  26#include <linux/iommu.h>
  27#include <linux/sched.h>
  28#include <asm/io.h>
  29#include <asm/prom.h>
  30#include <asm/iommu.h>
  31#include <asm/pci-bridge.h>
  32#include <asm/machdep.h>
  33#include <asm/kdump.h>
  34#include <asm/fadump.h>
  35#include <asm/vio.h>
  36#include <asm/tce.h>
  37#include <asm/mmu_context.h>
  38
  39#define DBG(...)
  40
  41static int novmerge;
  42
  43static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
  44
  45static int __init setup_iommu(char *str)
  46{
  47	if (!strcmp(str, "novmerge"))
  48		novmerge = 1;
  49	else if (!strcmp(str, "vmerge"))
  50		novmerge = 0;
  51	return 1;
  52}
  53
  54__setup("iommu=", setup_iommu);
  55
  56static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
  57
  58/*
  59 * We precalculate the hash to avoid doing it on every allocation.
  60 *
  61 * The hash is important to spread CPUs across all the pools. For example,
  62 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
  63 * with 4 pools all primary threads would map to the same pool.
  64 */
  65static int __init setup_iommu_pool_hash(void)
  66{
  67	unsigned int i;
  68
  69	for_each_possible_cpu(i)
  70		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
  71
  72	return 0;
  73}
  74subsys_initcall(setup_iommu_pool_hash);
  75
  76#ifdef CONFIG_FAIL_IOMMU
  77
  78static DECLARE_FAULT_ATTR(fail_iommu);
  79
  80static int __init setup_fail_iommu(char *str)
  81{
  82	return setup_fault_attr(&fail_iommu, str);
  83}
  84__setup("fail_iommu=", setup_fail_iommu);
  85
  86static bool should_fail_iommu(struct device *dev)
  87{
  88	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
  89}
  90
  91static int __init fail_iommu_debugfs(void)
  92{
  93	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
  94						       NULL, &fail_iommu);
  95
  96	return PTR_ERR_OR_ZERO(dir);
  97}
  98late_initcall(fail_iommu_debugfs);
  99
 100static ssize_t fail_iommu_show(struct device *dev,
 101			       struct device_attribute *attr, char *buf)
 102{
 103	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
 104}
 105
 106static ssize_t fail_iommu_store(struct device *dev,
 107				struct device_attribute *attr, const char *buf,
 108				size_t count)
 109{
 110	int i;
 111
 112	if (count > 0 && sscanf(buf, "%d", &i) > 0)
 113		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
 114
 115	return count;
 116}
 117
 118static DEVICE_ATTR_RW(fail_iommu);
 119
 120static int fail_iommu_bus_notify(struct notifier_block *nb,
 121				 unsigned long action, void *data)
 122{
 123	struct device *dev = data;
 124
 125	if (action == BUS_NOTIFY_ADD_DEVICE) {
 126		if (device_create_file(dev, &dev_attr_fail_iommu))
 127			pr_warn("Unable to create IOMMU fault injection sysfs "
 128				"entries\n");
 129	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
 130		device_remove_file(dev, &dev_attr_fail_iommu);
 131	}
 132
 133	return 0;
 134}
 135
 136static struct notifier_block fail_iommu_bus_notifier = {
 137	.notifier_call = fail_iommu_bus_notify
 138};
 139
 140static int __init fail_iommu_setup(void)
 141{
 142#ifdef CONFIG_PCI
 143	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
 144#endif
 145#ifdef CONFIG_IBMVIO
 146	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
 147#endif
 148
 149	return 0;
 150}
 151/*
 152 * Must execute after PCI and VIO subsystem have initialised but before
 153 * devices are probed.
 154 */
 155arch_initcall(fail_iommu_setup);
 156#else
 157static inline bool should_fail_iommu(struct device *dev)
 158{
 159	return false;
 160}
 161#endif
 162
 163static unsigned long iommu_range_alloc(struct device *dev,
 164				       struct iommu_table *tbl,
 165                                       unsigned long npages,
 166                                       unsigned long *handle,
 167                                       unsigned long mask,
 168                                       unsigned int align_order)
 169{ 
 170	unsigned long n, end, start;
 171	unsigned long limit;
 172	int largealloc = npages > 15;
 173	int pass = 0;
 174	unsigned long align_mask;
 175	unsigned long boundary_size;
 176	unsigned long flags;
 177	unsigned int pool_nr;
 178	struct iommu_pool *pool;
 179
 180	align_mask = (1ull << align_order) - 1;
 181
 182	/* This allocator was derived from x86_64's bit string search */
 183
 184	/* Sanity check */
 185	if (unlikely(npages == 0)) {
 186		if (printk_ratelimit())
 187			WARN_ON(1);
 188		return DMA_MAPPING_ERROR;
 189	}
 190
 191	if (should_fail_iommu(dev))
 192		return DMA_MAPPING_ERROR;
 193
 194	/*
 195	 * We don't need to disable preemption here because any CPU can
 196	 * safely use any IOMMU pool.
 197	 */
 198	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 199
 200	if (largealloc)
 201		pool = &(tbl->large_pool);
 202	else
 203		pool = &(tbl->pools[pool_nr]);
 204
 205	spin_lock_irqsave(&(pool->lock), flags);
 206
 207again:
 208	if ((pass == 0) && handle && *handle &&
 209	    (*handle >= pool->start) && (*handle < pool->end))
 210		start = *handle;
 211	else
 212		start = pool->hint;
 213
 214	limit = pool->end;
 215
 216	/* The case below can happen if we have a small segment appended
 217	 * to a large, or when the previous alloc was at the very end of
 218	 * the available space. If so, go back to the initial start.
 219	 */
 220	if (start >= limit)
 221		start = pool->start;
 222
 223	if (limit + tbl->it_offset > mask) {
 224		limit = mask - tbl->it_offset + 1;
 225		/* If we're constrained on address range, first try
 226		 * at the masked hint to avoid O(n) search complexity,
 227		 * but on second pass, start at 0 in pool 0.
 228		 */
 229		if ((start & mask) >= limit || pass > 0) {
 230			spin_unlock(&(pool->lock));
 231			pool = &(tbl->pools[0]);
 232			spin_lock(&(pool->lock));
 233			start = pool->start;
 234		} else {
 235			start &= mask;
 236		}
 237	}
 238
 239	if (dev)
 240		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 241				      1 << tbl->it_page_shift);
 242	else
 243		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
 244	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 245
 246	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
 247			     boundary_size >> tbl->it_page_shift, align_mask);
 248	if (n == -1) {
 249		if (likely(pass == 0)) {
 250			/* First try the pool from the start */
 251			pool->hint = pool->start;
 252			pass++;
 253			goto again;
 254
 255		} else if (pass <= tbl->nr_pools) {
 256			/* Now try scanning all the other pools */
 257			spin_unlock(&(pool->lock));
 258			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
 259			pool = &tbl->pools[pool_nr];
 260			spin_lock(&(pool->lock));
 261			pool->hint = pool->start;
 262			pass++;
 263			goto again;
 264
 265		} else {
 266			/* Give up */
 267			spin_unlock_irqrestore(&(pool->lock), flags);
 268			return DMA_MAPPING_ERROR;
 269		}
 270	}
 271
 272	end = n + npages;
 273
 274	/* Bump the hint to a new block for small allocs. */
 275	if (largealloc) {
 276		/* Don't bump to new block to avoid fragmentation */
 277		pool->hint = end;
 278	} else {
 279		/* Overflow will be taken care of at the next allocation */
 280		pool->hint = (end + tbl->it_blocksize - 1) &
 281		                ~(tbl->it_blocksize - 1);
 282	}
 283
 284	/* Update handle for SG allocations */
 285	if (handle)
 286		*handle = end;
 287
 288	spin_unlock_irqrestore(&(pool->lock), flags);
 289
 290	return n;
 291}
 292
 293static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 294			      void *page, unsigned int npages,
 295			      enum dma_data_direction direction,
 296			      unsigned long mask, unsigned int align_order,
 297			      unsigned long attrs)
 298{
 299	unsigned long entry;
 300	dma_addr_t ret = DMA_MAPPING_ERROR;
 301	int build_fail;
 302
 303	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 304
 305	if (unlikely(entry == DMA_MAPPING_ERROR))
 306		return DMA_MAPPING_ERROR;
 307
 308	entry += tbl->it_offset;	/* Offset into real TCE table */
 309	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 310
 311	/* Put the TCEs in the HW table */
 312	build_fail = tbl->it_ops->set(tbl, entry, npages,
 313				      (unsigned long)page &
 314				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 315
 316	/* tbl->it_ops->set() only returns non-zero for transient errors.
 317	 * Clean up the table bitmap in this case and return
 318	 * DMA_MAPPING_ERROR. For all other errors the functionality is
 319	 * not altered.
 320	 */
 321	if (unlikely(build_fail)) {
 322		__iommu_free(tbl, ret, npages);
 323		return DMA_MAPPING_ERROR;
 324	}
 325
 326	/* Flush/invalidate TLB caches if necessary */
 327	if (tbl->it_ops->flush)
 328		tbl->it_ops->flush(tbl);
 329
 330	/* Make sure updates are seen by hardware */
 331	mb();
 332
 333	return ret;
 334}
 335
 336static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 337			     unsigned int npages)
 338{
 339	unsigned long entry, free_entry;
 340
 341	entry = dma_addr >> tbl->it_page_shift;
 342	free_entry = entry - tbl->it_offset;
 343
 344	if (((free_entry + npages) > tbl->it_size) ||
 345	    (entry < tbl->it_offset)) {
 346		if (printk_ratelimit()) {
 347			printk(KERN_INFO "iommu_free: invalid entry\n");
 348			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
 349			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
 350			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
 351			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
 352			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
 353			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
 354			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
 355			WARN_ON(1);
 356		}
 357
 358		return false;
 359	}
 360
 361	return true;
 362}
 363
 364static struct iommu_pool *get_pool(struct iommu_table *tbl,
 365				   unsigned long entry)
 366{
 367	struct iommu_pool *p;
 368	unsigned long largepool_start = tbl->large_pool.start;
 369
 370	/* The large pool is the last pool at the top of the table */
 371	if (entry >= largepool_start) {
 372		p = &tbl->large_pool;
 373	} else {
 374		unsigned int pool_nr = entry / tbl->poolsize;
 375
 376		BUG_ON(pool_nr > tbl->nr_pools);
 377		p = &tbl->pools[pool_nr];
 378	}
 379
 380	return p;
 381}
 382
 383static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 384			 unsigned int npages)
 385{
 386	unsigned long entry, free_entry;
 387	unsigned long flags;
 388	struct iommu_pool *pool;
 389
 390	entry = dma_addr >> tbl->it_page_shift;
 391	free_entry = entry - tbl->it_offset;
 392
 393	pool = get_pool(tbl, free_entry);
 394
 395	if (!iommu_free_check(tbl, dma_addr, npages))
 396		return;
 397
 398	tbl->it_ops->clear(tbl, entry, npages);
 399
 400	spin_lock_irqsave(&(pool->lock), flags);
 401	bitmap_clear(tbl->it_map, free_entry, npages);
 402	spin_unlock_irqrestore(&(pool->lock), flags);
 403}
 404
 405static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 406		unsigned int npages)
 407{
 408	__iommu_free(tbl, dma_addr, npages);
 409
 410	/* Make sure TLB cache is flushed if the HW needs it. We do
 411	 * not do an mb() here on purpose, it is not needed on any of
 412	 * the current platforms.
 413	 */
 414	if (tbl->it_ops->flush)
 415		tbl->it_ops->flush(tbl);
 416}
 417
 418int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 419		     struct scatterlist *sglist, int nelems,
 420		     unsigned long mask, enum dma_data_direction direction,
 421		     unsigned long attrs)
 422{
 423	dma_addr_t dma_next = 0, dma_addr;
 424	struct scatterlist *s, *outs, *segstart;
 425	int outcount, incount, i, build_fail = 0;
 426	unsigned int align;
 427	unsigned long handle;
 428	unsigned int max_seg_size;
 429
 430	BUG_ON(direction == DMA_NONE);
 431
 432	if ((nelems == 0) || !tbl)
 433		return 0;
 434
 435	outs = s = segstart = &sglist[0];
 436	outcount = 1;
 437	incount = nelems;
 438	handle = 0;
 439
 440	/* Init first segment length for backout at failure */
 441	outs->dma_length = 0;
 442
 443	DBG("sg mapping %d elements:\n", nelems);
 444
 445	max_seg_size = dma_get_max_seg_size(dev);
 446	for_each_sg(sglist, s, nelems, i) {
 447		unsigned long vaddr, npages, entry, slen;
 448
 449		slen = s->length;
 450		/* Sanity check */
 451		if (slen == 0) {
 452			dma_next = 0;
 453			continue;
 454		}
 455		/* Allocate iommu entries for that segment */
 456		vaddr = (unsigned long) sg_virt(s);
 457		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 458		align = 0;
 459		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 460		    (vaddr & ~PAGE_MASK) == 0)
 461			align = PAGE_SHIFT - tbl->it_page_shift;
 462		entry = iommu_range_alloc(dev, tbl, npages, &handle,
 463					  mask >> tbl->it_page_shift, align);
 464
 465		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 466
 467		/* Handle failure */
 468		if (unlikely(entry == DMA_MAPPING_ERROR)) {
 469			if (!(attrs & DMA_ATTR_NO_WARN) &&
 470			    printk_ratelimit())
 471				dev_info(dev, "iommu_alloc failed, tbl %p "
 472					 "vaddr %lx npages %lu\n", tbl, vaddr,
 473					 npages);
 474			goto failure;
 475		}
 476
 477		/* Convert entry to a dma_addr_t */
 478		entry += tbl->it_offset;
 479		dma_addr = entry << tbl->it_page_shift;
 480		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 481
 482		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 483			    npages, entry, dma_addr);
 484
 485		/* Insert into HW table */
 486		build_fail = tbl->it_ops->set(tbl, entry, npages,
 487					      vaddr & IOMMU_PAGE_MASK(tbl),
 488					      direction, attrs);
 489		if(unlikely(build_fail))
 490			goto failure;
 491
 492		/* If we are in an open segment, try merging */
 493		if (segstart != s) {
 494			DBG("  - trying merge...\n");
 495			/* We cannot merge if:
 496			 * - allocated dma_addr isn't contiguous to previous allocation
 497			 */
 498			if (novmerge || (dma_addr != dma_next) ||
 499			    (outs->dma_length + s->length > max_seg_size)) {
 500				/* Can't merge: create a new segment */
 501				segstart = s;
 502				outcount++;
 503				outs = sg_next(outs);
 504				DBG("    can't merge, new segment.\n");
 505			} else {
 506				outs->dma_length += s->length;
 507				DBG("    merged, new len: %ux\n", outs->dma_length);
 508			}
 509		}
 510
 511		if (segstart == s) {
 512			/* This is a new segment, fill entries */
 513			DBG("  - filling new segment.\n");
 514			outs->dma_address = dma_addr;
 515			outs->dma_length = slen;
 516		}
 517
 518		/* Calculate next page pointer for contiguous check */
 519		dma_next = dma_addr + slen;
 520
 521		DBG("  - dma next is: %lx\n", dma_next);
 522	}
 523
 524	/* Flush/invalidate TLB caches if necessary */
 525	if (tbl->it_ops->flush)
 526		tbl->it_ops->flush(tbl);
 527
 528	DBG("mapped %d elements:\n", outcount);
 529
 530	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
 531	 * next entry of the sglist if we didn't fill the list completely
 532	 */
 533	if (outcount < incount) {
 534		outs = sg_next(outs);
 535		outs->dma_address = DMA_MAPPING_ERROR;
 536		outs->dma_length = 0;
 537	}
 538
 539	/* Make sure updates are seen by hardware */
 540	mb();
 541
 542	return outcount;
 543
 544 failure:
 545	for_each_sg(sglist, s, nelems, i) {
 546		if (s->dma_length != 0) {
 547			unsigned long vaddr, npages;
 548
 549			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 550			npages = iommu_num_pages(s->dma_address, s->dma_length,
 551						 IOMMU_PAGE_SIZE(tbl));
 552			__iommu_free(tbl, vaddr, npages);
 553			s->dma_address = DMA_MAPPING_ERROR;
 554			s->dma_length = 0;
 555		}
 556		if (s == outs)
 557			break;
 558	}
 559	return 0;
 560}
 561
 562
 563void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 564			int nelems, enum dma_data_direction direction,
 565			unsigned long attrs)
 566{
 567	struct scatterlist *sg;
 568
 569	BUG_ON(direction == DMA_NONE);
 570
 571	if (!tbl)
 572		return;
 573
 574	sg = sglist;
 575	while (nelems--) {
 576		unsigned int npages;
 577		dma_addr_t dma_handle = sg->dma_address;
 578
 579		if (sg->dma_length == 0)
 580			break;
 581		npages = iommu_num_pages(dma_handle, sg->dma_length,
 582					 IOMMU_PAGE_SIZE(tbl));
 583		__iommu_free(tbl, dma_handle, npages);
 584		sg = sg_next(sg);
 585	}
 586
 587	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
 588	 * do not do an mb() here, the affected platforms do not need it
 589	 * when freeing.
 590	 */
 591	if (tbl->it_ops->flush)
 592		tbl->it_ops->flush(tbl);
 593}
 594
 595static void iommu_table_clear(struct iommu_table *tbl)
 596{
 597	/*
 598	 * In case of firmware assisted dump system goes through clean
 599	 * reboot process at the time of system crash. Hence it's safe to
 600	 * clear the TCE entries if firmware assisted dump is active.
 601	 */
 602	if (!is_kdump_kernel() || is_fadump_active()) {
 603		/* Clear the table in case firmware left allocations in it */
 604		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 605		return;
 606	}
 607
 608#ifdef CONFIG_CRASH_DUMP
 609	if (tbl->it_ops->get) {
 610		unsigned long index, tceval, tcecount = 0;
 611
 612		/* Reserve the existing mappings left by the first kernel. */
 613		for (index = 0; index < tbl->it_size; index++) {
 614			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 615			/*
 616			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
 617			 */
 618			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
 619				__set_bit(index, tbl->it_map);
 620				tcecount++;
 621			}
 622		}
 623
 624		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
 625			printk(KERN_WARNING "TCE table is full; freeing ");
 626			printk(KERN_WARNING "%d entries for the kdump boot\n",
 627				KDUMP_MIN_TCE_ENTRIES);
 628			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
 629				index < tbl->it_size; index++)
 630				__clear_bit(index, tbl->it_map);
 631		}
 632	}
 633#endif
 634}
 635
 636static void iommu_table_reserve_pages(struct iommu_table *tbl,
 637		unsigned long res_start, unsigned long res_end)
 638{
 639	int i;
 640
 641	WARN_ON_ONCE(res_end < res_start);
 642	/*
 643	 * Reserve page 0 so it will not be used for any mappings.
 644	 * This avoids buggy drivers that consider page 0 to be invalid
 645	 * to crash the machine or even lose data.
 646	 */
 647	if (tbl->it_offset == 0)
 648		set_bit(0, tbl->it_map);
 649
 650	tbl->it_reserved_start = res_start;
 651	tbl->it_reserved_end = res_end;
 652
 653	/* Check if res_start..res_end isn't empty and overlaps the table */
 654	if (res_start && res_end &&
 655			(tbl->it_offset + tbl->it_size < res_start ||
 656			 res_end < tbl->it_offset))
 657		return;
 658
 659	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
 660		set_bit(i - tbl->it_offset, tbl->it_map);
 661}
 662
 663static void iommu_table_release_pages(struct iommu_table *tbl)
 664{
 665	int i;
 666
 667	/*
 668	 * In case we have reserved the first bit, we should not emit
 669	 * the warning below.
 670	 */
 671	if (tbl->it_offset == 0)
 672		clear_bit(0, tbl->it_map);
 673
 674	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
 675		clear_bit(i - tbl->it_offset, tbl->it_map);
 676}
 677
 678/*
 679 * Build a iommu_table structure.  This contains a bit map which
 680 * is used to manage allocation of the tce space.
 681 */
 682struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
 683		unsigned long res_start, unsigned long res_end)
 684{
 685	unsigned long sz;
 686	static int welcomed = 0;
 687	struct page *page;
 688	unsigned int i;
 689	struct iommu_pool *p;
 690
 691	BUG_ON(!tbl->it_ops);
 692
 693	/* number of bytes needed for the bitmap */
 694	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 695
 696	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 697	if (!page)
 698		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 699	tbl->it_map = page_address(page);
 700	memset(tbl->it_map, 0, sz);
 701
 702	iommu_table_reserve_pages(tbl, res_start, res_end);
 703
 704	/* We only split the IOMMU table if we have 1GB or more of space */
 705	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 706		tbl->nr_pools = IOMMU_NR_POOLS;
 707	else
 708		tbl->nr_pools = 1;
 709
 710	/* We reserve the top 1/4 of the table for large allocations */
 711	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
 712
 713	for (i = 0; i < tbl->nr_pools; i++) {
 714		p = &tbl->pools[i];
 715		spin_lock_init(&(p->lock));
 716		p->start = tbl->poolsize * i;
 717		p->hint = p->start;
 718		p->end = p->start + tbl->poolsize;
 719	}
 720
 721	p = &tbl->large_pool;
 722	spin_lock_init(&(p->lock));
 723	p->start = tbl->poolsize * i;
 724	p->hint = p->start;
 725	p->end = tbl->it_size;
 726
 727	iommu_table_clear(tbl);
 728
 729	if (!welcomed) {
 730		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 731		       novmerge ? "disabled" : "enabled");
 732		welcomed = 1;
 733	}
 734
 735	return tbl;
 736}
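/*
 * A minimal caller sketch (assuming a hypothetical my_tce_ops, NUMA node id
 * and window size, not taken from any particular platform):
 * iommu_init_table() expects it_ops, it_size, it_page_shift and it_offset to
 * be filled in beforehand, and res_start/res_end may both be 0 when no extra
 * window needs to be reserved.
 *
 *	tbl->it_ops = &my_tce_ops;
 *	tbl->it_offset = 0;
 *	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;
 *	tbl->it_size = window_size >> tbl->it_page_shift;
 *	tbl = iommu_init_table(tbl, nid, 0, 0);
 */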
 737
 738static void iommu_table_free(struct kref *kref)
 739{
 740	unsigned long bitmap_sz;
 741	unsigned int order;
 742	struct iommu_table *tbl;
 743
 744	tbl = container_of(kref, struct iommu_table, it_kref);
 745
 746	if (tbl->it_ops->free)
 747		tbl->it_ops->free(tbl);
 748
 749	if (!tbl->it_map) {
 750		kfree(tbl);
 751		return;
 752	}
 753
 754	iommu_table_release_pages(tbl);
 755
 756	/* verify that table contains no entries */
 757	if (!bitmap_empty(tbl->it_map, tbl->it_size))
 758		pr_warn("%s: Unexpected TCEs\n", __func__);
 759
 760	/* calculate bitmap size in bytes */
 761	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 762
 763	/* free bitmap */
 764	order = get_order(bitmap_sz);
 765	free_pages((unsigned long) tbl->it_map, order);
 766
 767	/* free table */
 768	kfree(tbl);
 769}
 770
 771struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
 772{
 773	if (kref_get_unless_zero(&tbl->it_kref))
 774		return tbl;
 775
 776	return NULL;
 777}
 778EXPORT_SYMBOL_GPL(iommu_tce_table_get);
 779
 780int iommu_tce_table_put(struct iommu_table *tbl)
 781{
 782	if (WARN_ON(!tbl))
 783		return 0;
 784
 785	return kref_put(&tbl->it_kref, iommu_table_free);
 786}
 787EXPORT_SYMBOL_GPL(iommu_tce_table_put);
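/*
 * Table lifetime is reference counted: a user that needs to hold on to a
 * table is expected to pair the two helpers above (a sketch, assuming the
 * caller already has a valid tbl pointer):
 *
 *	tbl = iommu_tce_table_get(tbl);
 *	if (!tbl)
 *		return -ENODEV;		/* refcount already dropped to zero */
 *	...
 *	iommu_tce_table_put(tbl);	/* last put frees via iommu_table_free() */
 */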
 788
 789/* Creates TCEs for a user provided buffer.  The user buffer must be
 790 * contiguous real kernel storage (not vmalloc).  The address passed here
 791 * comprises a page address and offset into that page. The dma_addr_t
 792 * returned will point to the same byte within the page as was passed in.
 793 */
 794dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 795			  struct page *page, unsigned long offset, size_t size,
 796			  unsigned long mask, enum dma_data_direction direction,
 797			  unsigned long attrs)
 798{
 799	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
 800	void *vaddr;
 801	unsigned long uaddr;
 802	unsigned int npages, align;
 803
 804	BUG_ON(direction == DMA_NONE);
 805
 806	vaddr = page_address(page) + offset;
 807	uaddr = (unsigned long)vaddr;
 808
 809	if (tbl) {
 810		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 811		align = 0;
 812		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 813		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 814			align = PAGE_SHIFT - tbl->it_page_shift;
 815
 816		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 817					 mask >> tbl->it_page_shift, align,
 818					 attrs);
 819		if (dma_handle == DMA_MAPPING_ERROR) {
 820			if (!(attrs & DMA_ATTR_NO_WARN) &&
 821			    printk_ratelimit())  {
 822				dev_info(dev, "iommu_alloc failed, tbl %p "
 823					 "vaddr %p npages %d\n", tbl, vaddr,
 824					 npages);
 825			}
 826		} else
 827			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 828	}
 829
 830	return dma_handle;
 831}
 832
 833void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 834		      size_t size, enum dma_data_direction direction,
 835		      unsigned long attrs)
 836{
 837	unsigned int npages;
 838
 839	BUG_ON(direction == DMA_NONE);
 840
 841	if (tbl) {
 842		npages = iommu_num_pages(dma_handle, size,
 843					 IOMMU_PAGE_SIZE(tbl));
 844		iommu_free(tbl, dma_handle, npages);
 845	}
 846}
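/*
 * A typical streaming-DMA sequence built on the two helpers above (a sketch,
 * assuming hypothetical dev, tbl, page and len variables):
 *
 *	dma_addr_t dma = iommu_map_page(dev, tbl, page, 0, len,
 *					dma_get_mask(dev), DMA_TO_DEVICE, 0);
 *	if (dma == DMA_MAPPING_ERROR)
 *		return -EIO;
 *	... program the device and wait for the DMA to complete ...
 *	iommu_unmap_page(tbl, dma, len, DMA_TO_DEVICE, 0);
 */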
 847
 848/* Allocates a contiguous real buffer and creates mappings over it.
 849 * Returns the virtual address of the buffer and sets dma_handle
 850 * to the dma address (mapping) of the first page.
 851 */
 852void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 853			   size_t size,	dma_addr_t *dma_handle,
 854			   unsigned long mask, gfp_t flag, int node)
 855{
 856	void *ret = NULL;
 857	dma_addr_t mapping;
 858	unsigned int order;
 859	unsigned int nio_pages, io_order;
 860	struct page *page;
 861
 862	size = PAGE_ALIGN(size);
 863	order = get_order(size);
 864
 865	/*
 866	 * Client asked for way too much space.  This is checked later
 867	 * anyway.  It is easier to debug here for the drivers than in
 868	 * the tce tables.
 869	 */
 870	if (order >= IOMAP_MAX_ORDER) {
 871		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
 872			 size);
 873		return NULL;
 874	}
 875
 876	if (!tbl)
 877		return NULL;
 878
 879	/* Alloc enough pages (and possibly more) */
 880	page = alloc_pages_node(node, flag, order);
 881	if (!page)
 882		return NULL;
 883	ret = page_address(page);
 884	memset(ret, 0, size);
 885
 886	/* Set up tces to cover the allocated range */
 887	nio_pages = size >> tbl->it_page_shift;
 888	io_order = get_iommu_order(size, tbl);
 889	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 890			      mask >> tbl->it_page_shift, io_order, 0);
 891	if (mapping == DMA_MAPPING_ERROR) {
 892		free_pages((unsigned long)ret, order);
 893		return NULL;
 894	}
 895	*dma_handle = mapping;
 896	return ret;
 897}
 898
 899void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 900			 void *vaddr, dma_addr_t dma_handle)
 901{
 902	if (tbl) {
 903		unsigned int nio_pages;
 904
 905		size = PAGE_ALIGN(size);
 906		nio_pages = size >> tbl->it_page_shift;
 907		iommu_free(tbl, dma_handle, nio_pages);
 908		/* size is already page-aligned above */
 909		free_pages((unsigned long)vaddr, get_order(size));
 910	}
 911}
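/*
 * Allocation and release are expected to be symmetric (a sketch, assuming a
 * hypothetical 64KB buffer on NUMA node nid):
 *
 *	dma_addr_t dma;
 *	void *buf = iommu_alloc_coherent(dev, tbl, SZ_64K, &dma,
 *					 dma_get_mask(dev), GFP_KERNEL, nid);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	iommu_free_coherent(tbl, SZ_64K, buf, dma);
 */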
 912
 913unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
 914{
 915	switch (dir) {
 916	case DMA_BIDIRECTIONAL:
 917		return TCE_PCI_READ | TCE_PCI_WRITE;
 918	case DMA_FROM_DEVICE:
 919		return TCE_PCI_WRITE;
 920	case DMA_TO_DEVICE:
 921		return TCE_PCI_READ;
 922	default:
 923		return 0;
 924	}
 925}
 926EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
 927
 928#ifdef CONFIG_IOMMU_API
 929/*
 930 * SPAPR TCE API
 931 */
 932static void group_release(void *iommu_data)
 933{
 934	struct iommu_table_group *table_group = iommu_data;
 935
 936	table_group->group = NULL;
 937}
 938
 939void iommu_register_group(struct iommu_table_group *table_group,
 940		int pci_domain_number, unsigned long pe_num)
 941{
 942	struct iommu_group *grp;
 943	char *name;
 944
 945	grp = iommu_group_alloc();
 946	if (IS_ERR(grp)) {
 947		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
 948				PTR_ERR(grp));
 949		return;
 950	}
 951	table_group->group = grp;
 952	iommu_group_set_iommudata(grp, table_group, group_release);
 953	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
 954			pci_domain_number, pe_num);
 955	if (!name)
 956		return;
 957	iommu_group_set_name(grp, name);
 958	kfree(name);
 959}
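/*
 * With the name format used above, a PE on PCI domain 4 with PE number 0x1f
 * ends up in an IOMMU group called "domain4-pe1f", e.g. (sketch):
 *
 *	iommu_register_group(table_group, 4, 0x1f);
 */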
 960
 961enum dma_data_direction iommu_tce_direction(unsigned long tce)
 962{
 963	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
 964		return DMA_BIDIRECTIONAL;
 965	else if (tce & TCE_PCI_READ)
 966		return DMA_TO_DEVICE;
 967	else if (tce & TCE_PCI_WRITE)
 968		return DMA_FROM_DEVICE;
 969	else
 970		return DMA_NONE;
 971}
 972EXPORT_SYMBOL_GPL(iommu_tce_direction);
 973
 974void iommu_flush_tce(struct iommu_table *tbl)
 975{
 976	/* Flush/invalidate TLB caches if necessary */
 977	if (tbl->it_ops->flush)
 978		tbl->it_ops->flush(tbl);
 979
 980	/* Make sure updates are seen by hardware */
 981	mb();
 982}
 983EXPORT_SYMBOL_GPL(iommu_flush_tce);
 984
 985int iommu_tce_check_ioba(unsigned long page_shift,
 986		unsigned long offset, unsigned long size,
 987		unsigned long ioba, unsigned long npages)
 988{
 989	unsigned long mask = (1UL << page_shift) - 1;
 990
 991	if (ioba & mask)
 992		return -EINVAL;
 993
 994	ioba >>= page_shift;
 995	if (ioba < offset)
 996		return -EINVAL;
 997
 998	if ((ioba + 1) > (offset + size))
 999		return -EINVAL;
1000
1001	return 0;
1002}
1003EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
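/*
 * Worked examples (assuming page_shift = 12, offset = 0 and a window of
 * size = 0x1000 TCE entries, i.e. 16MB of DMA space):
 *
 *	iommu_tce_check_ioba(12, 0, 0x1000, 0x5000, 1)    -> 0       (entry 5)
 *	iommu_tce_check_ioba(12, 0, 0x1000, 0x5800, 1)    -> -EINVAL (unaligned)
 *	iommu_tce_check_ioba(12, 0, 0x1000, 0x1000000, 1) -> -EINVAL (past the window)
 *
 * Note that npages itself is not checked against the window here.
 */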
1004
1005int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1006{
1007	unsigned long mask = (1UL << page_shift) - 1;
1008
1009	if (gpa & mask)
1010		return -EINVAL;
1011
1012	return 0;
1013}
1014EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1015
1016long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1017		struct iommu_table *tbl,
1018		unsigned long entry, unsigned long *hpa,
1019		enum dma_data_direction *direction)
1020{
1021	long ret;
1022	unsigned long size = 0;
1023
1024	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
1025	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1026			(*direction == DMA_BIDIRECTIONAL)) &&
1027			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1028					&size))
1029		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1030
1031	return ret;
1032}
1033EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1034
1035void iommu_tce_kill(struct iommu_table *tbl,
1036		unsigned long entry, unsigned long pages)
1037{
1038	if (tbl->it_ops->tce_kill)
1039		tbl->it_ops->tce_kill(tbl, entry, pages, false);
1040}
1041EXPORT_SYMBOL_GPL(iommu_tce_kill);
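/*
 * Callers are expected to batch invalidations: exchange entries with
 * xchg_no_kill() first, then issue a single kill for the whole range (a
 * sketch, assuming hypothetical entry/npages/hpa/dir variables):
 *
 *	for (i = 0; i < npages; i++)
 *		ret = iommu_tce_xchg_no_kill(mm, tbl, entry + i, &hpa[i], &dir[i]);
 *	iommu_tce_kill(tbl, entry, npages);
 */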
1042
1043int iommu_take_ownership(struct iommu_table *tbl)
1044{
1045	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1046	int ret = 0;
1047
1048	/*
1049	 * VFIO does not control TCE entry allocation and the guest
1050	 * can write new TCEs on top of existing ones, so updating the
1051	 * table must be able to release the old pages. This requires
1052	 * the xchg_no_kill() callback to be defined, so if it is not
1053	 * implemented we disallow taking ownership of the table.
1054	 */
1055	if (!tbl->it_ops->xchg_no_kill)
1056		return -EINVAL;
1057
1058	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1059	for (i = 0; i < tbl->nr_pools; i++)
1060		spin_lock(&tbl->pools[i].lock);
1061
1062	iommu_table_release_pages(tbl);
1063
1064	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
1065		pr_err("iommu_tce: it_map is not empty\n");
1066		ret = -EBUSY;
1067		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
1068		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1069				tbl->it_reserved_end);
1070	} else {
1071		memset(tbl->it_map, 0xff, sz);
1072	}
1073
1074	for (i = 0; i < tbl->nr_pools; i++)
1075		spin_unlock(&tbl->pools[i].lock);
1076	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1077
1078	return ret;
1079}
1080EXPORT_SYMBOL_GPL(iommu_take_ownership);
1081
1082void iommu_release_ownership(struct iommu_table *tbl)
1083{
1084	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1085
1086	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1087	for (i = 0; i < tbl->nr_pools; i++)
1088		spin_lock(&tbl->pools[i].lock);
1089
1090	memset(tbl->it_map, 0, sz);
1091
1092	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1093			tbl->it_reserved_end);
1094
1095	for (i = 0; i < tbl->nr_pools; i++)
1096		spin_unlock(&tbl->pools[i].lock);
1097	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1098}
1099EXPORT_SYMBOL_GPL(iommu_release_ownership);
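/*
 * Ownership is expected to bracket exclusive use of the window by an
 * external user such as VFIO (a sketch):
 *
 *	if (iommu_take_ownership(tbl))
 *		return -EBUSY;		/* kernel still holds DMA mappings */
 *	... the table is now controlled entirely by the external user ...
 *	iommu_release_ownership(tbl);	/* hand the window back to the DMA API */
 */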
1100
1101int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1102{
1103	/*
1104	 * The sysfs entries should be populated before
1105	 * binding the IOMMU group. If the sysfs entries aren't
1106	 * ready, we simply bail.
1107	 */
1108	if (!device_is_registered(dev))
1109		return -ENOENT;
1110
1111	if (device_iommu_mapped(dev)) {
1112		pr_debug("%s: Skipping device %s with iommu group %d\n",
1113			 __func__, dev_name(dev),
1114			 iommu_group_id(dev->iommu_group));
1115		return -EBUSY;
1116	}
1117
1118	pr_debug("%s: Adding %s to iommu group %d\n",
1119		 __func__, dev_name(dev),  iommu_group_id(table_group->group));
1120
1121	return iommu_group_add_device(table_group->group, dev);
1122}
1123EXPORT_SYMBOL_GPL(iommu_add_device);
1124
1125void iommu_del_device(struct device *dev)
1126{
1127	/*
1128	 * Some devices might not have an IOMMU table and group,
1129	 * and we need not detach them from the associated
1130	 * IOMMU groups.
1131	 */
1132	if (!device_iommu_mapped(dev)) {
1133		pr_debug("iommu_tce: skipping device %s with no tbl\n",
1134			 dev_name(dev));
1135		return;
1136	}
1137
1138	iommu_group_remove_device(dev);
1139}
1140EXPORT_SYMBOL_GPL(iommu_del_device);
1141#endif /* CONFIG_IOMMU_API */