v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
   4 * 
   5 * Rewrite, cleanup, new allocation schemes, virtual merging: 
   6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
   7 *               and  Ben. Herrenschmidt, IBM Corporation
   8 *
   9 * Dynamic DMA mapping support, bus-independent parts.
  10 */
  11
  12
  13#include <linux/init.h>
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <linux/mm.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/bitmap.h>
  21#include <linux/iommu-helper.h>
  22#include <linux/crash_dump.h>
  23#include <linux/hash.h>
  24#include <linux/fault-inject.h>
  25#include <linux/pci.h>
  26#include <linux/iommu.h>
  27#include <linux/sched.h>
  28#include <asm/io.h>
  29#include <asm/prom.h>
  30#include <asm/iommu.h>
  31#include <asm/pci-bridge.h>
  32#include <asm/machdep.h>
  33#include <asm/kdump.h>
  34#include <asm/fadump.h>
  35#include <asm/vio.h>
  36#include <asm/tce.h>
  37#include <asm/mmu_context.h>
  38
  39#define DBG(...)
  40
  41static int novmerge;
  42
  43static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
  44
  45static int __init setup_iommu(char *str)
  46{
  47	if (!strcmp(str, "novmerge"))
  48		novmerge = 1;
  49	else if (!strcmp(str, "vmerge"))
  50		novmerge = 0;
  51	return 1;
  52}
  53
  54__setup("iommu=", setup_iommu);
  55
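/*
 * Illustration (grounded in setup_iommu() above): virtual merging is
 * controlled from the kernel command line, e.g.
 *
 *	iommu=novmerge		- disable merging of scatterlist segments
 *	iommu=vmerge		- allow merging (the default, since novmerge
 *				  is statically initialised to 0)
 */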
  56static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
  57
  58/*
  59 * We precalculate the hash to avoid doing it on every allocation.
  60 *
  61 * The hash is important to spread CPUs across all the pools. For example,
  62 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
  63 * with 4 pools all primary threads would map to the same pool.
  64 */
  65static int __init setup_iommu_pool_hash(void)
  66{
  67	unsigned int i;
  68
  69	for_each_possible_cpu(i)
  70		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
  71
  72	return 0;
  73}
  74subsys_initcall(setup_iommu_pool_hash);
  75
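/*
 * Worked illustration of the hashing rationale above (assuming
 * IOMMU_POOL_HASHBITS is 2, i.e. at most 4 pools): on a 4-way SMT system
 * the primary threads are CPUs 0, 4, 8, 12, ...  A plain "cpu & 3" would
 * put every primary thread in pool 0, whereas hash_32(cpu, 2) spreads
 * them across all four pools.
 */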
  76#ifdef CONFIG_FAIL_IOMMU
  77
  78static DECLARE_FAULT_ATTR(fail_iommu);
  79
  80static int __init setup_fail_iommu(char *str)
  81{
  82	return setup_fault_attr(&fail_iommu, str);
  83}
  84__setup("fail_iommu=", setup_fail_iommu);
  85
  86static bool should_fail_iommu(struct device *dev)
  87{
  88	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
  89}
  90
  91static int __init fail_iommu_debugfs(void)
  92{
  93	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
  94						       NULL, &fail_iommu);
  95
  96	return PTR_ERR_OR_ZERO(dir);
  97}
  98late_initcall(fail_iommu_debugfs);
  99
 100static ssize_t fail_iommu_show(struct device *dev,
 101			       struct device_attribute *attr, char *buf)
 102{
 103	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
 104}
 105
 106static ssize_t fail_iommu_store(struct device *dev,
 107				struct device_attribute *attr, const char *buf,
 108				size_t count)
 109{
 110	int i;
 111
 112	if (count > 0 && sscanf(buf, "%d", &i) > 0)
 113		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
 114
 115	return count;
 116}
 117
 118static DEVICE_ATTR_RW(fail_iommu);
 119
 120static int fail_iommu_bus_notify(struct notifier_block *nb,
 121				 unsigned long action, void *data)
 122{
 123	struct device *dev = data;
 124
 125	if (action == BUS_NOTIFY_ADD_DEVICE) {
 126		if (device_create_file(dev, &dev_attr_fail_iommu))
 127			pr_warn("Unable to create IOMMU fault injection sysfs "
 128				"entries\n");
 129	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
 130		device_remove_file(dev, &dev_attr_fail_iommu);
 131	}
 132
 133	return 0;
 134}
 135
 136static struct notifier_block fail_iommu_bus_notifier = {
 137	.notifier_call = fail_iommu_bus_notify
 138};
 139
 140static int __init fail_iommu_setup(void)
 141{
 142#ifdef CONFIG_PCI
 143	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
 144#endif
 145#ifdef CONFIG_IBMVIO
 146	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
 147#endif
 148
 149	return 0;
 150}
 151/*
 152 * Must execute after the PCI and VIO subsystems have initialised but before
 153 * devices are probed.
 154 */
 155arch_initcall(fail_iommu_setup);
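/*
 * Illustrative use of the fault injection hooks above (standard fault
 * injection syntax, see Documentation/fault-injection/):
 *
 *	fail_iommu=<interval>,<probability>,<space>,<times>	(boot line)
 *	echo 1 > /sys/bus/pci/devices/<dev>/fail_iommu		(per device)
 *
 * should_fail_iommu() only reports failures for devices whose per-device
 * fail_iommu attribute has been set.
 */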
 156#else
 157static inline bool should_fail_iommu(struct device *dev)
 158{
 159	return false;
 160}
 161#endif
 162
 163static unsigned long iommu_range_alloc(struct device *dev,
 164				       struct iommu_table *tbl,
 165                                       unsigned long npages,
 166                                       unsigned long *handle,
 167                                       unsigned long mask,
 168                                       unsigned int align_order)
 169{ 
 170	unsigned long n, end, start;
 171	unsigned long limit;
 172	int largealloc = npages > 15;
 173	int pass = 0;
 174	unsigned long align_mask;
 175	unsigned long boundary_size;
 176	unsigned long flags;
 177	unsigned int pool_nr;
 178	struct iommu_pool *pool;
 179
 180	align_mask = (1ull << align_order) - 1;
 181
 182	/* This allocator was derived from x86_64's bit string search */
 183
 184	/* Sanity check */
 185	if (unlikely(npages == 0)) {
 186		if (printk_ratelimit())
 187			WARN_ON(1);
 188		return DMA_MAPPING_ERROR;
 189	}
 190
 191	if (should_fail_iommu(dev))
 192		return DMA_MAPPING_ERROR;
 193
 194	/*
 195	 * We don't need to disable preemption here because any CPU can
 196	 * safely use any IOMMU pool.
 197	 */
 198	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 199
 200	if (largealloc)
 201		pool = &(tbl->large_pool);
 202	else
 203		pool = &(tbl->pools[pool_nr]);
 204
 205	spin_lock_irqsave(&(pool->lock), flags);
 206
 207again:
 208	if ((pass == 0) && handle && *handle &&
 209	    (*handle >= pool->start) && (*handle < pool->end))
 210		start = *handle;
 211	else
 212		start = pool->hint;
 213
 214	limit = pool->end;
 215
 216	/* The case below can happen if we have a small segment appended
 217	 * to a large one, or when the previous alloc was at the very end of
 218	 * the available space. If so, go back to the initial start.
 219	 */
 220	if (start >= limit)
 221		start = pool->start;
 222
 223	if (limit + tbl->it_offset > mask) {
 224		limit = mask - tbl->it_offset + 1;
 225		/* If we're constrained on address range, first try
 226		 * at the masked hint to avoid O(n) search complexity,
 227		 * but on second pass, start at 0 in pool 0.
 228		 */
 229		if ((start & mask) >= limit || pass > 0) {
 230			spin_unlock(&(pool->lock));
 231			pool = &(tbl->pools[0]);
 232			spin_lock(&(pool->lock));
 233			start = pool->start;
 234		} else {
 235			start &= mask;
 236		}
 237	}
 238
 239	if (dev)
 240		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 241				      1 << tbl->it_page_shift);
 242	else
 243		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
 244	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 245
 246	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
 247			     boundary_size >> tbl->it_page_shift, align_mask);
 248	if (n == -1) {
 249		if (likely(pass == 0)) {
 250			/* First try the pool from the start */
 251			pool->hint = pool->start;
 252			pass++;
 253			goto again;
 254
 255		} else if (pass <= tbl->nr_pools) {
 256			/* Now try scanning all the other pools */
 257			spin_unlock(&(pool->lock));
 258			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
 259			pool = &tbl->pools[pool_nr];
 260			spin_lock(&(pool->lock));
 261			pool->hint = pool->start;
 262			pass++;
 263			goto again;
 264
 265		} else {
 266			/* Give up */
 267			spin_unlock_irqrestore(&(pool->lock), flags);
 268			return DMA_MAPPING_ERROR;
 269		}
 270	}
 271
 272	end = n + npages;
 273
 274	/* Bump the hint to a new block for small allocs. */
 275	if (largealloc) {
 276		/* Don't bump to new block to avoid fragmentation */
 277		pool->hint = end;
 278	} else {
 279		/* Overflow will be taken care of at the next allocation */
 280		pool->hint = (end + tbl->it_blocksize - 1) &
 281		                ~(tbl->it_blocksize - 1);
 282	}
 283
 284	/* Update handle for SG allocations */
 285	if (handle)
 286		*handle = end;
 287
 288	spin_unlock_irqrestore(&(pool->lock), flags);
 289
 290	return n;
 291}
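/*
 * Retry order implemented by iommu_range_alloc() above: pass 0 scans the
 * selected pool from *handle or its hint, pass 1 rescans that pool from
 * its start, and the remaining passes walk the other pools before the
 * allocation finally fails with DMA_MAPPING_ERROR.
 */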
 292
 293static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 294			      void *page, unsigned int npages,
 295			      enum dma_data_direction direction,
 296			      unsigned long mask, unsigned int align_order,
 297			      unsigned long attrs)
 298{
 299	unsigned long entry;
 300	dma_addr_t ret = DMA_MAPPING_ERROR;
 301	int build_fail;
 302
 303	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 304
 305	if (unlikely(entry == DMA_MAPPING_ERROR))
 306		return DMA_MAPPING_ERROR;
 307
 308	entry += tbl->it_offset;	/* Offset into real TCE table */
 309	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 310
 311	/* Put the TCEs in the HW table */
 312	build_fail = tbl->it_ops->set(tbl, entry, npages,
 313				      (unsigned long)page &
 314				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 315
 316	/* tbl->it_ops->set() only returns non-zero for transient errors.
 317	 * Clean up the table bitmap in this case and return
 318	 * DMA_MAPPING_ERROR. For all other errors the functionality is
 319	 * not altered.
 320	 */
 321	if (unlikely(build_fail)) {
 322		__iommu_free(tbl, ret, npages);
 323		return DMA_MAPPING_ERROR;
 324	}
 325
 326	/* Flush/invalidate TLB caches if necessary */
 327	if (tbl->it_ops->flush)
 328		tbl->it_ops->flush(tbl);
 329
 330	/* Make sure updates are seen by hardware */
 331	mb();
 332
 333	return ret;
 334}
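/*
 * Worked example of the address math above (illustrative, assuming a 4K
 * IOMMU page, i.e. it_page_shift == 12): an allocation landing on TCE
 * entry 0x100 yields a DMA address of 0x100 << 12 == 0x100000; callers
 * such as iommu_map_page() OR the sub-page offset back into that address.
 */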
 335
 336static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 337			     unsigned int npages)
 338{
 339	unsigned long entry, free_entry;
 340
 341	entry = dma_addr >> tbl->it_page_shift;
 342	free_entry = entry - tbl->it_offset;
 343
 344	if (((free_entry + npages) > tbl->it_size) ||
 345	    (entry < tbl->it_offset)) {
 346		if (printk_ratelimit()) {
 347			printk(KERN_INFO "iommu_free: invalid entry\n");
 348			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
 349			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
 350			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
 351			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
 352			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
 353			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
 354			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
 355			WARN_ON(1);
 356		}
 357
 358		return false;
 359	}
 360
 361	return true;
 362}
 363
 364static struct iommu_pool *get_pool(struct iommu_table *tbl,
 365				   unsigned long entry)
 366{
 367	struct iommu_pool *p;
 368	unsigned long largepool_start = tbl->large_pool.start;
 369
 370	/* The large pool is the last pool at the top of the table */
 371	if (entry >= largepool_start) {
 372		p = &tbl->large_pool;
 373	} else {
 374		unsigned int pool_nr = entry / tbl->poolsize;
 375
 376		BUG_ON(pool_nr > tbl->nr_pools);
 377		p = &tbl->pools[pool_nr];
 378	}
 379
 380	return p;
 381}
 382
 383static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 384			 unsigned int npages)
 385{
 386	unsigned long entry, free_entry;
 387	unsigned long flags;
 388	struct iommu_pool *pool;
 389
 390	entry = dma_addr >> tbl->it_page_shift;
 391	free_entry = entry - tbl->it_offset;
 392
 393	pool = get_pool(tbl, free_entry);
 394
 395	if (!iommu_free_check(tbl, dma_addr, npages))
 396		return;
 397
 398	tbl->it_ops->clear(tbl, entry, npages);
 399
 400	spin_lock_irqsave(&(pool->lock), flags);
 401	bitmap_clear(tbl->it_map, free_entry, npages);
 402	spin_unlock_irqrestore(&(pool->lock), flags);
 403}
 404
 405static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 406		unsigned int npages)
 407{
 408	__iommu_free(tbl, dma_addr, npages);
 409
 410	/* Make sure TLB cache is flushed if the HW needs it. We do
 411	 * not do an mb() here on purpose, it is not needed on any of
 412	 * the current platforms.
 413	 */
 414	if (tbl->it_ops->flush)
 415		tbl->it_ops->flush(tbl);
 416}
 417
 418int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 419		     struct scatterlist *sglist, int nelems,
 420		     unsigned long mask, enum dma_data_direction direction,
 421		     unsigned long attrs)
 422{
 423	dma_addr_t dma_next = 0, dma_addr;
 424	struct scatterlist *s, *outs, *segstart;
 425	int outcount, incount, i, build_fail = 0;
 426	unsigned int align;
 427	unsigned long handle;
 428	unsigned int max_seg_size;
 429
 430	BUG_ON(direction == DMA_NONE);
 431
 432	if ((nelems == 0) || !tbl)
 433		return 0;
 434
 435	outs = s = segstart = &sglist[0];
 436	outcount = 1;
 437	incount = nelems;
 438	handle = 0;
 439
 440	/* Init first segment length for backout at failure */
 441	outs->dma_length = 0;
 442
 443	DBG("sg mapping %d elements:\n", nelems);
 444
 445	max_seg_size = dma_get_max_seg_size(dev);
 446	for_each_sg(sglist, s, nelems, i) {
 447		unsigned long vaddr, npages, entry, slen;
 448
 449		slen = s->length;
 450		/* Sanity check */
 451		if (slen == 0) {
 452			dma_next = 0;
 453			continue;
 454		}
 455		/* Allocate iommu entries for that segment */
 456		vaddr = (unsigned long) sg_virt(s);
 457		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 458		align = 0;
 459		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 460		    (vaddr & ~PAGE_MASK) == 0)
 461			align = PAGE_SHIFT - tbl->it_page_shift;
 462		entry = iommu_range_alloc(dev, tbl, npages, &handle,
 463					  mask >> tbl->it_page_shift, align);
 464
 465		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 466
 467		/* Handle failure */
 468		if (unlikely(entry == DMA_MAPPING_ERROR)) {
 469			if (!(attrs & DMA_ATTR_NO_WARN) &&
 470			    printk_ratelimit())
 471				dev_info(dev, "iommu_alloc failed, tbl %p "
 472					 "vaddr %lx npages %lu\n", tbl, vaddr,
 473					 npages);
 474			goto failure;
 475		}
 476
 477		/* Convert entry to a dma_addr_t */
 478		entry += tbl->it_offset;
 479		dma_addr = entry << tbl->it_page_shift;
 480		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 481
 482		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 483			    npages, entry, dma_addr);
 484
 485		/* Insert into HW table */
 486		build_fail = tbl->it_ops->set(tbl, entry, npages,
 487					      vaddr & IOMMU_PAGE_MASK(tbl),
 488					      direction, attrs);
 489		if(unlikely(build_fail))
 490			goto failure;
 491
 492		/* If we are in an open segment, try merging */
 493		if (segstart != s) {
 494			DBG("  - trying merge...\n");
 495			/* We cannot merge if:
 496			 * - allocated dma_addr isn't contiguous to previous allocation
 497			 */
 498			if (novmerge || (dma_addr != dma_next) ||
 499			    (outs->dma_length + s->length > max_seg_size)) {
 500				/* Can't merge: create a new segment */
 501				segstart = s;
 502				outcount++;
 503				outs = sg_next(outs);
 504				DBG("    can't merge, new segment.\n");
 505			} else {
 506				outs->dma_length += s->length;
 507				DBG("    merged, new len: %ux\n", outs->dma_length);
 508			}
 509		}
 510
 511		if (segstart == s) {
 512			/* This is a new segment, fill entries */
 513			DBG("  - filling new segment.\n");
 514			outs->dma_address = dma_addr;
 515			outs->dma_length = slen;
 516		}
 517
 518		/* Calculate next page pointer for contiguous check */
 519		dma_next = dma_addr + slen;
 520
 521		DBG("  - dma next is: %lx\n", dma_next);
 522	}
 523
 524	/* Flush/invalidate TLB caches if necessary */
 525	if (tbl->it_ops->flush)
 526		tbl->it_ops->flush(tbl);
 527
 528	DBG("mapped %d elements:\n", outcount);
 529
 530	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
 531	 * next entry of the sglist if we didn't fill the list completely
 532	 */
 533	if (outcount < incount) {
 534		outs = sg_next(outs);
 535		outs->dma_address = DMA_MAPPING_ERROR;
 536		outs->dma_length = 0;
 537	}
 538
 539	/* Make sure updates are seen by hardware */
 540	mb();
 541
 542	return outcount;
 543
 544 failure:
 545	for_each_sg(sglist, s, nelems, i) {
 546		if (s->dma_length != 0) {
 547			unsigned long vaddr, npages;
 548
 549			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 550			npages = iommu_num_pages(s->dma_address, s->dma_length,
 551						 IOMMU_PAGE_SIZE(tbl));
 552			__iommu_free(tbl, vaddr, npages);
 553			s->dma_address = DMA_MAPPING_ERROR;
 554			s->dma_length = 0;
 555		}
 556		if (s == outs)
 557			break;
 558	}
 559	return 0;
 560}
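/*
 * Illustration of the virtual merging done above (hypothetical numbers):
 * two 4K scatterlist elements whose allocations come back at DMA addresses
 * 0x100000 and 0x101000 are contiguous, so they are folded into one 8K DMA
 * segment, provided "iommu=novmerge" was not given and the merged length
 * stays within dma_get_max_seg_size(dev).
 */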
 561
 562
 563void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 564			int nelems, enum dma_data_direction direction,
 565			unsigned long attrs)
 566{
 567	struct scatterlist *sg;
 568
 569	BUG_ON(direction == DMA_NONE);
 570
 571	if (!tbl)
 572		return;
 573
 574	sg = sglist;
 575	while (nelems--) {
 576		unsigned int npages;
 577		dma_addr_t dma_handle = sg->dma_address;
 578
 579		if (sg->dma_length == 0)
 580			break;
 581		npages = iommu_num_pages(dma_handle, sg->dma_length,
 582					 IOMMU_PAGE_SIZE(tbl));
 583		__iommu_free(tbl, dma_handle, npages);
 584		sg = sg_next(sg);
 585	}
 586
 587	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
 588	 * do not do an mb() here, the affected platforms do not need it
 589	 * when freeing.
 590	 */
 591	if (tbl->it_ops->flush)
 592		tbl->it_ops->flush(tbl);
 593}
 594
 595static void iommu_table_clear(struct iommu_table *tbl)
 596{
 597	/*
 598	 * In case of firmware-assisted dump, the system goes through a clean
 599	 * reboot process at the time of the crash. Hence it's safe to
 600	 * clear the TCE entries if firmware-assisted dump is active.
 601	 */
 602	if (!is_kdump_kernel() || is_fadump_active()) {
 603		/* Clear the table in case firmware left allocations in it */
 604		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 605		return;
 606	}
 607
 608#ifdef CONFIG_CRASH_DUMP
 609	if (tbl->it_ops->get) {
 610		unsigned long index, tceval, tcecount = 0;
 611
 612		/* Reserve the existing mappings left by the first kernel. */
 613		for (index = 0; index < tbl->it_size; index++) {
 614			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 615			/*
 616			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
 617			 */
 618			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
 619				__set_bit(index, tbl->it_map);
 620				tcecount++;
 621			}
 622		}
 623
 624		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
 625			printk(KERN_WARNING "TCE table is full; freeing ");
 626			printk(KERN_WARNING "%d entries for the kdump boot\n",
 627				KDUMP_MIN_TCE_ENTRIES);
 628			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
 629				index < tbl->it_size; index++)
 630				__clear_bit(index, tbl->it_map);
 631		}
 632	}
 633#endif
 634}
 635
 636static void iommu_table_reserve_pages(struct iommu_table *tbl,
 637		unsigned long res_start, unsigned long res_end)
 638{
 639	int i;
 640
 641	WARN_ON_ONCE(res_end < res_start);
 642	/*
 643	 * Reserve page 0 so it will not be used for any mappings.
 644	 * This keeps buggy drivers that consider page 0 to be invalid
 645	 * from crashing the machine or even losing data.
 646	 */
 647	if (tbl->it_offset == 0)
 648		set_bit(0, tbl->it_map);
 649
 650	tbl->it_reserved_start = res_start;
 651	tbl->it_reserved_end = res_end;
 652
 653	/* Check if res_start..res_end isn't empty and overlaps the table */
 654	if (res_start && res_end &&
 655			(tbl->it_offset + tbl->it_size < res_start ||
 656			 res_end < tbl->it_offset))
 657		return;
 658
 659	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
 660		set_bit(i - tbl->it_offset, tbl->it_map);
 661}
 662
 663static void iommu_table_release_pages(struct iommu_table *tbl)
 664{
 665	int i;
 666
 667	/*
 668	 * In case we have reserved the first bit, we should not emit
 669	 * the warning below.
 670	 */
 671	if (tbl->it_offset == 0)
 672		clear_bit(0, tbl->it_map);
 673
 674	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
 675		clear_bit(i - tbl->it_offset, tbl->it_map);
 676}
 677
 678/*
 679 * Build an iommu_table structure.  This contains a bit map which
 680 * is used to manage allocation of the tce space.
 681 */
 682struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
 683		unsigned long res_start, unsigned long res_end)
 684{
 685	unsigned long sz;
 686	static int welcomed = 0;
 687	struct page *page;
 688	unsigned int i;
 689	struct iommu_pool *p;
 690
 691	BUG_ON(!tbl->it_ops);
 692
 693	/* number of bytes needed for the bitmap */
 694	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 695
 696	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 697	if (!page)
 698		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 699	tbl->it_map = page_address(page);
 700	memset(tbl->it_map, 0, sz);
 701
 702	iommu_table_reserve_pages(tbl, res_start, res_end);
 703
 704	/* We only split the IOMMU table if we have 1GB or more of space */
 705	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 706		tbl->nr_pools = IOMMU_NR_POOLS;
 707	else
 708		tbl->nr_pools = 1;
 709
 710	/* We reserve the top 1/4 of the table for large allocations */
 711	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
 712
 713	for (i = 0; i < tbl->nr_pools; i++) {
 714		p = &tbl->pools[i];
 715		spin_lock_init(&(p->lock));
 716		p->start = tbl->poolsize * i;
 717		p->hint = p->start;
 718		p->end = p->start + tbl->poolsize;
 719	}
 720
 721	p = &tbl->large_pool;
 722	spin_lock_init(&(p->lock));
 723	p->start = tbl->poolsize * i;
 724	p->hint = p->start;
 725	p->end = tbl->it_size;
 726
 727	iommu_table_clear(tbl);
 728
 729	if (!welcomed) {
 730		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 731		       novmerge ? "disabled" : "enabled");
 732		welcomed = 1;
 733	}
 734
 735	return tbl;
 736}
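/*
 * Worked pool layout for iommu_init_table() above (illustrative, assuming
 * IOMMU_NR_POOLS is 4): a 1GB table of 4K IOMMU pages has it_size ==
 * 0x40000 entries, so poolsize == (0x40000 * 3 / 4) / 4 == 0xC000; the
 * four small pools cover entries 0x0-0x2ffff and the large pool takes the
 * top quarter, 0x30000-0x3ffff.
 */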
 737
 738static void iommu_table_free(struct kref *kref)
 739{
 740	unsigned long bitmap_sz;
 741	unsigned int order;
 742	struct iommu_table *tbl;
 743
 744	tbl = container_of(kref, struct iommu_table, it_kref);
 745
 746	if (tbl->it_ops->free)
 747		tbl->it_ops->free(tbl);
 748
 749	if (!tbl->it_map) {
 750		kfree(tbl);
 751		return;
 752	}
 753
 754	iommu_table_release_pages(tbl);
 755
 756	/* verify that table contains no entries */
 757	if (!bitmap_empty(tbl->it_map, tbl->it_size))
 758		pr_warn("%s: Unexpected TCEs\n", __func__);
 759
 760	/* calculate bitmap size in bytes */
 761	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 762
 763	/* free bitmap */
 764	order = get_order(bitmap_sz);
 765	free_pages((unsigned long) tbl->it_map, order);
 766
 767	/* free table */
 768	kfree(tbl);
 769}
 770
 771struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
 772{
 773	if (kref_get_unless_zero(&tbl->it_kref))
 774		return tbl;
 775
 776	return NULL;
 777}
 778EXPORT_SYMBOL_GPL(iommu_tce_table_get);
 779
 780int iommu_tce_table_put(struct iommu_table *tbl)
 781{
 782	if (WARN_ON(!tbl))
 783		return 0;
 784
 785	return kref_put(&tbl->it_kref, iommu_table_free);
 786}
 787EXPORT_SYMBOL_GPL(iommu_tce_table_put);
 788
 789/* Creates TCEs for a user provided buffer.  The user buffer must be
 790 * contiguous real kernel storage (not vmalloc).  The address passed here
 791 * comprises a page address and offset into that page. The dma_addr_t
 792 * returned will point to the same byte within the page as was passed in.
 793 */
 794dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 795			  struct page *page, unsigned long offset, size_t size,
 796			  unsigned long mask, enum dma_data_direction direction,
 797			  unsigned long attrs)
 798{
 799	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
 800	void *vaddr;
 801	unsigned long uaddr;
 802	unsigned int npages, align;
 803
 804	BUG_ON(direction == DMA_NONE);
 805
 806	vaddr = page_address(page) + offset;
 807	uaddr = (unsigned long)vaddr;
 808
 809	if (tbl) {
 810		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 811		align = 0;
 812		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 813		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 814			align = PAGE_SHIFT - tbl->it_page_shift;
 815
 816		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 817					 mask >> tbl->it_page_shift, align,
 818					 attrs);
 819		if (dma_handle == DMA_MAPPING_ERROR) {
 820			if (!(attrs & DMA_ATTR_NO_WARN) &&
 821			    printk_ratelimit())  {
 822				dev_info(dev, "iommu_alloc failed, tbl %p "
 823					 "vaddr %p npages %d\n", tbl, vaddr,
 824					 npages);
 825			}
 826		} else
 827			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 828	}
 829
 830	return dma_handle;
 831}
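/*
 * Hypothetical caller sketch for iommu_map_page()/iommu_unmap_page() above
 * (the real callers live in the powerpc dma-iommu glue; this is only an
 * assumed usage pattern):
 *
 *	dma_addr_t h = iommu_map_page(dev, tbl, page, offset, len,
 *				      dma_get_mask(dev), DMA_TO_DEVICE, 0);
 *	if (h == DMA_MAPPING_ERROR)
 *		return -EIO;
 *	...perform the DMA...
 *	iommu_unmap_page(tbl, h, len, DMA_TO_DEVICE, 0);
 */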
 832
 833void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 834		      size_t size, enum dma_data_direction direction,
 835		      unsigned long attrs)
 836{
 837	unsigned int npages;
 838
 839	BUG_ON(direction == DMA_NONE);
 840
 841	if (tbl) {
 842		npages = iommu_num_pages(dma_handle, size,
 843					 IOMMU_PAGE_SIZE(tbl));
 844		iommu_free(tbl, dma_handle, npages);
 845	}
 846}
 847
 848/* Allocates a contiguous real buffer and creates mappings over it.
 849 * Returns the virtual address of the buffer and sets dma_handle
 850 * to the dma address (mapping) of the first page.
 851 */
 852void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 853			   size_t size,	dma_addr_t *dma_handle,
 854			   unsigned long mask, gfp_t flag, int node)
 855{
 856	void *ret = NULL;
 857	dma_addr_t mapping;
 858	unsigned int order;
 859	unsigned int nio_pages, io_order;
 860	struct page *page;
 861
 862	size = PAGE_ALIGN(size);
 863	order = get_order(size);
 864
 865 	/*
 866	 * Client asked for way too much space.  This is checked later
 867	 * anyway.  It is easier to debug here for the drivers than in
 868	 * the tce tables.
 869	 */
 870	if (order >= IOMAP_MAX_ORDER) {
 871		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
 872			 size);
 873		return NULL;
 874	}
 875
 876	if (!tbl)
 877		return NULL;
 878
 879	/* Alloc enough pages (and possibly more) */
 880	page = alloc_pages_node(node, flag, order);
 881	if (!page)
 882		return NULL;
 883	ret = page_address(page);
 884	memset(ret, 0, size);
 885
 886	/* Set up tces to cover the allocated range */
 887	nio_pages = size >> tbl->it_page_shift;
 888	io_order = get_iommu_order(size, tbl);
 889	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 890			      mask >> tbl->it_page_shift, io_order, 0);
 891	if (mapping == DMA_MAPPING_ERROR) {
 892		free_pages((unsigned long)ret, order);
 893		return NULL;
 894	}
 895	*dma_handle = mapping;
 896	return ret;
 897}
 898
 899void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 900			 void *vaddr, dma_addr_t dma_handle)
 901{
 902	if (tbl) {
 903		unsigned int nio_pages;
 904
 905		size = PAGE_ALIGN(size);
 906		nio_pages = size >> tbl->it_page_shift;
 907		iommu_free(tbl, dma_handle, nio_pages);
 908		size = PAGE_ALIGN(size);
 909		free_pages((unsigned long)vaddr, get_order(size));
 910	}
 911}
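/*
 * Hypothetical pairing of the coherent helpers above (sketch only; dev and
 * tbl are assumed to come from the platform code):
 *
 *	dma_addr_t dma;
 *	void *cpu_addr = iommu_alloc_coherent(dev, tbl, SZ_64K, &dma,
 *					      dma_get_mask(dev), GFP_KERNEL,
 *					      dev_to_node(dev));
 *	...
 *	iommu_free_coherent(tbl, SZ_64K, cpu_addr, dma);
 */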
 912
 913unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
 914{
 915	switch (dir) {
 916	case DMA_BIDIRECTIONAL:
 917		return TCE_PCI_READ | TCE_PCI_WRITE;
 918	case DMA_FROM_DEVICE:
 919		return TCE_PCI_WRITE;
 920	case DMA_TO_DEVICE:
 921		return TCE_PCI_READ;
 922	default:
 923		return 0;
 924	}
 925}
 926EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
 927
 928#ifdef CONFIG_IOMMU_API
 929/*
 930 * SPAPR TCE API
 931 */
 932static void group_release(void *iommu_data)
 933{
 934	struct iommu_table_group *table_group = iommu_data;
 935
 936	table_group->group = NULL;
 937}
 938
 939void iommu_register_group(struct iommu_table_group *table_group,
 940		int pci_domain_number, unsigned long pe_num)
 941{
 942	struct iommu_group *grp;
 943	char *name;
 944
 945	grp = iommu_group_alloc();
 946	if (IS_ERR(grp)) {
 947		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
 948				PTR_ERR(grp));
 949		return;
 950	}
 951	table_group->group = grp;
 952	iommu_group_set_iommudata(grp, table_group, group_release);
 953	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
 954			pci_domain_number, pe_num);
 955	if (!name)
 956		return;
 957	iommu_group_set_name(grp, name);
 958	kfree(name);
 959}
 960
 961enum dma_data_direction iommu_tce_direction(unsigned long tce)
 962{
 963	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
 964		return DMA_BIDIRECTIONAL;
 965	else if (tce & TCE_PCI_READ)
 966		return DMA_TO_DEVICE;
 967	else if (tce & TCE_PCI_WRITE)
 968		return DMA_FROM_DEVICE;
 969	else
 970		return DMA_NONE;
 971}
 972EXPORT_SYMBOL_GPL(iommu_tce_direction);
 973
 974void iommu_flush_tce(struct iommu_table *tbl)
 975{
 976	/* Flush/invalidate TLB caches if necessary */
 977	if (tbl->it_ops->flush)
 978		tbl->it_ops->flush(tbl);
 979
 980	/* Make sure updates are seen by hardware */
 981	mb();
 982}
 983EXPORT_SYMBOL_GPL(iommu_flush_tce);
 984
 985int iommu_tce_check_ioba(unsigned long page_shift,
 986		unsigned long offset, unsigned long size,
 987		unsigned long ioba, unsigned long npages)
 988{
 989	unsigned long mask = (1UL << page_shift) - 1;
 990
 991	if (ioba & mask)
 992		return -EINVAL;
 993
 994	ioba >>= page_shift;
 995	if (ioba < offset)
 996		return -EINVAL;
 997
 998	if ((ioba + 1) > (offset + size))
 999		return -EINVAL;
1000
1001	return 0;
1002}
1003EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
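/*
 * Worked example for iommu_tce_check_ioba() above (illustrative numbers):
 * with page_shift == 12, offset == 0 and size == 0x1000 entries, an ioba
 * of 0x123000 passes (entry 0x123 lies inside the window), 0x123800 fails
 * the alignment check, and 0x1000000 fails the window check.
 */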
1004
1005int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1006{
1007	unsigned long mask = (1UL << page_shift) - 1;
1008
1009	if (gpa & mask)
1010		return -EINVAL;
1011
1012	return 0;
1013}
1014EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1015
1016extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1017		struct iommu_table *tbl,
1018		unsigned long entry, unsigned long *hpa,
1019		enum dma_data_direction *direction)
1020{
1021	long ret;
1022	unsigned long size = 0;
1023
1024	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
1025	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1026			(*direction == DMA_BIDIRECTIONAL)) &&
1027			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1028					&size))
1029		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1030
1031	return ret;
1032}
1033EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1034
1035void iommu_tce_kill(struct iommu_table *tbl,
1036		unsigned long entry, unsigned long pages)
1037{
1038	if (tbl->it_ops->tce_kill)
1039		tbl->it_ops->tce_kill(tbl, entry, pages, false);
1040}
1041EXPORT_SYMBOL_GPL(iommu_tce_kill);
1042
1043int iommu_take_ownership(struct iommu_table *tbl)
1044{
1045	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1046	int ret = 0;
1047
1048	/*
1049	 * VFIO does not control TCE entries allocation and the guest
1050	 * can write new TCEs on top of existing ones so iommu_tce_build()
1051	 * must be able to release old pages. This functionality
1052	 * requires exchange() callback defined so if it is not
1053	 * implemented, we disallow taking ownership over the table.
1054	 */
1055	if (!tbl->it_ops->xchg_no_kill)
1056		return -EINVAL;
1057
1058	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1059	for (i = 0; i < tbl->nr_pools; i++)
1060		spin_lock(&tbl->pools[i].lock);
1061
1062	iommu_table_release_pages(tbl);
1063
1064	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
1065		pr_err("iommu_tce: it_map is not empty");
1066		ret = -EBUSY;
1067		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
1068		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1069				tbl->it_reserved_end);
1070	} else {
1071		memset(tbl->it_map, 0xff, sz);
1072	}
1073
1074	for (i = 0; i < tbl->nr_pools; i++)
1075		spin_unlock(&tbl->pools[i].lock);
1076	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1077
1078	return ret;
1079}
1080EXPORT_SYMBOL_GPL(iommu_take_ownership);
1081
1082void iommu_release_ownership(struct iommu_table *tbl)
1083{
1084	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1085
1086	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1087	for (i = 0; i < tbl->nr_pools; i++)
1088		spin_lock(&tbl->pools[i].lock);
1089
1090	memset(tbl->it_map, 0, sz);
1091
1092	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1093			tbl->it_reserved_end);
1094
1095	for (i = 0; i < tbl->nr_pools; i++)
1096		spin_unlock(&tbl->pools[i].lock);
1097	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1098}
1099EXPORT_SYMBOL_GPL(iommu_release_ownership);
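/*
 * Expected pairing of the ownership helpers above (the in-tree user is the
 * SPAPR TCE VFIO backend): iommu_take_ownership() marks the whole it_map
 * as in-use so the kernel DMA API stops allocating from the table while
 * userspace owns it, and iommu_release_ownership() clears the map and
 * re-reserves the protected pages when the table is handed back.
 */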
1100
1101int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1102{
1103	/*
1104	 * The sysfs entries should be populated before
1105	 * binding the IOMMU group. If the sysfs entries
1106	 * aren't ready, we simply bail.
1107	 */
1108	if (!device_is_registered(dev))
1109		return -ENOENT;
1110
1111	if (device_iommu_mapped(dev)) {
1112		pr_debug("%s: Skipping device %s with iommu group %d\n",
1113			 __func__, dev_name(dev),
1114			 iommu_group_id(dev->iommu_group));
1115		return -EBUSY;
1116	}
1117
1118	pr_debug("%s: Adding %s to iommu group %d\n",
1119		 __func__, dev_name(dev),  iommu_group_id(table_group->group));
1120
1121	return iommu_group_add_device(table_group->group, dev);
1122}
1123EXPORT_SYMBOL_GPL(iommu_add_device);
1124
1125void iommu_del_device(struct device *dev)
1126{
1127	/*
1128	 * Some devices might not have an IOMMU table and group,
1129	 * so we needn't detach them from their associated
1130	 * IOMMU groups.
1131	 */
1132	if (!device_iommu_mapped(dev)) {
1133		pr_debug("iommu_tce: skipping device %s with no tbl\n",
1134			 dev_name(dev));
1135		return;
1136	}
1137
1138	iommu_group_remove_device(dev);
1139}
1140EXPORT_SYMBOL_GPL(iommu_del_device);
1141#endif /* CONFIG_IOMMU_API */
v4.10.11
 
   1/*
   2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
   3 * 
   4 * Rewrite, cleanup, new allocation schemes, virtual merging: 
   5 * Copyright (C) 2004 Olof Johansson, IBM Corporation
   6 *               and  Ben. Herrenschmidt, IBM Corporation
   7 *
   8 * Dynamic DMA mapping support, bus-independent parts.
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License as published by
  12 * the Free Software Foundation; either version 2 of the License, or
  13 * (at your option) any later version.
  14 * 
  15 * This program is distributed in the hope that it will be useful,
  16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  18 * GNU General Public License for more details.
  19 * 
  20 * You should have received a copy of the GNU General Public License
  21 * along with this program; if not, write to the Free Software
  22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
  23 */
  24
  25
  26#include <linux/init.h>
  27#include <linux/types.h>
  28#include <linux/slab.h>
  29#include <linux/mm.h>
  30#include <linux/spinlock.h>
  31#include <linux/string.h>
  32#include <linux/dma-mapping.h>
  33#include <linux/bitmap.h>
  34#include <linux/iommu-helper.h>
  35#include <linux/crash_dump.h>
  36#include <linux/hash.h>
  37#include <linux/fault-inject.h>
  38#include <linux/pci.h>
  39#include <linux/iommu.h>
  40#include <linux/sched.h>
  41#include <asm/io.h>
  42#include <asm/prom.h>
  43#include <asm/iommu.h>
  44#include <asm/pci-bridge.h>
  45#include <asm/machdep.h>
  46#include <asm/kdump.h>
  47#include <asm/fadump.h>
  48#include <asm/vio.h>
  49#include <asm/tce.h>
 
  50
  51#define DBG(...)
  52
  53static int novmerge;
  54
  55static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
  56
  57static int __init setup_iommu(char *str)
  58{
  59	if (!strcmp(str, "novmerge"))
  60		novmerge = 1;
  61	else if (!strcmp(str, "vmerge"))
  62		novmerge = 0;
  63	return 1;
  64}
  65
  66__setup("iommu=", setup_iommu);
  67
  68static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
  69
  70/*
  71 * We precalculate the hash to avoid doing it on every allocation.
  72 *
  73 * The hash is important to spread CPUs across all the pools. For example,
  74 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
  75 * with 4 pools all primary threads would map to the same pool.
  76 */
  77static int __init setup_iommu_pool_hash(void)
  78{
  79	unsigned int i;
  80
  81	for_each_possible_cpu(i)
  82		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
  83
  84	return 0;
  85}
  86subsys_initcall(setup_iommu_pool_hash);
  87
  88#ifdef CONFIG_FAIL_IOMMU
  89
  90static DECLARE_FAULT_ATTR(fail_iommu);
  91
  92static int __init setup_fail_iommu(char *str)
  93{
  94	return setup_fault_attr(&fail_iommu, str);
  95}
  96__setup("fail_iommu=", setup_fail_iommu);
  97
  98static bool should_fail_iommu(struct device *dev)
  99{
 100	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
 101}
 102
 103static int __init fail_iommu_debugfs(void)
 104{
 105	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
 106						       NULL, &fail_iommu);
 107
 108	return PTR_ERR_OR_ZERO(dir);
 109}
 110late_initcall(fail_iommu_debugfs);
 111
 112static ssize_t fail_iommu_show(struct device *dev,
 113			       struct device_attribute *attr, char *buf)
 114{
 115	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
 116}
 117
 118static ssize_t fail_iommu_store(struct device *dev,
 119				struct device_attribute *attr, const char *buf,
 120				size_t count)
 121{
 122	int i;
 123
 124	if (count > 0 && sscanf(buf, "%d", &i) > 0)
 125		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
 126
 127	return count;
 128}
 129
 130static DEVICE_ATTR(fail_iommu, S_IRUGO|S_IWUSR, fail_iommu_show,
 131		   fail_iommu_store);
 132
 133static int fail_iommu_bus_notify(struct notifier_block *nb,
 134				 unsigned long action, void *data)
 135{
 136	struct device *dev = data;
 137
 138	if (action == BUS_NOTIFY_ADD_DEVICE) {
 139		if (device_create_file(dev, &dev_attr_fail_iommu))
 140			pr_warn("Unable to create IOMMU fault injection sysfs "
 141				"entries\n");
 142	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
 143		device_remove_file(dev, &dev_attr_fail_iommu);
 144	}
 145
 146	return 0;
 147}
 148
 149static struct notifier_block fail_iommu_bus_notifier = {
 150	.notifier_call = fail_iommu_bus_notify
 151};
 152
 153static int __init fail_iommu_setup(void)
 154{
 155#ifdef CONFIG_PCI
 156	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
 157#endif
 158#ifdef CONFIG_IBMVIO
 159	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
 160#endif
 161
 162	return 0;
 163}
 164/*
 165 * Must execute after PCI and VIO subsystem have initialised but before
 166 * devices are probed.
 167 */
 168arch_initcall(fail_iommu_setup);
 169#else
 170static inline bool should_fail_iommu(struct device *dev)
 171{
 172	return false;
 173}
 174#endif
 175
 176static unsigned long iommu_range_alloc(struct device *dev,
 177				       struct iommu_table *tbl,
 178                                       unsigned long npages,
 179                                       unsigned long *handle,
 180                                       unsigned long mask,
 181                                       unsigned int align_order)
 182{ 
 183	unsigned long n, end, start;
 184	unsigned long limit;
 185	int largealloc = npages > 15;
 186	int pass = 0;
 187	unsigned long align_mask;
 188	unsigned long boundary_size;
 189	unsigned long flags;
 190	unsigned int pool_nr;
 191	struct iommu_pool *pool;
 192
 193	align_mask = 0xffffffffffffffffl >> (64 - align_order);
 194
 195	/* This allocator was derived from x86_64's bit string search */
 196
 197	/* Sanity check */
 198	if (unlikely(npages == 0)) {
 199		if (printk_ratelimit())
 200			WARN_ON(1);
 201		return DMA_ERROR_CODE;
 202	}
 203
 204	if (should_fail_iommu(dev))
 205		return DMA_ERROR_CODE;
 206
 207	/*
 208	 * We don't need to disable preemption here because any CPU can
 209	 * safely use any IOMMU pool.
 210	 */
 211	pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 212
 213	if (largealloc)
 214		pool = &(tbl->large_pool);
 215	else
 216		pool = &(tbl->pools[pool_nr]);
 217
 218	spin_lock_irqsave(&(pool->lock), flags);
 219
 220again:
 221	if ((pass == 0) && handle && *handle &&
 222	    (*handle >= pool->start) && (*handle < pool->end))
 223		start = *handle;
 224	else
 225		start = pool->hint;
 226
 227	limit = pool->end;
 228
 229	/* The case below can happen if we have a small segment appended
 230	 * to a large, or when the previous alloc was at the very end of
 231	 * the available space. If so, go back to the initial start.
 232	 */
 233	if (start >= limit)
 234		start = pool->start;
 235
 236	if (limit + tbl->it_offset > mask) {
 237		limit = mask - tbl->it_offset + 1;
 238		/* If we're constrained on address range, first try
 239		 * at the masked hint to avoid O(n) search complexity,
 240		 * but on second pass, start at 0 in pool 0.
 241		 */
 242		if ((start & mask) >= limit || pass > 0) {
 243			spin_unlock(&(pool->lock));
 244			pool = &(tbl->pools[0]);
 245			spin_lock(&(pool->lock));
 246			start = pool->start;
 247		} else {
 248			start &= mask;
 249		}
 250	}
 251
 252	if (dev)
 253		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 254				      1 << tbl->it_page_shift);
 255	else
 256		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
 257	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 258
 259	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
 260			     boundary_size >> tbl->it_page_shift, align_mask);
 261	if (n == -1) {
 262		if (likely(pass == 0)) {
 263			/* First try the pool from the start */
 264			pool->hint = pool->start;
 265			pass++;
 266			goto again;
 267
 268		} else if (pass <= tbl->nr_pools) {
 269			/* Now try scanning all the other pools */
 270			spin_unlock(&(pool->lock));
 271			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
 272			pool = &tbl->pools[pool_nr];
 273			spin_lock(&(pool->lock));
 274			pool->hint = pool->start;
 275			pass++;
 276			goto again;
 277
 278		} else {
 279			/* Give up */
 280			spin_unlock_irqrestore(&(pool->lock), flags);
 281			return DMA_ERROR_CODE;
 282		}
 283	}
 284
 285	end = n + npages;
 286
 287	/* Bump the hint to a new block for small allocs. */
 288	if (largealloc) {
 289		/* Don't bump to new block to avoid fragmentation */
 290		pool->hint = end;
 291	} else {
 292		/* Overflow will be taken care of at the next allocation */
 293		pool->hint = (end + tbl->it_blocksize - 1) &
 294		                ~(tbl->it_blocksize - 1);
 295	}
 296
 297	/* Update handle for SG allocations */
 298	if (handle)
 299		*handle = end;
 300
 301	spin_unlock_irqrestore(&(pool->lock), flags);
 302
 303	return n;
 304}
 305
 306static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 307			      void *page, unsigned int npages,
 308			      enum dma_data_direction direction,
 309			      unsigned long mask, unsigned int align_order,
 310			      unsigned long attrs)
 311{
 312	unsigned long entry;
 313	dma_addr_t ret = DMA_ERROR_CODE;
 314	int build_fail;
 315
 316	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 317
 318	if (unlikely(entry == DMA_ERROR_CODE))
 319		return DMA_ERROR_CODE;
 320
 321	entry += tbl->it_offset;	/* Offset into real TCE table */
 322	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 323
 324	/* Put the TCEs in the HW table */
 325	build_fail = tbl->it_ops->set(tbl, entry, npages,
 326				      (unsigned long)page &
 327				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 328
 329	/* tbl->it_ops->set() only returns non-zero for transient errors.
 330	 * Clean up the table bitmap in this case and return
 331	 * DMA_ERROR_CODE. For all other errors the functionality is
 332	 * not altered.
 333	 */
 334	if (unlikely(build_fail)) {
 335		__iommu_free(tbl, ret, npages);
 336		return DMA_ERROR_CODE;
 337	}
 338
 339	/* Flush/invalidate TLB caches if necessary */
 340	if (tbl->it_ops->flush)
 341		tbl->it_ops->flush(tbl);
 342
 343	/* Make sure updates are seen by hardware */
 344	mb();
 345
 346	return ret;
 347}
 348
 349static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 350			     unsigned int npages)
 351{
 352	unsigned long entry, free_entry;
 353
 354	entry = dma_addr >> tbl->it_page_shift;
 355	free_entry = entry - tbl->it_offset;
 356
 357	if (((free_entry + npages) > tbl->it_size) ||
 358	    (entry < tbl->it_offset)) {
 359		if (printk_ratelimit()) {
 360			printk(KERN_INFO "iommu_free: invalid entry\n");
 361			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
 362			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
 363			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
 364			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
 365			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
 366			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
 367			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
 368			WARN_ON(1);
 369		}
 370
 371		return false;
 372	}
 373
 374	return true;
 375}
 376
 377static struct iommu_pool *get_pool(struct iommu_table *tbl,
 378				   unsigned long entry)
 379{
 380	struct iommu_pool *p;
 381	unsigned long largepool_start = tbl->large_pool.start;
 382
 383	/* The large pool is the last pool at the top of the table */
 384	if (entry >= largepool_start) {
 385		p = &tbl->large_pool;
 386	} else {
 387		unsigned int pool_nr = entry / tbl->poolsize;
 388
 389		BUG_ON(pool_nr > tbl->nr_pools);
 390		p = &tbl->pools[pool_nr];
 391	}
 392
 393	return p;
 394}
 395
 396static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 397			 unsigned int npages)
 398{
 399	unsigned long entry, free_entry;
 400	unsigned long flags;
 401	struct iommu_pool *pool;
 402
 403	entry = dma_addr >> tbl->it_page_shift;
 404	free_entry = entry - tbl->it_offset;
 405
 406	pool = get_pool(tbl, free_entry);
 407
 408	if (!iommu_free_check(tbl, dma_addr, npages))
 409		return;
 410
 411	tbl->it_ops->clear(tbl, entry, npages);
 412
 413	spin_lock_irqsave(&(pool->lock), flags);
 414	bitmap_clear(tbl->it_map, free_entry, npages);
 415	spin_unlock_irqrestore(&(pool->lock), flags);
 416}
 417
 418static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 419		unsigned int npages)
 420{
 421	__iommu_free(tbl, dma_addr, npages);
 422
 423	/* Make sure TLB cache is flushed if the HW needs it. We do
 424	 * not do an mb() here on purpose, it is not needed on any of
 425	 * the current platforms.
 426	 */
 427	if (tbl->it_ops->flush)
 428		tbl->it_ops->flush(tbl);
 429}
 430
 431int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 432		     struct scatterlist *sglist, int nelems,
 433		     unsigned long mask, enum dma_data_direction direction,
 434		     unsigned long attrs)
 435{
 436	dma_addr_t dma_next = 0, dma_addr;
 437	struct scatterlist *s, *outs, *segstart;
 438	int outcount, incount, i, build_fail = 0;
 439	unsigned int align;
 440	unsigned long handle;
 441	unsigned int max_seg_size;
 442
 443	BUG_ON(direction == DMA_NONE);
 444
 445	if ((nelems == 0) || !tbl)
 446		return 0;
 447
 448	outs = s = segstart = &sglist[0];
 449	outcount = 1;
 450	incount = nelems;
 451	handle = 0;
 452
 453	/* Init first segment length for backout at failure */
 454	outs->dma_length = 0;
 455
 456	DBG("sg mapping %d elements:\n", nelems);
 457
 458	max_seg_size = dma_get_max_seg_size(dev);
 459	for_each_sg(sglist, s, nelems, i) {
 460		unsigned long vaddr, npages, entry, slen;
 461
 462		slen = s->length;
 463		/* Sanity check */
 464		if (slen == 0) {
 465			dma_next = 0;
 466			continue;
 467		}
 468		/* Allocate iommu entries for that segment */
 469		vaddr = (unsigned long) sg_virt(s);
 470		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 471		align = 0;
 472		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 473		    (vaddr & ~PAGE_MASK) == 0)
 474			align = PAGE_SHIFT - tbl->it_page_shift;
 475		entry = iommu_range_alloc(dev, tbl, npages, &handle,
 476					  mask >> tbl->it_page_shift, align);
 477
 478		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 479
 480		/* Handle failure */
 481		if (unlikely(entry == DMA_ERROR_CODE)) {
 482			if (!(attrs & DMA_ATTR_NO_WARN) &&
 483			    printk_ratelimit())
 484				dev_info(dev, "iommu_alloc failed, tbl %p "
 485					 "vaddr %lx npages %lu\n", tbl, vaddr,
 486					 npages);
 487			goto failure;
 488		}
 489
 490		/* Convert entry to a dma_addr_t */
 491		entry += tbl->it_offset;
 492		dma_addr = entry << tbl->it_page_shift;
 493		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 494
 495		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 496			    npages, entry, dma_addr);
 497
 498		/* Insert into HW table */
 499		build_fail = tbl->it_ops->set(tbl, entry, npages,
 500					      vaddr & IOMMU_PAGE_MASK(tbl),
 501					      direction, attrs);
 502		if(unlikely(build_fail))
 503			goto failure;
 504
 505		/* If we are in an open segment, try merging */
 506		if (segstart != s) {
 507			DBG("  - trying merge...\n");
 508			/* We cannot merge if:
 509			 * - allocated dma_addr isn't contiguous to previous allocation
 510			 */
 511			if (novmerge || (dma_addr != dma_next) ||
 512			    (outs->dma_length + s->length > max_seg_size)) {
 513				/* Can't merge: create a new segment */
 514				segstart = s;
 515				outcount++;
 516				outs = sg_next(outs);
 517				DBG("    can't merge, new segment.\n");
 518			} else {
 519				outs->dma_length += s->length;
 520				DBG("    merged, new len: %ux\n", outs->dma_length);
 521			}
 522		}
 523
 524		if (segstart == s) {
 525			/* This is a new segment, fill entries */
 526			DBG("  - filling new segment.\n");
 527			outs->dma_address = dma_addr;
 528			outs->dma_length = slen;
 529		}
 530
 531		/* Calculate next page pointer for contiguous check */
 532		dma_next = dma_addr + slen;
 533
 534		DBG("  - dma next is: %lx\n", dma_next);
 535	}
 536
 537	/* Flush/invalidate TLB caches if necessary */
 538	if (tbl->it_ops->flush)
 539		tbl->it_ops->flush(tbl);
 540
 541	DBG("mapped %d elements:\n", outcount);
 542
 543	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
 544	 * next entry of the sglist if we didn't fill the list completely
 545	 */
 546	if (outcount < incount) {
 547		outs = sg_next(outs);
 548		outs->dma_address = DMA_ERROR_CODE;
 549		outs->dma_length = 0;
 550	}
 551
 552	/* Make sure updates are seen by hardware */
 553	mb();
 554
 555	return outcount;
 556
 557 failure:
 558	for_each_sg(sglist, s, nelems, i) {
 559		if (s->dma_length != 0) {
 560			unsigned long vaddr, npages;
 561
 562			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 563			npages = iommu_num_pages(s->dma_address, s->dma_length,
 564						 IOMMU_PAGE_SIZE(tbl));
 565			__iommu_free(tbl, vaddr, npages);
 566			s->dma_address = DMA_ERROR_CODE;
 567			s->dma_length = 0;
 568		}
 569		if (s == outs)
 570			break;
 571	}
 572	return 0;
 573}
 574
 575
 576void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 577			int nelems, enum dma_data_direction direction,
 578			unsigned long attrs)
 579{
 580	struct scatterlist *sg;
 581
 582	BUG_ON(direction == DMA_NONE);
 583
 584	if (!tbl)
 585		return;
 586
 587	sg = sglist;
 588	while (nelems--) {
 589		unsigned int npages;
 590		dma_addr_t dma_handle = sg->dma_address;
 591
 592		if (sg->dma_length == 0)
 593			break;
 594		npages = iommu_num_pages(dma_handle, sg->dma_length,
 595					 IOMMU_PAGE_SIZE(tbl));
 596		__iommu_free(tbl, dma_handle, npages);
 597		sg = sg_next(sg);
 598	}
 599
 600	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
 601	 * do not do an mb() here, the affected platforms do not need it
 602	 * when freeing.
 603	 */
 604	if (tbl->it_ops->flush)
 605		tbl->it_ops->flush(tbl);
 606}
 607
 608static void iommu_table_clear(struct iommu_table *tbl)
 609{
 610	/*
 611	 * In case of firmware assisted dump system goes through clean
 612	 * reboot process at the time of system crash. Hence it's safe to
 613	 * clear the TCE entries if firmware assisted dump is active.
 614	 */
 615	if (!is_kdump_kernel() || is_fadump_active()) {
 616		/* Clear the table in case firmware left allocations in it */
 617		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 618		return;
 619	}
 620
 621#ifdef CONFIG_CRASH_DUMP
 622	if (tbl->it_ops->get) {
 623		unsigned long index, tceval, tcecount = 0;
 624
 625		/* Reserve the existing mappings left by the first kernel. */
 626		for (index = 0; index < tbl->it_size; index++) {
 627			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 628			/*
 629			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
 630			 */
 631			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
 632				__set_bit(index, tbl->it_map);
 633				tcecount++;
 634			}
 635		}
 636
 637		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
 638			printk(KERN_WARNING "TCE table is full; freeing ");
 639			printk(KERN_WARNING "%d entries for the kdump boot\n",
 640				KDUMP_MIN_TCE_ENTRIES);
 641			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
 642				index < tbl->it_size; index++)
 643				__clear_bit(index, tbl->it_map);
 644		}
 645	}
 646#endif
 647}
 648
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 649/*
 650 * Build a iommu_table structure.  This contains a bit map which
 651 * is used to manage allocation of the tce space.
 652 */
 653struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid)
 
 654{
 655	unsigned long sz;
 656	static int welcomed = 0;
 657	struct page *page;
 658	unsigned int i;
 659	struct iommu_pool *p;
 660
 661	BUG_ON(!tbl->it_ops);
 662
 663	/* number of bytes needed for the bitmap */
 664	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 665
 666	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 667	if (!page)
 668		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 669	tbl->it_map = page_address(page);
 670	memset(tbl->it_map, 0, sz);
 671
 672	/*
 673	 * Reserve page 0 so it will not be used for any mappings.
 674	 * This avoids buggy drivers that consider page 0 to be invalid
 675	 * to crash the machine or even lose data.
 676	 */
 677	if (tbl->it_offset == 0)
 678		set_bit(0, tbl->it_map);
 679
 680	/* We only split the IOMMU table if we have 1GB or more of space */
 681	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 682		tbl->nr_pools = IOMMU_NR_POOLS;
 683	else
 684		tbl->nr_pools = 1;
 685
 686	/* We reserve the top 1/4 of the table for large allocations */
 687	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
 688
 689	for (i = 0; i < tbl->nr_pools; i++) {
 690		p = &tbl->pools[i];
 691		spin_lock_init(&(p->lock));
 692		p->start = tbl->poolsize * i;
 693		p->hint = p->start;
 694		p->end = p->start + tbl->poolsize;
 695	}
 696
 697	p = &tbl->large_pool;
 698	spin_lock_init(&(p->lock));
 699	p->start = tbl->poolsize * i;
 700	p->hint = p->start;
 701	p->end = tbl->it_size;
 702
 703	iommu_table_clear(tbl);
 704
 705	if (!welcomed) {
 706		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 707		       novmerge ? "disabled" : "enabled");
 708		welcomed = 1;
 709	}
 710
 711	return tbl;
 712}
 713
 714void iommu_free_table(struct iommu_table *tbl, const char *node_name)
 715{
 716	unsigned long bitmap_sz;
 717	unsigned int order;
 
 
 
 718
 719	if (!tbl)
 720		return;
 721
 722	if (!tbl->it_map) {
 723		kfree(tbl);
 724		return;
 725	}
 726
 727	/*
 728	 * In case we have reserved the first bit, we should not emit
 729	 * the warning below.
 730	 */
 731	if (tbl->it_offset == 0)
 732		clear_bit(0, tbl->it_map);
 733
 734	/* verify that table contains no entries */
 735	if (!bitmap_empty(tbl->it_map, tbl->it_size))
 736		pr_warn("%s: Unexpected TCEs for %s\n", __func__, node_name);
 737
 738	/* calculate bitmap size in bytes */
 739	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 740
 741	/* free bitmap */
 742	order = get_order(bitmap_sz);
 743	free_pages((unsigned long) tbl->it_map, order);
 744
 745	/* free table */
 746	kfree(tbl);
 747}
 748
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 749/* Creates TCEs for a user provided buffer.  The user buffer must be
 750 * contiguous real kernel storage (not vmalloc).  The address passed here
 751 * comprises a page address and offset into that page. The dma_addr_t
 752 * returned will point to the same byte within the page as was passed in.
 753 */
 754dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 755			  struct page *page, unsigned long offset, size_t size,
 756			  unsigned long mask, enum dma_data_direction direction,
 757			  unsigned long attrs)
 758{
 759	dma_addr_t dma_handle = DMA_ERROR_CODE;
 760	void *vaddr;
 761	unsigned long uaddr;
 762	unsigned int npages, align;
 763
 764	BUG_ON(direction == DMA_NONE);
 765
 766	vaddr = page_address(page) + offset;
 767	uaddr = (unsigned long)vaddr;
 768	npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 769
 770	if (tbl) {
 771		align = 0;
 772		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 773		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 774			align = PAGE_SHIFT - tbl->it_page_shift;
 775
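    		/*
    		 * The align computed above matters when the kernel page is
    		 * bigger than the IOMMU page: e.g. with 64K kernel pages over
    		 * 4K TCEs, a page-aligned buffer of at least PAGE_SIZE asks
    		 * for align = 16 - 12 = 4, i.e. a DMA address aligned to
    		 * 2^4 = 16 TCEs (64K), preserving the buffer's alignment in
    		 * DMA space.
    		 */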
 776		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 777					 mask >> tbl->it_page_shift, align,
 778					 attrs);
 779		if (dma_handle == DMA_ERROR_CODE) {
 780			if (!(attrs & DMA_ATTR_NO_WARN) &&
 781			    printk_ratelimit())  {
 782				dev_info(dev, "iommu_alloc failed, tbl %p "
 783					 "vaddr %p npages %d\n", tbl, vaddr,
 784					 npages);
 785			}
 786		} else
 787			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 788	}
 789
 790	return dma_handle;
 791}
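    /*
     * Illustrative sketch of how a dma_map_ops backend might drive the
     * map/unmap pair above and below (not a verbatim call site; the mask is
     * normally derived from the device's DMA mask):
     *
     *	dma = iommu_map_page(dev, get_iommu_table_base(dev), page, offset,
     *			     size, dma_get_mask(dev), dir, attrs);
     *	...
     *	iommu_unmap_page(get_iommu_table_base(dev), dma, size, dir, attrs);
     */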
 792
 793void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 794		      size_t size, enum dma_data_direction direction,
 795		      unsigned long attrs)
 796{
 797	unsigned int npages;
 798
 799	BUG_ON(direction == DMA_NONE);
 800
 801	if (tbl) {
 802		npages = iommu_num_pages(dma_handle, size,
 803					 IOMMU_PAGE_SIZE(tbl));
 804		iommu_free(tbl, dma_handle, npages);
 805	}
 806}
 807
 808/* Allocates a contiguous real buffer and creates mappings over it.
 809 * Returns the virtual address of the buffer and sets dma_handle
 810 * to the dma address (mapping) of the first page.
 811 */
 812void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 813			   size_t size,	dma_addr_t *dma_handle,
 814			   unsigned long mask, gfp_t flag, int node)
 815{
 816	void *ret = NULL;
 817	dma_addr_t mapping;
 818	unsigned int order;
 819	unsigned int nio_pages, io_order;
 820	struct page *page;
 821
 822	size = PAGE_ALIGN(size);
 823	order = get_order(size);
 824
 825	/*
 826	 * Client asked for way too much space.  This is checked later
 827	 * anyway.  It is easier to debug here for the drivers than in
 828	 * the tce tables.
 829	 */
 830	if (order >= IOMAP_MAX_ORDER) {
 831		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
 832			 size);
 833		return NULL;
 834	}
 835
 836	if (!tbl)
 837		return NULL;
 838
 839	/* Alloc enough pages (and possibly more) */
 840	page = alloc_pages_node(node, flag, order);
 841	if (!page)
 842		return NULL;
 843	ret = page_address(page);
 844	memset(ret, 0, size);
 845
 846	/* Set up tces to cover the allocated range */
 847	nio_pages = size >> tbl->it_page_shift;
 848	io_order = get_iommu_order(size, tbl);
 849	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 850			      mask >> tbl->it_page_shift, io_order, 0);
 851	if (mapping == DMA_ERROR_CODE) {
 852		free_pages((unsigned long)ret, order);
 853		return NULL;
 854	}
 855	*dma_handle = mapping;
 856	return ret;
 857}
 858
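    /*
     * Illustrative pairing of the coherent helpers above and below (sketch
     * only; callers normally pass the device's table and coherent DMA mask):
     *
     *	vaddr = iommu_alloc_coherent(dev, get_iommu_table_base(dev), size,
     *				     &dma_handle, dev->coherent_dma_mask,
     *				     GFP_KERNEL, dev_to_node(dev));
     *	...
     *	iommu_free_coherent(get_iommu_table_base(dev), size, vaddr, dma_handle);
     */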
 859void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 860			 void *vaddr, dma_addr_t dma_handle)
 861{
 862	if (tbl) {
 863		unsigned int nio_pages;
 864
 865		size = PAGE_ALIGN(size);
 866		nio_pages = size >> tbl->it_page_shift;
 867		iommu_free(tbl, dma_handle, nio_pages);
 868		size = PAGE_ALIGN(size);
 869		free_pages((unsigned long)vaddr, get_order(size));
 870	}
 871}
 872
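    /*
     * TCE permission bits are from the device's point of view: TCE_PCI_READ
     * lets the device read system memory (DMA_TO_DEVICE) and TCE_PCI_WRITE
     * lets it write system memory (DMA_FROM_DEVICE).
     */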
 873unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
 874{
 875	switch (dir) {
 876	case DMA_BIDIRECTIONAL:
 877		return TCE_PCI_READ | TCE_PCI_WRITE;
 878	case DMA_FROM_DEVICE:
 879		return TCE_PCI_WRITE;
 880	case DMA_TO_DEVICE:
 881		return TCE_PCI_READ;
 882	default:
 883		return 0;
 884	}
 885}
 886EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
 887
 888#ifdef CONFIG_IOMMU_API
 889/*
 890 * SPAPR TCE API
 891 */
 892static void group_release(void *iommu_data)
 893{
 894	struct iommu_table_group *table_group = iommu_data;
 895
 896	table_group->group = NULL;
 897}
 898
 899void iommu_register_group(struct iommu_table_group *table_group,
 900		int pci_domain_number, unsigned long pe_num)
 901{
 902	struct iommu_group *grp;
 903	char *name;
 904
 905	grp = iommu_group_alloc();
 906	if (IS_ERR(grp)) {
 907		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
 908				PTR_ERR(grp));
 909		return;
 910	}
 911	table_group->group = grp;
 912	iommu_group_set_iommudata(grp, table_group, group_release);
 913	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
 914			pci_domain_number, pe_num);
 915	if (!name)
 916		return;
 917	iommu_group_set_name(grp, name);
 918	kfree(name);
 919}
 920
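    /*
     * Inverse of iommu_direction_to_tce_perm(): recover the DMA direction
     * from the permission bits of a TCE value.
     */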
 921enum dma_data_direction iommu_tce_direction(unsigned long tce)
 922{
 923	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
 924		return DMA_BIDIRECTIONAL;
 925	else if (tce & TCE_PCI_READ)
 926		return DMA_TO_DEVICE;
 927	else if (tce & TCE_PCI_WRITE)
 928		return DMA_FROM_DEVICE;
 929	else
 930		return DMA_NONE;
 931}
 932EXPORT_SYMBOL_GPL(iommu_tce_direction);
 933
 934void iommu_flush_tce(struct iommu_table *tbl)
 935{
 936	/* Flush/invalidate TLB caches if necessary */
 937	if (tbl->it_ops->flush)
 938		tbl->it_ops->flush(tbl);
 939
 940	/* Make sure updates are seen by hardware */
 941	mb();
 942}
 943EXPORT_SYMBOL_GPL(iommu_flush_tce);
 944
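    /*
     * Validate a caller-supplied (ioba, tce_value, npages) triple before
     * clearing entries: only a zero tce_value is supported, the bus address
     * must be IOMMU-page aligned, and the whole range must fall inside the
     * table's window.
     */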
 945int iommu_tce_clear_param_check(struct iommu_table *tbl,
 946		unsigned long ioba, unsigned long tce_value,
 947		unsigned long npages)
 948{
 949	/* tbl->it_ops->clear() does not support any value but 0 */
 950	if (tce_value)
 951		return -EINVAL;
 952
 953	if (ioba & ~IOMMU_PAGE_MASK(tbl))
 954		return -EINVAL;
 955
 956	ioba >>= tbl->it_page_shift;
 957	if (ioba < tbl->it_offset)
 958		return -EINVAL;
 959
 960	if ((ioba + npages) > (tbl->it_offset + tbl->it_size))
 961		return -EINVAL;
 962
 963	return 0;
 964}
 965EXPORT_SYMBOL_GPL(iommu_tce_clear_param_check);
 966
 967int iommu_tce_put_param_check(struct iommu_table *tbl,
 968		unsigned long ioba, unsigned long tce)
 969{
 970	if (tce & ~IOMMU_PAGE_MASK(tbl))
 971		return -EINVAL;
 972
 973	if (ioba & ~IOMMU_PAGE_MASK(tbl))
 974		return -EINVAL;
 975
 976	ioba >>= tbl->it_page_shift;
 977	if (ioba < tbl->it_offset)
 978		return -EINVAL;
 979
 980	if ((ioba + 1) > (tbl->it_offset + tbl->it_size))
 981		return -EINVAL;
 982
 983	return 0;
 984}
 985EXPORT_SYMBOL_GPL(iommu_tce_put_param_check);
 986
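    /*
     * Exchange a single TCE: *hpa and *direction carry the new mapping in
     * and the old one out.  If the old entry allowed the device to write
     * memory, the backing page is marked dirty before the caller can
     * release it.
     */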
 987long iommu_tce_xchg(struct iommu_table *tbl, unsigned long entry,
 988		unsigned long *hpa, enum dma_data_direction *direction)
 989{
 990	long ret;
 991
 992	ret = tbl->it_ops->exchange(tbl, entry, hpa, direction);
 993
 994	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
 995			(*direction == DMA_BIDIRECTIONAL)))
 996		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
 997
 998	/* if (unlikely(ret))
 999		pr_err("iommu_tce: %s failed on hwaddr=%lx ioba=%lx kva=%lx ret=%d\n",
1000			__func__, hwaddr, entry << tbl->it_page_shift,
1001				hwaddr, ret); */
1002
1003	return ret;
1004}
1005EXPORT_SYMBOL_GPL(iommu_tce_xchg);
1006
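    /*
     * Taking ownership hands the whole table to an external user such as
     * VFIO: every bit in it_map is set so the kernel DMA API cannot
     * allocate from the table until iommu_release_ownership() clears the
     * map again.
     */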
1007int iommu_take_ownership(struct iommu_table *tbl)
1008{
1009	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1010	int ret = 0;
1011
1012	/*
1013	 * VFIO does not control TCE entry allocation and the guest
1014	 * can write new TCEs on top of existing ones, so iommu_tce_build()
1015	 * must be able to release old pages. This functionality
1016	 * requires the exchange() callback to be defined, so if it is not
1017	 * implemented, we disallow taking ownership of the table.
1018	 */
1019	if (!tbl->it_ops->exchange)
1020		return -EINVAL;
1021
1022	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1023	for (i = 0; i < tbl->nr_pools; i++)
1024		spin_lock(&tbl->pools[i].lock);
1025
1026	if (tbl->it_offset == 0)
1027		clear_bit(0, tbl->it_map);
1028
1029	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
1030		pr_err("iommu_tce: it_map is not empty\n");
1031		ret = -EBUSY;
1032		/* Restore bit#0 set by iommu_init_table() */
1033		if (tbl->it_offset == 0)
1034			set_bit(0, tbl->it_map);
1035	} else {
1036		memset(tbl->it_map, 0xff, sz);
1037	}
1038
1039	for (i = 0; i < tbl->nr_pools; i++)
1040		spin_unlock(&tbl->pools[i].lock);
1041	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1042
1043	return ret;
1044}
1045EXPORT_SYMBOL_GPL(iommu_take_ownership);
1046
1047void iommu_release_ownership(struct iommu_table *tbl)
1048{
1049	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1050
1051	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1052	for (i = 0; i < tbl->nr_pools; i++)
1053		spin_lock(&tbl->pools[i].lock);
1054
1055	memset(tbl->it_map, 0, sz);
1056
1057	/* Restore bit#0 set by iommu_init_table() */
1058	if (tbl->it_offset == 0)
1059		set_bit(0, tbl->it_map);
1060
1061	for (i = 0; i < tbl->nr_pools; i++)
1062		spin_unlock(&tbl->pools[i].lock);
1063	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1064}
1065EXPORT_SYMBOL_GPL(iommu_release_ownership);
1066
1067int iommu_add_device(struct device *dev)
1068{
1069	struct iommu_table *tbl;
1070	struct iommu_table_group_link *tgl;
1071
1072	/*
1073	 * The sysfs entries should be populated before
1074	 * binding the IOMMU group. If the sysfs entries aren't
1075	 * ready, we simply bail.
1076	 */
1077	if (!device_is_registered(dev))
1078		return -ENOENT;
1079
1080	if (dev->iommu_group) {
1081		pr_debug("%s: Skipping device %s with iommu group %d\n",
1082			 __func__, dev_name(dev),
1083			 iommu_group_id(dev->iommu_group));
1084		return -EBUSY;
1085	}
1086
1087	tbl = get_iommu_table_base(dev);
1088	if (!tbl) {
1089		pr_debug("%s: Skipping device %s with no tbl\n",
1090			 __func__, dev_name(dev));
1091		return 0;
1092	}
1093
1094	tgl = list_first_entry_or_null(&tbl->it_group_list,
1095			struct iommu_table_group_link, next);
1096	if (!tgl) {
1097		pr_debug("%s: Skipping device %s with no group\n",
1098			 __func__, dev_name(dev));
1099		return 0;
1100	}
1101	pr_debug("%s: Adding %s to iommu group %d\n",
1102		 __func__, dev_name(dev),
1103		 iommu_group_id(tgl->table_group->group));
1104
1105	if (PAGE_SIZE < IOMMU_PAGE_SIZE(tbl)) {
1106		pr_err("%s: Invalid IOMMU page size %lx (%lx) on %s\n",
1107		       __func__, IOMMU_PAGE_SIZE(tbl),
1108		       PAGE_SIZE, dev_name(dev));
1109		return -EINVAL;
1110	}
1111
1112	return iommu_group_add_device(tgl->table_group->group, dev);
1113}
1114EXPORT_SYMBOL_GPL(iommu_add_device);
1115
1116void iommu_del_device(struct device *dev)
1117{
1118	/*
1119	 * Some devices might not have an IOMMU table and group,
1120	 * and we need not detach them from the associated
1121	 * IOMMU groups.
1122	 */
1123	if (!dev->iommu_group) {
1124		pr_debug("iommu_tce: skipping device %s with no tbl\n",
1125			 dev_name(dev));
1126		return;
1127	}
1128
1129	iommu_group_remove_device(dev);
1130}
1131EXPORT_SYMBOL_GPL(iommu_del_device);
1132
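    /*
     * PCI bus notifier: devices are attached to their IOMMU group as they
     * are added to the bus and detached on removal, so group membership
     * tracks hotplug without any help from individual drivers.
     */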
1133static int tce_iommu_bus_notifier(struct notifier_block *nb,
1134		unsigned long action, void *data)
1135{
1136	struct device *dev = data;
1137
1138	switch (action) {
1139	case BUS_NOTIFY_ADD_DEVICE:
1140		return iommu_add_device(dev);
1141	case BUS_NOTIFY_DEL_DEVICE:
1142		if (dev->iommu_group)
1143			iommu_del_device(dev);
1144		return 0;
1145	default:
1146		return 0;
1147	}
1148}
1149
1150static struct notifier_block tce_iommu_bus_nb = {
1151	.notifier_call = tce_iommu_bus_notifier,
1152};
1153
1154int __init tce_iommu_bus_notifier_init(void)
1155{
1156	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
1157	return 0;
1158}
1159#endif /* CONFIG_IOMMU_API */