arch/powerpc/kernel/iommu.c (v5.9)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
   4 * 
   5 * Rewrite, cleanup, new allocation schemes, virtual merging: 
   6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
   7 *               and  Ben. Herrenschmidt, IBM Corporation
   8 *
   9 * Dynamic DMA mapping support, bus-independent parts.
  10 */
  11
  12
  13#include <linux/init.h>
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <linux/mm.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/bitmap.h>
  21#include <linux/iommu-helper.h>
  22#include <linux/crash_dump.h>
  23#include <linux/hash.h>
  24#include <linux/fault-inject.h>
  25#include <linux/pci.h>
  26#include <linux/iommu.h>
  27#include <linux/sched.h>
  28#include <asm/io.h>
  29#include <asm/prom.h>
  30#include <asm/iommu.h>
  31#include <asm/pci-bridge.h>
  32#include <asm/machdep.h>
  33#include <asm/kdump.h>
  34#include <asm/fadump.h>
  35#include <asm/vio.h>
  36#include <asm/tce.h>
  37#include <asm/mmu_context.h>
  38
  39#define DBG(...)
  40
  41static int novmerge;
  42
  43static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
  44
  45static int __init setup_iommu(char *str)
  46{
  47	if (!strcmp(str, "novmerge"))
  48		novmerge = 1;
  49	else if (!strcmp(str, "vmerge"))
  50		novmerge = 0;
  51	return 1;
  52}
  53
  54__setup("iommu=", setup_iommu);
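/*
 * Usage illustration (not part of the upstream file): the option above
 * is parsed from the kernel command line at boot, e.g.
 *
 *	linux ... iommu=novmerge	(disable virtual merging)
 *	linux ... iommu=vmerge		(re-enable it; the default)
 */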
  55
  56static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
  57
  58/*
  59 * We precalculate the hash to avoid doing it on every allocation.
  60 *
  61 * The hash is important to spread CPUs across all the pools. For example,
  62 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
  63 * with 4 pools all primary threads would map to the same pool.
  64 */
  65static int __init setup_iommu_pool_hash(void)
  66{
  67	unsigned int i;
  68
  69	for_each_possible_cpu(i)
  70		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
  71
  72	return 0;
  73}
  74subsys_initcall(setup_iommu_pool_hash);
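/*
 * Illustration (assumed SMT-4 CPU numbering, not from this file): with a
 * plain "cpu % nr_pools" the primary threads 0, 4, 8 and 12 would all
 * select pool 0 on a 4-pool table, whereas hash_32(cpu,
 * IOMMU_POOL_HASHBITS) is intended to scatter those CPU numbers across
 * the pools.
 */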
  75
  76#ifdef CONFIG_FAIL_IOMMU
  77
  78static DECLARE_FAULT_ATTR(fail_iommu);
  79
  80static int __init setup_fail_iommu(char *str)
  81{
  82	return setup_fault_attr(&fail_iommu, str);
  83}
  84__setup("fail_iommu=", setup_fail_iommu);
  85
  86static bool should_fail_iommu(struct device *dev)
  87{
  88	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
  89}
  90
  91static int __init fail_iommu_debugfs(void)
  92{
  93	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
  94						       NULL, &fail_iommu);
  95
  96	return PTR_ERR_OR_ZERO(dir);
  97}
  98late_initcall(fail_iommu_debugfs);
  99
 100static ssize_t fail_iommu_show(struct device *dev,
 101			       struct device_attribute *attr, char *buf)
 102{
 103	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
 104}
 105
 106static ssize_t fail_iommu_store(struct device *dev,
 107				struct device_attribute *attr, const char *buf,
 108				size_t count)
 109{
 110	int i;
 111
 112	if (count > 0 && sscanf(buf, "%d", &i) > 0)
 113		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
 114
 115	return count;
 116}
 117
 118static DEVICE_ATTR_RW(fail_iommu);
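/*
 * Usage sketch (the device path is hypothetical): the fault attributes
 * accept the standard fault-injection boot format,
 *
 *	fail_iommu=<interval>,<probability>,<space>,<times>
 *
 * and a device must additionally opt in through the sysfs attribute
 * registered by the bus notifier below:
 *
 *	echo 1 > /sys/bus/pci/devices/0000:00:01.0/fail_iommu
 */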
 119
 120static int fail_iommu_bus_notify(struct notifier_block *nb,
 121				 unsigned long action, void *data)
 122{
 123	struct device *dev = data;
 124
 125	if (action == BUS_NOTIFY_ADD_DEVICE) {
 126		if (device_create_file(dev, &dev_attr_fail_iommu))
 127			pr_warn("Unable to create IOMMU fault injection sysfs "
 128				"entries\n");
 129	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
 130		device_remove_file(dev, &dev_attr_fail_iommu);
 131	}
 132
 133	return 0;
 134}
 135
 136static struct notifier_block fail_iommu_bus_notifier = {
 137	.notifier_call = fail_iommu_bus_notify
 138};
 139
 140static int __init fail_iommu_setup(void)
 141{
 142#ifdef CONFIG_PCI
 143	bus_register_notifier(&pci_bus_type, &fail_iommu_bus_notifier);
 144#endif
 145#ifdef CONFIG_IBMVIO
 146	bus_register_notifier(&vio_bus_type, &fail_iommu_bus_notifier);
 147#endif
 148
 149	return 0;
 150}
 151/*
 152 * Must execute after PCI and VIO subsystem have initialised but before
 153 * devices are probed.
 154 */
 155arch_initcall(fail_iommu_setup);
 156#else
 157static inline bool should_fail_iommu(struct device *dev)
 158{
 159	return false;
 160}
 161#endif
 162
 163static unsigned long iommu_range_alloc(struct device *dev,
 164				       struct iommu_table *tbl,
 165                                       unsigned long npages,
 166                                       unsigned long *handle,
 167                                       unsigned long mask,
 168                                       unsigned int align_order)
 169{ 
 170	unsigned long n, end, start;
 171	unsigned long limit;
 172	int largealloc = npages > 15;
 173	int pass = 0;
 174	unsigned long align_mask;
 175	unsigned long boundary_size;
 176	unsigned long flags;
 177	unsigned int pool_nr;
 178	struct iommu_pool *pool;
 179
 180	align_mask = (1ull << align_order) - 1;
 181
 182	/* This allocator was derived from x86_64's bit string search */
 183
 184	/* Sanity check */
 185	if (unlikely(npages == 0)) {
 186		if (printk_ratelimit())
 187			WARN_ON(1);
 188		return DMA_MAPPING_ERROR;
 189	}
 190
 191	if (should_fail_iommu(dev))
 192		return DMA_MAPPING_ERROR;
 193
 194	/*
 195	 * We don't need to disable preemption here because any CPU can
 196	 * safely use any IOMMU pool.
 197	 */
 198	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 199
 200	if (largealloc)
 201		pool = &(tbl->large_pool);
 202	else
 203		pool = &(tbl->pools[pool_nr]);
 204
 205	spin_lock_irqsave(&(pool->lock), flags);
 206
 207again:
 208	if ((pass == 0) && handle && *handle &&
 209	    (*handle >= pool->start) && (*handle < pool->end))
 210		start = *handle;
 211	else
 212		start = pool->hint;
 213
 214	limit = pool->end;
 215
 216	/* The case below can happen if we have a small segment appended
 217	 * to a large, or when the previous alloc was at the very end of
 218	 * the available space. If so, go back to the initial start.
 219	 */
 220	if (start >= limit)
 221		start = pool->start;
 222
 223	if (limit + tbl->it_offset > mask) {
 224		limit = mask - tbl->it_offset + 1;
 225		/* If we're constrained on address range, first try
 226		 * at the masked hint to avoid O(n) search complexity,
 227		 * but on second pass, start at 0 in pool 0.
 228		 */
 229		if ((start & mask) >= limit || pass > 0) {
 230			spin_unlock(&(pool->lock));
 231			pool = &(tbl->pools[0]);
 232			spin_lock(&(pool->lock));
 233			start = pool->start;
 234		} else {
 235			start &= mask;
 236		}
 237	}
 238
 239	if (dev)
 240		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
 241				      1 << tbl->it_page_shift);
 242	else
 243		boundary_size = ALIGN(1UL << 32, 1 << tbl->it_page_shift);
 244	/* 4GB boundary for iseries_hv_alloc and iseries_hv_map */
 245
 246	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
 247			     boundary_size >> tbl->it_page_shift, align_mask);
 248	if (n == -1) {
 249		if (likely(pass == 0)) {
 250			/* First try the pool from the start */
 251			pool->hint = pool->start;
 252			pass++;
 253			goto again;
 254
 255		} else if (pass <= tbl->nr_pools) {
 256			/* Now try scanning all the other pools */
 257			spin_unlock(&(pool->lock));
 258			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
 259			pool = &tbl->pools[pool_nr];
 260			spin_lock(&(pool->lock));
 261			pool->hint = pool->start;
 262			pass++;
 263			goto again;
 264
 265		} else {
 266			/* Give up */
 267			spin_unlock_irqrestore(&(pool->lock), flags);
 268			return DMA_MAPPING_ERROR;
 269		}
 270	}
 271
 272	end = n + npages;
 273
 274	/* Bump the hint to a new block for small allocs. */
 275	if (largealloc) {
 276		/* Don't bump to new block to avoid fragmentation */
 277		pool->hint = end;
 278	} else {
 279		/* Overflow will be taken care of at the next allocation */
 280		pool->hint = (end + tbl->it_blocksize - 1) &
 281		                ~(tbl->it_blocksize - 1);
 282	}
 283
 284	/* Update handle for SG allocations */
 285	if (handle)
 286		*handle = end;
 287
 288	spin_unlock_irqrestore(&(pool->lock), flags);
 289
 290	return n;
 291}
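/*
 * Layout sketch (derived from iommu_init_table() below): the bottom 3/4
 * of the table is split into nr_pools equal small pools and the top 1/4
 * forms the large pool, so allocations of more than 15 pages go straight
 * to the large pool while small allocations start in the pool picked by
 * the per-CPU hash and fall back to scanning the other pools:
 *
 *	[ pool 0 | pool 1 | ... | pool N-1 | large pool ]
 *	 <------- 3/4 of it_size ---------><- top 1/4 ->
 */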
 292
 293static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 294			      void *page, unsigned int npages,
 295			      enum dma_data_direction direction,
 296			      unsigned long mask, unsigned int align_order,
 297			      unsigned long attrs)
 298{
 299	unsigned long entry;
 300	dma_addr_t ret = DMA_MAPPING_ERROR;
 301	int build_fail;
 302
 303	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 304
 305	if (unlikely(entry == DMA_MAPPING_ERROR))
 306		return DMA_MAPPING_ERROR;
 307
 308	entry += tbl->it_offset;	/* Offset into real TCE table */
 309	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 310
 311	/* Put the TCEs in the HW table */
 312	build_fail = tbl->it_ops->set(tbl, entry, npages,
 313				      (unsigned long)page &
 314				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 315
 316	/* tbl->it_ops->set() only returns non-zero for transient errors.
 317	 * Clean up the table bitmap in this case and return
 318	 * DMA_MAPPING_ERROR. For all other errors the functionality is
 319	 * not altered.
 320	 */
 321	if (unlikely(build_fail)) {
 322		__iommu_free(tbl, ret, npages);
 323		return DMA_MAPPING_ERROR;
 324	}
 325
 326	/* Flush/invalidate TLB caches if necessary */
 327	if (tbl->it_ops->flush)
 328		tbl->it_ops->flush(tbl);
 329
 330	/* Make sure updates are seen by hardware */
 331	mb();
 332
 333	return ret;
 334}
 335
 336static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 337			     unsigned int npages)
 338{
 339	unsigned long entry, free_entry;
 340
 341	entry = dma_addr >> tbl->it_page_shift;
 342	free_entry = entry - tbl->it_offset;
 343
 344	if (((free_entry + npages) > tbl->it_size) ||
 345	    (entry < tbl->it_offset)) {
 346		if (printk_ratelimit()) {
 347			printk(KERN_INFO "iommu_free: invalid entry\n");
 348			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
 349			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
 350			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
 351			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
 352			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
 353			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
 354			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
 355			WARN_ON(1);
 356		}
 357
 358		return false;
 359	}
 360
 361	return true;
 362}
 363
 364static struct iommu_pool *get_pool(struct iommu_table *tbl,
 365				   unsigned long entry)
 366{
 367	struct iommu_pool *p;
 368	unsigned long largepool_start = tbl->large_pool.start;
 369
 370	/* The large pool is the last pool at the top of the table */
 371	if (entry >= largepool_start) {
 372		p = &tbl->large_pool;
 373	} else {
 374		unsigned int pool_nr = entry / tbl->poolsize;
 375
 376		BUG_ON(pool_nr > tbl->nr_pools);
 377		p = &tbl->pools[pool_nr];
 378	}
 379
 380	return p;
 381}
 382
 383static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 384			 unsigned int npages)
 385{
 386	unsigned long entry, free_entry;
 387	unsigned long flags;
 388	struct iommu_pool *pool;
 389
 390	entry = dma_addr >> tbl->it_page_shift;
 391	free_entry = entry - tbl->it_offset;
 392
 393	pool = get_pool(tbl, free_entry);
 394
 395	if (!iommu_free_check(tbl, dma_addr, npages))
 396		return;
 397
 398	tbl->it_ops->clear(tbl, entry, npages);
 399
 400	spin_lock_irqsave(&(pool->lock), flags);
 401	bitmap_clear(tbl->it_map, free_entry, npages);
 402	spin_unlock_irqrestore(&(pool->lock), flags);
 403}
 404
 405static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 406		unsigned int npages)
 407{
 408	__iommu_free(tbl, dma_addr, npages);
 409
 410	/* Make sure TLB cache is flushed if the HW needs it. We do
 411	 * not do an mb() here on purpose, it is not needed on any of
 412	 * the current platforms.
 413	 */
 414	if (tbl->it_ops->flush)
 415		tbl->it_ops->flush(tbl);
 416}
 417
 418int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 419		     struct scatterlist *sglist, int nelems,
 420		     unsigned long mask, enum dma_data_direction direction,
 421		     unsigned long attrs)
 422{
 423	dma_addr_t dma_next = 0, dma_addr;
 424	struct scatterlist *s, *outs, *segstart;
 425	int outcount, incount, i, build_fail = 0;
 426	unsigned int align;
 427	unsigned long handle;
 428	unsigned int max_seg_size;
 429
 430	BUG_ON(direction == DMA_NONE);
 431
 432	if ((nelems == 0) || !tbl)
 433		return 0;
 434
 435	outs = s = segstart = &sglist[0];
 436	outcount = 1;
 437	incount = nelems;
 438	handle = 0;
 439
 440	/* Init first segment length for backout at failure */
 441	outs->dma_length = 0;
 442
 443	DBG("sg mapping %d elements:\n", nelems);
 444
 445	max_seg_size = dma_get_max_seg_size(dev);
 446	for_each_sg(sglist, s, nelems, i) {
 447		unsigned long vaddr, npages, entry, slen;
 448
 449		slen = s->length;
 450		/* Sanity check */
 451		if (slen == 0) {
 452			dma_next = 0;
 453			continue;
 454		}
 455		/* Allocate iommu entries for that segment */
 456		vaddr = (unsigned long) sg_virt(s);
 457		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 458		align = 0;
 459		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 460		    (vaddr & ~PAGE_MASK) == 0)
 461			align = PAGE_SHIFT - tbl->it_page_shift;
 462		entry = iommu_range_alloc(dev, tbl, npages, &handle,
 463					  mask >> tbl->it_page_shift, align);
 464
 465		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 466
 467		/* Handle failure */
 468		if (unlikely(entry == DMA_MAPPING_ERROR)) {
 469			if (!(attrs & DMA_ATTR_NO_WARN) &&
 470			    printk_ratelimit())
 471				dev_info(dev, "iommu_alloc failed, tbl %p "
 472					 "vaddr %lx npages %lu\n", tbl, vaddr,
 473					 npages);
 474			goto failure;
 475		}
 476
 477		/* Convert entry to a dma_addr_t */
 478		entry += tbl->it_offset;
 479		dma_addr = entry << tbl->it_page_shift;
 480		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
 481
 482		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 483			    npages, entry, dma_addr);
 484
 485		/* Insert into HW table */
 486		build_fail = tbl->it_ops->set(tbl, entry, npages,
 487					      vaddr & IOMMU_PAGE_MASK(tbl),
 488					      direction, attrs);
 489		if(unlikely(build_fail))
 490			goto failure;
 491
 492		/* If we are in an open segment, try merging */
 493		if (segstart != s) {
 494			DBG("  - trying merge...\n");
 495			/* We cannot merge if:
 496			 * - allocated dma_addr isn't contiguous to previous allocation
 497			 */
 498			if (novmerge || (dma_addr != dma_next) ||
 499			    (outs->dma_length + s->length > max_seg_size)) {
 500				/* Can't merge: create a new segment */
 501				segstart = s;
 502				outcount++;
 503				outs = sg_next(outs);
 504				DBG("    can't merge, new segment.\n");
 505			} else {
 506				outs->dma_length += s->length;
 507				DBG("    merged, new len: %ux\n", outs->dma_length);
 508			}
 509		}
 510
 511		if (segstart == s) {
 512			/* This is a new segment, fill entries */
 513			DBG("  - filling new segment.\n");
 514			outs->dma_address = dma_addr;
 515			outs->dma_length = slen;
 516		}
 517
 518		/* Calculate next page pointer for contiguous check */
 519		dma_next = dma_addr + slen;
 520
 521		DBG("  - dma next is: %lx\n", dma_next);
 522	}
 523
 524	/* Flush/invalidate TLB caches if necessary */
 525	if (tbl->it_ops->flush)
 526		tbl->it_ops->flush(tbl);
 527
 528	DBG("mapped %d elements:\n", outcount);
 529
 530	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
 531	 * next entry of the sglist if we didn't fill the list completely
 532	 */
 533	if (outcount < incount) {
 534		outs = sg_next(outs);
 535		outs->dma_address = DMA_MAPPING_ERROR;
 536		outs->dma_length = 0;
 537	}
 538
 539	/* Make sure updates are seen by hardware */
 540	mb();
 541
 542	return outcount;
 543
 544 failure:
 545	for_each_sg(sglist, s, nelems, i) {
 546		if (s->dma_length != 0) {
 547			unsigned long vaddr, npages;
 548
 549			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 550			npages = iommu_num_pages(s->dma_address, s->dma_length,
 551						 IOMMU_PAGE_SIZE(tbl));
 552			__iommu_free(tbl, vaddr, npages);
 553			s->dma_address = DMA_MAPPING_ERROR;
 554			s->dma_length = 0;
 555		}
 556		if (s == outs)
 557			break;
 558	}
 559	return 0;
 560}
 561
 562
 563void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 564			int nelems, enum dma_data_direction direction,
 565			unsigned long attrs)
 566{
 567	struct scatterlist *sg;
 568
 569	BUG_ON(direction == DMA_NONE);
 570
 571	if (!tbl)
 572		return;
 573
 574	sg = sglist;
 575	while (nelems--) {
 576		unsigned int npages;
 577		dma_addr_t dma_handle = sg->dma_address;
 578
 579		if (sg->dma_length == 0)
 580			break;
 581		npages = iommu_num_pages(dma_handle, sg->dma_length,
 582					 IOMMU_PAGE_SIZE(tbl));
 583		__iommu_free(tbl, dma_handle, npages);
 584		sg = sg_next(sg);
 585	}
 586
 587	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
 588	 * do not do an mb() here, the affected platforms do not need it
 589	 * when freeing.
 590	 */
 591	if (tbl->it_ops->flush)
 592		tbl->it_ops->flush(tbl);
 593}
 594
 595static void iommu_table_clear(struct iommu_table *tbl)
 596{
 597	/*
 598	 * In case of firmware assisted dump system goes through clean
 599	 * reboot process at the time of system crash. Hence it's safe to
 600	 * clear the TCE entries if firmware assisted dump is active.
 601	 */
 602	if (!is_kdump_kernel() || is_fadump_active()) {
 603		/* Clear the table in case firmware left allocations in it */
 604		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 605		return;
 606	}
 607
 608#ifdef CONFIG_CRASH_DUMP
 609	if (tbl->it_ops->get) {
 610		unsigned long index, tceval, tcecount = 0;
 611
 612		/* Reserve the existing mappings left by the first kernel. */
 613		for (index = 0; index < tbl->it_size; index++) {
 614			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 615			/*
 616			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
 617			 */
 618			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
 619				__set_bit(index, tbl->it_map);
 620				tcecount++;
 621			}
 622		}
 623
 624		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
 625			printk(KERN_WARNING "TCE table is full; freeing ");
 626			printk(KERN_WARNING "%d entries for the kdump boot\n",
 627				KDUMP_MIN_TCE_ENTRIES);
 628			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
 629				index < tbl->it_size; index++)
 630				__clear_bit(index, tbl->it_map);
 631		}
 632	}
 633#endif
 634}
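/*
 * Summary of the kdump path above: in a kdump kernel the first kernel's
 * live TCEs are preserved by marking them in the bitmap, and only if
 * fewer than KDUMP_MIN_TCE_ENTRIES entries would remain free is the tail
 * of the table reclaimed for the capture kernel's own DMA.
 */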
 635
 636static void iommu_table_reserve_pages(struct iommu_table *tbl,
 637		unsigned long res_start, unsigned long res_end)
 638{
 639	int i;
 640
 641	WARN_ON_ONCE(res_end < res_start);
 642	/*
 643	 * Reserve page 0 so it will not be used for any mappings.
 644	 * This avoids buggy drivers that consider page 0 to be invalid
 645	 * to crash the machine or even lose data.
 646	 */
 647	if (tbl->it_offset == 0)
 648		set_bit(0, tbl->it_map);
 649
 650	tbl->it_reserved_start = res_start;
 651	tbl->it_reserved_end = res_end;
 652
 653	/* Check if res_start..res_end isn't empty and overlaps the table */
 654	if (res_start && res_end &&
 655			(tbl->it_offset + tbl->it_size < res_start ||
 656			 res_end < tbl->it_offset))
 657		return;
 658
 659	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
 660		set_bit(i - tbl->it_offset, tbl->it_map);
 661}
 662
 663static void iommu_table_release_pages(struct iommu_table *tbl)
 664{
 665	int i;
 666
 667	/*
 668	 * In case we have reserved the first bit, we should not emit
 669	 * the warning below.
 670	 */
 671	if (tbl->it_offset == 0)
 672		clear_bit(0, tbl->it_map);
 673
 674	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
 675		clear_bit(i - tbl->it_offset, tbl->it_map);
 676}
 677
 678/*
 679 * Build a iommu_table structure.  This contains a bit map which
 680 * is used to manage allocation of the tce space.
 681 */
 682struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
 683		unsigned long res_start, unsigned long res_end)
 684{
 685	unsigned long sz;
 686	static int welcomed = 0;
 687	struct page *page;
 688	unsigned int i;
 689	struct iommu_pool *p;
 690
 691	BUG_ON(!tbl->it_ops);
 692
 693	/* number of bytes needed for the bitmap */
 694	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 695
 696	page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz));
 697	if (!page)
 698		panic("iommu_init_table: Can't allocate %ld bytes\n", sz);
 699	tbl->it_map = page_address(page);
 700	memset(tbl->it_map, 0, sz);
 701
 702	iommu_table_reserve_pages(tbl, res_start, res_end);
 703
 704	/* We only split the IOMMU table if we have 1GB or more of space */
 705	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 706		tbl->nr_pools = IOMMU_NR_POOLS;
 707	else
 708		tbl->nr_pools = 1;
 709
 710	/* We reserve the top 1/4 of the table for large allocations */
 711	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
 712
 713	for (i = 0; i < tbl->nr_pools; i++) {
 714		p = &tbl->pools[i];
 715		spin_lock_init(&(p->lock));
 716		p->start = tbl->poolsize * i;
 717		p->hint = p->start;
 718		p->end = p->start + tbl->poolsize;
 719	}
 720
 721	p = &tbl->large_pool;
 722	spin_lock_init(&(p->lock));
 723	p->start = tbl->poolsize * i;
 724	p->hint = p->start;
 725	p->end = tbl->it_size;
 726
 727	iommu_table_clear(tbl);
 728
 729	if (!welcomed) {
 730		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 731		       novmerge ? "disabled" : "enabled");
 732		welcomed = 1;
 733	}
 734
 735	return tbl;
 736}
 737
 738static void iommu_table_free(struct kref *kref)
 739{
 740	unsigned long bitmap_sz;
 741	unsigned int order;
 742	struct iommu_table *tbl;
 743
 744	tbl = container_of(kref, struct iommu_table, it_kref);
 745
 746	if (tbl->it_ops->free)
 747		tbl->it_ops->free(tbl);
 748
 749	if (!tbl->it_map) {
 750		kfree(tbl);
 751		return;
 752	}
 753
 754	iommu_table_release_pages(tbl);
 755
 756	/* verify that table contains no entries */
 757	if (!bitmap_empty(tbl->it_map, tbl->it_size))
 758		pr_warn("%s: Unexpected TCEs\n", __func__);
 759
 760	/* calculate bitmap size in bytes */
 761	bitmap_sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 762
 763	/* free bitmap */
 764	order = get_order(bitmap_sz);
 765	free_pages((unsigned long) tbl->it_map, order);
 766
 767	/* free table */
 768	kfree(tbl);
 769}
 770
 771struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
 772{
 773	if (kref_get_unless_zero(&tbl->it_kref))
 774		return tbl;
 775
 776	return NULL;
 777}
 778EXPORT_SYMBOL_GPL(iommu_tce_table_get);
 779
 780int iommu_tce_table_put(struct iommu_table *tbl)
 781{
 782	if (WARN_ON(!tbl))
 783		return 0;
 784
 785	return kref_put(&tbl->it_kref, iommu_table_free);
 786}
 787EXPORT_SYMBOL_GPL(iommu_tce_table_put);
 788
 789/* Creates TCEs for a user provided buffer.  The user buffer must be
 790 * contiguous real kernel storage (not vmalloc).  The address passed here
 791 * comprises a page address and offset into that page. The dma_addr_t
 792 * returned will point to the same byte within the page as was passed in.
 793 */
 794dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 795			  struct page *page, unsigned long offset, size_t size,
 796			  unsigned long mask, enum dma_data_direction direction,
 797			  unsigned long attrs)
 798{
 799	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
 800	void *vaddr;
 801	unsigned long uaddr;
 802	unsigned int npages, align;
 803
 804	BUG_ON(direction == DMA_NONE);
 805
 806	vaddr = page_address(page) + offset;
 807	uaddr = (unsigned long)vaddr;
 808
 809	if (tbl) {
 810		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 811		align = 0;
 812		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 813		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 814			align = PAGE_SHIFT - tbl->it_page_shift;
 815
 816		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 817					 mask >> tbl->it_page_shift, align,
 818					 attrs);
 819		if (dma_handle == DMA_MAPPING_ERROR) {
 820			if (!(attrs & DMA_ATTR_NO_WARN) &&
 821			    printk_ratelimit())  {
 822				dev_info(dev, "iommu_alloc failed, tbl %p "
 823					 "vaddr %p npages %d\n", tbl, vaddr,
 824					 npages);
 825			}
 826		} else
 827			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 828	}
 829
 830	return dma_handle;
 831}
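/*
 * Alignment example (illustrative numbers): with 64K kernel pages
 * (PAGE_SHIFT == 16) and 4K IOMMU pages (it_page_shift == 12), a
 * page-aligned buffer of at least PAGE_SIZE is mapped with
 * align_order == 4, so the TCE range starts on a 16-IOMMU-page boundary
 * and the returned DMA address keeps the buffer's 64K alignment.
 */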
 832
 833void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 834		      size_t size, enum dma_data_direction direction,
 835		      unsigned long attrs)
 836{
 837	unsigned int npages;
 838
 839	BUG_ON(direction == DMA_NONE);
 840
 841	if (tbl) {
 842		npages = iommu_num_pages(dma_handle, size,
 843					 IOMMU_PAGE_SIZE(tbl));
 844		iommu_free(tbl, dma_handle, npages);
 845	}
 846}
 847
 848/* Allocates a contiguous real buffer and creates mappings over it.
 849 * Returns the virtual address of the buffer and sets dma_handle
 850 * to the dma address (mapping) of the first page.
 851 */
 852void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 853			   size_t size,	dma_addr_t *dma_handle,
 854			   unsigned long mask, gfp_t flag, int node)
 855{
 856	void *ret = NULL;
 857	dma_addr_t mapping;
 858	unsigned int order;
 859	unsigned int nio_pages, io_order;
 860	struct page *page;
 861
 862	size = PAGE_ALIGN(size);
 863	order = get_order(size);
 864
 865 	/*
 866	 * Client asked for way too much space.  This is checked later
 867	 * anyway.  It is easier to debug here for the drivers than in
 868	 * the tce tables.
 869	 */
 870	if (order >= IOMAP_MAX_ORDER) {
 871		dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n",
 872			 size);
 873		return NULL;
 874	}
 875
 876	if (!tbl)
 877		return NULL;
 878
 879	/* Alloc enough pages (and possibly more) */
 880	page = alloc_pages_node(node, flag, order);
 881	if (!page)
 882		return NULL;
 883	ret = page_address(page);
 884	memset(ret, 0, size);
 885
 886	/* Set up tces to cover the allocated range */
 887	nio_pages = size >> tbl->it_page_shift;
 888	io_order = get_iommu_order(size, tbl);
 889	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 890			      mask >> tbl->it_page_shift, io_order, 0);
 891	if (mapping == DMA_MAPPING_ERROR) {
 892		free_pages((unsigned long)ret, order);
 893		return NULL;
 894	}
 895	*dma_handle = mapping;
 896	return ret;
 897}
 898
 899void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 900			 void *vaddr, dma_addr_t dma_handle)
 901{
 902	if (tbl) {
 903		unsigned int nio_pages;
 904
 905		size = PAGE_ALIGN(size);
 906		nio_pages = size >> tbl->it_page_shift;
 907		iommu_free(tbl, dma_handle, nio_pages);
 908		size = PAGE_ALIGN(size);
 909		free_pages((unsigned long)vaddr, get_order(size));
 910	}
 911}
 912
 913unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
 914{
 915	switch (dir) {
 916	case DMA_BIDIRECTIONAL:
 917		return TCE_PCI_READ | TCE_PCI_WRITE;
 918	case DMA_FROM_DEVICE:
 919		return TCE_PCI_WRITE;
 920	case DMA_TO_DEVICE:
 921		return TCE_PCI_READ;
 922	default:
 923		return 0;
 924	}
 925}
 926EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
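/*
 * Quick reference for the mapping above:
 *
 *	DMA_BIDIRECTIONAL -> TCE_PCI_READ | TCE_PCI_WRITE
 *	DMA_FROM_DEVICE   -> TCE_PCI_WRITE   (device writes to memory)
 *	DMA_TO_DEVICE     -> TCE_PCI_READ    (device reads from memory)
 *	anything else     -> 0               (no access)
 */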
 927
 928#ifdef CONFIG_IOMMU_API
 929/*
 930 * SPAPR TCE API
 931 */
 932static void group_release(void *iommu_data)
 933{
 934	struct iommu_table_group *table_group = iommu_data;
 935
 936	table_group->group = NULL;
 937}
 938
 939void iommu_register_group(struct iommu_table_group *table_group,
 940		int pci_domain_number, unsigned long pe_num)
 941{
 942	struct iommu_group *grp;
 943	char *name;
 944
 945	grp = iommu_group_alloc();
 946	if (IS_ERR(grp)) {
 947		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
 948				PTR_ERR(grp));
 949		return;
 950	}
 951	table_group->group = grp;
 952	iommu_group_set_iommudata(grp, table_group, group_release);
 953	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
 954			pci_domain_number, pe_num);
 955	if (!name)
 956		return;
 957	iommu_group_set_name(grp, name);
 958	kfree(name);
 959}
 960
 961enum dma_data_direction iommu_tce_direction(unsigned long tce)
 962{
 963	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
 964		return DMA_BIDIRECTIONAL;
 965	else if (tce & TCE_PCI_READ)
 966		return DMA_TO_DEVICE;
 967	else if (tce & TCE_PCI_WRITE)
 968		return DMA_FROM_DEVICE;
 969	else
 970		return DMA_NONE;
 971}
 972EXPORT_SYMBOL_GPL(iommu_tce_direction);
 973
 974void iommu_flush_tce(struct iommu_table *tbl)
 975{
 976	/* Flush/invalidate TLB caches if necessary */
 977	if (tbl->it_ops->flush)
 978		tbl->it_ops->flush(tbl);
 979
 980	/* Make sure updates are seen by hardware */
 981	mb();
 982}
 983EXPORT_SYMBOL_GPL(iommu_flush_tce);
 984
 985int iommu_tce_check_ioba(unsigned long page_shift,
 986		unsigned long offset, unsigned long size,
 987		unsigned long ioba, unsigned long npages)
 988{
 989	unsigned long mask = (1UL << page_shift) - 1;
 990
 991	if (ioba & mask)
 992		return -EINVAL;
 993
 994	ioba >>= page_shift;
 995	if (ioba < offset)
 996		return -EINVAL;
 997
 998	if ((ioba + 1) > (offset + size))
 999		return -EINVAL;
1000
1001	return 0;
1002}
1003EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
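/*
 * Worked example (illustrative numbers): with page_shift == 12,
 * offset == 0 and size == 0x1000, ioba == 0x2000 passes all three checks
 * (4K aligned, and 0x2000 >> 12 == 2 lies inside [0, 0x1000)), while
 * ioba == 0x2001 fails the alignment check immediately.
 */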
1004
1005int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1006{
1007	unsigned long mask = (1UL << page_shift) - 1;
1008
1009	if (gpa & mask)
1010		return -EINVAL;
1011
1012	return 0;
1013}
1014EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1015
1016extern long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1017		struct iommu_table *tbl,
1018		unsigned long entry, unsigned long *hpa,
1019		enum dma_data_direction *direction)
1020{
1021	long ret;
1022	unsigned long size = 0;
1023
1024	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction, false);
1025	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1026			(*direction == DMA_BIDIRECTIONAL)) &&
1027			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1028					&size))
1029		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1030
1031	return ret;
1032}
1033EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1034
1035void iommu_tce_kill(struct iommu_table *tbl,
1036		unsigned long entry, unsigned long pages)
1037{
1038	if (tbl->it_ops->tce_kill)
1039		tbl->it_ops->tce_kill(tbl, entry, pages, false);
1040}
1041EXPORT_SYMBOL_GPL(iommu_tce_kill);
1042
1043int iommu_take_ownership(struct iommu_table *tbl)
1044{
1045	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1046	int ret = 0;
1047
1048	/*
1049	 * VFIO does not control TCE entries allocation and the guest
1050	 * can write new TCEs on top of existing ones so iommu_tce_build()
1051	 * must be able to release old pages. This functionality
1052	 * requires exchange() callback defined so if it is not
1053	 * implemented, we disallow taking ownership over the table.
1054	 */
1055	if (!tbl->it_ops->xchg_no_kill)
1056		return -EINVAL;
1057
1058	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1059	for (i = 0; i < tbl->nr_pools; i++)
1060		spin_lock(&tbl->pools[i].lock);
1061
1062	iommu_table_release_pages(tbl);
1063
1064	if (!bitmap_empty(tbl->it_map, tbl->it_size)) {
1065		pr_err("iommu_tce: it_map is not empty");
1066		ret = -EBUSY;
1067		/* Undo iommu_table_release_pages, i.e. restore bit#0, etc */
1068		iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1069				tbl->it_reserved_end);
1070	} else {
1071		memset(tbl->it_map, 0xff, sz);
1072	}
1073
1074	for (i = 0; i < tbl->nr_pools; i++)
1075		spin_unlock(&tbl->pools[i].lock);
1076	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1077
1078	return ret;
1079}
1080EXPORT_SYMBOL_GPL(iommu_take_ownership);
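/*
 * Ownership sketch: filling it_map with 0xff marks every entry as
 * allocated, which fences the kernel's own DMA allocator off the table
 * while user space (e.g. VFIO) programs TCEs directly;
 * iommu_release_ownership() below undoes this by zeroing the map and
 * re-reserving the protected ranges.
 */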
1081
1082void iommu_release_ownership(struct iommu_table *tbl)
1083{
1084	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1085
1086	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1087	for (i = 0; i < tbl->nr_pools; i++)
1088		spin_lock(&tbl->pools[i].lock);
1089
1090	memset(tbl->it_map, 0, sz);
1091
1092	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1093			tbl->it_reserved_end);
1094
1095	for (i = 0; i < tbl->nr_pools; i++)
1096		spin_unlock(&tbl->pools[i].lock);
1097	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1098}
1099EXPORT_SYMBOL_GPL(iommu_release_ownership);
1100
1101int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1102{
1103	/*
1104	 * The sysfs entries should be populated before
1105	 * binding IOMMU group. If sysfs entries isn't
1106	 * ready, we simply bail.
1107	 */
1108	if (!device_is_registered(dev))
1109		return -ENOENT;
1110
1111	if (device_iommu_mapped(dev)) {
1112		pr_debug("%s: Skipping device %s with iommu group %d\n",
1113			 __func__, dev_name(dev),
1114			 iommu_group_id(dev->iommu_group));
1115		return -EBUSY;
1116	}
1117
1118	pr_debug("%s: Adding %s to iommu group %d\n",
1119		 __func__, dev_name(dev),  iommu_group_id(table_group->group));
1120
1121	return iommu_group_add_device(table_group->group, dev);
1122}
1123EXPORT_SYMBOL_GPL(iommu_add_device);
1124
1125void iommu_del_device(struct device *dev)
1126{
1127	/*
1128	 * Some devices might not have IOMMU table and group
1129	 * and we needn't detach them from the associated
1130	 * IOMMU groups
1131	 */
1132	if (!device_iommu_mapped(dev)) {
1133		pr_debug("iommu_tce: skipping device %s with no tbl\n",
1134			 dev_name(dev));
1135		return;
1136	}
1137
1138	iommu_group_remove_device(dev);
1139}
1140EXPORT_SYMBOL_GPL(iommu_del_device);
1141#endif /* CONFIG_IOMMU_API */
arch/powerpc/kernel/iommu.c (v6.9.4)
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
   4 * 
   5 * Rewrite, cleanup, new allocation schemes, virtual merging: 
   6 * Copyright (C) 2004 Olof Johansson, IBM Corporation
   7 *               and  Ben. Herrenschmidt, IBM Corporation
   8 *
   9 * Dynamic DMA mapping support, bus-independent parts.
  10 */
  11
  12
  13#include <linux/init.h>
  14#include <linux/types.h>
  15#include <linux/slab.h>
  16#include <linux/mm.h>
  17#include <linux/spinlock.h>
  18#include <linux/string.h>
  19#include <linux/dma-mapping.h>
  20#include <linux/bitmap.h>
  21#include <linux/iommu-helper.h>
  22#include <linux/crash_dump.h>
  23#include <linux/hash.h>
  24#include <linux/fault-inject.h>
  25#include <linux/pci.h>
  26#include <linux/iommu.h>
  27#include <linux/sched.h>
  28#include <linux/debugfs.h>
  29#include <asm/io.h>
  30#include <asm/iommu.h>
  31#include <asm/pci-bridge.h>
  32#include <asm/machdep.h>
  33#include <asm/kdump.h>
  34#include <asm/fadump.h>
  35#include <asm/vio.h>
  36#include <asm/tce.h>
  37#include <asm/mmu_context.h>
  38#include <asm/ppc-pci.h>
  39
  40#define DBG(...)
  41
  42#ifdef CONFIG_IOMMU_DEBUGFS
  43static int iommu_debugfs_weight_get(void *data, u64 *val)
  44{
  45	struct iommu_table *tbl = data;
  46	*val = bitmap_weight(tbl->it_map, tbl->it_size);
  47	return 0;
  48}
  49DEFINE_DEBUGFS_ATTRIBUTE(iommu_debugfs_fops_weight, iommu_debugfs_weight_get, NULL, "%llu\n");
  50
  51static void iommu_debugfs_add(struct iommu_table *tbl)
  52{
  53	char name[10];
  54	struct dentry *liobn_entry;
  55
  56	sprintf(name, "%08lx", tbl->it_index);
  57	liobn_entry = debugfs_create_dir(name, iommu_debugfs_dir);
  58
  59	debugfs_create_file_unsafe("weight", 0400, liobn_entry, tbl, &iommu_debugfs_fops_weight);
  60	debugfs_create_ulong("it_size", 0400, liobn_entry, &tbl->it_size);
  61	debugfs_create_ulong("it_page_shift", 0400, liobn_entry, &tbl->it_page_shift);
  62	debugfs_create_ulong("it_reserved_start", 0400, liobn_entry, &tbl->it_reserved_start);
  63	debugfs_create_ulong("it_reserved_end", 0400, liobn_entry, &tbl->it_reserved_end);
  64	debugfs_create_ulong("it_indirect_levels", 0400, liobn_entry, &tbl->it_indirect_levels);
  65	debugfs_create_ulong("it_level_size", 0400, liobn_entry, &tbl->it_level_size);
  66}
  67
  68static void iommu_debugfs_del(struct iommu_table *tbl)
  69{
  70	char name[10];
  71
  72	sprintf(name, "%08lx", tbl->it_index);
  73	debugfs_lookup_and_remove(name, iommu_debugfs_dir);
  74}
  75#else
  76static void iommu_debugfs_add(struct iommu_table *tbl){}
  77static void iommu_debugfs_del(struct iommu_table *tbl){}
  78#endif
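/*
 * Resulting layout sketch (assuming debugfs is mounted in the usual
 * place and iommu_debugfs_dir is the powerpc "iommu" directory; the
 * per-table directory name is it_index printed as eight hex digits):
 *
 *	/sys/kernel/debug/iommu/<it_index>/weight
 *	/sys/kernel/debug/iommu/<it_index>/it_size
 *	/sys/kernel/debug/iommu/<it_index>/it_page_shift
 *	...
 */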
  79
  80static int novmerge;
  81
  82static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
  83
  84static int __init setup_iommu(char *str)
  85{
  86	if (!strcmp(str, "novmerge"))
  87		novmerge = 1;
  88	else if (!strcmp(str, "vmerge"))
  89		novmerge = 0;
  90	return 1;
  91}
  92
  93__setup("iommu=", setup_iommu);
  94
  95static DEFINE_PER_CPU(unsigned int, iommu_pool_hash);
  96
  97/*
  98 * We precalculate the hash to avoid doing it on every allocation.
  99 *
 100 * The hash is important to spread CPUs across all the pools. For example,
 101 * on a POWER7 with 4 way SMT we want interrupts on the primary threads and
 102 * with 4 pools all primary threads would map to the same pool.
 103 */
 104static int __init setup_iommu_pool_hash(void)
 105{
 106	unsigned int i;
 107
 108	for_each_possible_cpu(i)
 109		per_cpu(iommu_pool_hash, i) = hash_32(i, IOMMU_POOL_HASHBITS);
 110
 111	return 0;
 112}
 113subsys_initcall(setup_iommu_pool_hash);
 114
 115#ifdef CONFIG_FAIL_IOMMU
 116
 117static DECLARE_FAULT_ATTR(fail_iommu);
 118
 119static int __init setup_fail_iommu(char *str)
 120{
 121	return setup_fault_attr(&fail_iommu, str);
 122}
 123__setup("fail_iommu=", setup_fail_iommu);
 124
 125static bool should_fail_iommu(struct device *dev)
 126{
 127	return dev->archdata.fail_iommu && should_fail(&fail_iommu, 1);
 128}
 129
 130static int __init fail_iommu_debugfs(void)
 131{
 132	struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
 133						       NULL, &fail_iommu);
 134
 135	return PTR_ERR_OR_ZERO(dir);
 136}
 137late_initcall(fail_iommu_debugfs);
 138
 139static ssize_t fail_iommu_show(struct device *dev,
 140			       struct device_attribute *attr, char *buf)
 141{
 142	return sprintf(buf, "%d\n", dev->archdata.fail_iommu);
 143}
 144
 145static ssize_t fail_iommu_store(struct device *dev,
 146				struct device_attribute *attr, const char *buf,
 147				size_t count)
 148{
 149	int i;
 150
 151	if (count > 0 && sscanf(buf, "%d", &i) > 0)
 152		dev->archdata.fail_iommu = (i == 0) ? 0 : 1;
 153
 154	return count;
 155}
 156
 157static DEVICE_ATTR_RW(fail_iommu);
 158
 159static int fail_iommu_bus_notify(struct notifier_block *nb,
 160				 unsigned long action, void *data)
 161{
 162	struct device *dev = data;
 163
 164	if (action == BUS_NOTIFY_ADD_DEVICE) {
 165		if (device_create_file(dev, &dev_attr_fail_iommu))
 166			pr_warn("Unable to create IOMMU fault injection sysfs "
 167				"entries\n");
 168	} else if (action == BUS_NOTIFY_DEL_DEVICE) {
 169		device_remove_file(dev, &dev_attr_fail_iommu);
 170	}
 171
 172	return 0;
 173}
 174
 175/*
 176 * PCI and VIO buses need separate notifier_block structs, since they're linked
 177 * list nodes.  Sharing a notifier_block would mean that any notifiers later
 178 * registered for PCI buses would also get called by VIO buses and vice versa.
 179 */
 180static struct notifier_block fail_iommu_pci_bus_notifier = {
 181	.notifier_call = fail_iommu_bus_notify
 182};
 183
 184#ifdef CONFIG_IBMVIO
 185static struct notifier_block fail_iommu_vio_bus_notifier = {
 186	.notifier_call = fail_iommu_bus_notify
 187};
 188#endif
 189
 190static int __init fail_iommu_setup(void)
 191{
 192#ifdef CONFIG_PCI
 193	bus_register_notifier(&pci_bus_type, &fail_iommu_pci_bus_notifier);
 194#endif
 195#ifdef CONFIG_IBMVIO
 196	bus_register_notifier(&vio_bus_type, &fail_iommu_vio_bus_notifier);
 197#endif
 198
 199	return 0;
 200}
 201/*
 202 * Must execute after PCI and VIO subsystem have initialised but before
 203 * devices are probed.
 204 */
 205arch_initcall(fail_iommu_setup);
 206#else
 207static inline bool should_fail_iommu(struct device *dev)
 208{
 209	return false;
 210}
 211#endif
 212
 213static unsigned long iommu_range_alloc(struct device *dev,
 214				       struct iommu_table *tbl,
 215                                       unsigned long npages,
 216                                       unsigned long *handle,
 217                                       unsigned long mask,
 218                                       unsigned int align_order)
 219{ 
 220	unsigned long n, end, start;
 221	unsigned long limit;
 222	int largealloc = npages > 15;
 223	int pass = 0;
 224	unsigned long align_mask;
 225	unsigned long flags;
 226	unsigned int pool_nr;
 227	struct iommu_pool *pool;
 228
 229	align_mask = (1ull << align_order) - 1;
 230
 231	/* This allocator was derived from x86_64's bit string search */
 232
 233	/* Sanity check */
 234	if (unlikely(npages == 0)) {
 235		if (printk_ratelimit())
 236			WARN_ON(1);
 237		return DMA_MAPPING_ERROR;
 238	}
 239
 240	if (should_fail_iommu(dev))
 241		return DMA_MAPPING_ERROR;
 242
 243	/*
 244	 * We don't need to disable preemption here because any CPU can
 245	 * safely use any IOMMU pool.
 246	 */
 247	pool_nr = raw_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
 248
 249	if (largealloc)
 250		pool = &(tbl->large_pool);
 251	else
 252		pool = &(tbl->pools[pool_nr]);
 253
 254	spin_lock_irqsave(&(pool->lock), flags);
 255
 256again:
 257	if ((pass == 0) && handle && *handle &&
 258	    (*handle >= pool->start) && (*handle < pool->end))
 259		start = *handle;
 260	else
 261		start = pool->hint;
 262
 263	limit = pool->end;
 264
 265	/* The case below can happen if we have a small segment appended
 266	 * to a large, or when the previous alloc was at the very end of
 267	 * the available space. If so, go back to the initial start.
 268	 */
 269	if (start >= limit)
 270		start = pool->start;
 271
 272	if (limit + tbl->it_offset > mask) {
 273		limit = mask - tbl->it_offset + 1;
 274		/* If we're constrained on address range, first try
 275		 * at the masked hint to avoid O(n) search complexity,
 276		 * but on second pass, start at 0 in pool 0.
 277		 */
 278		if ((start & mask) >= limit || pass > 0) {
 279			spin_unlock(&(pool->lock));
 280			pool = &(tbl->pools[0]);
 281			spin_lock(&(pool->lock));
 282			start = pool->start;
 283		} else {
 284			start &= mask;
 285		}
 286	}
 287
 288	n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,
 289			dma_get_seg_boundary_nr_pages(dev, tbl->it_page_shift),
 290			align_mask);
 291	if (n == -1) {
 292		if (likely(pass == 0)) {
 293			/* First try the pool from the start */
 294			pool->hint = pool->start;
 295			pass++;
 296			goto again;
 297
 298		} else if (pass <= tbl->nr_pools) {
 299			/* Now try scanning all the other pools */
 300			spin_unlock(&(pool->lock));
 301			pool_nr = (pool_nr + 1) & (tbl->nr_pools - 1);
 302			pool = &tbl->pools[pool_nr];
 303			spin_lock(&(pool->lock));
 304			pool->hint = pool->start;
 305			pass++;
 306			goto again;
 307
 308		} else if (pass == tbl->nr_pools + 1) {
 309			/* Last resort: try largepool */
 310			spin_unlock(&pool->lock);
 311			pool = &tbl->large_pool;
 312			spin_lock(&pool->lock);
 313			pool->hint = pool->start;
 314			pass++;
 315			goto again;
 316
 317		} else {
 318			/* Give up */
 319			spin_unlock_irqrestore(&(pool->lock), flags);
 320			return DMA_MAPPING_ERROR;
 321		}
 322	}
 323
 324	end = n + npages;
 325
 326	/* Bump the hint to a new block for small allocs. */
 327	if (largealloc) {
 328		/* Don't bump to new block to avoid fragmentation */
 329		pool->hint = end;
 330	} else {
 331		/* Overflow will be taken care of at the next allocation */
 332		pool->hint = (end + tbl->it_blocksize - 1) &
 333		                ~(tbl->it_blocksize - 1);
 334	}
 335
 336	/* Update handle for SG allocations */
 337	if (handle)
 338		*handle = end;
 339
 340	spin_unlock_irqrestore(&(pool->lock), flags);
 341
 342	return n;
 343}
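/*
 * Note the extra pass compared with the v5.9 version of this function
 * above: once every small pool has been scanned, pass nr_pools + 1
 * retries the large pool as a last resort before giving up.
 */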
 344
 345static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
 346			      void *page, unsigned int npages,
 347			      enum dma_data_direction direction,
 348			      unsigned long mask, unsigned int align_order,
 349			      unsigned long attrs)
 350{
 351	unsigned long entry;
 352	dma_addr_t ret = DMA_MAPPING_ERROR;
 353	int build_fail;
 354
 355	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);
 356
 357	if (unlikely(entry == DMA_MAPPING_ERROR))
 358		return DMA_MAPPING_ERROR;
 359
 360	entry += tbl->it_offset;	/* Offset into real TCE table */
 361	ret = entry << tbl->it_page_shift;	/* Set the return dma address */
 362
 363	/* Put the TCEs in the HW table */
 364	build_fail = tbl->it_ops->set(tbl, entry, npages,
 365				      (unsigned long)page &
 366				      IOMMU_PAGE_MASK(tbl), direction, attrs);
 367
 368	/* tbl->it_ops->set() only returns non-zero for transient errors.
 369	 * Clean up the table bitmap in this case and return
 370	 * DMA_MAPPING_ERROR. For all other errors the functionality is
 371	 * not altered.
 372	 */
 373	if (unlikely(build_fail)) {
 374		__iommu_free(tbl, ret, npages);
 375		return DMA_MAPPING_ERROR;
 376	}
 377
 378	/* Flush/invalidate TLB caches if necessary */
 379	if (tbl->it_ops->flush)
 380		tbl->it_ops->flush(tbl);
 381
 382	/* Make sure updates are seen by hardware */
 383	mb();
 384
 385	return ret;
 386}
 387
 388static bool iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr,
 389			     unsigned int npages)
 390{
 391	unsigned long entry, free_entry;
 392
 393	entry = dma_addr >> tbl->it_page_shift;
 394	free_entry = entry - tbl->it_offset;
 395
 396	if (((free_entry + npages) > tbl->it_size) ||
 397	    (entry < tbl->it_offset)) {
 398		if (printk_ratelimit()) {
 399			printk(KERN_INFO "iommu_free: invalid entry\n");
 400			printk(KERN_INFO "\tentry     = 0x%lx\n", entry); 
 401			printk(KERN_INFO "\tdma_addr  = 0x%llx\n", (u64)dma_addr);
 402			printk(KERN_INFO "\tTable     = 0x%llx\n", (u64)tbl);
 403			printk(KERN_INFO "\tbus#      = 0x%llx\n", (u64)tbl->it_busno);
 404			printk(KERN_INFO "\tsize      = 0x%llx\n", (u64)tbl->it_size);
 405			printk(KERN_INFO "\tstartOff  = 0x%llx\n", (u64)tbl->it_offset);
 406			printk(KERN_INFO "\tindex     = 0x%llx\n", (u64)tbl->it_index);
 407			WARN_ON(1);
 408		}
 409
 410		return false;
 411	}
 412
 413	return true;
 414}
 415
 416static struct iommu_pool *get_pool(struct iommu_table *tbl,
 417				   unsigned long entry)
 418{
 419	struct iommu_pool *p;
 420	unsigned long largepool_start = tbl->large_pool.start;
 421
 422	/* The large pool is the last pool at the top of the table */
 423	if (entry >= largepool_start) {
 424		p = &tbl->large_pool;
 425	} else {
 426		unsigned int pool_nr = entry / tbl->poolsize;
 427
 428		BUG_ON(pool_nr > tbl->nr_pools);
 429		p = &tbl->pools[pool_nr];
 430	}
 431
 432	return p;
 433}
 434
 435static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 436			 unsigned int npages)
 437{
 438	unsigned long entry, free_entry;
 439	unsigned long flags;
 440	struct iommu_pool *pool;
 441
 442	entry = dma_addr >> tbl->it_page_shift;
 443	free_entry = entry - tbl->it_offset;
 444
 445	pool = get_pool(tbl, free_entry);
 446
 447	if (!iommu_free_check(tbl, dma_addr, npages))
 448		return;
 449
 450	tbl->it_ops->clear(tbl, entry, npages);
 451
 452	spin_lock_irqsave(&(pool->lock), flags);
 453	bitmap_clear(tbl->it_map, free_entry, npages);
 454	spin_unlock_irqrestore(&(pool->lock), flags);
 455}
 456
 457static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
 458		unsigned int npages)
 459{
 460	__iommu_free(tbl, dma_addr, npages);
 461
 462	/* Make sure TLB cache is flushed if the HW needs it. We do
 463	 * not do an mb() here on purpose, it is not needed on any of
 464	 * the current platforms.
 465	 */
 466	if (tbl->it_ops->flush)
 467		tbl->it_ops->flush(tbl);
 468}
 469
 470int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 471		     struct scatterlist *sglist, int nelems,
 472		     unsigned long mask, enum dma_data_direction direction,
 473		     unsigned long attrs)
 474{
 475	dma_addr_t dma_next = 0, dma_addr;
 476	struct scatterlist *s, *outs, *segstart;
 477	int outcount, incount, i, build_fail = 0;
 478	unsigned int align;
 479	unsigned long handle;
 480	unsigned int max_seg_size;
 481
 482	BUG_ON(direction == DMA_NONE);
 483
 484	if ((nelems == 0) || !tbl)
 485		return -EINVAL;
 486
 487	outs = s = segstart = &sglist[0];
 488	outcount = 1;
 489	incount = nelems;
 490	handle = 0;
 491
 492	/* Init first segment length for backout at failure */
 493	outs->dma_length = 0;
 494
 495	DBG("sg mapping %d elements:\n", nelems);
 496
 497	max_seg_size = dma_get_max_seg_size(dev);
 498	for_each_sg(sglist, s, nelems, i) {
 499		unsigned long vaddr, npages, entry, slen;
 500
 501		slen = s->length;
 502		/* Sanity check */
 503		if (slen == 0) {
 504			dma_next = 0;
 505			continue;
 506		}
 507		/* Allocate iommu entries for that segment */
 508		vaddr = (unsigned long) sg_virt(s);
 509		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
 510		align = 0;
 511		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
 512		    (vaddr & ~PAGE_MASK) == 0)
 513			align = PAGE_SHIFT - tbl->it_page_shift;
 514		entry = iommu_range_alloc(dev, tbl, npages, &handle,
 515					  mask >> tbl->it_page_shift, align);
 516
 517		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);
 518
 519		/* Handle failure */
 520		if (unlikely(entry == DMA_MAPPING_ERROR)) {
 521			if (!(attrs & DMA_ATTR_NO_WARN) &&
 522			    printk_ratelimit())
 523				dev_info(dev, "iommu_alloc failed, tbl %p "
 524					 "vaddr %lx npages %lu\n", tbl, vaddr,
 525					 npages);
 526			goto failure;
 527		}
 528
 529		/* Convert entry to a dma_addr_t */
 530		entry += tbl->it_offset;
 531		dma_addr = entry << tbl->it_page_shift;
 532		dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
 533
 534		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 535			    npages, entry, dma_addr);
 536
 537		/* Insert into HW table */
 538		build_fail = tbl->it_ops->set(tbl, entry, npages,
 539					      vaddr & IOMMU_PAGE_MASK(tbl),
 540					      direction, attrs);
 541		if(unlikely(build_fail))
 542			goto failure;
 543
 544		/* If we are in an open segment, try merging */
 545		if (segstart != s) {
 546			DBG("  - trying merge...\n");
 547			/* We cannot merge if:
 548			 * - allocated dma_addr isn't contiguous to previous allocation
 549			 */
 550			if (novmerge || (dma_addr != dma_next) ||
 551			    (outs->dma_length + s->length > max_seg_size)) {
 552				/* Can't merge: create a new segment */
 553				segstart = s;
 554				outcount++;
 555				outs = sg_next(outs);
 556				DBG("    can't merge, new segment.\n");
 557			} else {
 558				outs->dma_length += s->length;
 559				DBG("    merged, new len: %ux\n", outs->dma_length);
 560			}
 561		}
 562
 563		if (segstart == s) {
 564			/* This is a new segment, fill entries */
 565			DBG("  - filling new segment.\n");
 566			outs->dma_address = dma_addr;
 567			outs->dma_length = slen;
 568		}
 569
 570		/* Calculate next page pointer for contiguous check */
 571		dma_next = dma_addr + slen;
 572
 573		DBG("  - dma next is: %lx\n", dma_next);
 574	}
 575
 576	/* Flush/invalidate TLB caches if necessary */
 577	if (tbl->it_ops->flush)
 578		tbl->it_ops->flush(tbl);
 579
 580	DBG("mapped %d elements:\n", outcount);
 581
 582	/* For the sake of ppc_iommu_unmap_sg, we clear out the length in the
 583	 * next entry of the sglist if we didn't fill the list completely
 584	 */
 585	if (outcount < incount) {
 586		outs = sg_next(outs);
 587		outs->dma_length = 0;
 588	}
 589
 590	/* Make sure updates are seen by hardware */
 591	mb();
 592
 593	return outcount;
 594
 595 failure:
 596	for_each_sg(sglist, s, nelems, i) {
 597		if (s->dma_length != 0) {
 598			unsigned long vaddr, npages;
 599
 600			vaddr = s->dma_address & IOMMU_PAGE_MASK(tbl);
 601			npages = iommu_num_pages(s->dma_address, s->dma_length,
 602						 IOMMU_PAGE_SIZE(tbl));
 603			__iommu_free(tbl, vaddr, npages);
 604			s->dma_length = 0;
 605		}
 606		if (s == outs)
 607			break;
 608	}
 609	return -EIO;
 610}
 611
 612
 613void ppc_iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 614			int nelems, enum dma_data_direction direction,
 615			unsigned long attrs)
 616{
 617	struct scatterlist *sg;
 618
 619	BUG_ON(direction == DMA_NONE);
 620
 621	if (!tbl)
 622		return;
 623
 624	sg = sglist;
 625	while (nelems--) {
 626		unsigned int npages;
 627		dma_addr_t dma_handle = sg->dma_address;
 628
 629		if (sg->dma_length == 0)
 630			break;
 631		npages = iommu_num_pages(dma_handle, sg->dma_length,
 632					 IOMMU_PAGE_SIZE(tbl));
 633		__iommu_free(tbl, dma_handle, npages);
 634		sg = sg_next(sg);
 635	}
 636
 637	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
 638	 * do not do an mb() here, the affected platforms do not need it
 639	 * when freeing.
 640	 */
 641	if (tbl->it_ops->flush)
 642		tbl->it_ops->flush(tbl);
 643}
 644
 645static void iommu_table_clear(struct iommu_table *tbl)
 646{
 647	/*
 648	 * In case of firmware assisted dump system goes through clean
 649	 * reboot process at the time of system crash. Hence it's safe to
 650	 * clear the TCE entries if firmware assisted dump is active.
 651	 */
 652	if (!is_kdump_kernel() || is_fadump_active()) {
 653		/* Clear the table in case firmware left allocations in it */
 654		tbl->it_ops->clear(tbl, tbl->it_offset, tbl->it_size);
 655		return;
 656	}
 657
 658#ifdef CONFIG_CRASH_DUMP
 659	if (tbl->it_ops->get) {
 660		unsigned long index, tceval, tcecount = 0;
 661
 662		/* Reserve the existing mappings left by the first kernel. */
 663		for (index = 0; index < tbl->it_size; index++) {
 664			tceval = tbl->it_ops->get(tbl, index + tbl->it_offset);
 665			/*
 666			 * Freed TCE entry contains 0x7fffffffffffffff on JS20
 667			 */
 668			if (tceval && (tceval != 0x7fffffffffffffffUL)) {
 669				__set_bit(index, tbl->it_map);
 670				tcecount++;
 671			}
 672		}
 673
 674		if ((tbl->it_size - tcecount) < KDUMP_MIN_TCE_ENTRIES) {
 675			printk(KERN_WARNING "TCE table is full; freeing ");
 676			printk(KERN_WARNING "%d entries for the kdump boot\n",
 677				KDUMP_MIN_TCE_ENTRIES);
 678			for (index = tbl->it_size - KDUMP_MIN_TCE_ENTRIES;
 679				index < tbl->it_size; index++)
 680				__clear_bit(index, tbl->it_map);
 681		}
 682	}
 683#endif
 684}
 685
 686static void iommu_table_reserve_pages(struct iommu_table *tbl,
 687		unsigned long res_start, unsigned long res_end)
 688{
 689	int i;
 690
 691	WARN_ON_ONCE(res_end < res_start);
 692	/*
 693	 * Reserve page 0 so it will not be used for any mappings.
 694	 * This avoids buggy drivers that consider page 0 to be invalid
 695	 * to crash the machine or even lose data.
 696	 */
 697	if (tbl->it_offset == 0)
 698		set_bit(0, tbl->it_map);
 699
 700	if (res_start < tbl->it_offset)
 701		res_start = tbl->it_offset;
 702
 703	if (res_end > (tbl->it_offset + tbl->it_size))
 704		res_end = tbl->it_offset + tbl->it_size;
 705
 706	/* Check if res_start..res_end is a valid range in the table */
 707	if (res_start >= res_end) {
 708		tbl->it_reserved_start = tbl->it_offset;
 709		tbl->it_reserved_end = tbl->it_offset;
 710		return;
 711	}
 712
 713	tbl->it_reserved_start = res_start;
 714	tbl->it_reserved_end = res_end;
 715
 716	for (i = tbl->it_reserved_start; i < tbl->it_reserved_end; ++i)
 717		set_bit(i - tbl->it_offset, tbl->it_map);
 718}
 719
 720/*
 721 * Build a iommu_table structure.  This contains a bit map which
 722 * is used to manage allocation of the tce space.
 723 */
 724struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid,
 725		unsigned long res_start, unsigned long res_end)
 726{
 727	unsigned long sz;
 728	static int welcomed = 0;
 729	unsigned int i;
 730	struct iommu_pool *p;
 731
 732	BUG_ON(!tbl->it_ops);
 733
 734	/* number of bytes needed for the bitmap */
 735	sz = BITS_TO_LONGS(tbl->it_size) * sizeof(unsigned long);
 736
 737	tbl->it_map = vzalloc_node(sz, nid);
 738	if (!tbl->it_map) {
 739		pr_err("%s: Can't allocate %ld bytes\n", __func__, sz);
 740		return NULL;
 741	}
 742
 743	iommu_table_reserve_pages(tbl, res_start, res_end);
 744
 745	/* We only split the IOMMU table if we have 1GB or more of space */
 746	if ((tbl->it_size << tbl->it_page_shift) >= (1UL * 1024 * 1024 * 1024))
 747		tbl->nr_pools = IOMMU_NR_POOLS;
 748	else
 749		tbl->nr_pools = 1;
 750
 751	/* We reserve the top 1/4 of the table for large allocations */
 752	tbl->poolsize = (tbl->it_size * 3 / 4) / tbl->nr_pools;
 753
 754	for (i = 0; i < tbl->nr_pools; i++) {
 755		p = &tbl->pools[i];
 756		spin_lock_init(&(p->lock));
 757		p->start = tbl->poolsize * i;
 758		p->hint = p->start;
 759		p->end = p->start + tbl->poolsize;
 760	}
 761
 762	p = &tbl->large_pool;
 763	spin_lock_init(&(p->lock));
 764	p->start = tbl->poolsize * i;
 765	p->hint = p->start;
 766	p->end = tbl->it_size;
 767
 768	iommu_table_clear(tbl);
 769
 770	if (!welcomed) {
 771		printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
 772		       novmerge ? "disabled" : "enabled");
 773		welcomed = 1;
 774	}
 775
 776	iommu_debugfs_add(tbl);
 777
 778	return tbl;
 779}
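
/*
 * Minimal usage sketch (hypothetical platform code; the geometry values and
 * my_tce_ops are illustrative). The caller fills in the table geometry and
 * ops before calling iommu_init_table(), which allocates the bitmap and
 * carves the space into pools:
 *
 *	tbl->it_offset = 0;				// first TCE index
 *	tbl->it_page_shift = IOMMU_PAGE_SHIFT_4K;	// 4K IOMMU pages
 *	tbl->it_size = SZ_2G >> tbl->it_page_shift;	// number of TCEs
 *	tbl->it_ops = &my_tce_ops;			// platform TCE ops
 *	if (!iommu_init_table(tbl, nid, 0, 0))		// no reserved range
 *		return -ENOMEM;
 */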
 780
 781bool iommu_table_in_use(struct iommu_table *tbl)
 782{
 783	unsigned long start = 0, end;
 784
 785	/* ignore reserved bit0 */
 786	if (tbl->it_offset == 0)
 787		start = 1;
 788
 789	/* Simple case with no reserved MMIO32 region */
 790	if (!tbl->it_reserved_start && !tbl->it_reserved_end)
 791		return find_next_bit(tbl->it_map, tbl->it_size, start) != tbl->it_size;
 792
 793	end = tbl->it_reserved_start - tbl->it_offset;
 794	if (find_next_bit(tbl->it_map, end, start) != end)
 795		return true;
 796
 797	start = tbl->it_reserved_end - tbl->it_offset;
 798	end = tbl->it_size;
 799	return find_next_bit(tbl->it_map, end, start) != end;
 800}
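
/*
 * Illustrative note: with a reserved MMIO32 window in the middle of the
 * table, the code above scans the bits below the window and above it
 * separately, so the permanently set reserved bits never make an otherwise
 * empty table count as "in use".
 */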
 801
 802static void iommu_table_free(struct kref *kref)
 803{
 804	struct iommu_table *tbl;
 805
 806	tbl = container_of(kref, struct iommu_table, it_kref);
 807
 808	if (tbl->it_ops->free)
 809		tbl->it_ops->free(tbl);
 810
 811	if (!tbl->it_map) {
 812		kfree(tbl);
 813		return;
 814	}
 815
 816	iommu_debugfs_del(tbl);
 817
 818	/* verify that table contains no entries */
 819	if (iommu_table_in_use(tbl))
 820		pr_warn("%s: Unexpected TCEs\n", __func__);
 821
 822	/* free bitmap */
 823	vfree(tbl->it_map);
 824
 825	/* free table */
 826	kfree(tbl);
 827}
 828
 829struct iommu_table *iommu_tce_table_get(struct iommu_table *tbl)
 830{
 831	if (kref_get_unless_zero(&tbl->it_kref))
 832		return tbl;
 833
 834	return NULL;
 835}
 836EXPORT_SYMBOL_GPL(iommu_tce_table_get);
 837
 838int iommu_tce_table_put(struct iommu_table *tbl)
 839{
 840	if (WARN_ON(!tbl))
 841		return 0;
 842
 843	return kref_put(&tbl->it_kref, iommu_table_free);
 844}
 845EXPORT_SYMBOL_GPL(iommu_tce_table_put);
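
/*
 * Reference-counting sketch (illustrative): a user holds a reference while
 * the table is in use and drops it when done; iommu_table_free() runs on
 * the final put.
 *
 *	struct iommu_table *t = iommu_tce_table_get(tbl);
 *
 *	if (t) {
 *		... program TCEs through t ...
 *		iommu_tce_table_put(t);
 *	}
 */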
 846
  847/* Creates TCEs for a user-provided buffer.  The user buffer must be
 848 * contiguous real kernel storage (not vmalloc).  The address passed here
 849 * comprises a page address and offset into that page. The dma_addr_t
 850 * returned will point to the same byte within the page as was passed in.
 851 */
 852dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
 853			  struct page *page, unsigned long offset, size_t size,
 854			  unsigned long mask, enum dma_data_direction direction,
 855			  unsigned long attrs)
 856{
 857	dma_addr_t dma_handle = DMA_MAPPING_ERROR;
 858	void *vaddr;
 859	unsigned long uaddr;
 860	unsigned int npages, align;
 861
 862	BUG_ON(direction == DMA_NONE);
 863
 864	vaddr = page_address(page) + offset;
 865	uaddr = (unsigned long)vaddr;
 866
 867	if (tbl) {
 868		npages = iommu_num_pages(uaddr, size, IOMMU_PAGE_SIZE(tbl));
 869		align = 0;
 870		if (tbl->it_page_shift < PAGE_SHIFT && size >= PAGE_SIZE &&
 871		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
 872			align = PAGE_SHIFT - tbl->it_page_shift;
 873
 874		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
 875					 mask >> tbl->it_page_shift, align,
 876					 attrs);
 877		if (dma_handle == DMA_MAPPING_ERROR) {
 878			if (!(attrs & DMA_ATTR_NO_WARN) &&
 879			    printk_ratelimit())  {
 880				dev_info(dev, "iommu_alloc failed, tbl %p "
 881					 "vaddr %p npages %d\n", tbl, vaddr,
 882					 npages);
 883			}
 884		} else
 885			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK(tbl));
 886	}
 887
 888	return dma_handle;
 889}
 890
 891void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
 892		      size_t size, enum dma_data_direction direction,
 893		      unsigned long attrs)
 894{
 895	unsigned int npages;
 896
 897	BUG_ON(direction == DMA_NONE);
 898
 899	if (tbl) {
 900		npages = iommu_num_pages(dma_handle, size,
 901					 IOMMU_PAGE_SIZE(tbl));
 902		iommu_free(tbl, dma_handle, npages);
 903	}
 904}
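
/*
 * Map/unmap pairing sketch (illustrative; the device, page and size are
 * hypothetical). The byte offset within the IOMMU page is preserved in the
 * returned handle, and the same handle/size pair is passed back to unmap:
 *
 *	dma_addr_t h = iommu_map_page(dev, tbl, page, 0, PAGE_SIZE,
 *				      dma_get_mask(dev), DMA_TO_DEVICE, 0);
 *
 *	if (h == DMA_MAPPING_ERROR)
 *		return -EIO;
 *	...
 *	iommu_unmap_page(tbl, h, PAGE_SIZE, DMA_TO_DEVICE, 0);
 */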
 905
 906/* Allocates a contiguous real buffer and creates mappings over it.
 907 * Returns the virtual address of the buffer and sets dma_handle
  908 * to the DMA address (mapping) of the first page.
 909 */
 910void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 911			   size_t size,	dma_addr_t *dma_handle,
 912			   unsigned long mask, gfp_t flag, int node)
 913{
 914	void *ret = NULL;
 915	dma_addr_t mapping;
 916	unsigned int order;
 917	unsigned int nio_pages, io_order;
 918	struct page *page;
 919	int tcesize = (1 << tbl->it_page_shift);
 920
 921	size = PAGE_ALIGN(size);
 922	order = get_order(size);
 923
  924	/*
 925	 * Client asked for way too much space.  This is checked later
 926	 * anyway.  It is easier to debug here for the drivers than in
 927	 * the tce tables.
 928	 */
 929	if (order >= IOMAP_MAX_ORDER) {
  930		dev_info(dev, "iommu_alloc_coherent size too large: 0x%lx\n",
 931			 size);
 932		return NULL;
 933	}
 934
 935	if (!tbl)
 936		return NULL;
 937
 938	/* Alloc enough pages (and possibly more) */
 939	page = alloc_pages_node(node, flag, order);
 940	if (!page)
 941		return NULL;
 942	ret = page_address(page);
 943	memset(ret, 0, size);
 944
 945	/* Set up tces to cover the allocated range */
 946	nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
 947
 948	io_order = get_iommu_order(size, tbl);
 949	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 950			      mask >> tbl->it_page_shift, io_order, 0);
 951	if (mapping == DMA_MAPPING_ERROR) {
 952		free_pages((unsigned long)ret, order);
 953		return NULL;
 954	}
 955
 956	*dma_handle = mapping | ((u64)ret & (tcesize - 1));
 957	return ret;
 958}
 959
 960void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 961			 void *vaddr, dma_addr_t dma_handle)
 962{
 963	if (tbl) {
 964		unsigned int nio_pages;
 965
 966		size = PAGE_ALIGN(size);
 967		nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
 968		iommu_free(tbl, dma_handle, nio_pages);
 970		free_pages((unsigned long)vaddr, get_order(size));
 971	}
 972}
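
/*
 * Coherent buffer sketch (illustrative): the returned CPU pointer and DMA
 * handle describe the same physically contiguous buffer, and both are
 * needed to free it again:
 *
 *	dma_addr_t dma;
 *	void *buf = iommu_alloc_coherent(dev, tbl, SZ_64K, &dma,
 *					 dma_get_mask(dev), GFP_KERNEL, nid);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	iommu_free_coherent(tbl, SZ_64K, buf, dma);
 */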
 973
 974unsigned long iommu_direction_to_tce_perm(enum dma_data_direction dir)
 975{
 976	switch (dir) {
 977	case DMA_BIDIRECTIONAL:
 978		return TCE_PCI_READ | TCE_PCI_WRITE;
 979	case DMA_FROM_DEVICE:
 980		return TCE_PCI_WRITE;
 981	case DMA_TO_DEVICE:
 982		return TCE_PCI_READ;
 983	default:
 984		return 0;
 985	}
 986}
 987EXPORT_SYMBOL_GPL(iommu_direction_to_tce_perm);
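
/*
 * Illustrative note: this is the inverse of iommu_tce_direction() below.
 * For example, DMA_TO_DEVICE (the device reads memory) maps to TCE_PCI_READ
 * and DMA_FROM_DEVICE (the device writes memory) maps to TCE_PCI_WRITE.
 */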
 988
 989#ifdef CONFIG_IOMMU_API
 990/*
 991 * SPAPR TCE API
 992 */
 993static void group_release(void *iommu_data)
 994{
 995	struct iommu_table_group *table_group = iommu_data;
 996
 997	table_group->group = NULL;
 998}
 999
1000void iommu_register_group(struct iommu_table_group *table_group,
1001		int pci_domain_number, unsigned long pe_num)
1002{
1003	struct iommu_group *grp;
1004	char *name;
1005
1006	grp = iommu_group_alloc();
1007	if (IS_ERR(grp)) {
1008		pr_warn("powerpc iommu api: cannot create new group, err=%ld\n",
1009				PTR_ERR(grp));
1010		return;
1011	}
1012	table_group->group = grp;
1013	iommu_group_set_iommudata(grp, table_group, group_release);
1014	name = kasprintf(GFP_KERNEL, "domain%d-pe%lx",
1015			pci_domain_number, pe_num);
1016	if (!name)
1017		return;
1018	iommu_group_set_name(grp, name);
1019	kfree(name);
1020}
1021
1022enum dma_data_direction iommu_tce_direction(unsigned long tce)
1023{
1024	if ((tce & TCE_PCI_READ) && (tce & TCE_PCI_WRITE))
1025		return DMA_BIDIRECTIONAL;
1026	else if (tce & TCE_PCI_READ)
1027		return DMA_TO_DEVICE;
1028	else if (tce & TCE_PCI_WRITE)
1029		return DMA_FROM_DEVICE;
1030	else
1031		return DMA_NONE;
1032}
1033EXPORT_SYMBOL_GPL(iommu_tce_direction);
1034
1035void iommu_flush_tce(struct iommu_table *tbl)
1036{
1037	/* Flush/invalidate TLB caches if necessary */
1038	if (tbl->it_ops->flush)
1039		tbl->it_ops->flush(tbl);
1040
1041	/* Make sure updates are seen by hardware */
1042	mb();
1043}
1044EXPORT_SYMBOL_GPL(iommu_flush_tce);
1045
1046int iommu_tce_check_ioba(unsigned long page_shift,
1047		unsigned long offset, unsigned long size,
1048		unsigned long ioba, unsigned long npages)
1049{
1050	unsigned long mask = (1UL << page_shift) - 1;
1051
1052	if (ioba & mask)
1053		return -EINVAL;
1054
1055	ioba >>= page_shift;
1056	if (ioba < offset)
1057		return -EINVAL;
1058
 1059	if ((ioba + npages) > (offset + size))
1060		return -EINVAL;
1061
1062	return 0;
1063}
1064EXPORT_SYMBOL_GPL(iommu_tce_check_ioba);
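
/*
 * Worked example (illustrative values): with page_shift = 12, offset = 0
 * and size = 0x80000, ioba = 0x1000 with npages = 1 passes (it is
 * 4K-aligned and page 1 lies inside the window), while ioba = 0x1234 fails
 * the alignment check.
 */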
1065
1066int iommu_tce_check_gpa(unsigned long page_shift, unsigned long gpa)
1067{
1068	unsigned long mask = (1UL << page_shift) - 1;
1069
1070	if (gpa & mask)
1071		return -EINVAL;
1072
1073	return 0;
1074}
1075EXPORT_SYMBOL_GPL(iommu_tce_check_gpa);
1076
1077long iommu_tce_xchg_no_kill(struct mm_struct *mm,
1078			    struct iommu_table *tbl,
1079			    unsigned long entry, unsigned long *hpa,
1080			    enum dma_data_direction *direction)
1081{
1082	long ret;
1083	unsigned long size = 0;
1084
1085	ret = tbl->it_ops->xchg_no_kill(tbl, entry, hpa, direction);
1086	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
1087			(*direction == DMA_BIDIRECTIONAL)) &&
1088			!mm_iommu_is_devmem(mm, *hpa, tbl->it_page_shift,
1089					&size))
1090		SetPageDirty(pfn_to_page(*hpa >> PAGE_SHIFT));
1091
1092	return ret;
1093}
1094EXPORT_SYMBOL_GPL(iommu_tce_xchg_no_kill);
1095
1096void iommu_tce_kill(struct iommu_table *tbl,
1097		unsigned long entry, unsigned long pages)
1098{
1099	if (tbl->it_ops->tce_kill)
1100		tbl->it_ops->tce_kill(tbl, entry, pages);
1101}
1102EXPORT_SYMBOL_GPL(iommu_tce_kill);
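
/*
 * Usage sketch (illustrative): TCE updates are batched with
 * iommu_tce_xchg_no_kill(), which swaps in the new mapping and returns the
 * old one through hpa/direction, and the TCE cache is then invalidated once
 * for the whole range:
 *
 *	for (i = 0; i < npages; i++)
 *		ret = iommu_tce_xchg_no_kill(mm, tbl, entry + i, &hpa, &dir);
 *	iommu_tce_kill(tbl, entry, npages);
 */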
1103
1104#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1105static int iommu_take_ownership(struct iommu_table *tbl)
1106{
1107	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1108	int ret = 0;
1109
 1110	/*
 1111	 * VFIO does not control TCE entry allocation, and the guest
 1112	 * can write new TCEs on top of existing ones, so iommu_tce_build()
 1113	 * must be able to release old pages. This functionality
 1114	 * requires the exchange() callback to be defined; if it is not
 1115	 * implemented, we disallow taking ownership of the table.
 1116	 */
1117	if (!tbl->it_ops->xchg_no_kill)
1118		return -EINVAL;
1119
1120	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1121	for (i = 0; i < tbl->nr_pools; i++)
1122		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1123
1124	if (iommu_table_in_use(tbl)) {
 1125		pr_err("iommu_tce: it_map is not empty\n");
1126		ret = -EBUSY;
1127	} else {
1128		memset(tbl->it_map, 0xff, sz);
1129	}
1130
1131	for (i = 0; i < tbl->nr_pools; i++)
1132		spin_unlock(&tbl->pools[i].lock);
1133	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1134
1135	return ret;
1136}
1137
1138static void iommu_release_ownership(struct iommu_table *tbl)
1139{
1140	unsigned long flags, i, sz = (tbl->it_size + 7) >> 3;
1141
1142	spin_lock_irqsave(&tbl->large_pool.lock, flags);
1143	for (i = 0; i < tbl->nr_pools; i++)
1144		spin_lock_nest_lock(&tbl->pools[i].lock, &tbl->large_pool.lock);
1145
1146	memset(tbl->it_map, 0, sz);
1147
1148	iommu_table_reserve_pages(tbl, tbl->it_reserved_start,
1149			tbl->it_reserved_end);
1150
1151	for (i = 0; i < tbl->nr_pools; i++)
1152		spin_unlock(&tbl->pools[i].lock);
1153	spin_unlock_irqrestore(&tbl->large_pool.lock, flags);
1154}
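
/*
 * Illustrative note: both ownership helpers take large_pool.lock first and
 * nest every per-pool lock under it, so the whole allocation bitmap is
 * stable while it is overwritten or rebuilt.
 */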
1155#endif
1156
1157int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
1158{
 1159	/*
 1160	 * The sysfs entries should be populated before
 1161	 * binding the IOMMU group. If the sysfs entries
 1162	 * aren't ready, we simply bail.
 1163	 */
1164	if (!device_is_registered(dev))
1165		return -ENOENT;
1166
1167	if (device_iommu_mapped(dev)) {
1168		pr_debug("%s: Skipping device %s with iommu group %d\n",
1169			 __func__, dev_name(dev),
1170			 iommu_group_id(dev->iommu_group));
1171		return -EBUSY;
1172	}
1173
1174	pr_debug("%s: Adding %s to iommu group %d\n",
1175		 __func__, dev_name(dev),  iommu_group_id(table_group->group));
 1176	/*
 1177	 * This still does not add devices via the IOMMU bus notifier because
 1178	 * pcibios_init() from arch/powerpc/kernel/pci_64.c calls
 1179	 * pcibios_scan_phb() first (which adds devices and triggers the
 1180	 * notifier) and only then calls pci_bus_add_devices(), which
 1181	 * configures DMA for the buses and also creates PEs and IOMMU groups.
 1182	 */
1183	return iommu_probe_device(dev);
1184}
1185EXPORT_SYMBOL_GPL(iommu_add_device);
1186
1187#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1188/*
1189 * A simple iommu_table_group_ops which only allows reusing the existing
 1190 * iommu_table. This handles VFIO for POWER7 or nested KVM.
 1191 * These ops do not allow creating windows; they only allow reusing the existing
1192 * one if it matches table_group->tce32_start/tce32_size/page_shift.
1193 */
1194static unsigned long spapr_tce_get_table_size(__u32 page_shift,
1195					      __u64 window_size, __u32 levels)
1196{
1197	unsigned long size;
1198
1199	if (levels > 1)
1200		return ~0U;
1201	size = window_size >> (page_shift - 3);
1202	return size;
1203}
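
/*
 * Worked example (illustrative): a single-level 1GB window with 4K pages
 * needs (1 << 30) >> (12 - 3) = 2MB for the table, i.e. 256K TCEs at
 * 8 bytes each.
 */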
1204
1205static long spapr_tce_create_table(struct iommu_table_group *table_group, int num,
1206				   __u32 page_shift, __u64 window_size, __u32 levels,
1207				   struct iommu_table **ptbl)
1208{
1209	struct iommu_table *tbl = table_group->tables[0];
1210
1211	if (num > 0)
1212		return -EPERM;
1213
1214	if (tbl->it_page_shift != page_shift ||
1215	    tbl->it_size != (window_size >> page_shift) ||
1216	    tbl->it_indirect_levels != levels - 1)
1217		return -EINVAL;
1218
1219	*ptbl = iommu_tce_table_get(tbl);
1220	return 0;
1221}
1222
1223static long spapr_tce_set_window(struct iommu_table_group *table_group,
1224				 int num, struct iommu_table *tbl)
1225{
1226	return tbl == table_group->tables[num] ? 0 : -EPERM;
1227}
1228
1229static long spapr_tce_unset_window(struct iommu_table_group *table_group, int num)
1230{
1231	return 0;
1232}
1233
1234static long spapr_tce_take_ownership(struct iommu_table_group *table_group)
1235{
1236	int i, j, rc = 0;
1237
1238	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
1239		struct iommu_table *tbl = table_group->tables[i];
1240
1241		if (!tbl || !tbl->it_map)
1242			continue;
1243
1244		rc = iommu_take_ownership(tbl);
1245		if (!rc)
1246			continue;
1247
1248		for (j = 0; j < i; ++j)
1249			iommu_release_ownership(table_group->tables[j]);
1250		return rc;
1251	}
1252	return 0;
1253}
1254
1255static void spapr_tce_release_ownership(struct iommu_table_group *table_group)
1256{
1257	int i;
1258
1259	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
1260		struct iommu_table *tbl = table_group->tables[i];
1261
1262		if (!tbl)
1263			continue;
1264
1265		iommu_table_clear(tbl);
1266		if (tbl->it_map)
1267			iommu_release_ownership(tbl);
1268	}
1269}
1270
1271struct iommu_table_group_ops spapr_tce_table_group_ops = {
1272	.get_table_size = spapr_tce_get_table_size,
1273	.create_table = spapr_tce_create_table,
1274	.set_window = spapr_tce_set_window,
1275	.unset_window = spapr_tce_unset_window,
1276	.take_ownership = spapr_tce_take_ownership,
1277	.release_ownership = spapr_tce_release_ownership,
1278};
1279
1280/*
1281 * A simple iommu_ops to allow less cruft in generic VFIO code.
1282 */
1283static int
1284spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
1285				    struct device *dev)
1286{
1287	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
1288	struct iommu_table_group *table_group;
1289	struct iommu_group *grp;
1290
 1291	/* On the first attach, ownership is already set */
1292	if (!domain)
1293		return 0;
1294
1295	grp = iommu_group_get(dev);
1296	table_group = iommu_group_get_iommudata(grp);
 1297	/*
 1298	 * The domain is being set to PLATFORM from an earlier
 1299	 * BLOCKED one, so the table_group ownership has to be released.
 1300	 */
1301	table_group->ops->release_ownership(table_group);
1302	iommu_group_put(grp);
1303
1304	return 0;
1305}
1306
1307static const struct iommu_domain_ops spapr_tce_platform_domain_ops = {
1308	.attach_dev = spapr_tce_platform_iommu_attach_dev,
1309};
1310
1311static struct iommu_domain spapr_tce_platform_domain = {
1312	.type = IOMMU_DOMAIN_PLATFORM,
1313	.ops = &spapr_tce_platform_domain_ops,
1314};
1315
1316static int
1317spapr_tce_blocked_iommu_attach_dev(struct iommu_domain *platform_domain,
1318				     struct device *dev)
1319{
1320	struct iommu_group *grp = iommu_group_get(dev);
1321	struct iommu_table_group *table_group;
1322	int ret = -EINVAL;
1323
 1324	/*
 1325	 * FIXME: SPAPR mixes blocked and platform behaviors; the blocked
 1326	 * domain also sets the DMA API ops.
 1327	 */
1328	table_group = iommu_group_get_iommudata(grp);
1329	ret = table_group->ops->take_ownership(table_group);
1330	iommu_group_put(grp);
1331
1332	return ret;
1333}
1334
1335static const struct iommu_domain_ops spapr_tce_blocked_domain_ops = {
1336	.attach_dev = spapr_tce_blocked_iommu_attach_dev,
1337};
1338
1339static struct iommu_domain spapr_tce_blocked_domain = {
1340	.type = IOMMU_DOMAIN_BLOCKED,
1341	.ops = &spapr_tce_blocked_domain_ops,
1342};
1343
1344static bool spapr_tce_iommu_capable(struct device *dev, enum iommu_cap cap)
1345{
1346	switch (cap) {
1347	case IOMMU_CAP_CACHE_COHERENCY:
1348		return true;
1349	default:
1350		break;
1351	}
1352
1353	return false;
1354}
1355
1356static struct iommu_device *spapr_tce_iommu_probe_device(struct device *dev)
1357{
1358	struct pci_dev *pdev;
1359	struct pci_controller *hose;
1360
1361	if (!dev_is_pci(dev))
1362		return ERR_PTR(-ENODEV);
1363
1364	pdev = to_pci_dev(dev);
1365	hose = pdev->bus->sysdata;
1366
1367	return &hose->iommu;
1368}
1369
1370static void spapr_tce_iommu_release_device(struct device *dev)
1371{
1372}
1373
1374static struct iommu_group *spapr_tce_iommu_device_group(struct device *dev)
1375{
1376	struct pci_controller *hose;
1377	struct pci_dev *pdev;
1378
1379	pdev = to_pci_dev(dev);
1380	hose = pdev->bus->sysdata;
1381
1382	if (!hose->controller_ops.device_group)
1383		return ERR_PTR(-ENOENT);
1384
1385	return hose->controller_ops.device_group(hose, pdev);
1386}
1387
1388static const struct iommu_ops spapr_tce_iommu_ops = {
1389	.default_domain = &spapr_tce_platform_domain,
1390	.blocked_domain = &spapr_tce_blocked_domain,
1391	.capable = spapr_tce_iommu_capable,
1392	.probe_device = spapr_tce_iommu_probe_device,
1393	.release_device = spapr_tce_iommu_release_device,
1394	.device_group = spapr_tce_iommu_device_group,
1395};
1396
1397static struct attribute *spapr_tce_iommu_attrs[] = {
1398	NULL,
1399};
1400
1401static struct attribute_group spapr_tce_iommu_group = {
1402	.name = "spapr-tce-iommu",
1403	.attrs = spapr_tce_iommu_attrs,
1404};
1405
1406static const struct attribute_group *spapr_tce_iommu_groups[] = {
1407	&spapr_tce_iommu_group,
1408	NULL,
1409};
1410
1411void ppc_iommu_register_device(struct pci_controller *phb)
1412{
1413	iommu_device_sysfs_add(&phb->iommu, phb->parent,
1414				spapr_tce_iommu_groups, "iommu-phb%04x",
1415				phb->global_number);
1416	iommu_device_register(&phb->iommu, &spapr_tce_iommu_ops,
1417				phb->parent);
1418}
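
/*
 * Illustrative note: for a PHB with global_number 0 this registers an IOMMU
 * device named "iommu-phb0000" under /sys/class/iommu, carrying the
 * "spapr-tce-iommu" attribute group defined above.
 */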
1419
1420void ppc_iommu_unregister_device(struct pci_controller *phb)
1421{
1422	iommu_device_unregister(&phb->iommu);
1423	iommu_device_sysfs_remove(&phb->iommu);
1424}
1425
1426/*
 1427 * This registers the IOMMU devices of PHBs. This needs to happen
1428 * after core_initcall(iommu_init) + postcore_initcall(pci_driver_init) and
1429 * before subsys_initcall(iommu_subsys_init).
1430 */
1431static int __init spapr_tce_setup_phb_iommus_initcall(void)
1432{
1433	struct pci_controller *hose;
1434
1435	list_for_each_entry(hose, &hose_list, list_node) {
1436		ppc_iommu_register_device(hose);
1437	}
1438	return 0;
1439}
1440postcore_initcall_sync(spapr_tce_setup_phb_iommus_initcall);
1441#endif
1442
1443#endif /* CONFIG_IOMMU_API */