arch/sparc/kernel/iommu.c (v3.5.6)
  1/* iommu.c: Generic sparc64 IOMMU support.
  2 *
  3 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  4 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/export.h>
  9#include <linux/slab.h>
 10#include <linux/delay.h>
 11#include <linux/device.h>
 12#include <linux/dma-mapping.h>
 13#include <linux/errno.h>
 14#include <linux/iommu-helper.h>
  15#include <linux/bitmap.h>
 16
 17#ifdef CONFIG_PCI
 18#include <linux/pci.h>
 19#endif
 20
 21#include <asm/iommu.h>
 22
  23#include "iommu_common.h"
 24
 25#define STC_CTXMATCH_ADDR(STC, CTX)	\
 26	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
 27#define STC_FLUSHFLAG_INIT(STC) \
 28	(*((STC)->strbuf_flushflag) = 0UL)
 29#define STC_FLUSHFLAG_SET(STC) \
 30	(*((STC)->strbuf_flushflag) != 0UL)
 31
 32#define iommu_read(__reg) \
 33({	u64 __ret; \
 34	__asm__ __volatile__("ldxa [%1] %2, %0" \
 35			     : "=r" (__ret) \
 36			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
 37			     : "memory"); \
 38	__ret; \
 39})
 40#define iommu_write(__reg, __val) \
 41	__asm__ __volatile__("stxa %0, [%1] %2" \
 42			     : /* no outputs */ \
 43			     : "r" (__val), "r" (__reg), \
 44			       "i" (ASI_PHYS_BYPASS_EC_E))
 45
 46/* Must be invoked under the IOMMU lock. */
 47static void iommu_flushall(struct iommu *iommu)
  48{
 49	if (iommu->iommu_flushinv) {
 50		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 51	} else {
 52		unsigned long tag;
 53		int entry;
 54
 55		tag = iommu->iommu_tags;
 56		for (entry = 0; entry < 16; entry++) {
 57			iommu_write(tag, 0);
 58			tag += 8;
 59		}
 60
 61		/* Ensure completion of previous PIO writes. */
 62		(void) iommu_read(iommu->write_complete_reg);
 63	}
 64}
 65
 66#define IOPTE_CONSISTENT(CTX) \
 67	(IOPTE_VALID | IOPTE_CACHE | \
 68	 (((CTX) << 47) & IOPTE_CONTEXT))
 69
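   /* The context number occupies the high bits of each IOPTE (the
    * IOPTE_CONTEXT field at bit 47); the same shift appears below
    * wherever a context is read back out of an entry.
    */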
 70#define IOPTE_STREAMING(CTX) \
 71	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
 72
  73/* Existing mappings are never marked invalid; instead they
 74 * are pointed to a dummy page.
 75 */
 76#define IOPTE_IS_DUMMY(iommu, iopte)	\
 77	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 78
 79static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 80{
 81	unsigned long val = iopte_val(*iopte);
 82
 83	val &= ~IOPTE_PAGE;
 84	val |= iommu->dummy_page_pa;
 85
 86	iopte_val(*iopte) = val;
 87}
 88
 89/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 90 * facility it must all be done in one pass while under the iommu lock.
 91 *
 92 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 93 * over the entire page table doing allocations.  Therefore we only ever advance
 94 * the hint and cannot backtrack it.
 95 */
 96unsigned long iommu_range_alloc(struct device *dev,
 97				struct iommu *iommu,
 98				unsigned long npages,
 99				unsigned long *handle)
100{
101	unsigned long n, end, start, limit, boundary_size;
102	struct iommu_arena *arena = &iommu->arena;
103	int pass = 0;
104
105	/* This allocator was derived from x86_64's bit string search */
106
107	/* Sanity check */
108	if (unlikely(npages == 0)) {
109		if (printk_ratelimit())
110			WARN_ON(1);
111		return DMA_ERROR_CODE;
112	}
113
114	if (handle && *handle)
115		start = *handle;
116	else
117		start = arena->hint;
118
119	limit = arena->limit;
120
 121	/* The case below can happen if we have a small segment appended
 122	 * to a large one, or when the previous alloc was at the very end of
123	 * the available space. If so, go back to the beginning and flush.
124	 */
125	if (start >= limit) {
126		start = 0;
127		if (iommu->flush_all)
128			iommu->flush_all(iommu);
129	}
130
131 again:
132
133	if (dev)
134		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
135				      1 << IO_PAGE_SHIFT);
136	else
137		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
138
139	n = iommu_area_alloc(arena->map, limit, start, npages,
140			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
141			     boundary_size >> IO_PAGE_SHIFT, 0);
142	if (n == -1) {
143		if (likely(pass < 1)) {
144			/* First failure, rescan from the beginning.  */
145			start = 0;
146			if (iommu->flush_all)
147				iommu->flush_all(iommu);
148			pass++;
149			goto again;
150		} else {
151			/* Second failure, give up */
152			return DMA_ERROR_CODE;
153		}
154	}
155
156	end = n + npages;
157
158	arena->hint = end;
159
160	/* Update handle for SG allocations */
161	if (handle)
162		*handle = end;
163
164	return n;
165}
166
167void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
168{
169	struct iommu_arena *arena = &iommu->arena;
170	unsigned long entry;
171
172	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
173
174	bitmap_clear(arena->map, entry, npages);
175}
176
177int iommu_table_init(struct iommu *iommu, int tsbsize,
178		     u32 dma_offset, u32 dma_addr_mask,
179		     int numa_node)
180{
181	unsigned long i, order, sz, num_tsb_entries;
182	struct page *page;
183
184	num_tsb_entries = tsbsize / sizeof(iopte_t);
185
186	/* Setup initial software IOMMU state. */
187	spin_lock_init(&iommu->lock);
188	iommu->ctx_lowest_free = 1;
189	iommu->page_table_map_base = dma_offset;
190	iommu->dma_addr_mask = dma_addr_mask;
191
192	/* Allocate and initialize the free area map.  */
193	sz = num_tsb_entries / 8;
194	sz = (sz + 7UL) & ~7UL;
195	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
196	if (!iommu->arena.map) {
197		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
198		return -ENOMEM;
199	}
200	memset(iommu->arena.map, 0, sz);
201	iommu->arena.limit = num_tsb_entries;
202
203	if (tlb_type != hypervisor)
 204		iommu->flush_all = iommu_flushall;
205
206	/* Allocate and initialize the dummy page which we
207	 * set inactive IO PTEs to point to.
208	 */
209	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
210	if (!page) {
211		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
212		goto out_free_map;
213	}
214	iommu->dummy_page = (unsigned long) page_address(page);
215	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
216	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
217
218	/* Now allocate and setup the IOMMU page table itself.  */
219	order = get_order(tsbsize);
220	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
221	if (!page) {
222		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
223		goto out_free_dummy_page;
224	}
225	iommu->page_table = (iopte_t *)page_address(page);
226
227	for (i = 0; i < num_tsb_entries; i++)
228		iopte_make_dummy(iommu, &iommu->page_table[i]);
229
230	return 0;
231
232out_free_dummy_page:
233	free_page(iommu->dummy_page);
234	iommu->dummy_page = 0UL;
235
236out_free_map:
237	kfree(iommu->arena.map);
238	iommu->arena.map = NULL;
239
240	return -ENOMEM;
241}
242
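   /* A minimal sketch of a call site, with hypothetical values (the
    * real callers are the PCI controller probe paths):
    *
    *	err = iommu_table_init(iommu, tsbsize, dma_offset,
    *			       0xffffffff, numa_node);
    *
    * tsbsize is the size of the IOMMU page table in bytes, dma_offset
    * becomes page_table_map_base (the base DMA address), and
    * dma_addr_mask is what dma_supported() checks device masks
    * against.
    */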
 243static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
244				    unsigned long npages)
245{
246	unsigned long entry;
247
248	entry = iommu_range_alloc(dev, iommu, npages, NULL);
 249	if (unlikely(entry == DMA_ERROR_CODE))
250		return NULL;
251
252	return iommu->page_table + entry;
253}
254
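   /* Streaming-buffer contexts.  Context 0 means "no context" and is
    * never handed out (ctx_lowest_free starts at 1); the allocator
    * first-fit searches upward from the lowest known-free context and
    * wraps around once before giving up.
    */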
255static int iommu_alloc_ctx(struct iommu *iommu)
256{
257	int lowest = iommu->ctx_lowest_free;
258	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
259
260	if (unlikely(n == IOMMU_NUM_CTXS)) {
261		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
262		if (unlikely(n == lowest)) {
263			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
264			n = 0;
265		}
266	}
267	if (n)
268		__set_bit(n, iommu->ctx_bitmap);
269
270	return n;
271}
272
273static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
274{
275	if (likely(ctx)) {
276		__clear_bit(ctx, iommu->ctx_bitmap);
277		if (ctx < iommu->ctx_lowest_free)
278			iommu->ctx_lowest_free = ctx;
279	}
280}
281
282static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
283				   dma_addr_t *dma_addrp, gfp_t gfp,
284				   struct dma_attrs *attrs)
285{
286	unsigned long flags, order, first_page;
287	struct iommu *iommu;
288	struct page *page;
289	int npages, nid;
290	iopte_t *iopte;
291	void *ret;
292
293	size = IO_PAGE_ALIGN(size);
294	order = get_order(size);
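	/* Anything of order 10 or more (8MB and up with the 8KB pages
	 * used on sparc64) is refused.
	 */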
295	if (order >= 10)
296		return NULL;
297
298	nid = dev->archdata.numa_node;
299	page = alloc_pages_node(nid, gfp, order);
300	if (unlikely(!page))
301		return NULL;
302
303	first_page = (unsigned long) page_address(page);
304	memset((char *)first_page, 0, PAGE_SIZE << order);
305
306	iommu = dev->archdata.iommu;
307
308	spin_lock_irqsave(&iommu->lock, flags);
309	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
310	spin_unlock_irqrestore(&iommu->lock, flags);
311
312	if (unlikely(iopte == NULL)) {
313		free_pages(first_page, order);
314		return NULL;
315	}
316
317	*dma_addrp = (iommu->page_table_map_base +
318		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
319	ret = (void *) first_page;
320	npages = size >> IO_PAGE_SHIFT;
321	first_page = __pa(first_page);
322	while (npages--) {
323		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
324				     IOPTE_WRITE |
325				     (first_page & IOPTE_PAGE));
326		iopte++;
327		first_page += IO_PAGE_SIZE;
328	}
329
330	return ret;
331}
332
333static void dma_4u_free_coherent(struct device *dev, size_t size,
334				 void *cpu, dma_addr_t dvma,
335				 struct dma_attrs *attrs)
336{
337	struct iommu *iommu;
338	unsigned long flags, order, npages;
339
340	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
341	iommu = dev->archdata.iommu;
342
343	spin_lock_irqsave(&iommu->lock, flags);
344
345	iommu_range_free(iommu, dvma, npages);
346
347	spin_unlock_irqrestore(&iommu->lock, flags);
348
349	order = get_order(size);
350	if (order < 10)
351		free_pages((unsigned long)cpu, order);
352}
353
354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
355				  unsigned long offset, size_t sz,
356				  enum dma_data_direction direction,
357				  struct dma_attrs *attrs)
358{
359	struct iommu *iommu;
360	struct strbuf *strbuf;
361	iopte_t *base;
362	unsigned long flags, npages, oaddr;
363	unsigned long i, base_paddr, ctx;
364	u32 bus_addr, ret;
365	unsigned long iopte_protection;
366
367	iommu = dev->archdata.iommu;
368	strbuf = dev->archdata.stc;
369
370	if (unlikely(direction == DMA_NONE))
371		goto bad_no_ctx;
372
373	oaddr = (unsigned long)(page_address(page) + offset);
374	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
375	npages >>= IO_PAGE_SHIFT;
376
377	spin_lock_irqsave(&iommu->lock, flags);
 378	base = alloc_npages(dev, iommu, npages);
379	ctx = 0;
380	if (iommu->iommu_ctxflush)
381		ctx = iommu_alloc_ctx(iommu);
382	spin_unlock_irqrestore(&iommu->lock, flags);
383
384	if (unlikely(!base))
385		goto bad;
386
387	bus_addr = (iommu->page_table_map_base +
388		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
389	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
390	base_paddr = __pa(oaddr & IO_PAGE_MASK);
391	if (strbuf->strbuf_enabled)
392		iopte_protection = IOPTE_STREAMING(ctx);
393	else
394		iopte_protection = IOPTE_CONSISTENT(ctx);
395	if (direction != DMA_TO_DEVICE)
396		iopte_protection |= IOPTE_WRITE;
397
398	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
399		iopte_val(*base) = iopte_protection | base_paddr;
400
401	return ret;
402
403bad:
404	iommu_free_ctx(iommu, ctx);
405bad_no_ctx:
406	if (printk_ratelimit())
407		WARN_ON(1);
408	return DMA_ERROR_CODE;
409}
410
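   /* Push a mapping's data out of the streaming buffer.  When both
    * the strbuf and the IOMMU support context flushing, writing the
    * context number flushes every matching entry, and the CTXMATCH
    * registers are polled until none remain (falling back to per-page
    * flushes on timeout); otherwise each IO page is flushed
    * individually.  If the device may have written through the
    * mapping, completion is then confirmed by having the hardware
    * post a flush flag to memory and spinning on it, bounded at
    * 100000 microseconds.
    */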
411static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
412			 u32 vaddr, unsigned long ctx, unsigned long npages,
413			 enum dma_data_direction direction)
414{
415	int limit;
416
417	if (strbuf->strbuf_ctxflush &&
418	    iommu->iommu_ctxflush) {
419		unsigned long matchreg, flushreg;
420		u64 val;
421
422		flushreg = strbuf->strbuf_ctxflush;
423		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
424
425		iommu_write(flushreg, ctx);
426		val = iommu_read(matchreg);
427		val &= 0xffff;
428		if (!val)
429			goto do_flush_sync;
430
431		while (val) {
432			if (val & 0x1)
433				iommu_write(flushreg, ctx);
434			val >>= 1;
435		}
436		val = iommu_read(matchreg);
437		if (unlikely(val)) {
438			printk(KERN_WARNING "strbuf_flush: ctx flush "
439			       "timeout matchreg[%llx] ctx[%lx]\n",
440			       val, ctx);
441			goto do_page_flush;
442		}
443	} else {
444		unsigned long i;
445
446	do_page_flush:
447		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
448			iommu_write(strbuf->strbuf_pflush, vaddr);
449	}
450
451do_flush_sync:
 452	/* If the device could not possibly have put dirty data into
453	 * the streaming cache, no flush-flag synchronization needs
454	 * to be performed.
455	 */
456	if (direction == DMA_TO_DEVICE)
457		return;
458
459	STC_FLUSHFLAG_INIT(strbuf);
460	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
461	(void) iommu_read(iommu->write_complete_reg);
462
463	limit = 100000;
464	while (!STC_FLUSHFLAG_SET(strbuf)) {
465		limit--;
466		if (!limit)
467			break;
468		udelay(1);
469		rmb();
470	}
471	if (!limit)
472		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
473		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
474		       vaddr, ctx, npages);
475}
476
477static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
478			      size_t sz, enum dma_data_direction direction,
479			      struct dma_attrs *attrs)
480{
481	struct iommu *iommu;
482	struct strbuf *strbuf;
483	iopte_t *base;
484	unsigned long flags, npages, ctx, i;
485
486	if (unlikely(direction == DMA_NONE)) {
487		if (printk_ratelimit())
488			WARN_ON(1);
489		return;
490	}
491
492	iommu = dev->archdata.iommu;
493	strbuf = dev->archdata.stc;
494
495	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
496	npages >>= IO_PAGE_SHIFT;
497	base = iommu->page_table +
498		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
499	bus_addr &= IO_PAGE_MASK;
500
501	spin_lock_irqsave(&iommu->lock, flags);
502
503	/* Record the context, if any. */
504	ctx = 0;
505	if (iommu->iommu_ctxflush)
506		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
507
508	/* Step 1: Kick data out of streaming buffers if necessary. */
509	if (strbuf->strbuf_enabled)
510		strbuf_flush(strbuf, iommu, bus_addr, ctx,
511			     npages, direction);
512
513	/* Step 2: Clear out TSB entries. */
514	for (i = 0; i < npages; i++)
515		iopte_make_dummy(iommu, base + i);
516
517	iommu_range_free(iommu, bus_addr, npages);
518
519	iommu_free_ctx(iommu, ctx);
520
 521	spin_unlock_irqrestore(&iommu->lock, flags);
522}
523
524static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
525			 int nelems, enum dma_data_direction direction,
526			 struct dma_attrs *attrs)
527{
528	struct scatterlist *s, *outs, *segstart;
529	unsigned long flags, handle, prot, ctx;
530	dma_addr_t dma_next = 0, dma_addr;
531	unsigned int max_seg_size;
532	unsigned long seg_boundary_size;
533	int outcount, incount, i;
534	struct strbuf *strbuf;
535	struct iommu *iommu;
536	unsigned long base_shift;
537
538	BUG_ON(direction == DMA_NONE);
539
540	iommu = dev->archdata.iommu;
541	strbuf = dev->archdata.stc;
542	if (nelems == 0 || !iommu)
543		return 0;
544
545	spin_lock_irqsave(&iommu->lock, flags);
546
547	ctx = 0;
548	if (iommu->iommu_ctxflush)
549		ctx = iommu_alloc_ctx(iommu);
550
551	if (strbuf->strbuf_enabled)
552		prot = IOPTE_STREAMING(ctx);
553	else
554		prot = IOPTE_CONSISTENT(ctx);
555	if (direction != DMA_TO_DEVICE)
556		prot |= IOPTE_WRITE;
557
558	outs = s = segstart = &sglist[0];
559	outcount = 1;
560	incount = nelems;
561	handle = 0;
562
563	/* Init first segment length for backout at failure */
564	outs->dma_length = 0;
565
566	max_seg_size = dma_get_max_seg_size(dev);
567	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
568				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
569	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
570	for_each_sg(sglist, s, nelems, i) {
571		unsigned long paddr, npages, entry, out_entry = 0, slen;
572		iopte_t *base;
573
574		slen = s->length;
575		/* Sanity check */
576		if (slen == 0) {
577			dma_next = 0;
578			continue;
579		}
580		/* Allocate iommu entries for that segment */
581		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
582		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
 583		entry = iommu_range_alloc(dev, iommu, npages, &handle);
584
585		/* Handle failure */
586		if (unlikely(entry == DMA_ERROR_CODE)) {
587			if (printk_ratelimit())
588				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
589				       " npages %lx\n", iommu, paddr, npages);
590			goto iommu_map_failed;
591		}
592
593		base = iommu->page_table + entry;
594
595		/* Convert entry to a dma_addr_t */
596		dma_addr = iommu->page_table_map_base +
597			(entry << IO_PAGE_SHIFT);
598		dma_addr |= (s->offset & ~IO_PAGE_MASK);
599
600		/* Insert into HW table */
601		paddr &= IO_PAGE_MASK;
602		while (npages--) {
603			iopte_val(*base) = prot | paddr;
604			base++;
605			paddr += IO_PAGE_SIZE;
606		}
607
608		/* If we are in an open segment, try merging */
609		if (segstart != s) {
610			/* We cannot merge if:
611			 * - allocated dma_addr isn't contiguous to previous allocation
612			 */
613			if ((dma_addr != dma_next) ||
614			    (outs->dma_length + s->length > max_seg_size) ||
615			    (is_span_boundary(out_entry, base_shift,
616					      seg_boundary_size, outs, s))) {
617				/* Can't merge: create a new segment */
618				segstart = s;
619				outcount++;
620				outs = sg_next(outs);
621			} else {
622				outs->dma_length += s->length;
623			}
624		}
625
626		if (segstart == s) {
627			/* This is a new segment, fill entries */
628			outs->dma_address = dma_addr;
629			outs->dma_length = slen;
630			out_entry = entry;
631		}
632
633		/* Calculate next page pointer for contiguous check */
634		dma_next = dma_addr + slen;
635	}
636
637	spin_unlock_irqrestore(&iommu->lock, flags);
638
639	if (outcount < incount) {
640		outs = sg_next(outs);
641		outs->dma_address = DMA_ERROR_CODE;
642		outs->dma_length = 0;
643	}
644
645	return outcount;
646
647iommu_map_failed:
648	for_each_sg(sglist, s, nelems, i) {
649		if (s->dma_length != 0) {
650			unsigned long vaddr, npages, entry, j;
651			iopte_t *base;
652
653			vaddr = s->dma_address & IO_PAGE_MASK;
654			npages = iommu_num_pages(s->dma_address, s->dma_length,
655						 IO_PAGE_SIZE);
656			iommu_range_free(iommu, vaddr, npages);
657
658			entry = (vaddr - iommu->page_table_map_base)
659				>> IO_PAGE_SHIFT;
660			base = iommu->page_table + entry;
661
662			for (j = 0; j < npages; j++)
663				iopte_make_dummy(iommu, base + j);
 664
665			s->dma_address = DMA_ERROR_CODE;
666			s->dma_length = 0;
667		}
668		if (s == outs)
669			break;
670	}
671	spin_unlock_irqrestore(&iommu->lock, flags);
672
673	return 0;
674}
675
676/* If contexts are being used, they are the same in all of the mappings
677 * we make for a particular SG.
678 */
679static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
680{
681	unsigned long ctx = 0;
682
683	if (iommu->iommu_ctxflush) {
684		iopte_t *base;
 685		u32 bus_addr;
686
687		bus_addr = sg->dma_address & IO_PAGE_MASK;
688		base = iommu->page_table +
689			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
690
691		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
692	}
693	return ctx;
694}
695
696static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
697			    int nelems, enum dma_data_direction direction,
698			    struct dma_attrs *attrs)
699{
700	unsigned long flags, ctx;
701	struct scatterlist *sg;
702	struct strbuf *strbuf;
703	struct iommu *iommu;
704
705	BUG_ON(direction == DMA_NONE);
706
707	iommu = dev->archdata.iommu;
708	strbuf = dev->archdata.stc;
709
710	ctx = fetch_sg_ctx(iommu, sglist);
711
712	spin_lock_irqsave(&iommu->lock, flags);
713
714	sg = sglist;
715	while (nelems--) {
716		dma_addr_t dma_handle = sg->dma_address;
717		unsigned int len = sg->dma_length;
718		unsigned long npages, entry;
719		iopte_t *base;
720		int i;
721
722		if (!len)
723			break;
724		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
725		iommu_range_free(iommu, dma_handle, npages);
726
727		entry = ((dma_handle - iommu->page_table_map_base)
728			 >> IO_PAGE_SHIFT);
729		base = iommu->page_table + entry;
730
731		dma_handle &= IO_PAGE_MASK;
732		if (strbuf->strbuf_enabled)
733			strbuf_flush(strbuf, iommu, dma_handle, ctx,
734				     npages, direction);
735
736		for (i = 0; i < npages; i++)
737			iopte_make_dummy(iommu, base + i);
 738
739		sg = sg_next(sg);
740	}
741
742	iommu_free_ctx(iommu, ctx);
743
744	spin_unlock_irqrestore(&iommu->lock, flags);
745}
746
747static void dma_4u_sync_single_for_cpu(struct device *dev,
748				       dma_addr_t bus_addr, size_t sz,
749				       enum dma_data_direction direction)
750{
751	struct iommu *iommu;
752	struct strbuf *strbuf;
753	unsigned long flags, ctx, npages;
754
755	iommu = dev->archdata.iommu;
756	strbuf = dev->archdata.stc;
757
758	if (!strbuf->strbuf_enabled)
759		return;
760
761	spin_lock_irqsave(&iommu->lock, flags);
762
763	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
764	npages >>= IO_PAGE_SHIFT;
765	bus_addr &= IO_PAGE_MASK;
766
767	/* Step 1: Record the context, if any. */
768	ctx = 0;
769	if (iommu->iommu_ctxflush &&
770	    strbuf->strbuf_ctxflush) {
 771		iopte_t *iopte;
772
773		iopte = iommu->page_table +
774			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
775		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
776	}
777
778	/* Step 2: Kick data out of streaming buffers. */
779	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
780
781	spin_unlock_irqrestore(&iommu->lock, flags);
782}
783
784static void dma_4u_sync_sg_for_cpu(struct device *dev,
785				   struct scatterlist *sglist, int nelems,
786				   enum dma_data_direction direction)
787{
788	struct iommu *iommu;
789	struct strbuf *strbuf;
790	unsigned long flags, ctx, npages, i;
791	struct scatterlist *sg, *sgprv;
792	u32 bus_addr;
793
794	iommu = dev->archdata.iommu;
795	strbuf = dev->archdata.stc;
796
797	if (!strbuf->strbuf_enabled)
798		return;
799
800	spin_lock_irqsave(&iommu->lock, flags);
801
802	/* Step 1: Record the context, if any. */
803	ctx = 0;
804	if (iommu->iommu_ctxflush &&
805	    strbuf->strbuf_ctxflush) {
 806		iopte_t *iopte;
807
808		iopte = iommu->page_table +
809			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
810		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
811	}
812
813	/* Step 2: Kick data out of streaming buffers. */
814	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
815	sgprv = NULL;
816	for_each_sg(sglist, sg, nelems, i) {
817		if (sg->dma_length == 0)
818			break;
819		sgprv = sg;
820	}
821
822	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
823		  - bus_addr) >> IO_PAGE_SHIFT;
824	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
825
826	spin_unlock_irqrestore(&iommu->lock, flags);
827}
828
829static struct dma_map_ops sun4u_dma_ops = {
830	.alloc			= dma_4u_alloc_coherent,
831	.free			= dma_4u_free_coherent,
832	.map_page		= dma_4u_map_page,
833	.unmap_page		= dma_4u_unmap_page,
834	.map_sg			= dma_4u_map_sg,
835	.unmap_sg		= dma_4u_unmap_sg,
836	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
837	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
838};
839
840struct dma_map_ops *dma_ops = &sun4u_dma_ops;
841EXPORT_SYMBOL(dma_ops);
842
843extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
844
845int dma_supported(struct device *dev, u64 device_mask)
846{
847	struct iommu *iommu = dev->archdata.iommu;
848	u64 dma_addr_mask = iommu->dma_addr_mask;
849
850	if (device_mask >= (1UL << 32UL))
851		return 0;
852
853	if ((device_mask & dma_addr_mask) == dma_addr_mask)
854		return 1;
855
856#ifdef CONFIG_PCI
857	if (dev->bus == &pci_bus_type)
858		return pci64_dma_supported(to_pci_dev(dev), device_mask);
859#endif
860
861	return 0;
862}
863EXPORT_SYMBOL(dma_supported);
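
By v4.6 the same file has been reworked around the generic allocator from <linux/iommu-common.h>: the private iommu_arena and the iommu_range_alloc()/iommu_range_free() helpers above are gone, replaced by an iommu_map_table set up with iommu_tbl_pool_init() and driven through iommu_tbl_range_alloc()/iommu_tbl_range_free(), which take their own pool locks instead of relying on iommu->lock.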
arch/sparc/kernel/iommu.c (v4.6)
  1/* iommu.c: Generic sparc64 IOMMU support.
  2 *
  3 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  4 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/export.h>
  9#include <linux/slab.h>
 10#include <linux/delay.h>
 11#include <linux/device.h>
 12#include <linux/dma-mapping.h>
 13#include <linux/errno.h>
 14#include <linux/iommu-helper.h>
 15#include <linux/bitmap.h>
 16#include <linux/iommu-common.h>
 17
 18#ifdef CONFIG_PCI
 19#include <linux/pci.h>
 20#endif
 21
 22#include <asm/iommu.h>
 23
 24#include "iommu_common.h"
 25#include "kernel.h"
 26
 27#define STC_CTXMATCH_ADDR(STC, CTX)	\
 28	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
 29#define STC_FLUSHFLAG_INIT(STC) \
 30	(*((STC)->strbuf_flushflag) = 0UL)
 31#define STC_FLUSHFLAG_SET(STC) \
 32	(*((STC)->strbuf_flushflag) != 0UL)
 33
 34#define iommu_read(__reg) \
 35({	u64 __ret; \
 36	__asm__ __volatile__("ldxa [%1] %2, %0" \
 37			     : "=r" (__ret) \
 38			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
 39			     : "memory"); \
 40	__ret; \
 41})
 42#define iommu_write(__reg, __val) \
 43	__asm__ __volatile__("stxa %0, [%1] %2" \
 44			     : /* no outputs */ \
 45			     : "r" (__val), "r" (__reg), \
 46			       "i" (ASI_PHYS_BYPASS_EC_E))
 47
 48/* Must be invoked under the IOMMU lock. */
 49static void iommu_flushall(struct iommu_map_table *iommu_map_table)
 50{
 51	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
 52	if (iommu->iommu_flushinv) {
 53		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 54	} else {
 55		unsigned long tag;
 56		int entry;
 57
 58		tag = iommu->iommu_tags;
 59		for (entry = 0; entry < 16; entry++) {
 60			iommu_write(tag, 0);
 61			tag += 8;
 62		}
 63
 64		/* Ensure completion of previous PIO writes. */
 65		(void) iommu_read(iommu->write_complete_reg);
 66	}
 67}
 68
 69#define IOPTE_CONSISTENT(CTX) \
 70	(IOPTE_VALID | IOPTE_CACHE | \
 71	 (((CTX) << 47) & IOPTE_CONTEXT))
 72
 73#define IOPTE_STREAMING(CTX) \
 74	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
 75
  76/* Existing mappings are never marked invalid; instead they
 77 * are pointed to a dummy page.
 78 */
 79#define IOPTE_IS_DUMMY(iommu, iopte)	\
 80	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 81
 82static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 83{
 84	unsigned long val = iopte_val(*iopte);
 85
 86	val &= ~IOPTE_PAGE;
 87	val |= iommu->dummy_page_pa;
 88
 89	iopte_val(*iopte) = val;
 90}
  91
 92int iommu_table_init(struct iommu *iommu, int tsbsize,
 93		     u32 dma_offset, u32 dma_addr_mask,
 94		     int numa_node)
 95{
 96	unsigned long i, order, sz, num_tsb_entries;
 97	struct page *page;
 98
 99	num_tsb_entries = tsbsize / sizeof(iopte_t);
100
101	/* Setup initial software IOMMU state. */
102	spin_lock_init(&iommu->lock);
103	iommu->ctx_lowest_free = 1;
104	iommu->tbl.table_map_base = dma_offset;
105	iommu->dma_addr_mask = dma_addr_mask;
106
107	/* Allocate and initialize the free area map.  */
108	sz = num_tsb_entries / 8;
109	sz = (sz + 7UL) & ~7UL;
110	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
 111	if (!iommu->tbl.map)
112		return -ENOMEM;
 113	memset(iommu->tbl.map, 0, sz);
114
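	/* The free map is handed to the generic iommu-common allocator:
	 * one pool, no large-order pool, span-boundary checking enabled,
	 * and a flush-all hook only on sun4u hardware (hypervisor-based
	 * sun4v machines do not need it).
	 */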
115	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
116			    (tlb_type != hypervisor ? iommu_flushall : NULL),
117			    false, 1, false);
118
119	/* Allocate and initialize the dummy page which we
120	 * set inactive IO PTEs to point to.
121	 */
122	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
123	if (!page) {
124		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
125		goto out_free_map;
126	}
127	iommu->dummy_page = (unsigned long) page_address(page);
128	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
129	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
130
131	/* Now allocate and setup the IOMMU page table itself.  */
132	order = get_order(tsbsize);
133	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
134	if (!page) {
135		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
136		goto out_free_dummy_page;
137	}
138	iommu->page_table = (iopte_t *)page_address(page);
139
140	for (i = 0; i < num_tsb_entries; i++)
141		iopte_make_dummy(iommu, &iommu->page_table[i]);
142
143	return 0;
144
145out_free_dummy_page:
146	free_page(iommu->dummy_page);
147	iommu->dummy_page = 0UL;
148
149out_free_map:
150	kfree(iommu->tbl.map);
151	iommu->tbl.map = NULL;
152
153	return -ENOMEM;
154}
155
156static inline iopte_t *alloc_npages(struct device *dev,
157				    struct iommu *iommu,
158				    unsigned long npages)
159{
160	unsigned long entry;
161
162	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
163				      (unsigned long)(-1), 0);
164	if (unlikely(entry == IOMMU_ERROR_CODE))
165		return NULL;
166
167	return iommu->page_table + entry;
168}
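   /* iommu_tbl_range_alloc() takes the pool's own lock, so callers no
    * longer hold iommu->lock around table allocation; that lock now
    * only guards the context bitmap and streaming-buffer work.
    */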
169
170static int iommu_alloc_ctx(struct iommu *iommu)
171{
172	int lowest = iommu->ctx_lowest_free;
173	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
174
175	if (unlikely(n == IOMMU_NUM_CTXS)) {
176		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
177		if (unlikely(n == lowest)) {
178			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
179			n = 0;
180		}
181	}
182	if (n)
183		__set_bit(n, iommu->ctx_bitmap);
184
185	return n;
186}
187
188static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
189{
190	if (likely(ctx)) {
191		__clear_bit(ctx, iommu->ctx_bitmap);
192		if (ctx < iommu->ctx_lowest_free)
193			iommu->ctx_lowest_free = ctx;
194	}
195}
196
197static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
198				   dma_addr_t *dma_addrp, gfp_t gfp,
199				   struct dma_attrs *attrs)
200{
201	unsigned long order, first_page;
202	struct iommu *iommu;
203	struct page *page;
204	int npages, nid;
205	iopte_t *iopte;
206	void *ret;
207
208	size = IO_PAGE_ALIGN(size);
209	order = get_order(size);
210	if (order >= 10)
211		return NULL;
212
213	nid = dev->archdata.numa_node;
214	page = alloc_pages_node(nid, gfp, order);
215	if (unlikely(!page))
216		return NULL;
217
218	first_page = (unsigned long) page_address(page);
219	memset((char *)first_page, 0, PAGE_SIZE << order);
220
221	iommu = dev->archdata.iommu;
 222
 223	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
224
225	if (unlikely(iopte == NULL)) {
226		free_pages(first_page, order);
227		return NULL;
228	}
229
230	*dma_addrp = (iommu->tbl.table_map_base +
231		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
232	ret = (void *) first_page;
233	npages = size >> IO_PAGE_SHIFT;
234	first_page = __pa(first_page);
235	while (npages--) {
236		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
237				     IOPTE_WRITE |
238				     (first_page & IOPTE_PAGE));
239		iopte++;
240		first_page += IO_PAGE_SIZE;
241	}
242
243	return ret;
244}
245
246static void dma_4u_free_coherent(struct device *dev, size_t size,
247				 void *cpu, dma_addr_t dvma,
248				 struct dma_attrs *attrs)
249{
250	struct iommu *iommu;
251	unsigned long order, npages;
252
253	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
254	iommu = dev->archdata.iommu;
255
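	/* No iommu->lock needed here; the generic pool allocator locks
	 * internally.
	 */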
 256	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
257
258	order = get_order(size);
259	if (order < 10)
260		free_pages((unsigned long)cpu, order);
261}
262
263static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
264				  unsigned long offset, size_t sz,
265				  enum dma_data_direction direction,
266				  struct dma_attrs *attrs)
267{
268	struct iommu *iommu;
269	struct strbuf *strbuf;
270	iopte_t *base;
271	unsigned long flags, npages, oaddr;
272	unsigned long i, base_paddr, ctx;
273	u32 bus_addr, ret;
274	unsigned long iopte_protection;
275
276	iommu = dev->archdata.iommu;
277	strbuf = dev->archdata.stc;
278
279	if (unlikely(direction == DMA_NONE))
280		goto bad_no_ctx;
281
282	oaddr = (unsigned long)(page_address(page) + offset);
283	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
284	npages >>= IO_PAGE_SHIFT;
 285
286	base = alloc_npages(dev, iommu, npages);
287	spin_lock_irqsave(&iommu->lock, flags);
288	ctx = 0;
289	if (iommu->iommu_ctxflush)
290		ctx = iommu_alloc_ctx(iommu);
291	spin_unlock_irqrestore(&iommu->lock, flags);
292
293	if (unlikely(!base))
294		goto bad;
295
296	bus_addr = (iommu->tbl.table_map_base +
297		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
298	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
299	base_paddr = __pa(oaddr & IO_PAGE_MASK);
300	if (strbuf->strbuf_enabled)
301		iopte_protection = IOPTE_STREAMING(ctx);
302	else
303		iopte_protection = IOPTE_CONSISTENT(ctx);
304	if (direction != DMA_TO_DEVICE)
305		iopte_protection |= IOPTE_WRITE;
306
307	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
308		iopte_val(*base) = iopte_protection | base_paddr;
309
310	return ret;
311
312bad:
313	iommu_free_ctx(iommu, ctx);
314bad_no_ctx:
315	if (printk_ratelimit())
316		WARN_ON(1);
317	return DMA_ERROR_CODE;
318}
319
320static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
321			 u32 vaddr, unsigned long ctx, unsigned long npages,
322			 enum dma_data_direction direction)
323{
324	int limit;
325
326	if (strbuf->strbuf_ctxflush &&
327	    iommu->iommu_ctxflush) {
328		unsigned long matchreg, flushreg;
329		u64 val;
330
331		flushreg = strbuf->strbuf_ctxflush;
332		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
333
334		iommu_write(flushreg, ctx);
335		val = iommu_read(matchreg);
336		val &= 0xffff;
337		if (!val)
338			goto do_flush_sync;
339
340		while (val) {
341			if (val & 0x1)
342				iommu_write(flushreg, ctx);
343			val >>= 1;
344		}
345		val = iommu_read(matchreg);
346		if (unlikely(val)) {
347			printk(KERN_WARNING "strbuf_flush: ctx flush "
348			       "timeout matchreg[%llx] ctx[%lx]\n",
349			       val, ctx);
350			goto do_page_flush;
351		}
352	} else {
353		unsigned long i;
354
355	do_page_flush:
356		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
357			iommu_write(strbuf->strbuf_pflush, vaddr);
358	}
359
360do_flush_sync:
 361	/* If the device could not possibly have put dirty data into
362	 * the streaming cache, no flush-flag synchronization needs
363	 * to be performed.
364	 */
365	if (direction == DMA_TO_DEVICE)
366		return;
367
368	STC_FLUSHFLAG_INIT(strbuf);
369	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
370	(void) iommu_read(iommu->write_complete_reg);
371
372	limit = 100000;
373	while (!STC_FLUSHFLAG_SET(strbuf)) {
374		limit--;
375		if (!limit)
376			break;
377		udelay(1);
378		rmb();
379	}
380	if (!limit)
381		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
382		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
383		       vaddr, ctx, npages);
384}
385
386static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
387			      size_t sz, enum dma_data_direction direction,
388			      struct dma_attrs *attrs)
389{
390	struct iommu *iommu;
391	struct strbuf *strbuf;
392	iopte_t *base;
393	unsigned long flags, npages, ctx, i;
394
395	if (unlikely(direction == DMA_NONE)) {
396		if (printk_ratelimit())
397			WARN_ON(1);
398		return;
399	}
400
401	iommu = dev->archdata.iommu;
402	strbuf = dev->archdata.stc;
403
404	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
405	npages >>= IO_PAGE_SHIFT;
406	base = iommu->page_table +
407		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
408	bus_addr &= IO_PAGE_MASK;
409
410	spin_lock_irqsave(&iommu->lock, flags);
411
412	/* Record the context, if any. */
413	ctx = 0;
414	if (iommu->iommu_ctxflush)
415		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
416
417	/* Step 1: Kick data out of streaming buffers if necessary. */
418	if (strbuf->strbuf_enabled)
419		strbuf_flush(strbuf, iommu, bus_addr, ctx,
420			     npages, direction);
421
422	/* Step 2: Clear out TSB entries. */
423	for (i = 0; i < npages; i++)
424		iopte_make_dummy(iommu, base + i);
 425
 426	iommu_free_ctx(iommu, ctx);
427	spin_unlock_irqrestore(&iommu->lock, flags);
428
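	/* The table range is freed after the lock is dropped; the
	 * generic allocator does its own pool locking.
	 */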
429	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
430}
431
432static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
433			 int nelems, enum dma_data_direction direction,
434			 struct dma_attrs *attrs)
435{
436	struct scatterlist *s, *outs, *segstart;
437	unsigned long flags, handle, prot, ctx;
438	dma_addr_t dma_next = 0, dma_addr;
439	unsigned int max_seg_size;
440	unsigned long seg_boundary_size;
441	int outcount, incount, i;
442	struct strbuf *strbuf;
443	struct iommu *iommu;
444	unsigned long base_shift;
445
446	BUG_ON(direction == DMA_NONE);
447
448	iommu = dev->archdata.iommu;
449	strbuf = dev->archdata.stc;
450	if (nelems == 0 || !iommu)
451		return 0;
452
453	spin_lock_irqsave(&iommu->lock, flags);
454
455	ctx = 0;
456	if (iommu->iommu_ctxflush)
457		ctx = iommu_alloc_ctx(iommu);
458
459	if (strbuf->strbuf_enabled)
460		prot = IOPTE_STREAMING(ctx);
461	else
462		prot = IOPTE_CONSISTENT(ctx);
463	if (direction != DMA_TO_DEVICE)
464		prot |= IOPTE_WRITE;
465
466	outs = s = segstart = &sglist[0];
467	outcount = 1;
468	incount = nelems;
469	handle = 0;
470
471	/* Init first segment length for backout at failure */
472	outs->dma_length = 0;
473
474	max_seg_size = dma_get_max_seg_size(dev);
475	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
476				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
477	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
478	for_each_sg(sglist, s, nelems, i) {
479		unsigned long paddr, npages, entry, out_entry = 0, slen;
480		iopte_t *base;
481
482		slen = s->length;
483		/* Sanity check */
484		if (slen == 0) {
485			dma_next = 0;
486			continue;
487		}
488		/* Allocate iommu entries for that segment */
489		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
490		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
491		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
492					      &handle, (unsigned long)(-1), 0);
493
494		/* Handle failure */
495		if (unlikely(entry == IOMMU_ERROR_CODE)) {
496			if (printk_ratelimit())
497				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
498				       " npages %lx\n", iommu, paddr, npages);
499			goto iommu_map_failed;
500		}
501
502		base = iommu->page_table + entry;
503
504		/* Convert entry to a dma_addr_t */
505		dma_addr = iommu->tbl.table_map_base +
506			(entry << IO_PAGE_SHIFT);
507		dma_addr |= (s->offset & ~IO_PAGE_MASK);
508
509		/* Insert into HW table */
510		paddr &= IO_PAGE_MASK;
511		while (npages--) {
512			iopte_val(*base) = prot | paddr;
513			base++;
514			paddr += IO_PAGE_SIZE;
515		}
516
517		/* If we are in an open segment, try merging */
518		if (segstart != s) {
519			/* We cannot merge if:
520			 * - allocated dma_addr isn't contiguous to previous allocation
521			 */
522			if ((dma_addr != dma_next) ||
523			    (outs->dma_length + s->length > max_seg_size) ||
524			    (is_span_boundary(out_entry, base_shift,
525					      seg_boundary_size, outs, s))) {
526				/* Can't merge: create a new segment */
527				segstart = s;
528				outcount++;
529				outs = sg_next(outs);
530			} else {
531				outs->dma_length += s->length;
532			}
533		}
534
535		if (segstart == s) {
536			/* This is a new segment, fill entries */
537			outs->dma_address = dma_addr;
538			outs->dma_length = slen;
539			out_entry = entry;
540		}
541
542		/* Calculate next page pointer for contiguous check */
543		dma_next = dma_addr + slen;
544	}
545
546	spin_unlock_irqrestore(&iommu->lock, flags);
547
548	if (outcount < incount) {
549		outs = sg_next(outs);
550		outs->dma_address = DMA_ERROR_CODE;
551		outs->dma_length = 0;
552	}
553
554	return outcount;
555
556iommu_map_failed:
557	for_each_sg(sglist, s, nelems, i) {
558		if (s->dma_length != 0) {
559			unsigned long vaddr, npages, entry, j;
560			iopte_t *base;
561
562			vaddr = s->dma_address & IO_PAGE_MASK;
563			npages = iommu_num_pages(s->dma_address, s->dma_length,
 564						 IO_PAGE_SIZE);
565
566			entry = (vaddr - iommu->tbl.table_map_base)
567				>> IO_PAGE_SHIFT;
568			base = iommu->page_table + entry;
569
570			for (j = 0; j < npages; j++)
571				iopte_make_dummy(iommu, base + j);
572
573			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
574					     IOMMU_ERROR_CODE);
575
576			s->dma_address = DMA_ERROR_CODE;
577			s->dma_length = 0;
578		}
579		if (s == outs)
580			break;
581	}
582	spin_unlock_irqrestore(&iommu->lock, flags);
583
584	return 0;
585}
586
587/* If contexts are being used, they are the same in all of the mappings
588 * we make for a particular SG.
589 */
590static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
591{
592	unsigned long ctx = 0;
593
594	if (iommu->iommu_ctxflush) {
595		iopte_t *base;
596		u32 bus_addr;
597		struct iommu_map_table *tbl = &iommu->tbl;
598
599		bus_addr = sg->dma_address & IO_PAGE_MASK;
600		base = iommu->page_table +
601			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
602
603		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
604	}
605	return ctx;
606}
607
608static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
609			    int nelems, enum dma_data_direction direction,
610			    struct dma_attrs *attrs)
611{
612	unsigned long flags, ctx;
613	struct scatterlist *sg;
614	struct strbuf *strbuf;
615	struct iommu *iommu;
616
617	BUG_ON(direction == DMA_NONE);
618
619	iommu = dev->archdata.iommu;
620	strbuf = dev->archdata.stc;
621
622	ctx = fetch_sg_ctx(iommu, sglist);
623
624	spin_lock_irqsave(&iommu->lock, flags);
625
626	sg = sglist;
627	while (nelems--) {
628		dma_addr_t dma_handle = sg->dma_address;
629		unsigned int len = sg->dma_length;
630		unsigned long npages, entry;
631		iopte_t *base;
632		int i;
633
634		if (!len)
635			break;
 636		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
637
638		entry = ((dma_handle - iommu->tbl.table_map_base)
639			 >> IO_PAGE_SHIFT);
640		base = iommu->page_table + entry;
641
642		dma_handle &= IO_PAGE_MASK;
643		if (strbuf->strbuf_enabled)
644			strbuf_flush(strbuf, iommu, dma_handle, ctx,
645				     npages, direction);
646
647		for (i = 0; i < npages; i++)
648			iopte_make_dummy(iommu, base + i);
649
650		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
651				     IOMMU_ERROR_CODE);
652		sg = sg_next(sg);
653	}
654
655	iommu_free_ctx(iommu, ctx);
656
657	spin_unlock_irqrestore(&iommu->lock, flags);
658}
659
660static void dma_4u_sync_single_for_cpu(struct device *dev,
661				       dma_addr_t bus_addr, size_t sz,
662				       enum dma_data_direction direction)
663{
664	struct iommu *iommu;
665	struct strbuf *strbuf;
666	unsigned long flags, ctx, npages;
667
668	iommu = dev->archdata.iommu;
669	strbuf = dev->archdata.stc;
670
671	if (!strbuf->strbuf_enabled)
672		return;
673
674	spin_lock_irqsave(&iommu->lock, flags);
675
676	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
677	npages >>= IO_PAGE_SHIFT;
678	bus_addr &= IO_PAGE_MASK;
679
680	/* Step 1: Record the context, if any. */
681	ctx = 0;
682	if (iommu->iommu_ctxflush &&
683	    strbuf->strbuf_ctxflush) {
684		iopte_t *iopte;
685		struct iommu_map_table *tbl = &iommu->tbl;
686
687		iopte = iommu->page_table +
688			((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
689		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
690	}
691
692	/* Step 2: Kick data out of streaming buffers. */
693	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
694
695	spin_unlock_irqrestore(&iommu->lock, flags);
696}
697
698static void dma_4u_sync_sg_for_cpu(struct device *dev,
699				   struct scatterlist *sglist, int nelems,
700				   enum dma_data_direction direction)
701{
702	struct iommu *iommu;
703	struct strbuf *strbuf;
704	unsigned long flags, ctx, npages, i;
705	struct scatterlist *sg, *sgprv;
706	u32 bus_addr;
707
708	iommu = dev->archdata.iommu;
709	strbuf = dev->archdata.stc;
710
711	if (!strbuf->strbuf_enabled)
712		return;
713
714	spin_lock_irqsave(&iommu->lock, flags);
715
716	/* Step 1: Record the context, if any. */
717	ctx = 0;
718	if (iommu->iommu_ctxflush &&
719	    strbuf->strbuf_ctxflush) {
720		iopte_t *iopte;
721		struct iommu_map_table *tbl = &iommu->tbl;
722
723		iopte = iommu->page_table + ((sglist[0].dma_address -
724			tbl->table_map_base) >> IO_PAGE_SHIFT);
725		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
726	}
727
728	/* Step 2: Kick data out of streaming buffers. */
729	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
730	sgprv = NULL;
731	for_each_sg(sglist, sg, nelems, i) {
732		if (sg->dma_length == 0)
733			break;
734		sgprv = sg;
735	}
736
737	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
738		  - bus_addr) >> IO_PAGE_SHIFT;
739	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
740
741	spin_unlock_irqrestore(&iommu->lock, flags);
742}
743
744static struct dma_map_ops sun4u_dma_ops = {
745	.alloc			= dma_4u_alloc_coherent,
746	.free			= dma_4u_free_coherent,
747	.map_page		= dma_4u_map_page,
748	.unmap_page		= dma_4u_unmap_page,
749	.map_sg			= dma_4u_map_sg,
750	.unmap_sg		= dma_4u_unmap_sg,
751	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
752	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
753};
754
755struct dma_map_ops *dma_ops = &sun4u_dma_ops;
756EXPORT_SYMBOL(dma_ops);
 757
758int dma_supported(struct device *dev, u64 device_mask)
759{
760	struct iommu *iommu = dev->archdata.iommu;
761	u64 dma_addr_mask = iommu->dma_addr_mask;
762
763	if (device_mask >= (1UL << 32UL))
764		return 0;
765
766	if ((device_mask & dma_addr_mask) == dma_addr_mask)
767		return 1;
768
769#ifdef CONFIG_PCI
770	if (dev_is_pci(dev))
771		return pci64_dma_supported(to_pci_dev(dev), device_mask);
772#endif
773
774	return 0;
775}
776EXPORT_SYMBOL(dma_supported);