v3.1
  1/* iommu.c: Generic sparc64 IOMMU support.
  2 *
  3 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  4 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/module.h>
  9#include <linux/slab.h>
 10#include <linux/delay.h>
 11#include <linux/device.h>
 12#include <linux/dma-mapping.h>
 13#include <linux/errno.h>
 14#include <linux/iommu-helper.h>
 15#include <linux/bitmap.h>
 16
 17#ifdef CONFIG_PCI
 18#include <linux/pci.h>
 19#endif
 20
 21#include <asm/iommu.h>
 22
 23#include "iommu_common.h"
 24
 25#define STC_CTXMATCH_ADDR(STC, CTX)	\
 26	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
 27#define STC_FLUSHFLAG_INIT(STC) \
 28	(*((STC)->strbuf_flushflag) = 0UL)
 29#define STC_FLUSHFLAG_SET(STC) \
 30	(*((STC)->strbuf_flushflag) != 0UL)
 31
 32#define iommu_read(__reg) \
 33({	u64 __ret; \
 34	__asm__ __volatile__("ldxa [%1] %2, %0" \
 35			     : "=r" (__ret) \
 36			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
 37			     : "memory"); \
 38	__ret; \
 39})
 40#define iommu_write(__reg, __val) \
 41	__asm__ __volatile__("stxa %0, [%1] %2" \
 42			     : /* no outputs */ \
 43			     : "r" (__val), "r" (__reg), \
 44			       "i" (ASI_PHYS_BYPASS_EC_E))
 45
 46/* Must be invoked under the IOMMU lock. */
 47static void iommu_flushall(struct iommu *iommu)
 48{
 49	if (iommu->iommu_flushinv) {
 50		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 51	} else {
 52		unsigned long tag;
 53		int entry;
 54
 55		tag = iommu->iommu_tags;
 56		for (entry = 0; entry < 16; entry++) {
 57			iommu_write(tag, 0);
 58			tag += 8;
 59		}
 60
 61		/* Ensure completion of previous PIO writes. */
 62		(void) iommu_read(iommu->write_complete_reg);
 63	}
 64}
 65
 66#define IOPTE_CONSISTENT(CTX) \
 67	(IOPTE_VALID | IOPTE_CACHE | \
 68	 (((CTX) << 47) & IOPTE_CONTEXT))
 69
 70#define IOPTE_STREAMING(CTX) \
 71	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
 72
 73/* Existing mappings are never marked invalid, instead they
 74 * are pointed to a dummy page.
 75 */
 76#define IOPTE_IS_DUMMY(iommu, iopte)	\
 77	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 78
 79static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 80{
 81	unsigned long val = iopte_val(*iopte);
 82
 83	val &= ~IOPTE_PAGE;
 84	val |= iommu->dummy_page_pa;
 85
 86	iopte_val(*iopte) = val;
 87}
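
The comment above describes the dummy-page convention: the IOPTE of a torn-down mapping is never cleared, it is simply re-pointed at a harmless scratch page so a stale or errant DVMA access lands somewhere innocuous. Below is a minimal standalone model of that idea; the flag bit and page-frame mask are made-up illustrative values (the real masks live in asm/iommu.h), and everything prefixed model_ is hypothetical.

/* Standalone model of the dummy-page trick, not the kernel code. */
#include <stdint.h>
#include <stdio.h>

#define MODEL_IOPTE_VALID	0x1ULL
#define MODEL_IOPTE_PAGE	0xffffffffe000ULL	/* assumed page-frame field */

static uint64_t dummy_page_pa = 0x2000;			/* one 8K scratch page */

static void model_iopte_make_dummy(uint64_t *iopte)
{
	uint64_t val = *iopte;

	/* Keep the flag bits but swap the page frame for the dummy page. */
	val &= ~MODEL_IOPTE_PAGE;
	val |= dummy_page_pa;
	*iopte = val;
}

int main(void)
{
	uint64_t iopte = MODEL_IOPTE_VALID | 0x40be000;	/* a live mapping */

	model_iopte_make_dummy(&iopte);
	printf("iopte now %#llx (points at dummy page: %d)\n",
	       (unsigned long long)iopte,
	       (iopte & MODEL_IOPTE_PAGE) == dummy_page_pa);
	return 0;
}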
 88
 89/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 90 * facility it must all be done in one pass while under the iommu lock.
 91 *
 92 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 93 * over the entire page table doing allocations.  Therefore we only ever advance
 94 * the hint and cannot backtrack it.
 95 */
 96unsigned long iommu_range_alloc(struct device *dev,
 97				struct iommu *iommu,
 98				unsigned long npages,
 99				unsigned long *handle)
100{
101	unsigned long n, end, start, limit, boundary_size;
102	struct iommu_arena *arena = &iommu->arena;
103	int pass = 0;
104
105	/* This allocator was derived from x86_64's bit string search */
106
107	/* Sanity check */
108	if (unlikely(npages == 0)) {
109		if (printk_ratelimit())
110			WARN_ON(1);
111		return DMA_ERROR_CODE;
112	}
113
114	if (handle && *handle)
115		start = *handle;
116	else
117		start = arena->hint;
118
119	limit = arena->limit;
120
121	/* The case below can happen if we have a small segment appended
122	 * to a large, or when the previous alloc was at the very end of
123	 * the available space. If so, go back to the beginning and flush.
124	 */
125	if (start >= limit) {
126		start = 0;
127		if (iommu->flush_all)
128			iommu->flush_all(iommu);
129	}
130
131 again:
132
133	if (dev)
134		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
135				      1 << IO_PAGE_SHIFT);
136	else
137		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
138
139	n = iommu_area_alloc(arena->map, limit, start, npages,
140			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
141			     boundary_size >> IO_PAGE_SHIFT, 0);
142	if (n == -1) {
143		if (likely(pass < 1)) {
144			/* First failure, rescan from the beginning.  */
145			start = 0;
146			if (iommu->flush_all)
147				iommu->flush_all(iommu);
148			pass++;
149			goto again;
150		} else {
151			/* Second failure, give up */
152			return DMA_ERROR_CODE;
153		}
154	}
155
156	end = n + npages;
157
158	arena->hint = end;
159
160	/* Update handle for SG allocations */
161	if (handle)
162		*handle = end;
163
164	return n;
165}
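
As the block comment before iommu_range_alloc() explains, allocation is a bitmap search that starts at a moving hint and wraps back to the start (flushing the IOMMU on sun4u) at most once per request. A small standalone model of that two-pass policy follows, with a byte-per-entry map standing in for the kernel's bitmap and iommu_area_alloc() helpers; all model_ names are hypothetical.

/* Standalone model of the hint + single-wrap allocation policy. */
#include <stdio.h>
#include <string.h>

#define MODEL_ENTRIES	16

static unsigned char map[MODEL_ENTRIES];	/* 1 == entry in use */
static unsigned long hint;

static long model_range_alloc(unsigned long npages)
{
	unsigned long start = hint;
	int pass = 0;

again:
	for (unsigned long n = start; n + npages <= MODEL_ENTRIES; n++) {
		unsigned long i;

		for (i = 0; i < npages && !map[n + i]; i++)
			;
		if (i == npages) {		/* found a free run */
			memset(&map[n], 1, npages);
			hint = n + npages;	/* advance, never backtrack */
			return n;
		}
	}
	if (pass++ < 1) {
		/* First failure: flush the IOMMU (elided) and rescan once
		 * from the beginning. */
		start = 0;
		goto again;
	}
	return -1;				/* second failure: give up */
}

int main(void)
{
	long a = model_range_alloc(10);
	long b = model_range_alloc(10);	/* fails even after the wrap */
	long c = model_range_alloc(4);

	printf("%ld %ld %ld\n", a, b, c);	/* 0 -1 10 */
	return 0;
}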
166
167void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
168{
169	struct iommu_arena *arena = &iommu->arena;
170	unsigned long entry;
171
172	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
173
174	bitmap_clear(arena->map, entry, npages);
175}
176
177int iommu_table_init(struct iommu *iommu, int tsbsize,
178		     u32 dma_offset, u32 dma_addr_mask,
179		     int numa_node)
180{
181	unsigned long i, order, sz, num_tsb_entries;
182	struct page *page;
183
184	num_tsb_entries = tsbsize / sizeof(iopte_t);
185
186	/* Setup initial software IOMMU state. */
187	spin_lock_init(&iommu->lock);
188	iommu->ctx_lowest_free = 1;
189	iommu->page_table_map_base = dma_offset;
190	iommu->dma_addr_mask = dma_addr_mask;
191
192	/* Allocate and initialize the free area map.  */
193	sz = num_tsb_entries / 8;
194	sz = (sz + 7UL) & ~7UL;
195	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
196	if (!iommu->arena.map) {
197		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
198		return -ENOMEM;
199	}
200	memset(iommu->arena.map, 0, sz);
201	iommu->arena.limit = num_tsb_entries;
202
203	if (tlb_type != hypervisor)
204		iommu->flush_all = iommu_flushall;
205
206	/* Allocate and initialize the dummy page which we
207	 * set inactive IO PTEs to point to.
208	 */
209	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
210	if (!page) {
211		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
212		goto out_free_map;
213	}
214	iommu->dummy_page = (unsigned long) page_address(page);
215	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
216	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
217
218	/* Now allocate and setup the IOMMU page table itself.  */
219	order = get_order(tsbsize);
220	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
221	if (!page) {
222		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
223		goto out_free_dummy_page;
224	}
225	iommu->page_table = (iopte_t *)page_address(page);
226
227	for (i = 0; i < num_tsb_entries; i++)
228		iopte_make_dummy(iommu, &iommu->page_table[i]);
229
230	return 0;
231
232out_free_dummy_page:
233	free_page(iommu->dummy_page);
234	iommu->dummy_page = 0UL;
235
236out_free_map:
237	kfree(iommu->arena.map);
238	iommu->arena.map = NULL;
239
240	return -ENOMEM;
241}
242
243static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
244				    unsigned long npages)
245{
246	unsigned long entry;
247
248	entry = iommu_range_alloc(dev, iommu, npages, NULL);
249	if (unlikely(entry == DMA_ERROR_CODE))
250		return NULL;
251
252	return iommu->page_table + entry;
253}
254
255static int iommu_alloc_ctx(struct iommu *iommu)
256{
257	int lowest = iommu->ctx_lowest_free;
258	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
259
260	if (unlikely(n == IOMMU_NUM_CTXS)) {
261		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
262		if (unlikely(n == lowest)) {
263			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
264			n = 0;
265		}
266	}
267	if (n)
268		__set_bit(n, iommu->ctx_bitmap);
269
270	return n;
271}
272
273static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
274{
275	if (likely(ctx)) {
276		__clear_bit(ctx, iommu->ctx_bitmap);
277		if (ctx < iommu->ctx_lowest_free)
278			iommu->ctx_lowest_free = ctx;
279	}
280}
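
iommu_alloc_ctx() hands out streaming-buffer context numbers by scanning a bitmap upward from the lowest recently freed context and wrapping once; context 0 is reserved to mean "no context", and only iommu_free_ctx() ever lowers the hint. A standalone sketch of that search order, with a plain array standing in for ctx_bitmap (all model_ names are hypothetical):

/* Standalone model of context number allocation; 0 means "no context". */
#include <stdio.h>

#define MODEL_NUM_CTXS	8

static unsigned char ctx_used[MODEL_NUM_CTXS];
static int ctx_lowest_free = 1;

static int model_alloc_ctx(void)
{
	int lowest = ctx_lowest_free;
	int n;

	for (n = lowest; n < MODEL_NUM_CTXS && ctx_used[n]; n++)
		;
	if (n == MODEL_NUM_CTXS) {
		/* Wrap and rescan below the hint; if nothing is free there
		 * either, fall back to "no context". */
		for (n = 1; n < lowest && ctx_used[n]; n++)
			;
		if (n == lowest)
			n = 0;
	}
	if (n)
		ctx_used[n] = 1;
	return n;
}

static void model_free_ctx(int ctx)
{
	if (ctx) {
		ctx_used[ctx] = 0;
		if (ctx < ctx_lowest_free)
			ctx_lowest_free = ctx;
	}
}

int main(void)
{
	int a = model_alloc_ctx();
	int b = model_alloc_ctx();

	model_free_ctx(a);
	printf("%d %d %d\n", a, b, model_alloc_ctx());	/* 1 2 1 */
	return 0;
}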
281
282static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
283				   dma_addr_t *dma_addrp, gfp_t gfp)
284{
285	unsigned long flags, order, first_page;
286	struct iommu *iommu;
287	struct page *page;
288	int npages, nid;
289	iopte_t *iopte;
290	void *ret;
291
292	size = IO_PAGE_ALIGN(size);
293	order = get_order(size);
294	if (order >= 10)
295		return NULL;
296
297	nid = dev->archdata.numa_node;
298	page = alloc_pages_node(nid, gfp, order);
299	if (unlikely(!page))
300		return NULL;
301
302	first_page = (unsigned long) page_address(page);
303	memset((char *)first_page, 0, PAGE_SIZE << order);
304
305	iommu = dev->archdata.iommu;
306
307	spin_lock_irqsave(&iommu->lock, flags);
308	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
309	spin_unlock_irqrestore(&iommu->lock, flags);
310
311	if (unlikely(iopte == NULL)) {
312		free_pages(first_page, order);
313		return NULL;
314	}
315
316	*dma_addrp = (iommu->page_table_map_base +
317		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
318	ret = (void *) first_page;
319	npages = size >> IO_PAGE_SHIFT;
320	first_page = __pa(first_page);
321	while (npages--) {
322		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
323				     IOPTE_WRITE |
324				     (first_page & IOPTE_PAGE));
325		iopte++;
326		first_page += IO_PAGE_SIZE;
327	}
328
329	return ret;
330}
331
332static void dma_4u_free_coherent(struct device *dev, size_t size,
333				 void *cpu, dma_addr_t dvma)
334{
335	struct iommu *iommu;
336	unsigned long flags, order, npages;
337
338	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
339	iommu = dev->archdata.iommu;
340
341	spin_lock_irqsave(&iommu->lock, flags);
342
343	iommu_range_free(iommu, dvma, npages);
344
345	spin_unlock_irqrestore(&iommu->lock, flags);
346
347	order = get_order(size);
348	if (order < 10)
349		free_pages((unsigned long)cpu, order);
350}
351
352static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
353				  unsigned long offset, size_t sz,
354				  enum dma_data_direction direction,
355				  struct dma_attrs *attrs)
356{
357	struct iommu *iommu;
358	struct strbuf *strbuf;
359	iopte_t *base;
360	unsigned long flags, npages, oaddr;
361	unsigned long i, base_paddr, ctx;
362	u32 bus_addr, ret;
363	unsigned long iopte_protection;
364
365	iommu = dev->archdata.iommu;
366	strbuf = dev->archdata.stc;
367
368	if (unlikely(direction == DMA_NONE))
369		goto bad_no_ctx;
370
371	oaddr = (unsigned long)(page_address(page) + offset);
372	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
373	npages >>= IO_PAGE_SHIFT;
374
375	spin_lock_irqsave(&iommu->lock, flags);
376	base = alloc_npages(dev, iommu, npages);
377	ctx = 0;
378	if (iommu->iommu_ctxflush)
379		ctx = iommu_alloc_ctx(iommu);
380	spin_unlock_irqrestore(&iommu->lock, flags);
381
382	if (unlikely(!base))
383		goto bad;
384
385	bus_addr = (iommu->page_table_map_base +
386		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
387	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
388	base_paddr = __pa(oaddr & IO_PAGE_MASK);
389	if (strbuf->strbuf_enabled)
390		iopte_protection = IOPTE_STREAMING(ctx);
391	else
392		iopte_protection = IOPTE_CONSISTENT(ctx);
393	if (direction != DMA_TO_DEVICE)
394		iopte_protection |= IOPTE_WRITE;
395
396	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
397		iopte_val(*base) = iopte_protection | base_paddr;
398
399	return ret;
400
401bad:
402	iommu_free_ctx(iommu, ctx);
403bad_no_ctx:
404	if (printk_ratelimit())
405		WARN_ON(1);
406	return DMA_ERROR_CODE;
407}
408
409static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
410			 u32 vaddr, unsigned long ctx, unsigned long npages,
411			 enum dma_data_direction direction)
412{
413	int limit;
414
415	if (strbuf->strbuf_ctxflush &&
416	    iommu->iommu_ctxflush) {
417		unsigned long matchreg, flushreg;
418		u64 val;
419
420		flushreg = strbuf->strbuf_ctxflush;
421		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
422
423		iommu_write(flushreg, ctx);
424		val = iommu_read(matchreg);
425		val &= 0xffff;
426		if (!val)
427			goto do_flush_sync;
428
429		while (val) {
430			if (val & 0x1)
431				iommu_write(flushreg, ctx);
432			val >>= 1;
433		}
434		val = iommu_read(matchreg);
435		if (unlikely(val)) {
436			printk(KERN_WARNING "strbuf_flush: ctx flush "
437			       "timeout matchreg[%llx] ctx[%lx]\n",
438			       val, ctx);
439			goto do_page_flush;
440		}
441	} else {
442		unsigned long i;
443
444	do_page_flush:
445		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
446			iommu_write(strbuf->strbuf_pflush, vaddr);
447	}
448
449do_flush_sync:
450	/* If the device could not have possibly put dirty data into
451	 * the streaming cache, no flush-flag synchronization needs
452	 * to be performed.
453	 */
454	if (direction == DMA_TO_DEVICE)
455		return;
456
457	STC_FLUSHFLAG_INIT(strbuf);
458	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
459	(void) iommu_read(iommu->write_complete_reg);
460
461	limit = 100000;
462	while (!STC_FLUSHFLAG_SET(strbuf)) {
463		limit--;
464		if (!limit)
465			break;
466		udelay(1);
467		rmb();
468	}
469	if (!limit)
470		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
471		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
472		       vaddr, ctx, npages);
473}
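
The flush-flag synchronization at the end of strbuf_flush() is a plain memory handshake: the CPU zeroes a flag word, writes the flag's physical address to the flush-sync register, and then spins (with a bounded count) until the streaming buffer DMAs a non-zero value back into it. A standalone sketch of that handshake follows; fake_hardware_fsync() is a hypothetical stand-in for the hardware's completion write.

/* Standalone sketch of the flush-flag handshake, not the kernel code. */
#include <stdio.h>

static volatile unsigned long flushflag;

static void fake_hardware_fsync(void)
{
	/* The real STC DMAs a non-zero value to the flush-flag address once
	 * every dirty streaming-buffer line has been written back. */
	flushflag = 1;
}

int main(void)
{
	int limit = 100000;

	flushflag = 0;			/* STC_FLUSHFLAG_INIT() */
	fake_hardware_fsync();		/* iommu_write(strbuf_fsync, flushflag_pa) */

	while (!flushflag && --limit)	/* STC_FLUSHFLAG_SET() poll, bounded */
		;
	printf(limit ? "flush completed\n" : "flush flag timeout\n");
	return 0;
}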
474
475static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
476			      size_t sz, enum dma_data_direction direction,
477			      struct dma_attrs *attrs)
478{
479	struct iommu *iommu;
480	struct strbuf *strbuf;
481	iopte_t *base;
482	unsigned long flags, npages, ctx, i;
483
484	if (unlikely(direction == DMA_NONE)) {
485		if (printk_ratelimit())
486			WARN_ON(1);
487		return;
488	}
489
490	iommu = dev->archdata.iommu;
491	strbuf = dev->archdata.stc;
492
493	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
494	npages >>= IO_PAGE_SHIFT;
495	base = iommu->page_table +
496		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
497	bus_addr &= IO_PAGE_MASK;
498
499	spin_lock_irqsave(&iommu->lock, flags);
500
501	/* Record the context, if any. */
502	ctx = 0;
503	if (iommu->iommu_ctxflush)
504		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
505
506	/* Step 1: Kick data out of streaming buffers if necessary. */
507	if (strbuf->strbuf_enabled)
508		strbuf_flush(strbuf, iommu, bus_addr, ctx,
509			     npages, direction);
510
511	/* Step 2: Clear out TSB entries. */
512	for (i = 0; i < npages; i++)
513		iopte_make_dummy(iommu, base + i);
514
515	iommu_range_free(iommu, bus_addr, npages);
516
517	iommu_free_ctx(iommu, ctx);
518
519	spin_unlock_irqrestore(&iommu->lock, flags);
520}
521
522static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
523			 int nelems, enum dma_data_direction direction,
524			 struct dma_attrs *attrs)
525{
526	struct scatterlist *s, *outs, *segstart;
527	unsigned long flags, handle, prot, ctx;
528	dma_addr_t dma_next = 0, dma_addr;
529	unsigned int max_seg_size;
530	unsigned long seg_boundary_size;
531	int outcount, incount, i;
532	struct strbuf *strbuf;
533	struct iommu *iommu;
534	unsigned long base_shift;
535
536	BUG_ON(direction == DMA_NONE);
537
538	iommu = dev->archdata.iommu;
539	strbuf = dev->archdata.stc;
540	if (nelems == 0 || !iommu)
541		return 0;
542
543	spin_lock_irqsave(&iommu->lock, flags);
544
545	ctx = 0;
546	if (iommu->iommu_ctxflush)
547		ctx = iommu_alloc_ctx(iommu);
548
549	if (strbuf->strbuf_enabled)
550		prot = IOPTE_STREAMING(ctx);
551	else
552		prot = IOPTE_CONSISTENT(ctx);
553	if (direction != DMA_TO_DEVICE)
554		prot |= IOPTE_WRITE;
555
556	outs = s = segstart = &sglist[0];
557	outcount = 1;
558	incount = nelems;
559	handle = 0;
560
561	/* Init first segment length for backout at failure */
562	outs->dma_length = 0;
563
564	max_seg_size = dma_get_max_seg_size(dev);
565	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
566				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
567	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
568	for_each_sg(sglist, s, nelems, i) {
569		unsigned long paddr, npages, entry, out_entry = 0, slen;
570		iopte_t *base;
571
572		slen = s->length;
573		/* Sanity check */
574		if (slen == 0) {
575			dma_next = 0;
576			continue;
577		}
578		/* Allocate iommu entries for that segment */
579		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
580		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
581		entry = iommu_range_alloc(dev, iommu, npages, &handle);
582
583		/* Handle failure */
584		if (unlikely(entry == DMA_ERROR_CODE)) {
585			if (printk_ratelimit())
586				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
587				       " npages %lx\n", iommu, paddr, npages);
588			goto iommu_map_failed;
589		}
590
591		base = iommu->page_table + entry;
592
593		/* Convert entry to a dma_addr_t */
594		dma_addr = iommu->page_table_map_base +
595			(entry << IO_PAGE_SHIFT);
596		dma_addr |= (s->offset & ~IO_PAGE_MASK);
597
598		/* Insert into HW table */
599		paddr &= IO_PAGE_MASK;
600		while (npages--) {
601			iopte_val(*base) = prot | paddr;
602			base++;
603			paddr += IO_PAGE_SIZE;
604		}
605
606		/* If we are in an open segment, try merging */
607		if (segstart != s) {
608			/* We cannot merge if:
609			 * - allocated dma_addr isn't contiguous to previous allocation
610			 */
611			if ((dma_addr != dma_next) ||
612			    (outs->dma_length + s->length > max_seg_size) ||
613			    (is_span_boundary(out_entry, base_shift,
614					      seg_boundary_size, outs, s))) {
615				/* Can't merge: create a new segment */
616				segstart = s;
617				outcount++;
618				outs = sg_next(outs);
619			} else {
620				outs->dma_length += s->length;
621			}
622		}
623
624		if (segstart == s) {
625			/* This is a new segment, fill entries */
626			outs->dma_address = dma_addr;
627			outs->dma_length = slen;
628			out_entry = entry;
629		}
630
631		/* Calculate next page pointer for contiguous check */
632		dma_next = dma_addr + slen;
633	}
634
635	spin_unlock_irqrestore(&iommu->lock, flags);
636
637	if (outcount < incount) {
638		outs = sg_next(outs);
639		outs->dma_address = DMA_ERROR_CODE;
640		outs->dma_length = 0;
641	}
642
643	return outcount;
644
645iommu_map_failed:
646	for_each_sg(sglist, s, nelems, i) {
647		if (s->dma_length != 0) {
648			unsigned long vaddr, npages, entry, j;
649			iopte_t *base;
650
651			vaddr = s->dma_address & IO_PAGE_MASK;
652			npages = iommu_num_pages(s->dma_address, s->dma_length,
653						 IO_PAGE_SIZE);
654			iommu_range_free(iommu, vaddr, npages);
655
656			entry = (vaddr - iommu->page_table_map_base)
657				>> IO_PAGE_SHIFT;
658			base = iommu->page_table + entry;
659
660			for (j = 0; j < npages; j++)
661				iopte_make_dummy(iommu, base + j);
662
663			s->dma_address = DMA_ERROR_CODE;
664			s->dma_length = 0;
665		}
666		if (s == outs)
667			break;
668	}
669	spin_unlock_irqrestore(&iommu->lock, flags);
670
671	return 0;
672}
673
674/* If contexts are being used, they are the same in all of the mappings
675 * we make for a particular SG.
676 */
677static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
678{
679	unsigned long ctx = 0;
680
681	if (iommu->iommu_ctxflush) {
682		iopte_t *base;
683		u32 bus_addr;
684
685		bus_addr = sg->dma_address & IO_PAGE_MASK;
686		base = iommu->page_table +
687			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
688
689		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
690	}
691	return ctx;
692}
693
694static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
695			    int nelems, enum dma_data_direction direction,
696			    struct dma_attrs *attrs)
697{
698	unsigned long flags, ctx;
699	struct scatterlist *sg;
700	struct strbuf *strbuf;
701	struct iommu *iommu;
702
703	BUG_ON(direction == DMA_NONE);
704
705	iommu = dev->archdata.iommu;
706	strbuf = dev->archdata.stc;
707
708	ctx = fetch_sg_ctx(iommu, sglist);
709
710	spin_lock_irqsave(&iommu->lock, flags);
711
712	sg = sglist;
713	while (nelems--) {
714		dma_addr_t dma_handle = sg->dma_address;
715		unsigned int len = sg->dma_length;
716		unsigned long npages, entry;
717		iopte_t *base;
718		int i;
719
720		if (!len)
721			break;
722		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
723		iommu_range_free(iommu, dma_handle, npages);
724
725		entry = ((dma_handle - iommu->page_table_map_base)
726			 >> IO_PAGE_SHIFT);
727		base = iommu->page_table + entry;
728
729		dma_handle &= IO_PAGE_MASK;
730		if (strbuf->strbuf_enabled)
731			strbuf_flush(strbuf, iommu, dma_handle, ctx,
732				     npages, direction);
733
734		for (i = 0; i < npages; i++)
735			iopte_make_dummy(iommu, base + i);
736
737		sg = sg_next(sg);
738	}
739
740	iommu_free_ctx(iommu, ctx);
741
742	spin_unlock_irqrestore(&iommu->lock, flags);
743}
744
745static void dma_4u_sync_single_for_cpu(struct device *dev,
746				       dma_addr_t bus_addr, size_t sz,
747				       enum dma_data_direction direction)
748{
749	struct iommu *iommu;
750	struct strbuf *strbuf;
751	unsigned long flags, ctx, npages;
752
753	iommu = dev->archdata.iommu;
754	strbuf = dev->archdata.stc;
755
756	if (!strbuf->strbuf_enabled)
757		return;
758
759	spin_lock_irqsave(&iommu->lock, flags);
760
761	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
762	npages >>= IO_PAGE_SHIFT;
763	bus_addr &= IO_PAGE_MASK;
764
765	/* Step 1: Record the context, if any. */
766	ctx = 0;
767	if (iommu->iommu_ctxflush &&
768	    strbuf->strbuf_ctxflush) {
769		iopte_t *iopte;
770
771		iopte = iommu->page_table +
772			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
773		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
774	}
775
776	/* Step 2: Kick data out of streaming buffers. */
777	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
778
779	spin_unlock_irqrestore(&iommu->lock, flags);
780}
781
782static void dma_4u_sync_sg_for_cpu(struct device *dev,
783				   struct scatterlist *sglist, int nelems,
784				   enum dma_data_direction direction)
785{
786	struct iommu *iommu;
787	struct strbuf *strbuf;
788	unsigned long flags, ctx, npages, i;
789	struct scatterlist *sg, *sgprv;
790	u32 bus_addr;
791
792	iommu = dev->archdata.iommu;
793	strbuf = dev->archdata.stc;
794
795	if (!strbuf->strbuf_enabled)
796		return;
797
798	spin_lock_irqsave(&iommu->lock, flags);
799
800	/* Step 1: Record the context, if any. */
801	ctx = 0;
802	if (iommu->iommu_ctxflush &&
803	    strbuf->strbuf_ctxflush) {
804		iopte_t *iopte;
805
806		iopte = iommu->page_table +
807			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
808		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
809	}
810
811	/* Step 2: Kick data out of streaming buffers. */
812	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
813	sgprv = NULL;
814	for_each_sg(sglist, sg, nelems, i) {
815		if (sg->dma_length == 0)
816			break;
817		sgprv = sg;
818	}
819
820	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
821		  - bus_addr) >> IO_PAGE_SHIFT;
822	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
823
824	spin_unlock_irqrestore(&iommu->lock, flags);
825}
826
827static struct dma_map_ops sun4u_dma_ops = {
828	.alloc_coherent		= dma_4u_alloc_coherent,
829	.free_coherent		= dma_4u_free_coherent,
830	.map_page		= dma_4u_map_page,
831	.unmap_page		= dma_4u_unmap_page,
832	.map_sg			= dma_4u_map_sg,
833	.unmap_sg		= dma_4u_unmap_sg,
834	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
835	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
836};
837
838struct dma_map_ops *dma_ops = &sun4u_dma_ops;
839EXPORT_SYMBOL(dma_ops);
840
841extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
842
843int dma_supported(struct device *dev, u64 device_mask)
844{
845	struct iommu *iommu = dev->archdata.iommu;
846	u64 dma_addr_mask = iommu->dma_addr_mask;
847
848	if (device_mask >= (1UL << 32UL))
849		return 0;
850
851	if ((device_mask & dma_addr_mask) == dma_addr_mask)
852		return 1;
853
854#ifdef CONFIG_PCI
855	if (dev->bus == &pci_bus_type)
856		return pci64_dma_supported(to_pci_dev(dev), device_mask);
857#endif
858
859	return 0;
860}
861EXPORT_SYMBOL(dma_supported);
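
For orientation, a hedged sketch of how a driver reaches the handlers above through the generic DMA API; the device, page and length are hypothetical, and on sun4u these calls resolve to the dma_4u_* functions via the dma_ops pointer exported above.

/* Illustrative only: hypothetical driver code exercising the sun4u ops. */
#include <linux/dma-mapping.h>

static int example_do_dma(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;

	/* Ends up in dma_4u_map_page(): IOMMU entries are allocated and a
	 * streaming (or consistent) IOPTE is written for each IO page. */
	handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device to DMA into 'handle' and wait ... */

	/* Ends up in dma_4u_sync_single_for_cpu(): the streaming buffer is
	 * flushed so the CPU sees what the device wrote. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* Ends up in dma_4u_unmap_page(): the IOPTEs are re-pointed at the
	 * dummy page and the range is returned to the arena. */
	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}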
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2/* iommu.c: Generic sparc64 IOMMU support.
  3 *
  4 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  5 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  6 */
  7
  8#include <linux/kernel.h>
  9#include <linux/export.h>
 10#include <linux/slab.h>
 11#include <linux/delay.h>
 12#include <linux/device.h>
 13#include <linux/dma-map-ops.h>
 14#include <linux/errno.h>
 15#include <linux/iommu-helper.h>
 16#include <linux/bitmap.h>
 17#include <asm/iommu-common.h>
 18
 19#ifdef CONFIG_PCI
 20#include <linux/pci.h>
 21#endif
 22
 23#include <asm/iommu.h>
 24
 25#include "iommu_common.h"
 26#include "kernel.h"
 27
 28#define STC_CTXMATCH_ADDR(STC, CTX)	\
 29	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
 30#define STC_FLUSHFLAG_INIT(STC) \
 31	(*((STC)->strbuf_flushflag) = 0UL)
 32#define STC_FLUSHFLAG_SET(STC) \
 33	(*((STC)->strbuf_flushflag) != 0UL)
 34
 35#define iommu_read(__reg) \
 36({	u64 __ret; \
 37	__asm__ __volatile__("ldxa [%1] %2, %0" \
 38			     : "=r" (__ret) \
 39			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
 40			     : "memory"); \
 41	__ret; \
 42})
 43#define iommu_write(__reg, __val) \
 44	__asm__ __volatile__("stxa %0, [%1] %2" \
 45			     : /* no outputs */ \
 46			     : "r" (__val), "r" (__reg), \
 47			       "i" (ASI_PHYS_BYPASS_EC_E))
 48
 49/* Must be invoked under the IOMMU lock. */
 50static void iommu_flushall(struct iommu_map_table *iommu_map_table)
 51{
 52	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
 53	if (iommu->iommu_flushinv) {
 54		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 55	} else {
 56		unsigned long tag;
 57		int entry;
 58
 59		tag = iommu->iommu_tags;
 60		for (entry = 0; entry < 16; entry++) {
 61			iommu_write(tag, 0);
 62			tag += 8;
 63		}
 64
 65		/* Ensure completion of previous PIO writes. */
 66		(void) iommu_read(iommu->write_complete_reg);
 67	}
 68}
 69
 70#define IOPTE_CONSISTENT(CTX) \
 71	(IOPTE_VALID | IOPTE_CACHE | \
 72	 (((CTX) << 47) & IOPTE_CONTEXT))
 73
 74#define IOPTE_STREAMING(CTX) \
 75	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
 76
 77/* Existing mappings are never marked invalid, instead they
 78 * are pointed to a dummy page.
 79 */
 80#define IOPTE_IS_DUMMY(iommu, iopte)	\
 81	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 82
 83static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 84{
 85	unsigned long val = iopte_val(*iopte);
 86
 87	val &= ~IOPTE_PAGE;
 88	val |= iommu->dummy_page_pa;
 89
 90	iopte_val(*iopte) = val;
 91}
 92
 93int iommu_table_init(struct iommu *iommu, int tsbsize,
 94		     u32 dma_offset, u32 dma_addr_mask,
 95		     int numa_node)
 96{
 97	unsigned long i, order, sz, num_tsb_entries;
 98	struct page *page;
 99
100	num_tsb_entries = tsbsize / sizeof(iopte_t);
101
102	/* Setup initial software IOMMU state. */
103	spin_lock_init(&iommu->lock);
104	iommu->ctx_lowest_free = 1;
105	iommu->tbl.table_map_base = dma_offset;
106	iommu->dma_addr_mask = dma_addr_mask;
107
108	/* Allocate and initialize the free area map.  */
109	sz = num_tsb_entries / 8;
110	sz = (sz + 7UL) & ~7UL;
111	iommu->tbl.map = kzalloc_node(sz, GFP_KERNEL, numa_node);
112	if (!iommu->tbl.map)
113		return -ENOMEM;
114
115	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
116			    (tlb_type != hypervisor ? iommu_flushall : NULL),
117			    false, 1, false);
118
119	/* Allocate and initialize the dummy page which we
120	 * set inactive IO PTEs to point to.
121	 */
122	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
123	if (!page) {
124		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
125		goto out_free_map;
126	}
127	iommu->dummy_page = (unsigned long) page_address(page);
128	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
129	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
130
131	/* Now allocate and setup the IOMMU page table itself.  */
132	order = get_order(tsbsize);
133	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
134	if (!page) {
135		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
136		goto out_free_dummy_page;
137	}
138	iommu->page_table = (iopte_t *)page_address(page);
139
140	for (i = 0; i < num_tsb_entries; i++)
141		iopte_make_dummy(iommu, &iommu->page_table[i]);
142
143	return 0;
144
145out_free_dummy_page:
146	free_page(iommu->dummy_page);
147	iommu->dummy_page = 0UL;
148
149out_free_map:
150	kfree(iommu->tbl.map);
151	iommu->tbl.map = NULL;
152
153	return -ENOMEM;
154}
155
156static inline iopte_t *alloc_npages(struct device *dev,
157				    struct iommu *iommu,
158				    unsigned long npages)
159{
160	unsigned long entry;
161
162	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
163				      (unsigned long)(-1), 0);
164	if (unlikely(entry == IOMMU_ERROR_CODE))
165		return NULL;
166
167	return iommu->page_table + entry;
168}
169
170static int iommu_alloc_ctx(struct iommu *iommu)
171{
172	int lowest = iommu->ctx_lowest_free;
173	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
174
175	if (unlikely(n == IOMMU_NUM_CTXS)) {
176		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
177		if (unlikely(n == lowest)) {
178			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
179			n = 0;
180		}
181	}
182	if (n)
183		__set_bit(n, iommu->ctx_bitmap);
184
185	return n;
186}
187
188static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
189{
190	if (likely(ctx)) {
191		__clear_bit(ctx, iommu->ctx_bitmap);
192		if (ctx < iommu->ctx_lowest_free)
193			iommu->ctx_lowest_free = ctx;
194	}
195}
196
197static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
198				   dma_addr_t *dma_addrp, gfp_t gfp,
199				   unsigned long attrs)
200{
201	unsigned long order, first_page;
202	struct iommu *iommu;
203	struct page *page;
204	int npages, nid;
205	iopte_t *iopte;
206	void *ret;
207
208	size = IO_PAGE_ALIGN(size);
209	order = get_order(size);
210	if (order >= 10)
211		return NULL;
212
213	nid = dev->archdata.numa_node;
214	page = alloc_pages_node(nid, gfp, order);
215	if (unlikely(!page))
216		return NULL;
217
218	first_page = (unsigned long) page_address(page);
219	memset((char *)first_page, 0, PAGE_SIZE << order);
220
221	iommu = dev->archdata.iommu;
222
223	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
224
225	if (unlikely(iopte == NULL)) {
226		free_pages(first_page, order);
227		return NULL;
228	}
229
230	*dma_addrp = (iommu->tbl.table_map_base +
231		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
232	ret = (void *) first_page;
233	npages = size >> IO_PAGE_SHIFT;
234	first_page = __pa(first_page);
235	while (npages--) {
236		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
237				     IOPTE_WRITE |
238				     (first_page & IOPTE_PAGE));
239		iopte++;
240		first_page += IO_PAGE_SIZE;
241	}
242
243	return ret;
244}
245
246static void dma_4u_free_coherent(struct device *dev, size_t size,
247				 void *cpu, dma_addr_t dvma,
248				 unsigned long attrs)
249{
250	struct iommu *iommu;
251	unsigned long order, npages;
252
253	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
254	iommu = dev->archdata.iommu;
255
256	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
257
258	order = get_order(size);
259	if (order < 10)
260		free_pages((unsigned long)cpu, order);
261}
262
263static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
264				  unsigned long offset, size_t sz,
265				  enum dma_data_direction direction,
266				  unsigned long attrs)
267{
268	struct iommu *iommu;
269	struct strbuf *strbuf;
270	iopte_t *base;
271	unsigned long flags, npages, oaddr;
272	unsigned long i, base_paddr, ctx;
273	u32 bus_addr, ret;
274	unsigned long iopte_protection;
275
276	iommu = dev->archdata.iommu;
277	strbuf = dev->archdata.stc;
278
279	if (unlikely(direction == DMA_NONE))
280		goto bad_no_ctx;
281
282	oaddr = (unsigned long)(page_address(page) + offset);
283	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
284	npages >>= IO_PAGE_SHIFT;
285
286	base = alloc_npages(dev, iommu, npages);
287	spin_lock_irqsave(&iommu->lock, flags);
288	ctx = 0;
289	if (iommu->iommu_ctxflush)
290		ctx = iommu_alloc_ctx(iommu);
291	spin_unlock_irqrestore(&iommu->lock, flags);
292
293	if (unlikely(!base))
294		goto bad;
295
296	bus_addr = (iommu->tbl.table_map_base +
297		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
298	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
299	base_paddr = __pa(oaddr & IO_PAGE_MASK);
300	if (strbuf->strbuf_enabled)
301		iopte_protection = IOPTE_STREAMING(ctx);
302	else
303		iopte_protection = IOPTE_CONSISTENT(ctx);
304	if (direction != DMA_TO_DEVICE)
305		iopte_protection |= IOPTE_WRITE;
306
307	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
308		iopte_val(*base) = iopte_protection | base_paddr;
309
310	return ret;
311
312bad:
313	iommu_free_ctx(iommu, ctx);
314bad_no_ctx:
315	if (printk_ratelimit())
316		WARN_ON(1);
317	return DMA_MAPPING_ERROR;
318}
319
320static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
321			 u32 vaddr, unsigned long ctx, unsigned long npages,
322			 enum dma_data_direction direction)
323{
324	int limit;
325
326	if (strbuf->strbuf_ctxflush &&
327	    iommu->iommu_ctxflush) {
328		unsigned long matchreg, flushreg;
329		u64 val;
330
331		flushreg = strbuf->strbuf_ctxflush;
332		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
333
334		iommu_write(flushreg, ctx);
335		val = iommu_read(matchreg);
336		val &= 0xffff;
337		if (!val)
338			goto do_flush_sync;
339
340		while (val) {
341			if (val & 0x1)
342				iommu_write(flushreg, ctx);
343			val >>= 1;
344		}
345		val = iommu_read(matchreg);
346		if (unlikely(val)) {
347			printk(KERN_WARNING "strbuf_flush: ctx flush "
348			       "timeout matchreg[%llx] ctx[%lx]\n",
349			       val, ctx);
350			goto do_page_flush;
351		}
352	} else {
353		unsigned long i;
354
355	do_page_flush:
356		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
357			iommu_write(strbuf->strbuf_pflush, vaddr);
358	}
359
360do_flush_sync:
361	/* If the device could not have possibly put dirty data into
362	 * the streaming cache, no flush-flag synchronization needs
363	 * to be performed.
364	 */
365	if (direction == DMA_TO_DEVICE)
366		return;
367
368	STC_FLUSHFLAG_INIT(strbuf);
369	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
370	(void) iommu_read(iommu->write_complete_reg);
371
372	limit = 100000;
373	while (!STC_FLUSHFLAG_SET(strbuf)) {
374		limit--;
375		if (!limit)
376			break;
377		udelay(1);
378		rmb();
379	}
380	if (!limit)
381		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
382		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
383		       vaddr, ctx, npages);
384}
385
386static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
387			      size_t sz, enum dma_data_direction direction,
388			      unsigned long attrs)
389{
390	struct iommu *iommu;
391	struct strbuf *strbuf;
392	iopte_t *base;
393	unsigned long flags, npages, ctx, i;
394
395	if (unlikely(direction == DMA_NONE)) {
396		if (printk_ratelimit())
397			WARN_ON(1);
398		return;
399	}
400
401	iommu = dev->archdata.iommu;
402	strbuf = dev->archdata.stc;
403
404	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
405	npages >>= IO_PAGE_SHIFT;
406	base = iommu->page_table +
407		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
408	bus_addr &= IO_PAGE_MASK;
409
410	spin_lock_irqsave(&iommu->lock, flags);
411
412	/* Record the context, if any. */
413	ctx = 0;
414	if (iommu->iommu_ctxflush)
415		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
416
417	/* Step 1: Kick data out of streaming buffers if necessary. */
418	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
419		strbuf_flush(strbuf, iommu, bus_addr, ctx,
420			     npages, direction);
421
422	/* Step 2: Clear out TSB entries. */
423	for (i = 0; i < npages; i++)
424		iopte_make_dummy(iommu, base + i);
425
426	iommu_free_ctx(iommu, ctx);
427	spin_unlock_irqrestore(&iommu->lock, flags);
428
429	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
430}
431
432static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
433			 int nelems, enum dma_data_direction direction,
434			 unsigned long attrs)
435{
436	struct scatterlist *s, *outs, *segstart;
437	unsigned long flags, handle, prot, ctx;
438	dma_addr_t dma_next = 0, dma_addr;
439	unsigned int max_seg_size;
440	unsigned long seg_boundary_size;
441	int outcount, incount, i;
442	struct strbuf *strbuf;
443	struct iommu *iommu;
444	unsigned long base_shift;
445
446	BUG_ON(direction == DMA_NONE);
447
448	iommu = dev->archdata.iommu;
449	strbuf = dev->archdata.stc;
450	if (nelems == 0 || !iommu)
451		return 0;
452
453	spin_lock_irqsave(&iommu->lock, flags);
454
455	ctx = 0;
456	if (iommu->iommu_ctxflush)
457		ctx = iommu_alloc_ctx(iommu);
458
459	if (strbuf->strbuf_enabled)
460		prot = IOPTE_STREAMING(ctx);
461	else
462		prot = IOPTE_CONSISTENT(ctx);
463	if (direction != DMA_TO_DEVICE)
464		prot |= IOPTE_WRITE;
465
466	outs = s = segstart = &sglist[0];
467	outcount = 1;
468	incount = nelems;
469	handle = 0;
470
471	/* Init first segment length for backout at failure */
472	outs->dma_length = 0;
473
474	max_seg_size = dma_get_max_seg_size(dev);
475	seg_boundary_size = dma_get_seg_boundary_nr_pages(dev, IO_PAGE_SHIFT);
476	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
477	for_each_sg(sglist, s, nelems, i) {
478		unsigned long paddr, npages, entry, out_entry = 0, slen;
479		iopte_t *base;
480
481		slen = s->length;
482		/* Sanity check */
483		if (slen == 0) {
484			dma_next = 0;
485			continue;
486		}
487		/* Allocate iommu entries for that segment */
488		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
489		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
490		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
491					      &handle, (unsigned long)(-1), 0);
492
493		/* Handle failure */
494		if (unlikely(entry == IOMMU_ERROR_CODE)) {
495			if (printk_ratelimit())
496				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
497				       " npages %lx\n", iommu, paddr, npages);
498			goto iommu_map_failed;
499		}
500
501		base = iommu->page_table + entry;
502
503		/* Convert entry to a dma_addr_t */
504		dma_addr = iommu->tbl.table_map_base +
505			(entry << IO_PAGE_SHIFT);
506		dma_addr |= (s->offset & ~IO_PAGE_MASK);
507
508		/* Insert into HW table */
509		paddr &= IO_PAGE_MASK;
510		while (npages--) {
511			iopte_val(*base) = prot | paddr;
512			base++;
513			paddr += IO_PAGE_SIZE;
514		}
515
516		/* If we are in an open segment, try merging */
517		if (segstart != s) {
518			/* We cannot merge if:
519			 * - allocated dma_addr isn't contiguous to previous allocation
520			 */
521			if ((dma_addr != dma_next) ||
522			    (outs->dma_length + s->length > max_seg_size) ||
523			    (is_span_boundary(out_entry, base_shift,
524					      seg_boundary_size, outs, s))) {
525				/* Can't merge: create a new segment */
526				segstart = s;
527				outcount++;
528				outs = sg_next(outs);
529			} else {
530				outs->dma_length += s->length;
531			}
532		}
533
534		if (segstart == s) {
535			/* This is a new segment, fill entries */
536			outs->dma_address = dma_addr;
537			outs->dma_length = slen;
538			out_entry = entry;
539		}
540
541		/* Calculate next page pointer for contiguous check */
542		dma_next = dma_addr + slen;
543	}
544
545	spin_unlock_irqrestore(&iommu->lock, flags);
546
547	if (outcount < incount) {
548		outs = sg_next(outs);
549		outs->dma_address = DMA_MAPPING_ERROR;
550		outs->dma_length = 0;
551	}
552
553	return outcount;
554
555iommu_map_failed:
556	for_each_sg(sglist, s, nelems, i) {
557		if (s->dma_length != 0) {
558			unsigned long vaddr, npages, entry, j;
559			iopte_t *base;
560
561			vaddr = s->dma_address & IO_PAGE_MASK;
562			npages = iommu_num_pages(s->dma_address, s->dma_length,
563						 IO_PAGE_SIZE);
564
565			entry = (vaddr - iommu->tbl.table_map_base)
566				>> IO_PAGE_SHIFT;
567			base = iommu->page_table + entry;
568
569			for (j = 0; j < npages; j++)
570				iopte_make_dummy(iommu, base + j);
571
572			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
573					     IOMMU_ERROR_CODE);
574
575			s->dma_address = DMA_MAPPING_ERROR;
576			s->dma_length = 0;
577		}
578		if (s == outs)
579			break;
580	}
581	spin_unlock_irqrestore(&iommu->lock, flags);
582
583	return 0;
584}
585
586/* If contexts are being used, they are the same in all of the mappings
587 * we make for a particular SG.
588 */
589static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
590{
591	unsigned long ctx = 0;
592
593	if (iommu->iommu_ctxflush) {
594		iopte_t *base;
595		u32 bus_addr;
596		struct iommu_map_table *tbl = &iommu->tbl;
597
598		bus_addr = sg->dma_address & IO_PAGE_MASK;
599		base = iommu->page_table +
600			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
601
602		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
603	}
604	return ctx;
605}
606
607static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
608			    int nelems, enum dma_data_direction direction,
609			    unsigned long attrs)
610{
611	unsigned long flags, ctx;
612	struct scatterlist *sg;
613	struct strbuf *strbuf;
614	struct iommu *iommu;
615
616	BUG_ON(direction == DMA_NONE);
617
618	iommu = dev->archdata.iommu;
619	strbuf = dev->archdata.stc;
620
621	ctx = fetch_sg_ctx(iommu, sglist);
622
623	spin_lock_irqsave(&iommu->lock, flags);
624
625	sg = sglist;
626	while (nelems--) {
627		dma_addr_t dma_handle = sg->dma_address;
628		unsigned int len = sg->dma_length;
629		unsigned long npages, entry;
630		iopte_t *base;
631		int i;
632
633		if (!len)
634			break;
635		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
636
637		entry = ((dma_handle - iommu->tbl.table_map_base)
638			 >> IO_PAGE_SHIFT);
639		base = iommu->page_table + entry;
640
641		dma_handle &= IO_PAGE_MASK;
642		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
643			strbuf_flush(strbuf, iommu, dma_handle, ctx,
644				     npages, direction);
645
646		for (i = 0; i < npages; i++)
647			iopte_make_dummy(iommu, base + i);
648
649		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
650				     IOMMU_ERROR_CODE);
651		sg = sg_next(sg);
652	}
653
654	iommu_free_ctx(iommu, ctx);
655
656	spin_unlock_irqrestore(&iommu->lock, flags);
657}
658
659static void dma_4u_sync_single_for_cpu(struct device *dev,
660				       dma_addr_t bus_addr, size_t sz,
661				       enum dma_data_direction direction)
662{
663	struct iommu *iommu;
664	struct strbuf *strbuf;
665	unsigned long flags, ctx, npages;
666
667	iommu = dev->archdata.iommu;
668	strbuf = dev->archdata.stc;
669
670	if (!strbuf->strbuf_enabled)
671		return;
672
673	spin_lock_irqsave(&iommu->lock, flags);
674
675	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
676	npages >>= IO_PAGE_SHIFT;
677	bus_addr &= IO_PAGE_MASK;
678
679	/* Step 1: Record the context, if any. */
680	ctx = 0;
681	if (iommu->iommu_ctxflush &&
682	    strbuf->strbuf_ctxflush) {
683		iopte_t *iopte;
684		struct iommu_map_table *tbl = &iommu->tbl;
685
686		iopte = iommu->page_table +
687			((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
688		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
689	}
690
691	/* Step 2: Kick data out of streaming buffers. */
692	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
693
694	spin_unlock_irqrestore(&iommu->lock, flags);
695}
696
697static void dma_4u_sync_sg_for_cpu(struct device *dev,
698				   struct scatterlist *sglist, int nelems,
699				   enum dma_data_direction direction)
700{
701	struct iommu *iommu;
702	struct strbuf *strbuf;
703	unsigned long flags, ctx, npages, i;
704	struct scatterlist *sg, *sgprv;
705	u32 bus_addr;
706
707	iommu = dev->archdata.iommu;
708	strbuf = dev->archdata.stc;
709
710	if (!strbuf->strbuf_enabled)
711		return;
712
713	spin_lock_irqsave(&iommu->lock, flags);
714
715	/* Step 1: Record the context, if any. */
716	ctx = 0;
717	if (iommu->iommu_ctxflush &&
718	    strbuf->strbuf_ctxflush) {
719		iopte_t *iopte;
720		struct iommu_map_table *tbl = &iommu->tbl;
721
722		iopte = iommu->page_table + ((sglist[0].dma_address -
723			tbl->table_map_base) >> IO_PAGE_SHIFT);
724		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
725	}
726
727	/* Step 2: Kick data out of streaming buffers. */
728	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
729	sgprv = NULL;
730	for_each_sg(sglist, sg, nelems, i) {
731		if (sg->dma_length == 0)
732			break;
733		sgprv = sg;
734	}
735
736	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
737		  - bus_addr) >> IO_PAGE_SHIFT;
738	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
739
740	spin_unlock_irqrestore(&iommu->lock, flags);
741}
742
743static int dma_4u_supported(struct device *dev, u64 device_mask)
744{
745	struct iommu *iommu = dev->archdata.iommu;
746
747	if (ali_sound_dma_hack(dev, device_mask))
748		return 1;
749
750	if (device_mask < iommu->dma_addr_mask)
751		return 0;
752	return 1;
753}
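
A hedged illustration of where dma_4u_supported() is consulted: a driver negotiating its DMA mask at probe time through the generic API (the device pointer and the chosen mask are hypothetical); on sun4u, dma_set_mask() ends up asking dma_4u_supported() whether the mask covers the IOMMU's DMA window.

/* Illustrative only: hypothetical probe-time mask negotiation. */
#include <linux/dma-mapping.h>

static int example_set_dma_mask(struct device *dev)
{
	/* Fails (non-zero return) if dma_4u_supported() rejects the mask. */
	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;
	return 0;
}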
754
755static const struct dma_map_ops sun4u_dma_ops = {
756	.alloc			= dma_4u_alloc_coherent,
757	.free			= dma_4u_free_coherent,
758	.map_page		= dma_4u_map_page,
759	.unmap_page		= dma_4u_unmap_page,
760	.map_sg			= dma_4u_map_sg,
761	.unmap_sg		= dma_4u_unmap_sg,
762	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
763	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
764	.dma_supported		= dma_4u_supported,
765};
766
767const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
768EXPORT_SYMBOL(dma_ops);