Linux v4.17
// SPDX-License-Identifier: GPL-2.0
/* iommu.c: Generic sparc64 IOMMU support.
 *
 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/iommu-helper.h>
#include <linux/bitmap.h>
#include <linux/iommu-common.h>

#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif

#include <asm/iommu.h>

#include "iommu_common.h"
#include "kernel.h"

#define STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
#define STC_FLUSHFLAG_INIT(STC) \
	(*((STC)->strbuf_flushflag) = 0UL)
#define STC_FLUSHFLAG_SET(STC) \
	(*((STC)->strbuf_flushflag) != 0UL)

#define iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
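
/* Editor's note: iommu_read()/iommu_write() are raw PIO accessors. The
 * ldxa/stxa instructions go through ASI_PHYS_BYPASS_EC_E, a physical-
 * address bypass ASI, so they hit the IOMMU control registers directly
 * without MMU translation. The "memory" clobber on the load keeps the
 * compiler from reordering it against the register writes it is meant
 * to observe.
 */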

/* Must be invoked under the IOMMU lock. */
static void iommu_flushall(struct iommu_map_table *iommu_map_table)
{
	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
	if (iommu->iommu_flushinv) {
		iommu_write(iommu->iommu_flushinv, ~(u64)0);
	} else {
		unsigned long tag;
		int entry;

		tag = iommu->iommu_tags;
		for (entry = 0; entry < 16; entry++) {
			iommu_write(tag, 0);
			tag += 8;
		}

		/* Ensure completion of previous PIO writes. */
		(void) iommu_read(iommu->write_complete_reg);
	}
}
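
/* Editor's note: two flush strategies, selected by hardware capability.
 * If a flush-invalidate-all register exists, a single write invalidates
 * the whole IOMMU TLB; otherwise each of the 16 TLB tag entries (spaced
 * 8 bytes apart) is invalidated individually, and the read of the
 * write-complete register guarantees the PIO stores have landed.
 */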

#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)

/* Existing mappings are never marked invalid; instead they are
 * pointed at a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)

static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}
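
/* Editor's note: iopte_make_dummy() only swaps the physical-page bits,
 * leaving the other bits (including IOPTE_VALID on a previously live
 * entry) untouched. One plausible reason, not stated in the source: a
 * device that erroneously touches a stale mapping then scribbles on a
 * private zeroed scratch page instead of raising a translation error.
 */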

int iommu_table_init(struct iommu *iommu, int tsbsize,
		     u32 dma_offset, u32 dma_addr_mask,
		     int numa_node)
{
	unsigned long i, order, sz, num_tsb_entries;
	struct page *page;

	num_tsb_entries = tsbsize / sizeof(iopte_t);

	/* Set up initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_addr_mask;

	/* Allocate and initialize the free area map. */
	sz = num_tsb_entries / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
	if (!iommu->tbl.map)
		return -ENOMEM;
	memset(iommu->tbl.map, 0, sz);

	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    (tlb_type != hypervisor ? iommu_flushall : NULL),
			    false, 1, false);

	/* Allocate and initialize the dummy page to which we point
	 * inactive IO PTEs.
	 */
	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
		goto out_free_map;
	}
	iommu->dummy_page = (unsigned long) page_address(page);
	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);

	/* Now allocate and set up the IOMMU page table itself. */
	order = get_order(tsbsize);
	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
	if (!page) {
		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
		goto out_free_dummy_page;
	}
	iommu->page_table = (iopte_t *)page_address(page);

	for (i = 0; i < num_tsb_entries; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);

	return 0;

out_free_dummy_page:
	free_page(iommu->dummy_page);
	iommu->dummy_page = 0UL;

out_free_map:
	kfree(iommu->tbl.map);
	iommu->tbl.map = NULL;

	return -ENOMEM;
}
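
/* Editor's note: the free-area map above is one bit per TSB entry
 * (num_tsb_entries / 8 bytes), rounded up to an 8-byte multiple; the
 * kmalloc_node() + memset() pair is equivalent to kzalloc_node(). The
 * iommu_flushall callback is registered only when not running under the
 * sun4v hypervisor, presumably because sun4v systems do not flush the
 * IOMMU TLB directly from the kernel this way.
 */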

static inline iopte_t *alloc_npages(struct device *dev,
				    struct iommu *iommu,
				    unsigned long npages)
{
	unsigned long entry;

	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
				      (unsigned long)(-1), 0);
	if (unlikely(entry == IOMMU_ERROR_CODE))
		return NULL;

	return iommu->page_table + entry;
}

static int iommu_alloc_ctx(struct iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);

	if (unlikely(n == IOMMU_NUM_CTXS)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}
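
/* Editor's note: DMA context numbers are handed out from a bitmap.
 * Context 0 is reserved to mean "no context", which is why the wrapped
 * search above restarts at bit 1 and why exhaustion falls back to 0
 * with a warning rather than a hard error. ctx_lowest_free is only a
 * search hint; it is lowered on free but not maintained on allocation.
 */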

static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}

static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	unsigned long order, first_page;
	struct iommu *iommu;
	struct page *page;
	int npages, nid;
	iopte_t *iopte;
	void *ret;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;

	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);

	if (unlikely(iopte == NULL)) {
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->tbl.table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	return ret;
}
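
/* Editor's note: coherent allocations of order >= 10 (2^10 pages or
 * more) are refused outright. The DMA address handed back is purely a
 * function of the allocated IOPTE slot: table_map_base plus the slot
 * index shifted left by IO_PAGE_SHIFT. Coherent memory always uses
 * consistent (non-streaming) IOPTEs with device write permission.
 */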

static void dma_4u_free_coherent(struct device *dev, size_t size,
				 void *cpu, dma_addr_t dvma,
				 unsigned long attrs)
{
	struct iommu *iommu;
	unsigned long order, npages;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;

	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}

static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (unlikely(direction == DMA_NONE))
		goto bad_no_ctx;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	base = alloc_npages(dev, iommu, npages);
	spin_lock_irqsave(&iommu->lock, flags);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	spin_unlock_irqrestore(&iommu->lock, flags);

	if (unlikely(!base))
		goto bad;

	bus_addr = (iommu->tbl.table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	return ret;

bad:
	iommu_free_ctx(iommu, ctx);
bad_no_ctx:
	if (printk_ratelimit())
		WARN_ON(1);
	return SPARC_MAPPING_ERROR;
}
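
/* Editor's note: the IOPTE range is carved out before taking
 * iommu->lock; the iommu-common allocator does its own per-pool
 * locking, so the IOMMU lock here only serializes context allocation.
 * bus_addr and ret are deliberately u32: this sun4u path never hands
 * out DMA addresses above 4GB, which dma_4u_supported() below also
 * enforces.
 */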

static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
			 u32 vaddr, unsigned long ctx, unsigned long npages,
			 enum dma_data_direction direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);

		iommu_write(flushreg, ctx);
		val = iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "strbuf_flush: ctx flush "
			       "timeout matchreg[%llx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not possibly have put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == DMA_TO_DEVICE)
		return;

	STC_FLUSHFLAG_INIT(strbuf);
	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
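
/* Editor's note: two flush modes again. With context-flush hardware,
 * the context tag is written to the flush register and the match
 * register re-checked until no entries for that context remain,
 * falling back to per-page flushing on timeout. Afterwards, for any
 * direction other than DMA_TO_DEVICE, the flush-flag word is armed,
 * its physical address is written to the fsync register, and the CPU
 * polls for the hardware to set it, bounded at 100000 iterations of
 * udelay(1), i.e. roughly 100ms.
 */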

static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx, i;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		strbuf_flush(strbuf, iommu, bus_addr, ctx,
			     npages, direction);

	/* Step 2: Clear out TSB entries. */
	for (i = 0; i < npages; i++)
		iopte_make_dummy(iommu, base + i);

	iommu_free_ctx(iommu, ctx);
	spin_unlock_irqrestore(&iommu->lock, flags);

	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
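
/* Editor's note: teardown order is significant: the streaming buffer
 * is drained while the IOPTEs still point at the real pages, the
 * IOPTEs are then redirected to the dummy page, and only after that is
 * the range returned to the allocator, so it cannot be reallocated
 * while stale data may still be in flight. DMA_ATTR_SKIP_CPU_SYNC lets
 * callers skip the flush when the CPU will not look at the data.
 */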

static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot, ctx;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct strbuf *strbuf;
	struct iommu *iommu;
	unsigned long base_shift;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;
	if (nelems == 0 || !iommu)
		return 0;

	spin_lock_irqsave(&iommu->lock, flags);

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	if (strbuf->strbuf_enabled)
		prot = IOPTE_STREAMING(ctx);
	else
		prot = IOPTE_CONSISTENT(ctx);
	if (direction != DMA_TO_DEVICE)
		prot |= IOPTE_WRITE;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;
		iopte_t *base;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
				       " npages %lx\n", iommu, paddr, npages);
			goto iommu_map_failed;
		}

		base = iommu->page_table + entry;

		/* Convert entry to a dma_addr_t */
		dma_addr = iommu->tbl.table_map_base +
			(entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			iopte_val(*base) = prot | paddr;
			base++;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - the allocated dma_addr isn't contiguous with
			 *   the previous allocation,
			 * - the merged segment would exceed the device's
			 *   max segment size, or
			 * - the merge would cross a segment boundary.
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = SPARC_MAPPING_ERROR;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages, entry, j;
			iopte_t *base;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);

			entry = (vaddr - iommu->tbl.table_map_base)
				>> IO_PAGE_SHIFT;
			base = iommu->page_table + entry;

			for (j = 0; j < npages; j++)
				iopte_make_dummy(iommu, base + j);

			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);

			s->dma_address = SPARC_MAPPING_ERROR;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	spin_unlock_irqrestore(&iommu->lock, flags);

	return 0;
}
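
/* Editor's note: the merge logic above coalesces a new allocation into
 * the current output segment only when it is DMA-contiguous with the
 * previous one, stays within dma_get_max_seg_size(), and does not
 * cross the device's segment boundary mask. When map_sg() returns
 * fewer segments than it was given, the entry after the last valid one
 * gets a zero dma_length so the unmap path knows where to stop.
 */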

/* If contexts are being used, they are the same in all of the mappings
 * we make for a particular SG.
 */
static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
{
	unsigned long ctx = 0;

	if (iommu->iommu_ctxflush) {
		iopte_t *base;
		u32 bus_addr;
		struct iommu_map_table *tbl = &iommu->tbl;

		bus_addr = sg->dma_address & IO_PAGE_MASK;
		base = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);

		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
	}
	return ctx;
}

static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	unsigned long flags, ctx;
	struct scatterlist *sg;
	struct strbuf *strbuf;
	struct iommu *iommu;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	ctx = fetch_sg_ctx(iommu, sglist);

	spin_lock_irqsave(&iommu->lock, flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages, entry;
		iopte_t *base;
		int i;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		entry = ((dma_handle - iommu->tbl.table_map_base)
			 >> IO_PAGE_SHIFT);
		base = iommu->page_table + entry;

		dma_handle &= IO_PAGE_MASK;
		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
			strbuf_flush(strbuf, iommu, dma_handle, ctx,
				     npages, direction);

		for (i = 0; i < npages; i++)
			iopte_make_dummy(iommu, base + i);

		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
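
/* Editor's note: unmap_sg() receives the original nelems, not
 * map_sg()'s return value, so the loop stops at the zero-dma_length
 * terminator written by dma_4u_map_sg(). The context is read back from
 * the first mapping's IOPTE via fetch_sg_ctx(), since all segments of
 * one SG share a single context.
 */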

static void dma_4u_sync_single_for_cpu(struct device *dev,
				       dma_addr_t bus_addr, size_t sz,
				       enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table +
			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static void dma_4u_sync_sg_for_cpu(struct device *dev,
				   struct scatterlist *sglist, int nelems,
				   enum dma_data_direction direction)
{
	struct iommu *iommu;
	struct strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	struct scatterlist *sg, *sgprv;
	u32 bus_addr;

	iommu = dev->archdata.iommu;
	strbuf = dev->archdata.stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;
		struct iommu_map_table *tbl = &iommu->tbl;

		iopte = iommu->page_table + ((sglist[0].dma_address -
			tbl->table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	sgprv = NULL;
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}

static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == SPARC_MAPPING_ERROR;
}

static int dma_4u_supported(struct device *dev, u64 device_mask)
{
	struct iommu *iommu = dev->archdata.iommu;

	if (device_mask > DMA_BIT_MASK(32))
		return 0;
	if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
		return 1;
#ifdef CONFIG_PCI
	if (dev_is_pci(dev))
		return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif
	return 0;
}

static const struct dma_map_ops sun4u_dma_ops = {
	.alloc			= dma_4u_alloc_coherent,
	.free			= dma_4u_free_coherent,
	.map_page		= dma_4u_map_page,
	.unmap_page		= dma_4u_unmap_page,
	.map_sg			= dma_4u_map_sg,
	.unmap_sg		= dma_4u_unmap_sg,
	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
	.dma_supported		= dma_4u_supported,
	.mapping_error		= dma_4u_mapping_error,
};

const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
EXPORT_SYMBOL(dma_ops);
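
/* Editor's note: a minimal usage sketch, not part of this file. It
 * shows how a hypothetical driver would reach the sun4u ops above
 * through the generic DMA API; the device pointer, buffer and length
 * are assumed to come from that driver.
 */
#include <linux/dma-mapping.h>

static int example_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* dma_map_single() ends up in dma_4u_map_page() via dma_ops. */
	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))	/* dma_4u_mapping_error() */
		return -ENOMEM;

	/* ... program the device to DMA into 'handle' and wait ... */

	/* Drain the streaming buffer before the CPU touches the data. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);

	/* Flush, dummy-fill the IOPTEs and free the mapped range. */
	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}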