v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/* iommu.c: Generic sparc64 IOMMU support.
  3 *
  4 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  5 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  6 */
  7
  8#include <linux/kernel.h>
  9#include <linux/export.h>
 10#include <linux/slab.h>
 11#include <linux/delay.h>
 12#include <linux/device.h>
 13#include <linux/dma-mapping.h>
 14#include <linux/errno.h>
 15#include <linux/iommu-helper.h>
 16#include <linux/bitmap.h>
 17#include <linux/iommu-common.h>
 18
 19#ifdef CONFIG_PCI
 20#include <linux/pci.h>
 21#endif
 22
 23#include <asm/iommu.h>
 24
 25#include "iommu_common.h"
 26#include "kernel.h"
 27
 28#define STC_CTXMATCH_ADDR(STC, CTX)	\
 29	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
 30#define STC_FLUSHFLAG_INIT(STC) \
 31	(*((STC)->strbuf_flushflag) = 0UL)
 32#define STC_FLUSHFLAG_SET(STC) \
 33	(*((STC)->strbuf_flushflag) != 0UL)
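/* Annotation (not in the original source): the STC_* helpers drive the
 * streaming cache (STC).  STC_CTXMATCH_ADDR computes the address of the
 * per-context match register, STC_FLUSHFLAG_INIT clears the in-memory
 * flush flag, and STC_FLUSHFLAG_SET tests whether the hardware has set it;
 * all three are used by strbuf_flush() below.
 */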
 34
 35#define iommu_read(__reg) \
 36({	u64 __ret; \
 37	__asm__ __volatile__("ldxa [%1] %2, %0" \
 38			     : "=r" (__ret) \
 39			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
 40			     : "memory"); \
 41	__ret; \
 42})
 43#define iommu_write(__reg, __val) \
 44	__asm__ __volatile__("stxa %0, [%1] %2" \
 45			     : /* no outputs */ \
 46			     : "r" (__val), "r" (__reg), \
 47			       "i" (ASI_PHYS_BYPASS_EC_E))
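/* Annotation: iommu_read()/iommu_write() are raw 64-bit accessors for IOMMU
 * and streaming-cache control registers.  They use ldxa/stxa with the
 * ASI_PHYS_BYPASS_EC_E address space identifier, so the register address is
 * a physical address that bypasses the MMU.
 */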
 48
 49/* Must be invoked under the IOMMU lock. */
 50static void iommu_flushall(struct iommu_map_table *iommu_map_table)
 51{
 52	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
 53	if (iommu->iommu_flushinv) {
 54		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 55	} else {
 56		unsigned long tag;
 57		int entry;
 58
 59		tag = iommu->iommu_tags;
 60		for (entry = 0; entry < 16; entry++) {
 61			iommu_write(tag, 0);
 62			tag += 8;
 63		}
 64
 65		/* Ensure completion of previous PIO writes. */
 66		(void) iommu_read(iommu->write_complete_reg);
 67	}
 68}
 69
 70#define IOPTE_CONSISTENT(CTX) \
 71	(IOPTE_VALID | IOPTE_CACHE | \
 72	 (((CTX) << 47) & IOPTE_CONTEXT))
 73
 74#define IOPTE_STREAMING(CTX) \
 75	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
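/* Annotation: a consistent IOPTE is valid and cacheable, with the context
 * number stored in the IOPTE_CONTEXT field starting at bit 47 (matching the
 * ">> 47UL" extractions below).  A streaming IOPTE additionally sets
 * IOPTE_STBUF so accesses through it go via the streaming buffer.
 */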
 76
 77/* Existing mappings are never marked invalid, instead they
 78 * are pointed to a dummy page.
 79 */
 80#define IOPTE_IS_DUMMY(iommu, iopte)	\
 81	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 82
 83static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 84{
 85	unsigned long val = iopte_val(*iopte);
 86
 87	val &= ~IOPTE_PAGE;
 88	val |= iommu->dummy_page_pa;
 89
 90	iopte_val(*iopte) = val;
 91}
 92
 93int iommu_table_init(struct iommu *iommu, int tsbsize,
 94		     u32 dma_offset, u32 dma_addr_mask,
 95		     int numa_node)
 96{
 97	unsigned long i, order, sz, num_tsb_entries;
 98	struct page *page;
 99
100	num_tsb_entries = tsbsize / sizeof(iopte_t);
101
102	/* Setup initial software IOMMU state. */
103	spin_lock_init(&iommu->lock);
104	iommu->ctx_lowest_free = 1;
105	iommu->tbl.table_map_base = dma_offset;
106	iommu->dma_addr_mask = dma_addr_mask;
107
108	/* Allocate and initialize the free area map.  */
109	sz = num_tsb_entries / 8;
110	sz = (sz + 7UL) & ~7UL;
111	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
112	if (!iommu->tbl.map)
113		return -ENOMEM;
114	memset(iommu->tbl.map, 0, sz);
115
116	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
117			    (tlb_type != hypervisor ? iommu_flushall : NULL),
118			    false, 1, false);
119
120	/* Allocate and initialize the dummy page which we
121	 * set inactive IO PTEs to point to.
122	 */
123	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
124	if (!page) {
125		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
126		goto out_free_map;
127	}
128	iommu->dummy_page = (unsigned long) page_address(page);
129	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
130	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
131
132	/* Now allocate and setup the IOMMU page table itself.  */
133	order = get_order(tsbsize);
134	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
135	if (!page) {
136		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
137		goto out_free_dummy_page;
138	}
139	iommu->page_table = (iopte_t *)page_address(page);
140
141	for (i = 0; i < num_tsb_entries; i++)
142		iopte_make_dummy(iommu, &iommu->page_table[i]);
143
144	return 0;
145
146out_free_dummy_page:
147	free_page(iommu->dummy_page);
148	iommu->dummy_page = 0UL;
149
150out_free_map:
151	kfree(iommu->tbl.map);
152	iommu->tbl.map = NULL;
153
154	return -ENOMEM;
155}
156
157static inline iopte_t *alloc_npages(struct device *dev,
158				    struct iommu *iommu,
159				    unsigned long npages)
160{
161	unsigned long entry;
162
163	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
164				      (unsigned long)(-1), 0);
165	if (unlikely(entry == IOMMU_ERROR_CODE))
166		return NULL;
167
168	return iommu->page_table + entry;
169}
170
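/* Annotation: hardware DMA contexts.  Context 0 is treated as "no context",
 * so the allocator searches the bitmap starting at ctx_lowest_free, wraps
 * around from bit 1 if it hits the end, and falls back to 0 (with a warning)
 * when every context is in use.
 */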
171static int iommu_alloc_ctx(struct iommu *iommu)
172{
173	int lowest = iommu->ctx_lowest_free;
174	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
175
176	if (unlikely(n == IOMMU_NUM_CTXS)) {
177		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
178		if (unlikely(n == lowest)) {
179			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
180			n = 0;
181		}
182	}
183	if (n)
184		__set_bit(n, iommu->ctx_bitmap);
185
186	return n;
187}
188
189static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
190{
191	if (likely(ctx)) {
192		__clear_bit(ctx, iommu->ctx_bitmap);
193		if (ctx < iommu->ctx_lowest_free)
194			iommu->ctx_lowest_free = ctx;
195	}
196}
197
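/* Annotation: coherent allocations grab physically contiguous pages (orders
 * of 10 or more are refused), reserve the matching number of IOMMU entries,
 * and program each IOPTE as a consistent, writable mapping.  The returned
 * DMA address is table_map_base plus the entry index shifted by
 * IO_PAGE_SHIFT.
 */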
198static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
199				   dma_addr_t *dma_addrp, gfp_t gfp,
200				   unsigned long attrs)
201{
202	unsigned long order, first_page;
203	struct iommu *iommu;
204	struct page *page;
205	int npages, nid;
206	iopte_t *iopte;
207	void *ret;
208
209	size = IO_PAGE_ALIGN(size);
210	order = get_order(size);
211	if (order >= 10)
212		return NULL;
213
214	nid = dev->archdata.numa_node;
215	page = alloc_pages_node(nid, gfp, order);
216	if (unlikely(!page))
217		return NULL;
218
219	first_page = (unsigned long) page_address(page);
220	memset((char *)first_page, 0, PAGE_SIZE << order);
221
222	iommu = dev->archdata.iommu;
223
224	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
225
226	if (unlikely(iopte == NULL)) {
227		free_pages(first_page, order);
228		return NULL;
229	}
230
231	*dma_addrp = (iommu->tbl.table_map_base +
232		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
233	ret = (void *) first_page;
234	npages = size >> IO_PAGE_SHIFT;
235	first_page = __pa(first_page);
236	while (npages--) {
237		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
238				     IOPTE_WRITE |
239				     (first_page & IOPTE_PAGE));
240		iopte++;
241		first_page += IO_PAGE_SIZE;
242	}
243
244	return ret;
245}
246
247static void dma_4u_free_coherent(struct device *dev, size_t size,
248				 void *cpu, dma_addr_t dvma,
249				 unsigned long attrs)
250{
251	struct iommu *iommu;
252	unsigned long order, npages;
253
254	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
255	iommu = dev->archdata.iommu;
256
257	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
258
259	order = get_order(size);
260	if (order < 10)
261		free_pages((unsigned long)cpu, order);
262}
263
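/* Annotation: mapping a single page range for streaming DMA reserves enough
 * IOPTEs to cover the buffer, optionally allocates a hardware context, and
 * fills each IOPTE with streaming or consistent protection depending on
 * whether the device's streaming buffer is enabled.  IOPTE_WRITE is set
 * unless the direction is DMA_TO_DEVICE, and the low bits of the original
 * address are preserved in the returned bus address.
 */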
264static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
265				  unsigned long offset, size_t sz,
266				  enum dma_data_direction direction,
267				  unsigned long attrs)
268{
269	struct iommu *iommu;
270	struct strbuf *strbuf;
271	iopte_t *base;
272	unsigned long flags, npages, oaddr;
273	unsigned long i, base_paddr, ctx;
274	u32 bus_addr, ret;
275	unsigned long iopte_protection;
276
277	iommu = dev->archdata.iommu;
278	strbuf = dev->archdata.stc;
279
280	if (unlikely(direction == DMA_NONE))
281		goto bad_no_ctx;
282
283	oaddr = (unsigned long)(page_address(page) + offset);
284	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
285	npages >>= IO_PAGE_SHIFT;
286
287	base = alloc_npages(dev, iommu, npages);
288	spin_lock_irqsave(&iommu->lock, flags);
289	ctx = 0;
290	if (iommu->iommu_ctxflush)
291		ctx = iommu_alloc_ctx(iommu);
292	spin_unlock_irqrestore(&iommu->lock, flags);
293
294	if (unlikely(!base))
295		goto bad;
296
297	bus_addr = (iommu->tbl.table_map_base +
298		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
299	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
300	base_paddr = __pa(oaddr & IO_PAGE_MASK);
301	if (strbuf->strbuf_enabled)
302		iopte_protection = IOPTE_STREAMING(ctx);
303	else
304		iopte_protection = IOPTE_CONSISTENT(ctx);
305	if (direction != DMA_TO_DEVICE)
306		iopte_protection |= IOPTE_WRITE;
307
308	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
309		iopte_val(*base) = iopte_protection | base_paddr;
310
311	return ret;
312
313bad:
314	iommu_free_ctx(iommu, ctx);
315bad_no_ctx:
316	if (printk_ratelimit())
317		WARN_ON(1);
318	return SPARC_MAPPING_ERROR;
319}
320
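/* Annotation: strbuf_flush() pushes dirty data for
 * [vaddr, vaddr + npages * IO_PAGE_SIZE) out of the streaming cache.  When
 * both the streaming cache and the IOMMU support context flushing, one write
 * to the context-flush register per matching tag suffices; otherwise each IO
 * page is flushed individually through strbuf_pflush.  Unless the transfer
 * was DMA_TO_DEVICE, completion is then confirmed by writing the flush flag's
 * physical address to strbuf_fsync and polling the flag, with a roughly
 * 100ms (100,000 x udelay(1)) timeout.
 */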
321static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
322			 u32 vaddr, unsigned long ctx, unsigned long npages,
323			 enum dma_data_direction direction)
324{
325	int limit;
326
327	if (strbuf->strbuf_ctxflush &&
328	    iommu->iommu_ctxflush) {
329		unsigned long matchreg, flushreg;
330		u64 val;
331
332		flushreg = strbuf->strbuf_ctxflush;
333		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
334
335		iommu_write(flushreg, ctx);
336		val = iommu_read(matchreg);
337		val &= 0xffff;
338		if (!val)
339			goto do_flush_sync;
340
341		while (val) {
342			if (val & 0x1)
343				iommu_write(flushreg, ctx);
344			val >>= 1;
345		}
346		val = iommu_read(matchreg);
347		if (unlikely(val)) {
348			printk(KERN_WARNING "strbuf_flush: ctx flush "
349			       "timeout matchreg[%llx] ctx[%lx]\n",
350			       val, ctx);
351			goto do_page_flush;
352		}
353	} else {
354		unsigned long i;
355
356	do_page_flush:
357		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
358			iommu_write(strbuf->strbuf_pflush, vaddr);
359	}
360
361do_flush_sync:
362	/* If the device could not have possibly put dirty data into
363	 * the streaming cache, no flush-flag synchronization needs
364	 * to be performed.
365	 */
366	if (direction == DMA_TO_DEVICE)
367		return;
368
369	STC_FLUSHFLAG_INIT(strbuf);
370	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
371	(void) iommu_read(iommu->write_complete_reg);
372
373	limit = 100000;
374	while (!STC_FLUSHFLAG_SET(strbuf)) {
375		limit--;
376		if (!limit)
377			break;
378		udelay(1);
379		rmb();
380	}
381	if (!limit)
382		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
383		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
384		       vaddr, ctx, npages);
385}
386
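/* Annotation: tearing down a single streaming mapping flushes the streaming
 * buffer for the range (unless DMA_ATTR_SKIP_CPU_SYNC was passed), points
 * the IOPTEs back at the dummy page, and then releases the context and the
 * table entries.
 */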
387static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
388			      size_t sz, enum dma_data_direction direction,
389			      unsigned long attrs)
390{
391	struct iommu *iommu;
392	struct strbuf *strbuf;
393	iopte_t *base;
394	unsigned long flags, npages, ctx, i;
395
396	if (unlikely(direction == DMA_NONE)) {
397		if (printk_ratelimit())
398			WARN_ON(1);
399		return;
400	}
401
402	iommu = dev->archdata.iommu;
403	strbuf = dev->archdata.stc;
404
405	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
406	npages >>= IO_PAGE_SHIFT;
407	base = iommu->page_table +
408		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
409	bus_addr &= IO_PAGE_MASK;
410
411	spin_lock_irqsave(&iommu->lock, flags);
412
413	/* Record the context, if any. */
414	ctx = 0;
415	if (iommu->iommu_ctxflush)
416		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
417
418	/* Step 1: Kick data out of streaming buffers if necessary. */
419	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
420		strbuf_flush(strbuf, iommu, bus_addr, ctx,
421			     npages, direction);
422
423	/* Step 2: Clear out TSB entries. */
424	for (i = 0; i < npages; i++)
425		iopte_make_dummy(iommu, base + i);
426
427	iommu_free_ctx(iommu, ctx);
428	spin_unlock_irqrestore(&iommu->lock, flags);
429
430	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
431}
432
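/* Annotation: scatter-gather mapping gives each segment its own IOMMU
 * allocation, but consecutive segments are merged into one DMA segment when
 * the allocated bus addresses happen to be contiguous and neither
 * max_seg_size nor the segment boundary would be violated.  On allocation
 * failure every entry mapped so far is unwound again.
 */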
433static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
434			 int nelems, enum dma_data_direction direction,
435			 unsigned long attrs)
436{
437	struct scatterlist *s, *outs, *segstart;
438	unsigned long flags, handle, prot, ctx;
439	dma_addr_t dma_next = 0, dma_addr;
440	unsigned int max_seg_size;
441	unsigned long seg_boundary_size;
442	int outcount, incount, i;
443	struct strbuf *strbuf;
444	struct iommu *iommu;
445	unsigned long base_shift;
446
447	BUG_ON(direction == DMA_NONE);
448
449	iommu = dev->archdata.iommu;
450	strbuf = dev->archdata.stc;
451	if (nelems == 0 || !iommu)
452		return 0;
453
454	spin_lock_irqsave(&iommu->lock, flags);
455
456	ctx = 0;
457	if (iommu->iommu_ctxflush)
458		ctx = iommu_alloc_ctx(iommu);
459
460	if (strbuf->strbuf_enabled)
461		prot = IOPTE_STREAMING(ctx);
462	else
463		prot = IOPTE_CONSISTENT(ctx);
464	if (direction != DMA_TO_DEVICE)
465		prot |= IOPTE_WRITE;
466
467	outs = s = segstart = &sglist[0];
468	outcount = 1;
469	incount = nelems;
470	handle = 0;
471
472	/* Init first segment length for backout at failure */
473	outs->dma_length = 0;
474
475	max_seg_size = dma_get_max_seg_size(dev);
476	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
477				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
478	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
479	for_each_sg(sglist, s, nelems, i) {
480		unsigned long paddr, npages, entry, out_entry = 0, slen;
481		iopte_t *base;
482
483		slen = s->length;
484		/* Sanity check */
485		if (slen == 0) {
486			dma_next = 0;
487			continue;
488		}
489		/* Allocate iommu entries for that segment */
490		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
491		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
492		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
493					      &handle, (unsigned long)(-1), 0);
494
495		/* Handle failure */
496		if (unlikely(entry == IOMMU_ERROR_CODE)) {
497			if (printk_ratelimit())
498				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
499				       " npages %lx\n", iommu, paddr, npages);
500			goto iommu_map_failed;
501		}
502
503		base = iommu->page_table + entry;
504
505		/* Convert entry to a dma_addr_t */
506		dma_addr = iommu->tbl.table_map_base +
507			(entry << IO_PAGE_SHIFT);
508		dma_addr |= (s->offset & ~IO_PAGE_MASK);
509
510		/* Insert into HW table */
511		paddr &= IO_PAGE_MASK;
512		while (npages--) {
513			iopte_val(*base) = prot | paddr;
514			base++;
515			paddr += IO_PAGE_SIZE;
516		}
517
518		/* If we are in an open segment, try merging */
519		if (segstart != s) {
520			/* We cannot merge if:
521			 * - allocated dma_addr isn't contiguous to previous allocation
522			 */
523			if ((dma_addr != dma_next) ||
524			    (outs->dma_length + s->length > max_seg_size) ||
525			    (is_span_boundary(out_entry, base_shift,
526					      seg_boundary_size, outs, s))) {
527				/* Can't merge: create a new segment */
528				segstart = s;
529				outcount++;
530				outs = sg_next(outs);
531			} else {
532				outs->dma_length += s->length;
533			}
534		}
535
536		if (segstart == s) {
537			/* This is a new segment, fill entries */
538			outs->dma_address = dma_addr;
539			outs->dma_length = slen;
540			out_entry = entry;
541		}
542
543		/* Calculate next page pointer for contiguous check */
544		dma_next = dma_addr + slen;
545	}
546
547	spin_unlock_irqrestore(&iommu->lock, flags);
548
549	if (outcount < incount) {
550		outs = sg_next(outs);
551		outs->dma_address = SPARC_MAPPING_ERROR;
552		outs->dma_length = 0;
553	}
554
555	return outcount;
556
557iommu_map_failed:
558	for_each_sg(sglist, s, nelems, i) {
559		if (s->dma_length != 0) {
560			unsigned long vaddr, npages, entry, j;
561			iopte_t *base;
562
563			vaddr = s->dma_address & IO_PAGE_MASK;
564			npages = iommu_num_pages(s->dma_address, s->dma_length,
565						 IO_PAGE_SIZE);
566
567			entry = (vaddr - iommu->tbl.table_map_base)
568				>> IO_PAGE_SHIFT;
569			base = iommu->page_table + entry;
570
571			for (j = 0; j < npages; j++)
572				iopte_make_dummy(iommu, base + j);
573
574			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
575					     IOMMU_ERROR_CODE);
576
577			s->dma_address = SPARC_MAPPING_ERROR;
578			s->dma_length = 0;
579		}
580		if (s == outs)
581			break;
582	}
583	spin_unlock_irqrestore(&iommu->lock, flags);
584
585	return 0;
586}
587
588/* If contexts are being used, they are the same in all of the mappings
589 * we make for a particular SG.
590 */
591static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
592{
593	unsigned long ctx = 0;
594
595	if (iommu->iommu_ctxflush) {
596		iopte_t *base;
597		u32 bus_addr;
598		struct iommu_map_table *tbl = &iommu->tbl;
599
600		bus_addr = sg->dma_address & IO_PAGE_MASK;
601		base = iommu->page_table +
602			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
603
604		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
605	}
606	return ctx;
607}
608
609static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
610			    int nelems, enum dma_data_direction direction,
611			    unsigned long attrs)
612{
613	unsigned long flags, ctx;
614	struct scatterlist *sg;
615	struct strbuf *strbuf;
616	struct iommu *iommu;
617
618	BUG_ON(direction == DMA_NONE);
619
620	iommu = dev->archdata.iommu;
621	strbuf = dev->archdata.stc;
622
623	ctx = fetch_sg_ctx(iommu, sglist);
624
625	spin_lock_irqsave(&iommu->lock, flags);
626
627	sg = sglist;
628	while (nelems--) {
629		dma_addr_t dma_handle = sg->dma_address;
630		unsigned int len = sg->dma_length;
631		unsigned long npages, entry;
632		iopte_t *base;
633		int i;
634
635		if (!len)
636			break;
637		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
638
639		entry = ((dma_handle - iommu->tbl.table_map_base)
640			 >> IO_PAGE_SHIFT);
641		base = iommu->page_table + entry;
642
643		dma_handle &= IO_PAGE_MASK;
644		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
645			strbuf_flush(strbuf, iommu, dma_handle, ctx,
646				     npages, direction);
647
648		for (i = 0; i < npages; i++)
649			iopte_make_dummy(iommu, base + i);
650
651		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
652				     IOMMU_ERROR_CODE);
653		sg = sg_next(sg);
654	}
655
656	iommu_free_ctx(iommu, ctx);
657
658	spin_unlock_irqrestore(&iommu->lock, flags);
659}
660
661static void dma_4u_sync_single_for_cpu(struct device *dev,
662				       dma_addr_t bus_addr, size_t sz,
663				       enum dma_data_direction direction)
664{
665	struct iommu *iommu;
666	struct strbuf *strbuf;
667	unsigned long flags, ctx, npages;
668
669	iommu = dev->archdata.iommu;
670	strbuf = dev->archdata.stc;
671
672	if (!strbuf->strbuf_enabled)
673		return;
674
675	spin_lock_irqsave(&iommu->lock, flags);
676
677	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
678	npages >>= IO_PAGE_SHIFT;
679	bus_addr &= IO_PAGE_MASK;
680
681	/* Step 1: Record the context, if any. */
682	ctx = 0;
683	if (iommu->iommu_ctxflush &&
684	    strbuf->strbuf_ctxflush) {
685		iopte_t *iopte;
686		struct iommu_map_table *tbl = &iommu->tbl;
687
688		iopte = iommu->page_table +
689			((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
690		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
691	}
692
693	/* Step 2: Kick data out of streaming buffers. */
694	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
695
696	spin_unlock_irqrestore(&iommu->lock, flags);
697}
698
699static void dma_4u_sync_sg_for_cpu(struct device *dev,
700				   struct scatterlist *sglist, int nelems,
701				   enum dma_data_direction direction)
702{
703	struct iommu *iommu;
704	struct strbuf *strbuf;
705	unsigned long flags, ctx, npages, i;
706	struct scatterlist *sg, *sgprv;
707	u32 bus_addr;
708
709	iommu = dev->archdata.iommu;
710	strbuf = dev->archdata.stc;
711
712	if (!strbuf->strbuf_enabled)
713		return;
714
715	spin_lock_irqsave(&iommu->lock, flags);
716
717	/* Step 1: Record the context, if any. */
718	ctx = 0;
719	if (iommu->iommu_ctxflush &&
720	    strbuf->strbuf_ctxflush) {
721		iopte_t *iopte;
722		struct iommu_map_table *tbl = &iommu->tbl;
723
724		iopte = iommu->page_table + ((sglist[0].dma_address -
725			tbl->table_map_base) >> IO_PAGE_SHIFT);
726		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
727	}
728
729	/* Step 2: Kick data out of streaming buffers. */
730	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
731	sgprv = NULL;
732	for_each_sg(sglist, sg, nelems, i) {
733		if (sg->dma_length == 0)
734			break;
735		sgprv = sg;
736	}
737
738	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
739		  - bus_addr) >> IO_PAGE_SHIFT;
740	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
741
742	spin_unlock_irqrestore(&iommu->lock, flags);
743}
744
745static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
746{
747	return dma_addr == SPARC_MAPPING_ERROR;
748}
749
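/* Annotation: DMA masks wider than 32 bits are rejected outright; otherwise
 * the mask must cover the IOMMU's dma_addr_mask, and PCI devices that fail
 * that test get a final check through pci64_dma_supported().
 */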
750static int dma_4u_supported(struct device *dev, u64 device_mask)
751{
752	struct iommu *iommu = dev->archdata.iommu;
753
754	if (device_mask > DMA_BIT_MASK(32))
755		return 0;
756	if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
757		return 1;
758#ifdef CONFIG_PCI
759	if (dev_is_pci(dev))
760		return pci64_dma_supported(to_pci_dev(dev), device_mask);
761#endif
762	return 0;
763}
764
765static const struct dma_map_ops sun4u_dma_ops = {
766	.alloc			= dma_4u_alloc_coherent,
767	.free			= dma_4u_free_coherent,
768	.map_page		= dma_4u_map_page,
769	.unmap_page		= dma_4u_unmap_page,
770	.map_sg			= dma_4u_map_sg,
771	.unmap_sg		= dma_4u_unmap_sg,
772	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
773	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
774	.dma_supported		= dma_4u_supported,
775	.mapping_error		= dma_4u_mapping_error,
776};
777
778const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
779EXPORT_SYMBOL(dma_ops);
v3.1
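The same file as seen in v3.1, before the switch to the common IOMMU table code: this older version still manages the TSB with its own bitmap arena allocator (iommu_range_alloc()/iommu_range_free()) and uses the DMA_ERROR_CODE and struct dma_attrs era interfaces.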
 
  1/* iommu.c: Generic sparc64 IOMMU support.
  2 *
  3 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  4 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/module.h>
  9#include <linux/slab.h>
 10#include <linux/delay.h>
 11#include <linux/device.h>
 12#include <linux/dma-mapping.h>
 13#include <linux/errno.h>
 14#include <linux/iommu-helper.h>
 15#include <linux/bitmap.h>
 16
 17#ifdef CONFIG_PCI
 18#include <linux/pci.h>
 19#endif
 20
 21#include <asm/iommu.h>
 22
 23#include "iommu_common.h"
 24
 25#define STC_CTXMATCH_ADDR(STC, CTX)	\
 26	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
 27#define STC_FLUSHFLAG_INIT(STC) \
 28	(*((STC)->strbuf_flushflag) = 0UL)
 29#define STC_FLUSHFLAG_SET(STC) \
 30	(*((STC)->strbuf_flushflag) != 0UL)
 31
 32#define iommu_read(__reg) \
 33({	u64 __ret; \
 34	__asm__ __volatile__("ldxa [%1] %2, %0" \
 35			     : "=r" (__ret) \
 36			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
 37			     : "memory"); \
 38	__ret; \
 39})
 40#define iommu_write(__reg, __val) \
 41	__asm__ __volatile__("stxa %0, [%1] %2" \
 42			     : /* no outputs */ \
 43			     : "r" (__val), "r" (__reg), \
 44			       "i" (ASI_PHYS_BYPASS_EC_E))
 45
 46/* Must be invoked under the IOMMU lock. */
 47static void iommu_flushall(struct iommu *iommu)
 48{
 49	if (iommu->iommu_flushinv) {
 50		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 51	} else {
 52		unsigned long tag;
 53		int entry;
 54
 55		tag = iommu->iommu_tags;
 56		for (entry = 0; entry < 16; entry++) {
 57			iommu_write(tag, 0);
 58			tag += 8;
 59		}
 60
 61		/* Ensure completion of previous PIO writes. */
 62		(void) iommu_read(iommu->write_complete_reg);
 63	}
 64}
 65
 66#define IOPTE_CONSISTENT(CTX) \
 67	(IOPTE_VALID | IOPTE_CACHE | \
 68	 (((CTX) << 47) & IOPTE_CONTEXT))
 69
 70#define IOPTE_STREAMING(CTX) \
 71	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
 72
 73/* Existing mappings are never marked invalid, instead they
 74 * are pointed to a dummy page.
 75 */
 76#define IOPTE_IS_DUMMY(iommu, iopte)	\
 77	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 78
 79static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 80{
 81	unsigned long val = iopte_val(*iopte);
 82
 83	val &= ~IOPTE_PAGE;
 84	val |= iommu->dummy_page_pa;
 85
 86	iopte_val(*iopte) = val;
 87}
 88
 89/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 90 * facility it must all be done in one pass while under the iommu lock.
 91 *
 92 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 93 * over the entire page table doing allocations.  Therefore we only ever advance
 94 * the hint and cannot backtrack it.
 95 */
 96unsigned long iommu_range_alloc(struct device *dev,
 97				struct iommu *iommu,
 98				unsigned long npages,
 99				unsigned long *handle)
100{
101	unsigned long n, end, start, limit, boundary_size;
102	struct iommu_arena *arena = &iommu->arena;
103	int pass = 0;
104
105	/* This allocator was derived from x86_64's bit string search */
106
107	/* Sanity check */
108	if (unlikely(npages == 0)) {
109		if (printk_ratelimit())
110			WARN_ON(1);
111		return DMA_ERROR_CODE;
112	}
113
114	if (handle && *handle)
115		start = *handle;
116	else
117		start = arena->hint;
118
119	limit = arena->limit;
120
121	/* The case below can happen if we have a small segment appended
122	 * to a large, or when the previous alloc was at the very end of
123	 * the available space. If so, go back to the beginning and flush.
124	 */
125	if (start >= limit) {
126		start = 0;
127		if (iommu->flush_all)
128			iommu->flush_all(iommu);
129	}
130
131 again:
132
133	if (dev)
134		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
135				      1 << IO_PAGE_SHIFT);
136	else
137		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
138
139	n = iommu_area_alloc(arena->map, limit, start, npages,
140			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
141			     boundary_size >> IO_PAGE_SHIFT, 0);
142	if (n == -1) {
143		if (likely(pass < 1)) {
144			/* First failure, rescan from the beginning.  */
145			start = 0;
146			if (iommu->flush_all)
147				iommu->flush_all(iommu);
148			pass++;
149			goto again;
150		} else {
151			/* Second failure, give up */
152			return DMA_ERROR_CODE;
153		}
154	}
155
156	end = n + npages;
157
158	arena->hint = end;
159
160	/* Update handle for SG allocations */
161	if (handle)
162		*handle = end;
163
164	return n;
165}
166
167void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
168{
169	struct iommu_arena *arena = &iommu->arena;
170	unsigned long entry;
171
172	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
173
174	bitmap_clear(arena->map, entry, npages);
175}
176
177int iommu_table_init(struct iommu *iommu, int tsbsize,
178		     u32 dma_offset, u32 dma_addr_mask,
179		     int numa_node)
180{
181	unsigned long i, order, sz, num_tsb_entries;
182	struct page *page;
183
184	num_tsb_entries = tsbsize / sizeof(iopte_t);
185
186	/* Setup initial software IOMMU state. */
187	spin_lock_init(&iommu->lock);
188	iommu->ctx_lowest_free = 1;
189	iommu->page_table_map_base = dma_offset;
190	iommu->dma_addr_mask = dma_addr_mask;
191
192	/* Allocate and initialize the free area map.  */
193	sz = num_tsb_entries / 8;
194	sz = (sz + 7UL) & ~7UL;
195	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
196	if (!iommu->arena.map) {
197		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
198		return -ENOMEM;
199	}
200	memset(iommu->arena.map, 0, sz);
201	iommu->arena.limit = num_tsb_entries;
202
203	if (tlb_type != hypervisor)
204		iommu->flush_all = iommu_flushall;
205
206	/* Allocate and initialize the dummy page which we
207	 * set inactive IO PTEs to point to.
208	 */
209	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
210	if (!page) {
211		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
212		goto out_free_map;
213	}
214	iommu->dummy_page = (unsigned long) page_address(page);
215	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
216	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
217
218	/* Now allocate and setup the IOMMU page table itself.  */
219	order = get_order(tsbsize);
220	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
221	if (!page) {
222		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
223		goto out_free_dummy_page;
224	}
225	iommu->page_table = (iopte_t *)page_address(page);
226
227	for (i = 0; i < num_tsb_entries; i++)
228		iopte_make_dummy(iommu, &iommu->page_table[i]);
229
230	return 0;
231
232out_free_dummy_page:
233	free_page(iommu->dummy_page);
234	iommu->dummy_page = 0UL;
235
236out_free_map:
237	kfree(iommu->arena.map);
238	iommu->arena.map = NULL;
239
240	return -ENOMEM;
241}
242
243static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
244				    unsigned long npages)
245{
246	unsigned long entry;
247
248	entry = iommu_range_alloc(dev, iommu, npages, NULL);
249	if (unlikely(entry == DMA_ERROR_CODE))
250		return NULL;
251
252	return iommu->page_table + entry;
253}
254
255static int iommu_alloc_ctx(struct iommu *iommu)
256{
257	int lowest = iommu->ctx_lowest_free;
258	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
259
260	if (unlikely(n == IOMMU_NUM_CTXS)) {
261		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
262		if (unlikely(n == lowest)) {
263			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
264			n = 0;
265		}
266	}
267	if (n)
268		__set_bit(n, iommu->ctx_bitmap);
269
270	return n;
271}
272
273static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
274{
275	if (likely(ctx)) {
276		__clear_bit(ctx, iommu->ctx_bitmap);
277		if (ctx < iommu->ctx_lowest_free)
278			iommu->ctx_lowest_free = ctx;
279	}
280}
281
282static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
283				   dma_addr_t *dma_addrp, gfp_t gfp)
284{
285	unsigned long flags, order, first_page;
286	struct iommu *iommu;
287	struct page *page;
288	int npages, nid;
289	iopte_t *iopte;
290	void *ret;
291
292	size = IO_PAGE_ALIGN(size);
293	order = get_order(size);
294	if (order >= 10)
295		return NULL;
296
297	nid = dev->archdata.numa_node;
298	page = alloc_pages_node(nid, gfp, order);
299	if (unlikely(!page))
300		return NULL;
301
302	first_page = (unsigned long) page_address(page);
303	memset((char *)first_page, 0, PAGE_SIZE << order);
304
305	iommu = dev->archdata.iommu;
306
307	spin_lock_irqsave(&iommu->lock, flags);
308	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
309	spin_unlock_irqrestore(&iommu->lock, flags);
310
311	if (unlikely(iopte == NULL)) {
312		free_pages(first_page, order);
313		return NULL;
314	}
315
316	*dma_addrp = (iommu->page_table_map_base +
317		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
318	ret = (void *) first_page;
319	npages = size >> IO_PAGE_SHIFT;
320	first_page = __pa(first_page);
321	while (npages--) {
322		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
323				     IOPTE_WRITE |
324				     (first_page & IOPTE_PAGE));
325		iopte++;
326		first_page += IO_PAGE_SIZE;
327	}
328
329	return ret;
330}
331
332static void dma_4u_free_coherent(struct device *dev, size_t size,
333				 void *cpu, dma_addr_t dvma)
334{
335	struct iommu *iommu;
336	unsigned long flags, order, npages;
337
338	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
339	iommu = dev->archdata.iommu;
340
341	spin_lock_irqsave(&iommu->lock, flags);
342
343	iommu_range_free(iommu, dvma, npages);
344
345	spin_unlock_irqrestore(&iommu->lock, flags);
346
347	order = get_order(size);
348	if (order < 10)
349		free_pages((unsigned long)cpu, order);
350}
351
352static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
353				  unsigned long offset, size_t sz,
354				  enum dma_data_direction direction,
355				  struct dma_attrs *attrs)
356{
357	struct iommu *iommu;
358	struct strbuf *strbuf;
359	iopte_t *base;
360	unsigned long flags, npages, oaddr;
361	unsigned long i, base_paddr, ctx;
362	u32 bus_addr, ret;
363	unsigned long iopte_protection;
364
365	iommu = dev->archdata.iommu;
366	strbuf = dev->archdata.stc;
367
368	if (unlikely(direction == DMA_NONE))
369		goto bad_no_ctx;
370
371	oaddr = (unsigned long)(page_address(page) + offset);
372	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
373	npages >>= IO_PAGE_SHIFT;
374
375	spin_lock_irqsave(&iommu->lock, flags);
376	base = alloc_npages(dev, iommu, npages);
377	ctx = 0;
378	if (iommu->iommu_ctxflush)
379		ctx = iommu_alloc_ctx(iommu);
380	spin_unlock_irqrestore(&iommu->lock, flags);
381
382	if (unlikely(!base))
383		goto bad;
384
385	bus_addr = (iommu->page_table_map_base +
386		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
387	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
388	base_paddr = __pa(oaddr & IO_PAGE_MASK);
389	if (strbuf->strbuf_enabled)
390		iopte_protection = IOPTE_STREAMING(ctx);
391	else
392		iopte_protection = IOPTE_CONSISTENT(ctx);
393	if (direction != DMA_TO_DEVICE)
394		iopte_protection |= IOPTE_WRITE;
395
396	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
397		iopte_val(*base) = iopte_protection | base_paddr;
398
399	return ret;
400
401bad:
402	iommu_free_ctx(iommu, ctx);
403bad_no_ctx:
404	if (printk_ratelimit())
405		WARN_ON(1);
406	return DMA_ERROR_CODE;
407}
408
409static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
410			 u32 vaddr, unsigned long ctx, unsigned long npages,
411			 enum dma_data_direction direction)
412{
413	int limit;
414
415	if (strbuf->strbuf_ctxflush &&
416	    iommu->iommu_ctxflush) {
417		unsigned long matchreg, flushreg;
418		u64 val;
419
420		flushreg = strbuf->strbuf_ctxflush;
421		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
422
423		iommu_write(flushreg, ctx);
424		val = iommu_read(matchreg);
425		val &= 0xffff;
426		if (!val)
427			goto do_flush_sync;
428
429		while (val) {
430			if (val & 0x1)
431				iommu_write(flushreg, ctx);
432			val >>= 1;
433		}
434		val = iommu_read(matchreg);
435		if (unlikely(val)) {
436			printk(KERN_WARNING "strbuf_flush: ctx flush "
437			       "timeout matchreg[%llx] ctx[%lx]\n",
438			       val, ctx);
439			goto do_page_flush;
440		}
441	} else {
442		unsigned long i;
443
444	do_page_flush:
445		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
446			iommu_write(strbuf->strbuf_pflush, vaddr);
447	}
448
449do_flush_sync:
450	/* If the device could not have possibly put dirty data into
451	 * the streaming cache, no flush-flag synchronization needs
452	 * to be performed.
453	 */
454	if (direction == DMA_TO_DEVICE)
455		return;
456
457	STC_FLUSHFLAG_INIT(strbuf);
458	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
459	(void) iommu_read(iommu->write_complete_reg);
460
461	limit = 100000;
462	while (!STC_FLUSHFLAG_SET(strbuf)) {
463		limit--;
464		if (!limit)
465			break;
466		udelay(1);
467		rmb();
468	}
469	if (!limit)
470		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
471		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
472		       vaddr, ctx, npages);
473}
474
475static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
476			      size_t sz, enum dma_data_direction direction,
477			      struct dma_attrs *attrs)
478{
479	struct iommu *iommu;
480	struct strbuf *strbuf;
481	iopte_t *base;
482	unsigned long flags, npages, ctx, i;
483
484	if (unlikely(direction == DMA_NONE)) {
485		if (printk_ratelimit())
486			WARN_ON(1);
487		return;
488	}
489
490	iommu = dev->archdata.iommu;
491	strbuf = dev->archdata.stc;
492
493	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
494	npages >>= IO_PAGE_SHIFT;
495	base = iommu->page_table +
496		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
497	bus_addr &= IO_PAGE_MASK;
498
499	spin_lock_irqsave(&iommu->lock, flags);
500
501	/* Record the context, if any. */
502	ctx = 0;
503	if (iommu->iommu_ctxflush)
504		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
505
506	/* Step 1: Kick data out of streaming buffers if necessary. */
507	if (strbuf->strbuf_enabled)
508		strbuf_flush(strbuf, iommu, bus_addr, ctx,
509			     npages, direction);
510
511	/* Step 2: Clear out TSB entries. */
512	for (i = 0; i < npages; i++)
513		iopte_make_dummy(iommu, base + i);
514
515	iommu_range_free(iommu, bus_addr, npages);
516
517	iommu_free_ctx(iommu, ctx);
518
519	spin_unlock_irqrestore(&iommu->lock, flags);
520}
521
522static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
523			 int nelems, enum dma_data_direction direction,
524			 struct dma_attrs *attrs)
525{
526	struct scatterlist *s, *outs, *segstart;
527	unsigned long flags, handle, prot, ctx;
528	dma_addr_t dma_next = 0, dma_addr;
529	unsigned int max_seg_size;
530	unsigned long seg_boundary_size;
531	int outcount, incount, i;
532	struct strbuf *strbuf;
533	struct iommu *iommu;
534	unsigned long base_shift;
535
536	BUG_ON(direction == DMA_NONE);
537
538	iommu = dev->archdata.iommu;
539	strbuf = dev->archdata.stc;
540	if (nelems == 0 || !iommu)
541		return 0;
542
543	spin_lock_irqsave(&iommu->lock, flags);
544
545	ctx = 0;
546	if (iommu->iommu_ctxflush)
547		ctx = iommu_alloc_ctx(iommu);
548
549	if (strbuf->strbuf_enabled)
550		prot = IOPTE_STREAMING(ctx);
551	else
552		prot = IOPTE_CONSISTENT(ctx);
553	if (direction != DMA_TO_DEVICE)
554		prot |= IOPTE_WRITE;
555
556	outs = s = segstart = &sglist[0];
557	outcount = 1;
558	incount = nelems;
559	handle = 0;
560
561	/* Init first segment length for backout at failure */
562	outs->dma_length = 0;
563
564	max_seg_size = dma_get_max_seg_size(dev);
565	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
566				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
567	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
568	for_each_sg(sglist, s, nelems, i) {
569		unsigned long paddr, npages, entry, out_entry = 0, slen;
570		iopte_t *base;
571
572		slen = s->length;
573		/* Sanity check */
574		if (slen == 0) {
575			dma_next = 0;
576			continue;
577		}
578		/* Allocate iommu entries for that segment */
579		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
580		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
581		entry = iommu_range_alloc(dev, iommu, npages, &handle);
582
583		/* Handle failure */
584		if (unlikely(entry == DMA_ERROR_CODE)) {
585			if (printk_ratelimit())
586				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
587				       " npages %lx\n", iommu, paddr, npages);
588			goto iommu_map_failed;
589		}
590
591		base = iommu->page_table + entry;
592
593		/* Convert entry to a dma_addr_t */
594		dma_addr = iommu->page_table_map_base +
595			(entry << IO_PAGE_SHIFT);
596		dma_addr |= (s->offset & ~IO_PAGE_MASK);
597
598		/* Insert into HW table */
599		paddr &= IO_PAGE_MASK;
600		while (npages--) {
601			iopte_val(*base) = prot | paddr;
602			base++;
603			paddr += IO_PAGE_SIZE;
604		}
605
606		/* If we are in an open segment, try merging */
607		if (segstart != s) {
608			/* We cannot merge if:
609			 * - allocated dma_addr isn't contiguous to previous allocation
610			 */
611			if ((dma_addr != dma_next) ||
612			    (outs->dma_length + s->length > max_seg_size) ||
613			    (is_span_boundary(out_entry, base_shift,
614					      seg_boundary_size, outs, s))) {
615				/* Can't merge: create a new segment */
616				segstart = s;
617				outcount++;
618				outs = sg_next(outs);
619			} else {
620				outs->dma_length += s->length;
621			}
622		}
623
624		if (segstart == s) {
625			/* This is a new segment, fill entries */
626			outs->dma_address = dma_addr;
627			outs->dma_length = slen;
628			out_entry = entry;
629		}
630
631		/* Calculate next page pointer for contiguous check */
632		dma_next = dma_addr + slen;
633	}
634
635	spin_unlock_irqrestore(&iommu->lock, flags);
636
637	if (outcount < incount) {
638		outs = sg_next(outs);
639		outs->dma_address = DMA_ERROR_CODE;
640		outs->dma_length = 0;
641	}
642
643	return outcount;
644
645iommu_map_failed:
646	for_each_sg(sglist, s, nelems, i) {
647		if (s->dma_length != 0) {
648			unsigned long vaddr, npages, entry, j;
649			iopte_t *base;
650
651			vaddr = s->dma_address & IO_PAGE_MASK;
652			npages = iommu_num_pages(s->dma_address, s->dma_length,
653						 IO_PAGE_SIZE);
654			iommu_range_free(iommu, vaddr, npages);
655
656			entry = (vaddr - iommu->page_table_map_base)
657				>> IO_PAGE_SHIFT;
658			base = iommu->page_table + entry;
659
660			for (j = 0; j < npages; j++)
661				iopte_make_dummy(iommu, base + j);
662
663			s->dma_address = DMA_ERROR_CODE;
664			s->dma_length = 0;
665		}
666		if (s == outs)
667			break;
668	}
669	spin_unlock_irqrestore(&iommu->lock, flags);
670
671	return 0;
672}
673
674/* If contexts are being used, they are the same in all of the mappings
675 * we make for a particular SG.
676 */
677static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
678{
679	unsigned long ctx = 0;
680
681	if (iommu->iommu_ctxflush) {
682		iopte_t *base;
683		u32 bus_addr;
684
685		bus_addr = sg->dma_address & IO_PAGE_MASK;
686		base = iommu->page_table +
687			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
688
689		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
690	}
691	return ctx;
692}
693
694static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
695			    int nelems, enum dma_data_direction direction,
696			    struct dma_attrs *attrs)
697{
698	unsigned long flags, ctx;
699	struct scatterlist *sg;
700	struct strbuf *strbuf;
701	struct iommu *iommu;
702
703	BUG_ON(direction == DMA_NONE);
704
705	iommu = dev->archdata.iommu;
706	strbuf = dev->archdata.stc;
707
708	ctx = fetch_sg_ctx(iommu, sglist);
709
710	spin_lock_irqsave(&iommu->lock, flags);
711
712	sg = sglist;
713	while (nelems--) {
714		dma_addr_t dma_handle = sg->dma_address;
715		unsigned int len = sg->dma_length;
716		unsigned long npages, entry;
717		iopte_t *base;
718		int i;
719
720		if (!len)
721			break;
722		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
723		iommu_range_free(iommu, dma_handle, npages);
724
725		entry = ((dma_handle - iommu->page_table_map_base)
726			 >> IO_PAGE_SHIFT);
727		base = iommu->page_table + entry;
728
729		dma_handle &= IO_PAGE_MASK;
730		if (strbuf->strbuf_enabled)
731			strbuf_flush(strbuf, iommu, dma_handle, ctx,
732				     npages, direction);
733
734		for (i = 0; i < npages; i++)
735			iopte_make_dummy(iommu, base + i);
736
737		sg = sg_next(sg);
738	}
739
740	iommu_free_ctx(iommu, ctx);
741
742	spin_unlock_irqrestore(&iommu->lock, flags);
743}
744
745static void dma_4u_sync_single_for_cpu(struct device *dev,
746				       dma_addr_t bus_addr, size_t sz,
747				       enum dma_data_direction direction)
748{
749	struct iommu *iommu;
750	struct strbuf *strbuf;
751	unsigned long flags, ctx, npages;
752
753	iommu = dev->archdata.iommu;
754	strbuf = dev->archdata.stc;
755
756	if (!strbuf->strbuf_enabled)
757		return;
758
759	spin_lock_irqsave(&iommu->lock, flags);
760
761	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
762	npages >>= IO_PAGE_SHIFT;
763	bus_addr &= IO_PAGE_MASK;
764
765	/* Step 1: Record the context, if any. */
766	ctx = 0;
767	if (iommu->iommu_ctxflush &&
768	    strbuf->strbuf_ctxflush) {
769		iopte_t *iopte;
770
771		iopte = iommu->page_table +
772			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
773		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
774	}
775
776	/* Step 2: Kick data out of streaming buffers. */
777	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
778
779	spin_unlock_irqrestore(&iommu->lock, flags);
780}
781
782static void dma_4u_sync_sg_for_cpu(struct device *dev,
783				   struct scatterlist *sglist, int nelems,
784				   enum dma_data_direction direction)
785{
786	struct iommu *iommu;
787	struct strbuf *strbuf;
788	unsigned long flags, ctx, npages, i;
789	struct scatterlist *sg, *sgprv;
790	u32 bus_addr;
791
792	iommu = dev->archdata.iommu;
793	strbuf = dev->archdata.stc;
794
795	if (!strbuf->strbuf_enabled)
796		return;
797
798	spin_lock_irqsave(&iommu->lock, flags);
799
800	/* Step 1: Record the context, if any. */
801	ctx = 0;
802	if (iommu->iommu_ctxflush &&
803	    strbuf->strbuf_ctxflush) {
804		iopte_t *iopte;
805
806		iopte = iommu->page_table +
807			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
808		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
809	}
810
811	/* Step 2: Kick data out of streaming buffers. */
812	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
813	sgprv = NULL;
814	for_each_sg(sglist, sg, nelems, i) {
815		if (sg->dma_length == 0)
816			break;
817		sgprv = sg;
818	}
819
820	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
821		  - bus_addr) >> IO_PAGE_SHIFT;
822	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
823
824	spin_unlock_irqrestore(&iommu->lock, flags);
825}
826
827static struct dma_map_ops sun4u_dma_ops = {
828	.alloc_coherent		= dma_4u_alloc_coherent,
829	.free_coherent		= dma_4u_free_coherent,
830	.map_page		= dma_4u_map_page,
831	.unmap_page		= dma_4u_unmap_page,
832	.map_sg			= dma_4u_map_sg,
833	.unmap_sg		= dma_4u_unmap_sg,
834	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
835	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
836};
837
838struct dma_map_ops *dma_ops = &sun4u_dma_ops;
839EXPORT_SYMBOL(dma_ops);
840
841extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
842
843int dma_supported(struct device *dev, u64 device_mask)
844{
845	struct iommu *iommu = dev->archdata.iommu;
846	u64 dma_addr_mask = iommu->dma_addr_mask;
847
848	if (device_mask >= (1UL << 32UL))
849		return 0;
850
851	if ((device_mask & dma_addr_mask) == dma_addr_mask)
852		return 1;
853
854#ifdef CONFIG_PCI
855	if (dev->bus == &pci_bus_type)
856		return pci64_dma_supported(to_pci_dev(dev), device_mask);
857#endif
858
859	return 0;
860}
861EXPORT_SYMBOL(dma_supported);