v4.17 (arch/sparc/kernel/iommu.c)
  1// SPDX-License-Identifier: GPL-2.0
  2/* iommu.c: Generic sparc64 IOMMU support.
  3 *
  4 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  5 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  6 */
  7
  8#include <linux/kernel.h>
  9#include <linux/export.h>
 10#include <linux/slab.h>
 11#include <linux/delay.h>
 12#include <linux/device.h>
 13#include <linux/dma-mapping.h>
 14#include <linux/errno.h>
 15#include <linux/iommu-helper.h>
 16#include <linux/bitmap.h>
 17#include <linux/iommu-common.h>
 18
 19#ifdef CONFIG_PCI
 20#include <linux/pci.h>
 21#endif
 22
 23#include <asm/iommu.h>
 24
 25#include "iommu_common.h"
 26#include "kernel.h"
 27
 28#define STC_CTXMATCH_ADDR(STC, CTX)	\
 29	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
 30#define STC_FLUSHFLAG_INIT(STC) \
 31	(*((STC)->strbuf_flushflag) = 0UL)
 32#define STC_FLUSHFLAG_SET(STC) \
 33	(*((STC)->strbuf_flushflag) != 0UL)
 34
 35#define iommu_read(__reg) \
 36({	u64 __ret; \
 37	__asm__ __volatile__("ldxa [%1] %2, %0" \
 38			     : "=r" (__ret) \
 39			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
 40			     : "memory"); \
 41	__ret; \
 42})
 43#define iommu_write(__reg, __val) \
 44	__asm__ __volatile__("stxa %0, [%1] %2" \
 45			     : /* no outputs */ \
 46			     : "r" (__val), "r" (__reg), \
 47			       "i" (ASI_PHYS_BYPASS_EC_E))
 48
 49/* Must be invoked under the IOMMU lock. */
 50static void iommu_flushall(struct iommu_map_table *iommu_map_table)
 51{
 52	struct iommu *iommu = container_of(iommu_map_table, struct iommu, tbl);
 53	if (iommu->iommu_flushinv) {
 54		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 55	} else {
 56		unsigned long tag;
 57		int entry;
 58
 59		tag = iommu->iommu_tags;
 60		for (entry = 0; entry < 16; entry++) {
 61			iommu_write(tag, 0);
 62			tag += 8;
 63		}
 64
 65		/* Ensure completion of previous PIO writes. */
 66		(void) iommu_read(iommu->write_complete_reg);
 67	}
 68}
 69
 70#define IOPTE_CONSISTENT(CTX) \
 71	(IOPTE_VALID | IOPTE_CACHE | \
 72	 (((CTX) << 47) & IOPTE_CONTEXT))
 73
 74#define IOPTE_STREAMING(CTX) \
 75	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
 76
 77/* Existing mappings are never marked invalid, instead they
 78 * are pointed to a dummy page.
 79 */
 80#define IOPTE_IS_DUMMY(iommu, iopte)	\
 81	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 82
 83static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 84{
 85	unsigned long val = iopte_val(*iopte);
 86
 87	val &= ~IOPTE_PAGE;
 88	val |= iommu->dummy_page_pa;
 89
 90	iopte_val(*iopte) = val;
 91}
  92
 93int iommu_table_init(struct iommu *iommu, int tsbsize,
 94		     u32 dma_offset, u32 dma_addr_mask,
 95		     int numa_node)
 96{
 97	unsigned long i, order, sz, num_tsb_entries;
 98	struct page *page;
 99
100	num_tsb_entries = tsbsize / sizeof(iopte_t);
101
102	/* Setup initial software IOMMU state. */
103	spin_lock_init(&iommu->lock);
104	iommu->ctx_lowest_free = 1;
105	iommu->tbl.table_map_base = dma_offset;
106	iommu->dma_addr_mask = dma_addr_mask;
107
108	/* Allocate and initialize the free area map.  */
109	sz = num_tsb_entries / 8;
110	sz = (sz + 7UL) & ~7UL;
111	iommu->tbl.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
112	if (!iommu->tbl.map)
113		return -ENOMEM;
114	memset(iommu->tbl.map, 0, sz);
115
116	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
117			    (tlb_type != hypervisor ? iommu_flushall : NULL),
118			    false, 1, false);
119
120	/* Allocate and initialize the dummy page which we
121	 * set inactive IO PTEs to point to.
122	 */
123	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
124	if (!page) {
125		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
126		goto out_free_map;
127	}
128	iommu->dummy_page = (unsigned long) page_address(page);
129	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
130	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
131
132	/* Now allocate and setup the IOMMU page table itself.  */
133	order = get_order(tsbsize);
134	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
135	if (!page) {
136		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
137		goto out_free_dummy_page;
138	}
139	iommu->page_table = (iopte_t *)page_address(page);
140
141	for (i = 0; i < num_tsb_entries; i++)
142		iopte_make_dummy(iommu, &iommu->page_table[i]);
143
144	return 0;
145
146out_free_dummy_page:
147	free_page(iommu->dummy_page);
148	iommu->dummy_page = 0UL;
149
150out_free_map:
151	kfree(iommu->tbl.map);
152	iommu->tbl.map = NULL;
153
154	return -ENOMEM;
155}
156
157static inline iopte_t *alloc_npages(struct device *dev,
158				    struct iommu *iommu,
159				    unsigned long npages)
160{
161	unsigned long entry;
162
163	entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,
164				      (unsigned long)(-1), 0);
165	if (unlikely(entry == IOMMU_ERROR_CODE))
166		return NULL;
167
168	return iommu->page_table + entry;
169}
170
171static int iommu_alloc_ctx(struct iommu *iommu)
172{
173	int lowest = iommu->ctx_lowest_free;
174	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
175
176	if (unlikely(n == IOMMU_NUM_CTXS)) {
177		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
178		if (unlikely(n == lowest)) {
179			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
180			n = 0;
181		}
182	}
183	if (n)
184		__set_bit(n, iommu->ctx_bitmap);
185
186	return n;
187}
188
189static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
190{
191	if (likely(ctx)) {
192		__clear_bit(ctx, iommu->ctx_bitmap);
193		if (ctx < iommu->ctx_lowest_free)
194			iommu->ctx_lowest_free = ctx;
195	}
196}
197
198static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
199				   dma_addr_t *dma_addrp, gfp_t gfp,
200				   unsigned long attrs)
201{
202	unsigned long order, first_page;
203	struct iommu *iommu;
204	struct page *page;
205	int npages, nid;
206	iopte_t *iopte;
207	void *ret;
208
209	size = IO_PAGE_ALIGN(size);
210	order = get_order(size);
211	if (order >= 10)
212		return NULL;
213
214	nid = dev->archdata.numa_node;
215	page = alloc_pages_node(nid, gfp, order);
216	if (unlikely(!page))
217		return NULL;
218
219	first_page = (unsigned long) page_address(page);
220	memset((char *)first_page, 0, PAGE_SIZE << order);
221
222	iommu = dev->archdata.iommu;
223
224	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
225
226	if (unlikely(iopte == NULL)) {
227		free_pages(first_page, order);
228		return NULL;
229	}
230
231	*dma_addrp = (iommu->tbl.table_map_base +
232		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
233	ret = (void *) first_page;
234	npages = size >> IO_PAGE_SHIFT;
235	first_page = __pa(first_page);
236	while (npages--) {
237		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
238				     IOPTE_WRITE |
239				     (first_page & IOPTE_PAGE));
240		iopte++;
241		first_page += IO_PAGE_SIZE;
242	}
243
244	return ret;
245}
246
247static void dma_4u_free_coherent(struct device *dev, size_t size,
248				 void *cpu, dma_addr_t dvma,
249				 unsigned long attrs)
250{
251	struct iommu *iommu;
252	unsigned long order, npages;
253
254	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
255	iommu = dev->archdata.iommu;
256
257	iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);
258
259	order = get_order(size);
260	if (order < 10)
261		free_pages((unsigned long)cpu, order);
262}
263
264static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
265				  unsigned long offset, size_t sz,
266				  enum dma_data_direction direction,
267				  unsigned long attrs)
268{
269	struct iommu *iommu;
270	struct strbuf *strbuf;
271	iopte_t *base;
272	unsigned long flags, npages, oaddr;
273	unsigned long i, base_paddr, ctx;
274	u32 bus_addr, ret;
275	unsigned long iopte_protection;
276
277	iommu = dev->archdata.iommu;
278	strbuf = dev->archdata.stc;
279
280	if (unlikely(direction == DMA_NONE))
281		goto bad_no_ctx;
282
283	oaddr = (unsigned long)(page_address(page) + offset);
284	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
285	npages >>= IO_PAGE_SHIFT;
286
287	base = alloc_npages(dev, iommu, npages);
288	spin_lock_irqsave(&iommu->lock, flags);
289	ctx = 0;
290	if (iommu->iommu_ctxflush)
291		ctx = iommu_alloc_ctx(iommu);
292	spin_unlock_irqrestore(&iommu->lock, flags);
293
294	if (unlikely(!base))
295		goto bad;
296
297	bus_addr = (iommu->tbl.table_map_base +
298		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
299	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
300	base_paddr = __pa(oaddr & IO_PAGE_MASK);
301	if (strbuf->strbuf_enabled)
302		iopte_protection = IOPTE_STREAMING(ctx);
303	else
304		iopte_protection = IOPTE_CONSISTENT(ctx);
305	if (direction != DMA_TO_DEVICE)
306		iopte_protection |= IOPTE_WRITE;
307
308	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
309		iopte_val(*base) = iopte_protection | base_paddr;
310
311	return ret;
312
313bad:
314	iommu_free_ctx(iommu, ctx);
315bad_no_ctx:
316	if (printk_ratelimit())
317		WARN_ON(1);
318	return SPARC_MAPPING_ERROR;
319}
320
321static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
322			 u32 vaddr, unsigned long ctx, unsigned long npages,
323			 enum dma_data_direction direction)
324{
325	int limit;
326
327	if (strbuf->strbuf_ctxflush &&
328	    iommu->iommu_ctxflush) {
329		unsigned long matchreg, flushreg;
330		u64 val;
331
332		flushreg = strbuf->strbuf_ctxflush;
333		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
334
335		iommu_write(flushreg, ctx);
336		val = iommu_read(matchreg);
337		val &= 0xffff;
338		if (!val)
339			goto do_flush_sync;
340
341		while (val) {
342			if (val & 0x1)
343				iommu_write(flushreg, ctx);
344			val >>= 1;
345		}
346		val = iommu_read(matchreg);
347		if (unlikely(val)) {
348			printk(KERN_WARNING "strbuf_flush: ctx flush "
349			       "timeout matchreg[%llx] ctx[%lx]\n",
350			       val, ctx);
351			goto do_page_flush;
352		}
353	} else {
354		unsigned long i;
355
356	do_page_flush:
357		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
358			iommu_write(strbuf->strbuf_pflush, vaddr);
359	}
360
361do_flush_sync:
362	/* If the device could not have possibly put dirty data into
363	 * the streaming cache, no flush-flag synchronization needs
364	 * to be performed.
365	 */
366	if (direction == DMA_TO_DEVICE)
367		return;
368
369	STC_FLUSHFLAG_INIT(strbuf);
370	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
371	(void) iommu_read(iommu->write_complete_reg);
372
373	limit = 100000;
374	while (!STC_FLUSHFLAG_SET(strbuf)) {
375		limit--;
376		if (!limit)
377			break;
378		udelay(1);
379		rmb();
380	}
381	if (!limit)
382		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
383		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
384		       vaddr, ctx, npages);
385}
386
387static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
388			      size_t sz, enum dma_data_direction direction,
389			      unsigned long attrs)
390{
391	struct iommu *iommu;
392	struct strbuf *strbuf;
393	iopte_t *base;
394	unsigned long flags, npages, ctx, i;
395
396	if (unlikely(direction == DMA_NONE)) {
397		if (printk_ratelimit())
398			WARN_ON(1);
399		return;
400	}
401
402	iommu = dev->archdata.iommu;
403	strbuf = dev->archdata.stc;
404
405	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
406	npages >>= IO_PAGE_SHIFT;
407	base = iommu->page_table +
408		((bus_addr - iommu->tbl.table_map_base) >> IO_PAGE_SHIFT);
409	bus_addr &= IO_PAGE_MASK;
410
411	spin_lock_irqsave(&iommu->lock, flags);
412
413	/* Record the context, if any. */
414	ctx = 0;
415	if (iommu->iommu_ctxflush)
416		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
417
418	/* Step 1: Kick data out of streaming buffers if necessary. */
419	if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
420		strbuf_flush(strbuf, iommu, bus_addr, ctx,
421			     npages, direction);
422
423	/* Step 2: Clear out TSB entries. */
424	for (i = 0; i < npages; i++)
425		iopte_make_dummy(iommu, base + i);
426
427	iommu_free_ctx(iommu, ctx);
428	spin_unlock_irqrestore(&iommu->lock, flags);
429
430	iommu_tbl_range_free(&iommu->tbl, bus_addr, npages, IOMMU_ERROR_CODE);
431}
432
433static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
434			 int nelems, enum dma_data_direction direction,
435			 unsigned long attrs)
436{
437	struct scatterlist *s, *outs, *segstart;
438	unsigned long flags, handle, prot, ctx;
439	dma_addr_t dma_next = 0, dma_addr;
440	unsigned int max_seg_size;
441	unsigned long seg_boundary_size;
442	int outcount, incount, i;
443	struct strbuf *strbuf;
444	struct iommu *iommu;
445	unsigned long base_shift;
446
447	BUG_ON(direction == DMA_NONE);
448
449	iommu = dev->archdata.iommu;
450	strbuf = dev->archdata.stc;
451	if (nelems == 0 || !iommu)
452		return 0;
453
454	spin_lock_irqsave(&iommu->lock, flags);
455
456	ctx = 0;
457	if (iommu->iommu_ctxflush)
458		ctx = iommu_alloc_ctx(iommu);
459
460	if (strbuf->strbuf_enabled)
461		prot = IOPTE_STREAMING(ctx);
462	else
463		prot = IOPTE_CONSISTENT(ctx);
464	if (direction != DMA_TO_DEVICE)
465		prot |= IOPTE_WRITE;
466
467	outs = s = segstart = &sglist[0];
468	outcount = 1;
469	incount = nelems;
470	handle = 0;
471
472	/* Init first segment length for backout at failure */
473	outs->dma_length = 0;
474
475	max_seg_size = dma_get_max_seg_size(dev);
476	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
477				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
478	base_shift = iommu->tbl.table_map_base >> IO_PAGE_SHIFT;
479	for_each_sg(sglist, s, nelems, i) {
480		unsigned long paddr, npages, entry, out_entry = 0, slen;
481		iopte_t *base;
482
483		slen = s->length;
484		/* Sanity check */
485		if (slen == 0) {
486			dma_next = 0;
487			continue;
488		}
489		/* Allocate iommu entries for that segment */
490		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
491		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
492		entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages,
493					      &handle, (unsigned long)(-1), 0);
494
495		/* Handle failure */
496		if (unlikely(entry == IOMMU_ERROR_CODE)) {
497			if (printk_ratelimit())
498				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
499				       " npages %lx\n", iommu, paddr, npages);
500			goto iommu_map_failed;
501		}
502
503		base = iommu->page_table + entry;
504
505		/* Convert entry to a dma_addr_t */
506		dma_addr = iommu->tbl.table_map_base +
507			(entry << IO_PAGE_SHIFT);
508		dma_addr |= (s->offset & ~IO_PAGE_MASK);
509
510		/* Insert into HW table */
511		paddr &= IO_PAGE_MASK;
512		while (npages--) {
513			iopte_val(*base) = prot | paddr;
514			base++;
515			paddr += IO_PAGE_SIZE;
516		}
517
518		/* If we are in an open segment, try merging */
519		if (segstart != s) {
520			/* We cannot merge if:
521			 * - allocated dma_addr isn't contiguous to previous allocation
522			 */
523			if ((dma_addr != dma_next) ||
524			    (outs->dma_length + s->length > max_seg_size) ||
525			    (is_span_boundary(out_entry, base_shift,
526					      seg_boundary_size, outs, s))) {
527				/* Can't merge: create a new segment */
528				segstart = s;
529				outcount++;
530				outs = sg_next(outs);
531			} else {
532				outs->dma_length += s->length;
533			}
534		}
535
536		if (segstart == s) {
537			/* This is a new segment, fill entries */
538			outs->dma_address = dma_addr;
539			outs->dma_length = slen;
540			out_entry = entry;
541		}
542
543		/* Calculate next page pointer for contiguous check */
544		dma_next = dma_addr + slen;
545	}
546
547	spin_unlock_irqrestore(&iommu->lock, flags);
548
549	if (outcount < incount) {
550		outs = sg_next(outs);
551		outs->dma_address = SPARC_MAPPING_ERROR;
552		outs->dma_length = 0;
553	}
554
555	return outcount;
556
557iommu_map_failed:
558	for_each_sg(sglist, s, nelems, i) {
559		if (s->dma_length != 0) {
560			unsigned long vaddr, npages, entry, j;
561			iopte_t *base;
562
563			vaddr = s->dma_address & IO_PAGE_MASK;
564			npages = iommu_num_pages(s->dma_address, s->dma_length,
565						 IO_PAGE_SIZE);
566
567			entry = (vaddr - iommu->tbl.table_map_base)
568				>> IO_PAGE_SHIFT;
569			base = iommu->page_table + entry;
570
571			for (j = 0; j < npages; j++)
572				iopte_make_dummy(iommu, base + j);
573
574			iommu_tbl_range_free(&iommu->tbl, vaddr, npages,
575					     IOMMU_ERROR_CODE);
576
577			s->dma_address = SPARC_MAPPING_ERROR;
578			s->dma_length = 0;
579		}
580		if (s == outs)
581			break;
582	}
583	spin_unlock_irqrestore(&iommu->lock, flags);
584
585	return 0;
586}
587
588/* If contexts are being used, they are the same in all of the mappings
589 * we make for a particular SG.
590 */
591static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
592{
593	unsigned long ctx = 0;
594
595	if (iommu->iommu_ctxflush) {
596		iopte_t *base;
597		u32 bus_addr;
598		struct iommu_map_table *tbl = &iommu->tbl;
599
600		bus_addr = sg->dma_address & IO_PAGE_MASK;
601		base = iommu->page_table +
602			((bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT);
603
604		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
605	}
606	return ctx;
607}
608
609static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
610			    int nelems, enum dma_data_direction direction,
611			    unsigned long attrs)
612{
613	unsigned long flags, ctx;
614	struct scatterlist *sg;
615	struct strbuf *strbuf;
616	struct iommu *iommu;
617
618	BUG_ON(direction == DMA_NONE);
619
620	iommu = dev->archdata.iommu;
621	strbuf = dev->archdata.stc;
622
623	ctx = fetch_sg_ctx(iommu, sglist);
624
625	spin_lock_irqsave(&iommu->lock, flags);
626
627	sg = sglist;
628	while (nelems--) {
629		dma_addr_t dma_handle = sg->dma_address;
630		unsigned int len = sg->dma_length;
631		unsigned long npages, entry;
632		iopte_t *base;
633		int i;
634
635		if (!len)
636			break;
637		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
638
639		entry = ((dma_handle - iommu->tbl.table_map_base)
640			 >> IO_PAGE_SHIFT);
641		base = iommu->page_table + entry;
642
643		dma_handle &= IO_PAGE_MASK;
644		if (strbuf->strbuf_enabled && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
645			strbuf_flush(strbuf, iommu, dma_handle, ctx,
646				     npages, direction);
647
648		for (i = 0; i < npages; i++)
649			iopte_make_dummy(iommu, base + i);
650
651		iommu_tbl_range_free(&iommu->tbl, dma_handle, npages,
652				     IOMMU_ERROR_CODE);
653		sg = sg_next(sg);
654	}
655
656	iommu_free_ctx(iommu, ctx);
657
658	spin_unlock_irqrestore(&iommu->lock, flags);
659}
660
661static void dma_4u_sync_single_for_cpu(struct device *dev,
662				       dma_addr_t bus_addr, size_t sz,
663				       enum dma_data_direction direction)
664{
665	struct iommu *iommu;
666	struct strbuf *strbuf;
667	unsigned long flags, ctx, npages;
668
669	iommu = dev->archdata.iommu;
670	strbuf = dev->archdata.stc;
671
672	if (!strbuf->strbuf_enabled)
673		return;
674
675	spin_lock_irqsave(&iommu->lock, flags);
676
677	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
678	npages >>= IO_PAGE_SHIFT;
679	bus_addr &= IO_PAGE_MASK;
680
681	/* Step 1: Record the context, if any. */
682	ctx = 0;
683	if (iommu->iommu_ctxflush &&
684	    strbuf->strbuf_ctxflush) {
685		iopte_t *iopte;
686		struct iommu_map_table *tbl = &iommu->tbl;
687
688		iopte = iommu->page_table +
689			((bus_addr - tbl->table_map_base)>>IO_PAGE_SHIFT);
690		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
691	}
692
693	/* Step 2: Kick data out of streaming buffers. */
694	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
695
696	spin_unlock_irqrestore(&iommu->lock, flags);
697}
698
699static void dma_4u_sync_sg_for_cpu(struct device *dev,
700				   struct scatterlist *sglist, int nelems,
701				   enum dma_data_direction direction)
702{
703	struct iommu *iommu;
704	struct strbuf *strbuf;
705	unsigned long flags, ctx, npages, i;
706	struct scatterlist *sg, *sgprv;
707	u32 bus_addr;
708
709	iommu = dev->archdata.iommu;
710	strbuf = dev->archdata.stc;
711
712	if (!strbuf->strbuf_enabled)
713		return;
714
715	spin_lock_irqsave(&iommu->lock, flags);
716
717	/* Step 1: Record the context, if any. */
718	ctx = 0;
719	if (iommu->iommu_ctxflush &&
720	    strbuf->strbuf_ctxflush) {
721		iopte_t *iopte;
722		struct iommu_map_table *tbl = &iommu->tbl;
723
724		iopte = iommu->page_table + ((sglist[0].dma_address -
725			tbl->table_map_base) >> IO_PAGE_SHIFT);
726		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
727	}
728
729	/* Step 2: Kick data out of streaming buffers. */
730	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
731	sgprv = NULL;
732	for_each_sg(sglist, sg, nelems, i) {
733		if (sg->dma_length == 0)
734			break;
735		sgprv = sg;
736	}
737
738	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
739		  - bus_addr) >> IO_PAGE_SHIFT;
740	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
741
742	spin_unlock_irqrestore(&iommu->lock, flags);
743}
744
745static int dma_4u_mapping_error(struct device *dev, dma_addr_t dma_addr)
746{
747	return dma_addr == SPARC_MAPPING_ERROR;
748}
749
750static int dma_4u_supported(struct device *dev, u64 device_mask)
751{
752	struct iommu *iommu = dev->archdata.iommu;
753
754	if (device_mask > DMA_BIT_MASK(32))
755		return 0;
756	if ((device_mask & iommu->dma_addr_mask) == iommu->dma_addr_mask)
757		return 1;
758#ifdef CONFIG_PCI
759	if (dev_is_pci(dev))
760		return pci64_dma_supported(to_pci_dev(dev), device_mask);
761#endif
762	return 0;
763}
764
765static const struct dma_map_ops sun4u_dma_ops = {
766	.alloc			= dma_4u_alloc_coherent,
767	.free			= dma_4u_free_coherent,
768	.map_page		= dma_4u_map_page,
769	.unmap_page		= dma_4u_unmap_page,
770	.map_sg			= dma_4u_map_sg,
771	.unmap_sg		= dma_4u_unmap_sg,
772	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
773	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
774	.dma_supported		= dma_4u_supported,
775	.mapping_error		= dma_4u_mapping_error,
776};
777
778const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
779EXPORT_SYMBOL(dma_ops);
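The sun4u_dma_ops table above is not called directly by drivers; it is reached through the generic DMA-mapping API, which dispatches through the dma_ops pointer exported above. The sketch below is illustrative only and is not part of the file: example_dma_cycle() and its dev/buf/len parameters are hypothetical, and it assumes <linux/dma-mapping.h> is included. On a sun4u machine these calls resolve to dma_4u_map_page(), dma_4u_mapping_error() and dma_4u_unmap_page().

/* Illustrative sketch (not part of iommu.c): a streaming mapping as a
 * driver would issue it.  dma_map_single() lands in dma_4u_map_page()
 * via the dma_ops table above; dev, buf and len are hypothetical.
 */
static int example_dma_cycle(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))	/* dma_4u_mapping_error() */
		return -ENOMEM;

	/* ... program the device with 'handle' and run the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}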
v3.5.6 (arch/sparc/kernel/iommu.c)
 
  1/* iommu.c: Generic sparc64 IOMMU support.
  2 *
  3 * Copyright (C) 1999, 2007, 2008 David S. Miller (davem@davemloft.net)
  4 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
  5 */
  6
  7#include <linux/kernel.h>
  8#include <linux/export.h>
  9#include <linux/slab.h>
 10#include <linux/delay.h>
 11#include <linux/device.h>
 12#include <linux/dma-mapping.h>
 13#include <linux/errno.h>
 14#include <linux/iommu-helper.h>
 15#include <linux/bitmap.h>
 16
 17#ifdef CONFIG_PCI
 18#include <linux/pci.h>
 19#endif
 20
 21#include <asm/iommu.h>
 22
 23#include "iommu_common.h"
 24
 25#define STC_CTXMATCH_ADDR(STC, CTX)	\
 26	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
 27#define STC_FLUSHFLAG_INIT(STC) \
 28	(*((STC)->strbuf_flushflag) = 0UL)
 29#define STC_FLUSHFLAG_SET(STC) \
 30	(*((STC)->strbuf_flushflag) != 0UL)
 31
 32#define iommu_read(__reg) \
 33({	u64 __ret; \
 34	__asm__ __volatile__("ldxa [%1] %2, %0" \
 35			     : "=r" (__ret) \
 36			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
 37			     : "memory"); \
 38	__ret; \
 39})
 40#define iommu_write(__reg, __val) \
 41	__asm__ __volatile__("stxa %0, [%1] %2" \
 42			     : /* no outputs */ \
 43			     : "r" (__val), "r" (__reg), \
 44			       "i" (ASI_PHYS_BYPASS_EC_E))
 45
 46/* Must be invoked under the IOMMU lock. */
 47static void iommu_flushall(struct iommu *iommu)
 48{
 49	if (iommu->iommu_flushinv) {
 50		iommu_write(iommu->iommu_flushinv, ~(u64)0);
 51	} else {
 52		unsigned long tag;
 53		int entry;
 54
 55		tag = iommu->iommu_tags;
 56		for (entry = 0; entry < 16; entry++) {
 57			iommu_write(tag, 0);
 58			tag += 8;
 59		}
 60
 61		/* Ensure completion of previous PIO writes. */
 62		(void) iommu_read(iommu->write_complete_reg);
 63	}
 64}
 65
 66#define IOPTE_CONSISTENT(CTX) \
 67	(IOPTE_VALID | IOPTE_CACHE | \
 68	 (((CTX) << 47) & IOPTE_CONTEXT))
 69
 70#define IOPTE_STREAMING(CTX) \
 71	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
 72
 73/* Existing mappings are never marked invalid, instead they
 74 * are pointed to a dummy page.
 75 */
 76#define IOPTE_IS_DUMMY(iommu, iopte)	\
 77	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
 78
 79static inline void iopte_make_dummy(struct iommu *iommu, iopte_t *iopte)
 80{
 81	unsigned long val = iopte_val(*iopte);
 82
 83	val &= ~IOPTE_PAGE;
 84	val |= iommu->dummy_page_pa;
 85
 86	iopte_val(*iopte) = val;
 87}
 88
 89/* Based almost entirely upon the ppc64 iommu allocator.  If you use the 'handle'
 90 * facility it must all be done in one pass while under the iommu lock.
 91 *
 92 * On sun4u platforms, we only flush the IOMMU once every time we've passed
 93 * over the entire page table doing allocations.  Therefore we only ever advance
 94 * the hint and cannot backtrack it.
 95 */
 96unsigned long iommu_range_alloc(struct device *dev,
 97				struct iommu *iommu,
 98				unsigned long npages,
 99				unsigned long *handle)
100{
101	unsigned long n, end, start, limit, boundary_size;
102	struct iommu_arena *arena = &iommu->arena;
103	int pass = 0;
104
105	/* This allocator was derived from x86_64's bit string search */
106
107	/* Sanity check */
108	if (unlikely(npages == 0)) {
109		if (printk_ratelimit())
110			WARN_ON(1);
111		return DMA_ERROR_CODE;
112	}
113
114	if (handle && *handle)
115		start = *handle;
116	else
117		start = arena->hint;
118
119	limit = arena->limit;
120
121	/* The case below can happen if we have a small segment appended
122	 * to a large, or when the previous alloc was at the very end of
123	 * the available space. If so, go back to the beginning and flush.
124	 */
125	if (start >= limit) {
126		start = 0;
127		if (iommu->flush_all)
128			iommu->flush_all(iommu);
129	}
130
131 again:
132
133	if (dev)
134		boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
135				      1 << IO_PAGE_SHIFT);
136	else
137		boundary_size = ALIGN(1UL << 32, 1 << IO_PAGE_SHIFT);
138
139	n = iommu_area_alloc(arena->map, limit, start, npages,
140			     iommu->page_table_map_base >> IO_PAGE_SHIFT,
141			     boundary_size >> IO_PAGE_SHIFT, 0);
142	if (n == -1) {
143		if (likely(pass < 1)) {
144			/* First failure, rescan from the beginning.  */
145			start = 0;
146			if (iommu->flush_all)
147				iommu->flush_all(iommu);
148			pass++;
149			goto again;
150		} else {
151			/* Second failure, give up */
152			return DMA_ERROR_CODE;
153		}
154	}
155
156	end = n + npages;
157
158	arena->hint = end;
159
160	/* Update handle for SG allocations */
161	if (handle)
162		*handle = end;
163
164	return n;
165}
166
167void iommu_range_free(struct iommu *iommu, dma_addr_t dma_addr, unsigned long npages)
168{
169	struct iommu_arena *arena = &iommu->arena;
170	unsigned long entry;
171
172	entry = (dma_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT;
173
174	bitmap_clear(arena->map, entry, npages);
175}
176
177int iommu_table_init(struct iommu *iommu, int tsbsize,
178		     u32 dma_offset, u32 dma_addr_mask,
179		     int numa_node)
180{
181	unsigned long i, order, sz, num_tsb_entries;
182	struct page *page;
183
184	num_tsb_entries = tsbsize / sizeof(iopte_t);
185
186	/* Setup initial software IOMMU state. */
187	spin_lock_init(&iommu->lock);
188	iommu->ctx_lowest_free = 1;
189	iommu->page_table_map_base = dma_offset;
190	iommu->dma_addr_mask = dma_addr_mask;
191
192	/* Allocate and initialize the free area map.  */
193	sz = num_tsb_entries / 8;
194	sz = (sz + 7UL) & ~7UL;
195	iommu->arena.map = kmalloc_node(sz, GFP_KERNEL, numa_node);
196	if (!iommu->arena.map) {
197		printk(KERN_ERR "IOMMU: Error, kmalloc(arena.map) failed.\n");
198		return -ENOMEM;
199	}
200	memset(iommu->arena.map, 0, sz);
201	iommu->arena.limit = num_tsb_entries;
202
203	if (tlb_type != hypervisor)
204		iommu->flush_all = iommu_flushall;
205
206	/* Allocate and initialize the dummy page which we
207	 * set inactive IO PTEs to point to.
208	 */
209	page = alloc_pages_node(numa_node, GFP_KERNEL, 0);
210	if (!page) {
211		printk(KERN_ERR "IOMMU: Error, gfp(dummy_page) failed.\n");
212		goto out_free_map;
213	}
214	iommu->dummy_page = (unsigned long) page_address(page);
215	memset((void *)iommu->dummy_page, 0, PAGE_SIZE);
216	iommu->dummy_page_pa = (unsigned long) __pa(iommu->dummy_page);
217
218	/* Now allocate and setup the IOMMU page table itself.  */
219	order = get_order(tsbsize);
220	page = alloc_pages_node(numa_node, GFP_KERNEL, order);
221	if (!page) {
222		printk(KERN_ERR "IOMMU: Error, gfp(tsb) failed.\n");
223		goto out_free_dummy_page;
224	}
225	iommu->page_table = (iopte_t *)page_address(page);
226
227	for (i = 0; i < num_tsb_entries; i++)
228		iopte_make_dummy(iommu, &iommu->page_table[i]);
229
230	return 0;
231
232out_free_dummy_page:
233	free_page(iommu->dummy_page);
234	iommu->dummy_page = 0UL;
235
236out_free_map:
237	kfree(iommu->arena.map);
238	iommu->arena.map = NULL;
239
240	return -ENOMEM;
241}
242
243static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu,
244				    unsigned long npages)
245{
246	unsigned long entry;
247
248	entry = iommu_range_alloc(dev, iommu, npages, NULL);
249	if (unlikely(entry == DMA_ERROR_CODE))
250		return NULL;
251
252	return iommu->page_table + entry;
253}
254
255static int iommu_alloc_ctx(struct iommu *iommu)
256{
257	int lowest = iommu->ctx_lowest_free;
258	int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest);
259
260	if (unlikely(n == IOMMU_NUM_CTXS)) {
261		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
262		if (unlikely(n == lowest)) {
263			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
264			n = 0;
265		}
266	}
267	if (n)
268		__set_bit(n, iommu->ctx_bitmap);
269
270	return n;
271}
272
273static inline void iommu_free_ctx(struct iommu *iommu, int ctx)
274{
275	if (likely(ctx)) {
276		__clear_bit(ctx, iommu->ctx_bitmap);
277		if (ctx < iommu->ctx_lowest_free)
278			iommu->ctx_lowest_free = ctx;
279	}
280}
281
282static void *dma_4u_alloc_coherent(struct device *dev, size_t size,
283				   dma_addr_t *dma_addrp, gfp_t gfp,
284				   struct dma_attrs *attrs)
285{
286	unsigned long flags, order, first_page;
287	struct iommu *iommu;
288	struct page *page;
289	int npages, nid;
290	iopte_t *iopte;
291	void *ret;
292
293	size = IO_PAGE_ALIGN(size);
294	order = get_order(size);
295	if (order >= 10)
296		return NULL;
297
298	nid = dev->archdata.numa_node;
299	page = alloc_pages_node(nid, gfp, order);
300	if (unlikely(!page))
301		return NULL;
302
303	first_page = (unsigned long) page_address(page);
304	memset((char *)first_page, 0, PAGE_SIZE << order);
305
306	iommu = dev->archdata.iommu;
307
308	spin_lock_irqsave(&iommu->lock, flags);
309	iopte = alloc_npages(dev, iommu, size >> IO_PAGE_SHIFT);
310	spin_unlock_irqrestore(&iommu->lock, flags);
311
312	if (unlikely(iopte == NULL)) {
313		free_pages(first_page, order);
314		return NULL;
315	}
316
317	*dma_addrp = (iommu->page_table_map_base +
318		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
319	ret = (void *) first_page;
320	npages = size >> IO_PAGE_SHIFT;
321	first_page = __pa(first_page);
322	while (npages--) {
323		iopte_val(*iopte) = (IOPTE_CONSISTENT(0UL) |
324				     IOPTE_WRITE |
325				     (first_page & IOPTE_PAGE));
326		iopte++;
327		first_page += IO_PAGE_SIZE;
328	}
329
330	return ret;
331}
332
333static void dma_4u_free_coherent(struct device *dev, size_t size,
334				 void *cpu, dma_addr_t dvma,
335				 struct dma_attrs *attrs)
336{
337	struct iommu *iommu;
338	unsigned long flags, order, npages;
339
340	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
341	iommu = dev->archdata.iommu;
342
343	spin_lock_irqsave(&iommu->lock, flags);
344
345	iommu_range_free(iommu, dvma, npages);
346
347	spin_unlock_irqrestore(&iommu->lock, flags);
348
349	order = get_order(size);
350	if (order < 10)
351		free_pages((unsigned long)cpu, order);
352}
353
354static dma_addr_t dma_4u_map_page(struct device *dev, struct page *page,
355				  unsigned long offset, size_t sz,
356				  enum dma_data_direction direction,
357				  struct dma_attrs *attrs)
358{
359	struct iommu *iommu;
360	struct strbuf *strbuf;
361	iopte_t *base;
362	unsigned long flags, npages, oaddr;
363	unsigned long i, base_paddr, ctx;
364	u32 bus_addr, ret;
365	unsigned long iopte_protection;
366
367	iommu = dev->archdata.iommu;
368	strbuf = dev->archdata.stc;
369
370	if (unlikely(direction == DMA_NONE))
371		goto bad_no_ctx;
372
373	oaddr = (unsigned long)(page_address(page) + offset);
374	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
375	npages >>= IO_PAGE_SHIFT;
376
377	spin_lock_irqsave(&iommu->lock, flags);
378	base = alloc_npages(dev, iommu, npages);
379	ctx = 0;
380	if (iommu->iommu_ctxflush)
381		ctx = iommu_alloc_ctx(iommu);
382	spin_unlock_irqrestore(&iommu->lock, flags);
383
384	if (unlikely(!base))
385		goto bad;
386
387	bus_addr = (iommu->page_table_map_base +
388		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
389	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
390	base_paddr = __pa(oaddr & IO_PAGE_MASK);
391	if (strbuf->strbuf_enabled)
392		iopte_protection = IOPTE_STREAMING(ctx);
393	else
394		iopte_protection = IOPTE_CONSISTENT(ctx);
395	if (direction != DMA_TO_DEVICE)
396		iopte_protection |= IOPTE_WRITE;
397
398	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
399		iopte_val(*base) = iopte_protection | base_paddr;
400
401	return ret;
402
403bad:
404	iommu_free_ctx(iommu, ctx);
405bad_no_ctx:
406	if (printk_ratelimit())
407		WARN_ON(1);
408	return DMA_ERROR_CODE;
409}
410
411static void strbuf_flush(struct strbuf *strbuf, struct iommu *iommu,
412			 u32 vaddr, unsigned long ctx, unsigned long npages,
413			 enum dma_data_direction direction)
414{
415	int limit;
416
417	if (strbuf->strbuf_ctxflush &&
418	    iommu->iommu_ctxflush) {
419		unsigned long matchreg, flushreg;
420		u64 val;
421
422		flushreg = strbuf->strbuf_ctxflush;
423		matchreg = STC_CTXMATCH_ADDR(strbuf, ctx);
424
425		iommu_write(flushreg, ctx);
426		val = iommu_read(matchreg);
427		val &= 0xffff;
428		if (!val)
429			goto do_flush_sync;
430
431		while (val) {
432			if (val & 0x1)
433				iommu_write(flushreg, ctx);
434			val >>= 1;
435		}
436		val = iommu_read(matchreg);
437		if (unlikely(val)) {
438			printk(KERN_WARNING "strbuf_flush: ctx flush "
439			       "timeout matchreg[%llx] ctx[%lx]\n",
440			       val, ctx);
441			goto do_page_flush;
442		}
443	} else {
444		unsigned long i;
445
446	do_page_flush:
447		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
448			iommu_write(strbuf->strbuf_pflush, vaddr);
449	}
450
451do_flush_sync:
452	/* If the device could not have possibly put dirty data into
453	 * the streaming cache, no flush-flag synchronization needs
454	 * to be performed.
455	 */
456	if (direction == DMA_TO_DEVICE)
457		return;
458
459	STC_FLUSHFLAG_INIT(strbuf);
460	iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
461	(void) iommu_read(iommu->write_complete_reg);
462
463	limit = 100000;
464	while (!STC_FLUSHFLAG_SET(strbuf)) {
465		limit--;
466		if (!limit)
467			break;
468		udelay(1);
469		rmb();
470	}
471	if (!limit)
472		printk(KERN_WARNING "strbuf_flush: flushflag timeout "
473		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
474		       vaddr, ctx, npages);
475}
476
477static void dma_4u_unmap_page(struct device *dev, dma_addr_t bus_addr,
478			      size_t sz, enum dma_data_direction direction,
479			      struct dma_attrs *attrs)
480{
481	struct iommu *iommu;
482	struct strbuf *strbuf;
483	iopte_t *base;
484	unsigned long flags, npages, ctx, i;
485
486	if (unlikely(direction == DMA_NONE)) {
487		if (printk_ratelimit())
488			WARN_ON(1);
489		return;
490	}
491
492	iommu = dev->archdata.iommu;
493	strbuf = dev->archdata.stc;
494
495	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
496	npages >>= IO_PAGE_SHIFT;
497	base = iommu->page_table +
498		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
499	bus_addr &= IO_PAGE_MASK;
500
501	spin_lock_irqsave(&iommu->lock, flags);
502
503	/* Record the context, if any. */
504	ctx = 0;
505	if (iommu->iommu_ctxflush)
506		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
507
508	/* Step 1: Kick data out of streaming buffers if necessary. */
509	if (strbuf->strbuf_enabled)
510		strbuf_flush(strbuf, iommu, bus_addr, ctx,
511			     npages, direction);
512
513	/* Step 2: Clear out TSB entries. */
514	for (i = 0; i < npages; i++)
515		iopte_make_dummy(iommu, base + i);
516
517	iommu_range_free(iommu, bus_addr, npages);
518
519	iommu_free_ctx(iommu, ctx);
520
521	spin_unlock_irqrestore(&iommu->lock, flags);
522}
523
524static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
525			 int nelems, enum dma_data_direction direction,
526			 struct dma_attrs *attrs)
527{
528	struct scatterlist *s, *outs, *segstart;
529	unsigned long flags, handle, prot, ctx;
530	dma_addr_t dma_next = 0, dma_addr;
531	unsigned int max_seg_size;
532	unsigned long seg_boundary_size;
533	int outcount, incount, i;
534	struct strbuf *strbuf;
535	struct iommu *iommu;
536	unsigned long base_shift;
537
538	BUG_ON(direction == DMA_NONE);
539
540	iommu = dev->archdata.iommu;
541	strbuf = dev->archdata.stc;
542	if (nelems == 0 || !iommu)
543		return 0;
544
545	spin_lock_irqsave(&iommu->lock, flags);
546
547	ctx = 0;
548	if (iommu->iommu_ctxflush)
549		ctx = iommu_alloc_ctx(iommu);
550
551	if (strbuf->strbuf_enabled)
552		prot = IOPTE_STREAMING(ctx);
553	else
554		prot = IOPTE_CONSISTENT(ctx);
555	if (direction != DMA_TO_DEVICE)
556		prot |= IOPTE_WRITE;
557
558	outs = s = segstart = &sglist[0];
559	outcount = 1;
560	incount = nelems;
561	handle = 0;
562
563	/* Init first segment length for backout at failure */
564	outs->dma_length = 0;
565
566	max_seg_size = dma_get_max_seg_size(dev);
567	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
568				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
569	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
570	for_each_sg(sglist, s, nelems, i) {
571		unsigned long paddr, npages, entry, out_entry = 0, slen;
572		iopte_t *base;
573
574		slen = s->length;
575		/* Sanity check */
576		if (slen == 0) {
577			dma_next = 0;
578			continue;
579		}
580		/* Allocate iommu entries for that segment */
581		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
582		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
583		entry = iommu_range_alloc(dev, iommu, npages, &handle);
584
585		/* Handle failure */
586		if (unlikely(entry == DMA_ERROR_CODE)) {
587			if (printk_ratelimit())
588				printk(KERN_INFO "iommu_alloc failed, iommu %p paddr %lx"
589				       " npages %lx\n", iommu, paddr, npages);
590			goto iommu_map_failed;
591		}
592
593		base = iommu->page_table + entry;
594
595		/* Convert entry to a dma_addr_t */
596		dma_addr = iommu->page_table_map_base +
597			(entry << IO_PAGE_SHIFT);
598		dma_addr |= (s->offset & ~IO_PAGE_MASK);
599
600		/* Insert into HW table */
601		paddr &= IO_PAGE_MASK;
602		while (npages--) {
603			iopte_val(*base) = prot | paddr;
604			base++;
605			paddr += IO_PAGE_SIZE;
606		}
607
608		/* If we are in an open segment, try merging */
609		if (segstart != s) {
610			/* We cannot merge if:
611			 * - allocated dma_addr isn't contiguous to previous allocation
612			 */
613			if ((dma_addr != dma_next) ||
614			    (outs->dma_length + s->length > max_seg_size) ||
615			    (is_span_boundary(out_entry, base_shift,
616					      seg_boundary_size, outs, s))) {
617				/* Can't merge: create a new segment */
618				segstart = s;
619				outcount++;
620				outs = sg_next(outs);
621			} else {
622				outs->dma_length += s->length;
623			}
624		}
625
626		if (segstart == s) {
627			/* This is a new segment, fill entries */
628			outs->dma_address = dma_addr;
629			outs->dma_length = slen;
630			out_entry = entry;
631		}
632
633		/* Calculate next page pointer for contiguous check */
634		dma_next = dma_addr + slen;
635	}
636
637	spin_unlock_irqrestore(&iommu->lock, flags);
638
639	if (outcount < incount) {
640		outs = sg_next(outs);
641		outs->dma_address = DMA_ERROR_CODE;
642		outs->dma_length = 0;
643	}
644
645	return outcount;
646
647iommu_map_failed:
648	for_each_sg(sglist, s, nelems, i) {
649		if (s->dma_length != 0) {
650			unsigned long vaddr, npages, entry, j;
651			iopte_t *base;
652
653			vaddr = s->dma_address & IO_PAGE_MASK;
654			npages = iommu_num_pages(s->dma_address, s->dma_length,
655						 IO_PAGE_SIZE);
656			iommu_range_free(iommu, vaddr, npages);
657
658			entry = (vaddr - iommu->page_table_map_base)
659				>> IO_PAGE_SHIFT;
660			base = iommu->page_table + entry;
661
662			for (j = 0; j < npages; j++)
663				iopte_make_dummy(iommu, base + j);
664
665			s->dma_address = DMA_ERROR_CODE;
666			s->dma_length = 0;
667		}
668		if (s == outs)
669			break;
670	}
671	spin_unlock_irqrestore(&iommu->lock, flags);
672
673	return 0;
674}
675
676/* If contexts are being used, they are the same in all of the mappings
677 * we make for a particular SG.
678 */
679static unsigned long fetch_sg_ctx(struct iommu *iommu, struct scatterlist *sg)
680{
681	unsigned long ctx = 0;
682
683	if (iommu->iommu_ctxflush) {
684		iopte_t *base;
685		u32 bus_addr;
686
687		bus_addr = sg->dma_address & IO_PAGE_MASK;
688		base = iommu->page_table +
689			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
690
691		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;
692	}
693	return ctx;
694}
695
696static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
697			    int nelems, enum dma_data_direction direction,
698			    struct dma_attrs *attrs)
699{
700	unsigned long flags, ctx;
701	struct scatterlist *sg;
702	struct strbuf *strbuf;
703	struct iommu *iommu;
704
705	BUG_ON(direction == DMA_NONE);
706
707	iommu = dev->archdata.iommu;
708	strbuf = dev->archdata.stc;
709
710	ctx = fetch_sg_ctx(iommu, sglist);
711
712	spin_lock_irqsave(&iommu->lock, flags);
713
714	sg = sglist;
715	while (nelems--) {
716		dma_addr_t dma_handle = sg->dma_address;
717		unsigned int len = sg->dma_length;
718		unsigned long npages, entry;
719		iopte_t *base;
720		int i;
721
722		if (!len)
723			break;
724		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);
725		iommu_range_free(iommu, dma_handle, npages);
726
727		entry = ((dma_handle - iommu->page_table_map_base)
728			 >> IO_PAGE_SHIFT);
729		base = iommu->page_table + entry;
730
731		dma_handle &= IO_PAGE_MASK;
732		if (strbuf->strbuf_enabled)
733			strbuf_flush(strbuf, iommu, dma_handle, ctx,
734				     npages, direction);
735
736		for (i = 0; i < npages; i++)
737			iopte_make_dummy(iommu, base + i);
738
739		sg = sg_next(sg);
740	}
741
742	iommu_free_ctx(iommu, ctx);
743
744	spin_unlock_irqrestore(&iommu->lock, flags);
745}
746
747static void dma_4u_sync_single_for_cpu(struct device *dev,
748				       dma_addr_t bus_addr, size_t sz,
749				       enum dma_data_direction direction)
750{
751	struct iommu *iommu;
752	struct strbuf *strbuf;
753	unsigned long flags, ctx, npages;
754
755	iommu = dev->archdata.iommu;
756	strbuf = dev->archdata.stc;
757
758	if (!strbuf->strbuf_enabled)
759		return;
760
761	spin_lock_irqsave(&iommu->lock, flags);
762
763	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
764	npages >>= IO_PAGE_SHIFT;
765	bus_addr &= IO_PAGE_MASK;
766
767	/* Step 1: Record the context, if any. */
768	ctx = 0;
769	if (iommu->iommu_ctxflush &&
770	    strbuf->strbuf_ctxflush) {
771		iopte_t *iopte;
772
773		iopte = iommu->page_table +
774			((bus_addr - iommu->page_table_map_base)>>IO_PAGE_SHIFT);
775		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
776	}
777
778	/* Step 2: Kick data out of streaming buffers. */
779	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
780
781	spin_unlock_irqrestore(&iommu->lock, flags);
782}
783
784static void dma_4u_sync_sg_for_cpu(struct device *dev,
785				   struct scatterlist *sglist, int nelems,
786				   enum dma_data_direction direction)
787{
788	struct iommu *iommu;
789	struct strbuf *strbuf;
790	unsigned long flags, ctx, npages, i;
791	struct scatterlist *sg, *sgprv;
792	u32 bus_addr;
793
794	iommu = dev->archdata.iommu;
795	strbuf = dev->archdata.stc;
796
797	if (!strbuf->strbuf_enabled)
798		return;
799
800	spin_lock_irqsave(&iommu->lock, flags);
801
802	/* Step 1: Record the context, if any. */
803	ctx = 0;
804	if (iommu->iommu_ctxflush &&
805	    strbuf->strbuf_ctxflush) {
806		iopte_t *iopte;
807
808		iopte = iommu->page_table +
809			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
810		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
811	}
812
813	/* Step 2: Kick data out of streaming buffers. */
814	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
815	sgprv = NULL;
816	for_each_sg(sglist, sg, nelems, i) {
817		if (sg->dma_length == 0)
818			break;
819		sgprv = sg;
820	}
821
822	npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
823		  - bus_addr) >> IO_PAGE_SHIFT;
824	strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
825
826	spin_unlock_irqrestore(&iommu->lock, flags);
827}
828
829static struct dma_map_ops sun4u_dma_ops = {
830	.alloc			= dma_4u_alloc_coherent,
831	.free			= dma_4u_free_coherent,
832	.map_page		= dma_4u_map_page,
833	.unmap_page		= dma_4u_unmap_page,
834	.map_sg			= dma_4u_map_sg,
835	.unmap_sg		= dma_4u_unmap_sg,
836	.sync_single_for_cpu	= dma_4u_sync_single_for_cpu,
837	.sync_sg_for_cpu	= dma_4u_sync_sg_for_cpu,
838};
839
840struct dma_map_ops *dma_ops = &sun4u_dma_ops;
841EXPORT_SYMBOL(dma_ops);
842
843extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
844
845int dma_supported(struct device *dev, u64 device_mask)
846{
847	struct iommu *iommu = dev->archdata.iommu;
848	u64 dma_addr_mask = iommu->dma_addr_mask;
849
850	if (device_mask >= (1UL << 32UL))
851		return 0;
852
853	if ((device_mask & dma_addr_mask) == dma_addr_mask)
854		return 1;
855
856#ifdef CONFIG_PCI
857	if (dev->bus == &pci_bus_type)
858		return pci64_dma_supported(to_pci_dev(dev), device_mask);
859#endif
860
861	return 0;
862}
863EXPORT_SYMBOL(dma_supported);
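In this older version dma_supported() is a plain exported function rather than a dma_map_ops method; drivers normally reach it through dma_set_mask(), which rejects any mask the IOMMU cannot satisfy. A minimal sketch under that assumption follows; example_probe() and pdev are hypothetical and not part of the file.

/* Illustrative sketch (not part of iommu.c): dma_set_mask() ends up
 * consulting the dma_supported() routine above before accepting the
 * device's DMA mask.  example_probe() and pdev are hypothetical.
 */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;	/* mask does not cover the IOMMU's DMA range */

	/* ... normal device setup continues here ... */
	return 0;
}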