1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3** IA64 System Bus Adapter (SBA) I/O MMU manager
4**
5** (c) Copyright 2002-2005 Alex Williamson
6** (c) Copyright 2002-2003 Grant Grundler
7** (c) Copyright 2002-2005 Hewlett-Packard Company
8**
9** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
10** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
11**
12**
13**
14** This module initializes the IOC (I/O Controller) found on HP
15** McKinley machines and their successors.
16**
17*/
18
19#include <linux/types.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/spinlock.h>
23#include <linux/slab.h>
24#include <linux/init.h>
25#include <linux/mm.h>
26#include <linux/string.h>
27#include <linux/pci.h>
28#include <linux/proc_fs.h>
29#include <linux/seq_file.h>
30#include <linux/acpi.h>
31#include <linux/efi.h>
32#include <linux/nodemask.h>
33#include <linux/bitops.h> /* hweight64() */
34#include <linux/crash_dump.h>
35#include <linux/iommu-helper.h>
36#include <linux/dma-mapping.h>
37#include <linux/prefetch.h>
38#include <linux/swiotlb.h>
39
40#include <asm/delay.h> /* ia64_get_itc() */
41#include <asm/io.h>
42#include <asm/page.h> /* PAGE_OFFSET */
43#include <asm/dma.h>
44
45#include <asm/acpi-ext.h>
46
47#define PFX "IOC: "
48
49/*
50** Enables timing of searches of the pdir resource map. Output in /proc.
51** Disabled by default to optimize performance.
52*/
53#undef PDIR_SEARCH_TIMING
54
55/*
56** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
57** not defined, all DMA will be 32bit and go through the TLB.
58** There's potentially a conflict in the bio merge code with us
59** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
60** appears to give more performance than bio-level virtual merging, we'll
61** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
62** completely restrict DMA to the IOMMU.
63*/
64#define ALLOW_IOV_BYPASS
65
66/*
67** This option specifically allows/disallows bypassing scatterlists with
68** multiple entries. Coalescing these entries can allow better DMA streaming
69** and in some cases shows better performance than entirely bypassing the
70** IOMMU. Performance increase on the order of 1-2% sequential output/input
71** using bonnie++ on a RAID0 MD device (sym2 & mpt).
72*/
73#undef ALLOW_IOV_BYPASS_SG
74
75/*
76** If a device prefetches beyond the end of a valid pdir entry, it will cause
77** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
78** disconnect on 4k boundaries and prevent such issues. If the device is
79** particularly aggressive, this option will keep the entire pdir valid such
80** that prefetching will hit a valid address. This could severely impact
81** error containment, and is therefore off by default. The page that is
82** used for spill-over is poisoned, so that should help debugging somewhat.
83*/
84#undef FULL_VALID_PDIR
85
86#define ENABLE_MARK_CLEAN
87
88/*
89** The number of debug flags is a clue - this code is fragile. NOTE: since
90** tightening the use of res_lock the resource bitmap and actual pdir are no
91** longer guaranteed to stay in sync. The sanity checking code isn't going to
92** like that.
93*/
94#undef DEBUG_SBA_INIT
95#undef DEBUG_SBA_RUN
96#undef DEBUG_SBA_RUN_SG
97#undef DEBUG_SBA_RESOURCE
98#undef ASSERT_PDIR_SANITY
99#undef DEBUG_LARGE_SG_ENTRIES
100#undef DEBUG_BYPASS
101
102#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
103#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
104#endif
105
106#define SBA_INLINE __inline__
107/* #define SBA_INLINE */
108
109#ifdef DEBUG_SBA_INIT
110#define DBG_INIT(x...) printk(x)
111#else
112#define DBG_INIT(x...)
113#endif
114
115#ifdef DEBUG_SBA_RUN
116#define DBG_RUN(x...) printk(x)
117#else
118#define DBG_RUN(x...)
119#endif
120
121#ifdef DEBUG_SBA_RUN_SG
122#define DBG_RUN_SG(x...) printk(x)
123#else
124#define DBG_RUN_SG(x...)
125#endif
126
127
128#ifdef DEBUG_SBA_RESOURCE
129#define DBG_RES(x...) printk(x)
130#else
131#define DBG_RES(x...)
132#endif
133
134#ifdef DEBUG_BYPASS
135#define DBG_BYPASS(x...) printk(x)
136#else
137#define DBG_BYPASS(x...)
138#endif
139
140#ifdef ASSERT_PDIR_SANITY
141#define ASSERT(expr) \
142 if(!(expr)) { \
143 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
144 panic(#expr); \
145 }
146#else
147#define ASSERT(expr)
148#endif
149
150/*
151** The number of pdir entries to "free" before issuing
152** a read to PCOM register to flush out PCOM writes.
153** Interacts with allocation granularity (ie 4 or 8 entries
154** allocated and free'd/purged at a time might make this
155** less interesting).
156*/
157#define DELAYED_RESOURCE_CNT 64
158
159#define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
160
161#define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
162#define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
163#define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
164#define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
165#define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
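/*
** Illustrative note: these IDs mirror the layout of the IOC_FUNC_ID
** register read in ioc_init() - the PCI device ID sits in the upper
** 16 bits and the vendor ID (PCI_VENDOR_ID_HP) in the lower 16 bits.
** For example, SX2000_IOC_ID is (0x12ec << 16) | PCI_VENDOR_ID_HP.
*/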
166
167#define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
168
169#define IOC_FUNC_ID 0x000
170#define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
171#define IOC_IBASE 0x300 /* IO TLB */
172#define IOC_IMASK 0x308
173#define IOC_PCOM 0x310
174#define IOC_TCNFG 0x318
175#define IOC_PDIR_BASE 0x320
176
177#define IOC_ROPE0_CFG 0x500
178#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
179
180
181/* AGP GART driver looks for this */
182#define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
183
184/*
185** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
186**
187** Some IOCs (sx1000) can run at the above page sizes, but are
188** really only supported using the IOC at a 4k page size.
189**
190** iovp_size could only be greater than PAGE_SIZE if we are
191** confident the drivers really only touch the next physical
192** page iff that driver instance owns it.
193*/
194static unsigned long iovp_size;
195static unsigned long iovp_shift;
196static unsigned long iovp_mask;
197
198struct ioc {
199 void __iomem *ioc_hpa; /* I/O MMU base address */
200 char *res_map; /* resource map, bit == pdir entry */
201 u64 *pdir_base; /* physical base address */
202 unsigned long ibase; /* pdir IOV Space base */
203 unsigned long imask; /* pdir IOV Space mask */
204
205 unsigned long *res_hint; /* next avail IOVP - circular search */
206 unsigned long dma_mask;
207 spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
208 /* clearing pdir to prevent races with allocations. */
209 unsigned int res_bitshift; /* from the RIGHT! */
210 unsigned int res_size; /* size of resource map in bytes */
211#ifdef CONFIG_NUMA
212 unsigned int node; /* node where this IOC lives */
213#endif
214#if DELAYED_RESOURCE_CNT > 0
215 spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
216 /* than res_lock for bigger systems. */
217 int saved_cnt;
218 struct sba_dma_pair {
219 dma_addr_t iova;
220 size_t size;
221 } saved[DELAYED_RESOURCE_CNT];
222#endif
223
224#ifdef PDIR_SEARCH_TIMING
225#define SBA_SEARCH_SAMPLE 0x100
226 unsigned long avg_search[SBA_SEARCH_SAMPLE];
227 unsigned long avg_idx; /* current index into avg_search */
228#endif
229
230 /* Stuff we don't need in performance path */
231 struct ioc *next; /* list of IOC's in system */
232 acpi_handle handle; /* for multiple IOC's */
233 const char *name;
234 unsigned int func_id;
235 unsigned int rev; /* HW revision of chip */
236 u32 iov_size;
237 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
238 struct pci_dev *sac_only_dev;
239};
240
241static struct ioc *ioc_list, *ioc_found;
242static int reserve_sba_gart = 1;
243
244static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
245static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
246
247#define sba_sg_address(sg) sg_virt((sg))
248
249#ifdef FULL_VALID_PDIR
250static u64 prefetch_spill_page;
251#endif
252
253#define GET_IOC(dev) ((dev_is_pci(dev)) \
254 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
255
256/*
257** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
258** (or rather not merge) DMAs into manageable chunks.
259** On parisc, this is more of a software/tuning constraint
260** than a hardware one. I/O MMU allocation algorithms can be
261** faster with smaller sizes (to some degree).
262*/
263#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
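/* For example, with 64-bit longs and 4 KB IOVP pages this works out to 64 * 4 KB = 256 KB. */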
264
265#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
266
267/************************************
268** SBA register read and write support
269**
270** BE WARNED: register writes are posted.
271** (ie follow writes which must reach HW with a read)
272**
273*/
274#define READ_REG(addr) __raw_readq(addr)
275#define WRITE_REG(val, addr) __raw_writeq(val, addr)
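
/*
** A minimal sketch of the "flush posted writes with a read" idiom used
** throughout this file (the function name is illustrative only): after
** queueing work via WRITE_REG(), a READ_REG() of the same IOC forces the
** posted writes to reach the hardware before we rely on their effect.
*/
static inline void
sba_flush_posted_writes_example(struct ioc *ioc)
{
	WRITE_REG(ioc->ibase | iovp_shift, ioc->ioc_hpa + IOC_PCOM);	/* purge one page at ibase */
	READ_REG(ioc->ioc_hpa + IOC_PCOM);				/* read back: write has hit HW */
}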
276
277#ifdef DEBUG_SBA_INIT
278
279/**
280 * sba_dump_tlb - debugging only - print IOMMU operating parameters
281 * @hpa: base address of the IOMMU
282 *
283 * Print the size/location of the IO MMU PDIR.
284 */
285static void
286sba_dump_tlb(char *hpa)
287{
288 DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
289 DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
290 DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
291 DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
292 DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
293 DBG_INIT("\n");
294}
295#endif
296
297
298#ifdef ASSERT_PDIR_SANITY
299
300/**
301 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
302 * @ioc: IO MMU structure which owns the pdir we are interested in.
303 * @msg: text to print on the output line.
304 * @pide: pdir index.
305 *
306 * Print one entry of the IO MMU PDIR in human readable form.
307 */
308static void
309sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
310{
311 /* start printing from lowest pde in rval */
312 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
313 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
314 uint rcnt;
315
316 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
317 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
318
319 rcnt = 0;
320 while (rcnt < BITS_PER_LONG) {
321 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
322 (rcnt == (pide & (BITS_PER_LONG - 1)))
323 ? " -->" : " ",
324 rcnt, ptr, (unsigned long long) *ptr );
325 rcnt++;
326 ptr++;
327 }
328 printk(KERN_DEBUG "%s", msg);
329}
330
331
332/**
333 * sba_check_pdir - debugging only - consistency checker
334 * @ioc: IO MMU structure which owns the pdir we are interested in.
335 * @msg: text to print on the output line.
336 *
337 * Verify the resource map and pdir state is consistent
338 */
339static int
340sba_check_pdir(struct ioc *ioc, char *msg)
341{
342 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
343 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
344 u64 *pptr = ioc->pdir_base; /* pdir ptr */
345 uint pide = 0;
346
347 while (rptr < rptr_end) {
348 u64 rval;
349 int rcnt; /* number of bits we might check */
350
351 rval = *rptr;
352 rcnt = 64;
353
354 while (rcnt) {
355 /* Get last byte and highest bit from that */
356 u32 pde = ((u32)((*pptr >> (63)) & 0x1));
357 if ((rval & 0x1) ^ pde)
358 {
359 /*
360 ** BUMMER! -- res_map != pdir --
361 ** Dump rval and matching pdir entries
362 */
363 sba_dump_pdir_entry(ioc, msg, pide);
364 return(1);
365 }
366 rcnt--;
367 rval >>= 1; /* try the next bit */
368 pptr++;
369 pide++;
370 }
371 rptr++; /* look at next word of res_map */
372 }
373 /* It'd be nice if we always got here :^) */
374 return 0;
375}
376
377
378/**
379 * sba_dump_sg - debugging only - print Scatter-Gather list
380 * @ioc: IO MMU structure which owns the pdir we are interested in.
381 * @startsg: head of the SG list
382 * @nents: number of entries in SG list
383 *
384 * print the SG list so we can verify it's correct by hand.
385 */
386static void
387sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
388{
389 while (nents-- > 0) {
390 printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
391 startsg->dma_address, startsg->dma_length,
392 sba_sg_address(startsg));
393 startsg = sg_next(startsg);
394 }
395}
396
397static void
398sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
399{
400 struct scatterlist *the_sg = startsg;
401 int the_nents = nents;
402
403 while (the_nents-- > 0) {
404 if (sba_sg_address(the_sg) == 0x0UL)
405 sba_dump_sg(NULL, startsg, nents);
406 the_sg = sg_next(the_sg);
407 }
408}
409
410#endif /* ASSERT_PDIR_SANITY */
411
412
413
414
415/**************************************************************
416*
417* I/O Pdir Resource Management
418*
419* Bits set in the resource map are in use.
420* Each bit can represent a number of pages.
421* LSbs represent lower addresses (IOVA's).
422*
423***************************************************************/
424#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
425
426/* Convert from IOVP to IOVA and vice versa. */
427#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
428#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
429
430#define PDIR_ENTRY_SIZE sizeof(u64)
431
432#define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
433
434#define RESMAP_MASK(n) ~(~0UL << (n))
435#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
436
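/*
** Worked example (a sketch, not used by the driver): how an IOVP is
** located in both the pdir and the resource map. Each pdir entry is
** covered by one bit in res_map, so the bit for pdir index "pide"
** lives in byte (pide >> 3), at bit position (pide & 7).
*/
static inline void
sba_resmap_position_example(unsigned long iovp, unsigned int *pide,
			    unsigned int *byte, unsigned int *bit)
{
	*pide = PDIR_INDEX(iovp);	/* which 8-byte pdir entry */
	*byte = *pide >> 3;		/* which byte of res_map */
	*bit  = *pide & 7;		/* which bit within that byte */
}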
437
438/**
439 * For most cases the normal get_order is sufficient; however, it limits us
440 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
441 * It only incurs about 1 clock cycle to use this one with the static variable
442 * and makes the code more intuitive.
443 */
444static SBA_INLINE int
445get_iovp_order (unsigned long size)
446{
447 long double d = size - 1;
448 long order;
449
450 order = ia64_getf_exp(d);
451 order = order - iovp_shift - 0xffff + 1;
452 if (order < 0)
453 order = 0;
454 return order;
455}
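
/*
** For reference only: an equivalent, architecture-neutral way to compute
** the same order (assuming the usual fls64() semantics), valid for
** size >= 1. The version above uses the ia64 getf.exp trick instead.
*/
static inline int
get_iovp_order_generic(unsigned long size)
{
	long order = (long)fls64(size - 1) - (long)iovp_shift;

	return order > 0 ? (int)order : 0;
}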
456
457static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
458 unsigned int bitshiftcnt)
459{
460 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
461 + bitshiftcnt;
462}
463
464/**
465 * sba_search_bitmap - find free space in IO PDIR resource bitmap
466 * @ioc: IO MMU structure which owns the pdir we are interested in.
467 * @bits_wanted: number of entries we need.
468 * @use_hint: use res_hint to indicate where to start looking
469 *
470 * Find consecutive free bits in resource bitmap.
471 * Each bit represents one entry in the IO Pdir.
472 * Cool perf optimization: search for log2(size) bits at a time.
473 */
474static SBA_INLINE unsigned long
475sba_search_bitmap(struct ioc *ioc, struct device *dev,
476 unsigned long bits_wanted, int use_hint)
477{
478 unsigned long *res_ptr;
479 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
480 unsigned long flags, pide = ~0UL, tpide;
481 unsigned long boundary_size;
482 unsigned long shift;
483 int ret;
484
485 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
486 ASSERT(res_ptr < res_end);
487
488 boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
489 boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
490
491 BUG_ON(ioc->ibase & ~iovp_mask);
492 shift = ioc->ibase >> iovp_shift;
493
494 spin_lock_irqsave(&ioc->res_lock, flags);
495
496 /* Allow caller to force a search through the entire resource space */
497 if (likely(use_hint)) {
498 res_ptr = ioc->res_hint;
499 } else {
500 res_ptr = (ulong *)ioc->res_map;
501 ioc->res_bitshift = 0;
502 }
503
504 /*
505 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
506 * if a TLB entry is purged while in use. sba_mark_invalid()
507 * purges IOTLB entries in power-of-two sizes, so we also
508 * allocate IOVA space in power-of-two sizes.
509 */
510 bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
511
512 if (likely(bits_wanted == 1)) {
513 unsigned int bitshiftcnt;
514 for(; res_ptr < res_end ; res_ptr++) {
515 if (likely(*res_ptr != ~0UL)) {
516 bitshiftcnt = ffz(*res_ptr);
517 *res_ptr |= (1UL << bitshiftcnt);
518 pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
519 ioc->res_bitshift = bitshiftcnt + bits_wanted;
520 goto found_it;
521 }
522 }
523 goto not_found;
524
525 }
526
527 if (likely(bits_wanted <= BITS_PER_LONG/2)) {
528 /*
529 ** Search the resource bit map on well-aligned values.
530 ** "o" is the alignment.
531 ** We need the alignment to invalidate I/O TLB using
532 ** SBA HW features in the unmap path.
533 */
534 unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
535 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
536 unsigned long mask, base_mask;
537
538 base_mask = RESMAP_MASK(bits_wanted);
539 mask = base_mask << bitshiftcnt;
540
541 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
542 for(; res_ptr < res_end ; res_ptr++)
543 {
544 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
545 ASSERT(0 != mask);
546 for (; mask ; mask <<= o, bitshiftcnt += o) {
547 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
548 ret = iommu_is_span_boundary(tpide, bits_wanted,
549 shift,
550 boundary_size);
551 if ((0 == ((*res_ptr) & mask)) && !ret) {
552 *res_ptr |= mask; /* mark resources busy! */
553 pide = tpide;
554 ioc->res_bitshift = bitshiftcnt + bits_wanted;
555 goto found_it;
556 }
557 }
558
559 bitshiftcnt = 0;
560 mask = base_mask;
561
562 }
563
564 } else {
565 int qwords, bits, i;
566 unsigned long *end;
567
568 qwords = bits_wanted >> 6; /* /64 */
569 bits = bits_wanted - (qwords * BITS_PER_LONG);
570
571 end = res_end - qwords;
572
573 for (; res_ptr < end; res_ptr++) {
574 tpide = ptr_to_pide(ioc, res_ptr, 0);
575 ret = iommu_is_span_boundary(tpide, bits_wanted,
576 shift, boundary_size);
577 if (ret)
578 goto next_ptr;
579 for (i = 0 ; i < qwords ; i++) {
580 if (res_ptr[i] != 0)
581 goto next_ptr;
582 }
583 if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
584 continue;
585
586 /* Found it, mark it */
587 for (i = 0 ; i < qwords ; i++)
588 res_ptr[i] = ~0UL;
589 res_ptr[i] |= RESMAP_MASK(bits);
590
591 pide = tpide;
592 res_ptr += qwords;
593 ioc->res_bitshift = bits;
594 goto found_it;
595next_ptr:
596 ;
597 }
598 }
599
600not_found:
601 prefetch(ioc->res_map);
602 ioc->res_hint = (unsigned long *) ioc->res_map;
603 ioc->res_bitshift = 0;
604 spin_unlock_irqrestore(&ioc->res_lock, flags);
605 return (pide);
606
607found_it:
608 ioc->res_hint = res_ptr;
609 spin_unlock_irqrestore(&ioc->res_lock, flags);
610 return (pide);
611}
612
613
614/**
615 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
616 * @ioc: IO MMU structure which owns the pdir we are interested in.
617 * @size: number of bytes to create a mapping for
618 *
619 * Given a size, find that many consecutive unmarked bits and mark them
620 * in the resource bit map.
621 */
622static int
623sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
624{
625 unsigned int pages_needed = size >> iovp_shift;
626#ifdef PDIR_SEARCH_TIMING
627 unsigned long itc_start;
628#endif
629 unsigned long pide;
630
631 ASSERT(pages_needed);
632 ASSERT(0 == (size & ~iovp_mask));
633
634#ifdef PDIR_SEARCH_TIMING
635 itc_start = ia64_get_itc();
636#endif
637 /*
638 ** "seek and ye shall find"...praying never hurts either...
639 */
640 pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
641 if (unlikely(pide >= (ioc->res_size << 3))) {
642 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
643 if (unlikely(pide >= (ioc->res_size << 3))) {
644#if DELAYED_RESOURCE_CNT > 0
645 unsigned long flags;
646
647 /*
648 ** With delayed resource freeing, we can give this one more shot. We're
649 ** getting close to being in trouble here, so do what we can to make this
650 ** one count.
651 */
652 spin_lock_irqsave(&ioc->saved_lock, flags);
653 if (ioc->saved_cnt > 0) {
654 struct sba_dma_pair *d;
655 int cnt = ioc->saved_cnt;
656
657 d = &(ioc->saved[ioc->saved_cnt - 1]);
658
659 spin_lock(&ioc->res_lock);
660 while (cnt--) {
661 sba_mark_invalid(ioc, d->iova, d->size);
662 sba_free_range(ioc, d->iova, d->size);
663 d--;
664 }
665 ioc->saved_cnt = 0;
666 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
667 spin_unlock(&ioc->res_lock);
668 }
669 spin_unlock_irqrestore(&ioc->saved_lock, flags);
670
671 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
672 if (unlikely(pide >= (ioc->res_size << 3))) {
673 printk(KERN_WARNING "%s: I/O MMU @ %p is "
674 "out of mapping resources, %u %u %lx\n",
675 __func__, ioc->ioc_hpa, ioc->res_size,
676 pages_needed, dma_get_seg_boundary(dev));
677 return -1;
678 }
679#else
680 printk(KERN_WARNING "%s: I/O MMU @ %p is "
681 "out of mapping resources, %u %u %lx\n",
682 __func__, ioc->ioc_hpa, ioc->res_size,
683 pages_needed, dma_get_seg_boundary(dev));
684 return -1;
685#endif
686 }
687 }
688
689#ifdef PDIR_SEARCH_TIMING
690 ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
691 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
692#endif
693
694 prefetchw(&(ioc->pdir_base[pide]));
695
696#ifdef ASSERT_PDIR_SANITY
697 /* verify the first enable bit is clear */
698 if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
699 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
700 }
701#endif
702
703 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
704 __func__, size, pages_needed, pide,
705 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
706 ioc->res_bitshift );
707
708 return (pide);
709}
710
711
712/**
713 * sba_free_range - unmark bits in IO PDIR resource bitmap
714 * @ioc: IO MMU structure which owns the pdir we are interested in.
715 * @iova: IO virtual address which was previously allocated.
716 * @size: number of bytes that were mapped
717 *
718 * clear bits in the ioc's resource map
719 */
720static SBA_INLINE void
721sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
722{
723 unsigned long iovp = SBA_IOVP(ioc, iova);
724 unsigned int pide = PDIR_INDEX(iovp);
725 unsigned int ridx = pide >> 3; /* convert bit to byte address */
726 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
727 int bits_not_wanted = size >> iovp_shift;
728 unsigned long m;
729
730 /* Round up to power-of-two size: see AR2305 note above */
731 bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
732 for (; bits_not_wanted > 0 ; res_ptr++) {
733
734 if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
735
736 /* these mappings start 64bit aligned */
737 *res_ptr = 0UL;
738 bits_not_wanted -= BITS_PER_LONG;
739 pide += BITS_PER_LONG;
740
741 } else {
742
743 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
744 m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
745 bits_not_wanted = 0;
746
747 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
748 bits_not_wanted, m, pide, res_ptr, *res_ptr);
749
750 ASSERT(m != 0);
751 ASSERT(bits_not_wanted);
752 ASSERT((*res_ptr & m) == m); /* verify same bits are set */
753 *res_ptr &= ~m;
754 }
755 }
756}
757
758
759/**************************************************************
760*
761* "Dynamic DMA Mapping" support (aka "Coherent I/O")
762*
763***************************************************************/
764
765/**
766 * sba_io_pdir_entry - fill in one IO PDIR entry
767 * @pdir_ptr: pointer to IO PDIR entry
768 * @vba: Virtual CPU address of buffer to map
769 *
770 * SBA Mapping Routine
771 *
772 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
773 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
774 * Each IO Pdir entry consists of 8 bytes as shown below
775 * (LSB == bit 0):
776 *
777 *  63                    40       11                 7        0
778 * +-+---------------------+----------------------------------+----+--------+
779 * |V|          U          |            PPN[39:12]            | U  |   FF   |
780 * +-+---------------------+----------------------------------+----+--------+
781 *
782 * V == Valid Bit
783 * U == Unused
784 * PPN == Physical Page Number
785 *
786 * The physical address fields are filled with the results of virt_to_phys()
787 * on the vba.
788 */
789
790#if 1
791#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
792 | 0x8000000000000000ULL)
793#else
794void SBA_INLINE
795sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
796{
797 *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
798}
799#endif
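
/*
** Complementary sketch (not used by the driver): pulling a pdir entry
** apart again. Bit 63 is the valid bit, and masking off the top flag
** bits and the low 12 bits gives back the page address, exactly as
** sba_mark_clean() below recovers it.
*/
static inline int
sba_pdir_entry_valid_example(u64 pde)
{
	return pde >> 63;			/* valid bit */
}

static inline unsigned long
sba_pdir_entry_to_page_example(u64 pde)
{
	return pde & ~0xE000000000000FFFULL;	/* page address, flag/offset bits cleared */
}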
800
801#ifdef ENABLE_MARK_CLEAN
802/**
803 * Since DMA is i-cache coherent, any (complete) pages that were written via
804 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
805 * flush them when they get mapped into an executable vm-area.
806 */
807static void
808mark_clean (void *addr, size_t size)
809{
810 unsigned long pg_addr, end;
811
812 pg_addr = PAGE_ALIGN((unsigned long) addr);
813 end = (unsigned long) addr + size;
814 while (pg_addr + PAGE_SIZE <= end) {
815 struct page *page = virt_to_page((void *)pg_addr);
816 set_bit(PG_arch_1, &page->flags);
817 pg_addr += PAGE_SIZE;
818 }
819}
820#endif
821
822/**
823 * sba_mark_invalid - invalidate one or more IO PDIR entries
824 * @ioc: IO MMU structure which owns the pdir we are interested in.
825 * @iova: IO Virtual Address mapped earlier
826 * @byte_cnt: number of bytes this mapping covers.
827 *
828 * Mark the IO PDIR entry(ies) as invalid and purge the
829 * corresponding IO TLB entry. The PCOM (Purge Command Register)
830 * is used to purge stale entries in the IO TLB when unmapping.
831 *
832 * The PCOM register supports purging of multiple pages, with a minimum
833 * of 1 page and a maximum of 2GB. Hardware requires the address be
834 * aligned to the size of the range being purged. The size of the range
835 * must be a power of 2. The "Cool perf optimization" in the
836 * allocation routine helps keep that true.
837 */
838static SBA_INLINE void
839sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
840{
841 u32 iovp = (u32) SBA_IOVP(ioc,iova);
842
843 int off = PDIR_INDEX(iovp);
844
845 /* Must be non-zero and rounded up */
846 ASSERT(byte_cnt > 0);
847 ASSERT(0 == (byte_cnt & ~iovp_mask));
848
849#ifdef ASSERT_PDIR_SANITY
850 /* Assert first pdir entry is set */
851 if (!(ioc->pdir_base[off] >> 60)) {
852 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
853 }
854#endif
855
856 if (byte_cnt <= iovp_size)
857 {
858 ASSERT(off < ioc->pdir_size);
859
860 iovp |= iovp_shift; /* set "size" field for PCOM */
861
862#ifndef FULL_VALID_PDIR
863 /*
864 ** clear I/O PDIR entry "valid" bit
865 ** Do NOT clear the rest - save it for debugging.
866 ** We should only clear bits that have previously
867 ** been enabled.
868 */
869 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
870#else
871 /*
872 ** If we want to maintain the PDIR as valid, put in
873 ** the spill page so devices prefetching won't
874 ** cause a hard fail.
875 */
876 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
877#endif
878 } else {
879 u32 t = get_iovp_order(byte_cnt) + iovp_shift;
880
881 iovp |= t;
882 ASSERT(t <= 31); /* 2GB! Max value of "size" field */
883
884 do {
885 /* verify this pdir entry is enabled */
886 ASSERT(ioc->pdir_base[off] >> 63);
887#ifndef FULL_VALID_PDIR
888 /* clear I/O Pdir entry "valid" bit first */
889 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
890#else
891 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
892#endif
893 off++;
894 byte_cnt -= iovp_size;
895 } while (byte_cnt > 0);
896 }
897
898 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
899}
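
/*
** PCOM encoding sketch (illustrative only): the purge command is the IOVA
** of the region with log2(bytes purged) folded into the low bits, so
** purging four pages starting at IOVA "base" would look like:
**
**	WRITE_REG(base | (iovp_shift + 2), ioc->ioc_hpa + IOC_PCOM);
**	READ_REG(ioc->ioc_hpa + IOC_PCOM);	(flush the posted purge)
**
** The base must itself be aligned to the size being purged - see the
** power-of-two allocation note in sba_search_bitmap().
*/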
900
901/**
902 * sba_map_page - map one buffer and return IOVA for DMA
903 * @dev: instance of PCI owned by the driver that's asking.
904 * @page: page to map
905 * @poff: offset into page
906 * @size: number of bytes to map
907 * @dir: dma direction
908 * @attrs: optional dma attributes
909 *
910 * See Documentation/DMA-API-HOWTO.txt
911 */
912static dma_addr_t sba_map_page(struct device *dev, struct page *page,
913 unsigned long poff, size_t size,
914 enum dma_data_direction dir,
915 unsigned long attrs)
916{
917 struct ioc *ioc;
918 void *addr = page_address(page) + poff;
919 dma_addr_t iovp;
920 dma_addr_t offset;
921 u64 *pdir_start;
922 int pide;
923#ifdef ASSERT_PDIR_SANITY
924 unsigned long flags;
925#endif
926#ifdef ALLOW_IOV_BYPASS
927 unsigned long pci_addr = virt_to_phys(addr);
928#endif
929
930#ifdef ALLOW_IOV_BYPASS
931 ASSERT(to_pci_dev(dev)->dma_mask);
932 /*
933 ** Check if the PCI device can DMA to ptr... if so, just return ptr
934 */
935 if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
936 /*
937 ** Device is capable of DMA'ing to the buffer...
938 ** just return the PCI address of ptr
939 */
940 DBG_BYPASS("sba_map_page() bypass mask/addr: "
941 "0x%lx/0x%lx\n",
942 to_pci_dev(dev)->dma_mask, pci_addr);
943 return pci_addr;
944 }
945#endif
946 ioc = GET_IOC(dev);
947 ASSERT(ioc);
948
949 prefetch(ioc->res_hint);
950
951 ASSERT(size > 0);
952 ASSERT(size <= DMA_CHUNK_SIZE);
953
954 /* save offset bits */
955 offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
956
957 /* round up to nearest iovp_size */
958 size = (size + offset + ~iovp_mask) & iovp_mask;
959
960#ifdef ASSERT_PDIR_SANITY
961 spin_lock_irqsave(&ioc->res_lock, flags);
962 if (sba_check_pdir(ioc,"Check before sba_map_page()"))
963 panic("Sanity check failed");
964 spin_unlock_irqrestore(&ioc->res_lock, flags);
965#endif
966
967 pide = sba_alloc_range(ioc, dev, size);
968 if (pide < 0)
969 return DMA_MAPPING_ERROR;
970
971 iovp = (dma_addr_t) pide << iovp_shift;
972
973 DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
974
975 pdir_start = &(ioc->pdir_base[pide]);
976
977 while (size > 0) {
978 ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
979 sba_io_pdir_entry(pdir_start, (unsigned long) addr);
980
981 DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
982
983 addr += iovp_size;
984 size -= iovp_size;
985 pdir_start++;
986 }
987 /* force pdir update */
988 wmb();
989
990 /* form complete address */
991#ifdef ASSERT_PDIR_SANITY
992 spin_lock_irqsave(&ioc->res_lock, flags);
993 sba_check_pdir(ioc,"Check after sba_map_page()");
994 spin_unlock_irqrestore(&ioc->res_lock, flags);
995#endif
996 return SBA_IOVA(ioc, iovp, offset);
997}
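
/*
** Hypothetical caller's view of the routine above (a sketch only, not part
** of this file's API): drivers never call sba_map_page() directly, they go
** through the generic DMA API, which dispatches here via the SBA dma_map_ops.
*/
static inline int
sba_example_map_one_page(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* could not map (or bypass) the buffer */

	/* ... program "handle" into the device and run the transfer ... */

	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}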
998
999#ifdef ENABLE_MARK_CLEAN
1000static SBA_INLINE void
1001sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
1002{
1003 u32 iovp = (u32) SBA_IOVP(ioc,iova);
1004 int off = PDIR_INDEX(iovp);
1005 void *addr;
1006
1007 if (size <= iovp_size) {
1008 addr = phys_to_virt(ioc->pdir_base[off] &
1009 ~0xE000000000000FFFULL);
1010 mark_clean(addr, size);
1011 } else {
1012 do {
1013 addr = phys_to_virt(ioc->pdir_base[off] &
1014 ~0xE000000000000FFFULL);
1015 mark_clean(addr, min(size, iovp_size));
1016 off++;
1017 size -= iovp_size;
1018 } while (size > 0);
1019 }
1020}
1021#endif
1022
1023/**
1024 * sba_unmap_page - unmap one IOVA and free resources
1025 * @dev: instance of PCI owned by the driver that's asking.
1026 * @iova: IOVA of driver buffer previously mapped.
1027 * @size: number of bytes mapped in driver buffer.
1028 * @dir: R/W or both.
1029 * @attrs: optional dma attributes
1030 *
1031 * See Documentation/DMA-API-HOWTO.txt
1032 */
1033static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
1034 enum dma_data_direction dir, unsigned long attrs)
1035{
1036 struct ioc *ioc;
1037#if DELAYED_RESOURCE_CNT > 0
1038 struct sba_dma_pair *d;
1039#endif
1040 unsigned long flags;
1041 dma_addr_t offset;
1042
1043 ioc = GET_IOC(dev);
1044 ASSERT(ioc);
1045
1046#ifdef ALLOW_IOV_BYPASS
1047 if (likely((iova & ioc->imask) != ioc->ibase)) {
1048 /*
1049 ** Address does not fall w/in IOVA, must be bypassing
1050 */
1051 DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
1052 iova);
1053
1054#ifdef ENABLE_MARK_CLEAN
1055 if (dir == DMA_FROM_DEVICE) {
1056 mark_clean(phys_to_virt(iova), size);
1057 }
1058#endif
1059 return;
1060 }
1061#endif
1062 offset = iova & ~iovp_mask;
1063
1064 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
1065
1066 iova ^= offset; /* clear offset bits */
1067 size += offset;
1068 size = ROUNDUP(size, iovp_size);
1069
1070#ifdef ENABLE_MARK_CLEAN
1071 if (dir == DMA_FROM_DEVICE)
1072 sba_mark_clean(ioc, iova, size);
1073#endif
1074
1075#if DELAYED_RESOURCE_CNT > 0
1076 spin_lock_irqsave(&ioc->saved_lock, flags);
1077 d = &(ioc->saved[ioc->saved_cnt]);
1078 d->iova = iova;
1079 d->size = size;
1080 if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1081 int cnt = ioc->saved_cnt;
1082 spin_lock(&ioc->res_lock);
1083 while (cnt--) {
1084 sba_mark_invalid(ioc, d->iova, d->size);
1085 sba_free_range(ioc, d->iova, d->size);
1086 d--;
1087 }
1088 ioc->saved_cnt = 0;
1089 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1090 spin_unlock(&ioc->res_lock);
1091 }
1092 spin_unlock_irqrestore(&ioc->saved_lock, flags);
1093#else /* DELAYED_RESOURCE_CNT == 0 */
1094 spin_lock_irqsave(&ioc->res_lock, flags);
1095 sba_mark_invalid(ioc, iova, size);
1096 sba_free_range(ioc, iova, size);
1097 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1098 spin_unlock_irqrestore(&ioc->res_lock, flags);
1099#endif /* DELAYED_RESOURCE_CNT == 0 */
1100}
1101
1102/**
1103 * sba_alloc_coherent - allocate/map shared mem for DMA
1104 * @dev: instance of PCI owned by the driver that's asking.
1105 * @size: number of bytes mapped in driver buffer.
1106 * @dma_handle: IOVA of new buffer.
1107 *
1108 * See Documentation/DMA-API-HOWTO.txt
1109 */
1110static void *
1111sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
1112 gfp_t flags, unsigned long attrs)
1113{
1114 struct page *page;
1115 struct ioc *ioc;
1116 int node = -1;
1117 void *addr;
1118
1119 ioc = GET_IOC(dev);
1120 ASSERT(ioc);
1121#ifdef CONFIG_NUMA
1122 node = ioc->node;
1123#endif
1124
1125 page = alloc_pages_node(node, flags, get_order(size));
1126 if (unlikely(!page))
1127 return NULL;
1128
1129 addr = page_address(page);
1130 memset(addr, 0, size);
1131 *dma_handle = page_to_phys(page);
1132
1133#ifdef ALLOW_IOV_BYPASS
1134 ASSERT(dev->coherent_dma_mask);
1135 /*
1136 ** Check if the PCI device can DMA to ptr... if so, just return ptr
1137 */
1138 if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
1139 DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1140 dev->coherent_dma_mask, *dma_handle);
1141
1142 return addr;
1143 }
1144#endif
1145
1146 /*
1147 * If device can't bypass or bypass is disabled, pass the 32bit fake
1148 * device to map single to get an iova mapping.
1149 */
1150 *dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
1151 DMA_BIDIRECTIONAL, 0);
1152 if (dma_mapping_error(dev, *dma_handle))
1153 return NULL;
1154 return addr;
1155}
1156
1157
1158/**
1159 * sba_free_coherent - free/unmap shared mem for DMA
1160 * @dev: instance of PCI owned by the driver that's asking.
1161 * @size: number of bytes mapped in driver buffer.
1162 * @vaddr: CPU virtual address of the "consistent" buffer.
1163 * @dma_handle: IO virtual address of "consistent" buffer.
1164 *
1165 * See Documentation/DMA-API-HOWTO.txt
1166 */
1167static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
1168 dma_addr_t dma_handle, unsigned long attrs)
1169{
1170 sba_unmap_page(dev, dma_handle, size, 0, 0);
1171 free_pages((unsigned long) vaddr, get_order(size));
1172}
1173
1174
1175/*
1176** Since 0 is a valid pdir_base index value, can't use that
1177** to determine if a value is valid or not. Use a flag to indicate
1178** the SG list entry contains a valid pdir index.
1179*/
1180#define PIDE_FLAG 0x1UL
1181
1182#ifdef DEBUG_LARGE_SG_ENTRIES
1183int dump_run_sg = 0;
1184#endif
1185
1186
1187/**
1188 * sba_fill_pdir - write allocated SG entries into IO PDIR
1189 * @ioc: IO MMU structure which owns the pdir we are interested in.
1190 * @startsg: list of IOVA/size pairs
1191 * @nents: number of entries in startsg list
1192 *
1193 * Take preprocessed SG list and write corresponding entries
1194 * in the IO PDIR.
1195 */
1196
1197static SBA_INLINE int
1198sba_fill_pdir(
1199 struct ioc *ioc,
1200 struct scatterlist *startsg,
1201 int nents)
1202{
1203 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
1204 int n_mappings = 0;
1205 u64 *pdirp = NULL;
1206 unsigned long dma_offset = 0;
1207
1208 while (nents-- > 0) {
1209 int cnt = startsg->dma_length;
1210 startsg->dma_length = 0;
1211
1212#ifdef DEBUG_LARGE_SG_ENTRIES
1213 if (dump_run_sg)
1214 printk(" %2d : %08lx/%05x %p\n",
1215 nents, startsg->dma_address, cnt,
1216 sba_sg_address(startsg));
1217#else
1218 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1219 nents, startsg->dma_address, cnt,
1220 sba_sg_address(startsg));
1221#endif
1222 /*
1223 ** Look for the start of a new DMA stream
1224 */
1225 if (startsg->dma_address & PIDE_FLAG) {
1226 u32 pide = startsg->dma_address & ~PIDE_FLAG;
1227 dma_offset = (unsigned long) pide & ~iovp_mask;
1228 startsg->dma_address = 0;
1229 if (n_mappings)
1230 dma_sg = sg_next(dma_sg);
1231 dma_sg->dma_address = pide | ioc->ibase;
1232 pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1233 n_mappings++;
1234 }
1235
1236 /*
1237 ** Look for a VCONTIG chunk
1238 */
1239 if (cnt) {
1240 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1241 ASSERT(pdirp);
1242
1243 /* Since multiple Vcontig blocks could make up
1244 ** one DMA stream, *add* cnt to dma_len.
1245 */
1246 dma_sg->dma_length += cnt;
1247 cnt += dma_offset;
1248 dma_offset=0; /* only want offset on first chunk */
1249 cnt = ROUNDUP(cnt, iovp_size);
1250 do {
1251 sba_io_pdir_entry(pdirp, vaddr);
1252 vaddr += iovp_size;
1253 cnt -= iovp_size;
1254 pdirp++;
1255 } while (cnt > 0);
1256 }
1257 startsg = sg_next(startsg);
1258 }
1259 /* force pdir update */
1260 wmb();
1261
1262#ifdef DEBUG_LARGE_SG_ENTRIES
1263 dump_run_sg = 0;
1264#endif
1265 return(n_mappings);
1266}
1267
1268
1269/*
1270** Two address ranges are DMA contiguous *iff* "end of prev" and
1271** "start of next" are both on an IOV page boundary.
1272**
1273** (shift left is a quick trick to mask off upper bits)
1274*/
1275#define DMA_CONTIG(__X, __Y) \
1276 (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
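
/*
** Example (4 KB IOVP pages): prev ends at 0x10003000 and next starts at
** 0x7f001000 - both have their low 12 bits clear, so the shifted OR is 0
** and DMA_CONTIG() is true; if prev instead ended at 0x10002800, the low
** bits survive the shift and the test fails.
*/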
1277
1278
1279/**
1280 * sba_coalesce_chunks - preprocess the SG list
1281 * @ioc: IO MMU structure which owns the pdir we are interested in.
1282 * @startsg: list of IOVA/size pairs
1283 * @nents: number of entries in startsg list
1284 *
1285 * First pass is to walk the SG list and determine where the breaks are
1286 * in the DMA stream. Allocates PDIR entries but does not fill them.
1287 * Returns the number of DMA chunks.
1288 *
1289 * Doing the fill separate from the coalescing/allocation keeps the
1290 * code simpler. Future enhancement could make one pass through
1291 * the sglist do both.
1292 */
1293static SBA_INLINE int
1294sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1295 struct scatterlist *startsg,
1296 int nents)
1297{
1298 struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
1299 unsigned long vcontig_len; /* len of VCONTIG chunk */
1300 unsigned long vcontig_end;
1301 struct scatterlist *dma_sg; /* next DMA stream head */
1302 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1303 int n_mappings = 0;
1304 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1305 int idx;
1306
1307 while (nents > 0) {
1308 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1309
1310 /*
1311 ** Prepare for first/next DMA stream
1312 */
1313 dma_sg = vcontig_sg = startsg;
1314 dma_len = vcontig_len = vcontig_end = startsg->length;
1315 vcontig_end += vaddr;
1316 dma_offset = vaddr & ~iovp_mask;
1317
1318 /* PARANOID: clear entries */
1319 startsg->dma_address = startsg->dma_length = 0;
1320
1321 /*
1322 ** This loop terminates one iteration "early" since
1323 ** it's always looking one "ahead".
1324 */
1325 while (--nents > 0) {
1326 unsigned long vaddr; /* tmp */
1327
1328 startsg = sg_next(startsg);
1329
1330 /* PARANOID */
1331 startsg->dma_address = startsg->dma_length = 0;
1332
1333 /* catch brokenness in SCSI layer */
1334 ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1335
1336 /*
1337 ** First make sure current dma stream won't
1338 ** exceed DMA_CHUNK_SIZE if we coalesce the
1339 ** next entry.
1340 */
1341 if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
1342 > DMA_CHUNK_SIZE)
1343 break;
1344
1345 if (dma_len + startsg->length > max_seg_size)
1346 break;
1347
1348 /*
1349 ** Then look for virtually contiguous blocks.
1350 **
1351 ** append the next transaction?
1352 */
1353 vaddr = (unsigned long) sba_sg_address(startsg);
1354 if (vcontig_end == vaddr)
1355 {
1356 vcontig_len += startsg->length;
1357 vcontig_end += startsg->length;
1358 dma_len += startsg->length;
1359 continue;
1360 }
1361
1362#ifdef DEBUG_LARGE_SG_ENTRIES
1363 dump_run_sg = (vcontig_len > iovp_size);
1364#endif
1365
1366 /*
1367 ** Not virtually contiguous.
1368 ** Terminate prev chunk.
1369 ** Start a new chunk.
1370 **
1371 ** Once we start a new VCONTIG chunk, dma_offset
1372 ** can't change. And we need the offset from the first
1373 ** chunk - not the last one. Ergo, successive chunks
1374 ** must start on page boundaries and dovetail
1375 ** with their predecessor.
1376 */
1377 vcontig_sg->dma_length = vcontig_len;
1378
1379 vcontig_sg = startsg;
1380 vcontig_len = startsg->length;
1381
1382 /*
1383 ** 3) do the entries end/start on page boundaries?
1384 ** Don't update vcontig_end until we've checked.
1385 */
1386 if (DMA_CONTIG(vcontig_end, vaddr))
1387 {
1388 vcontig_end = vcontig_len + vaddr;
1389 dma_len += vcontig_len;
1390 continue;
1391 } else {
1392 break;
1393 }
1394 }
1395
1396 /*
1397 ** End of DMA Stream
1398 ** Terminate last VCONTIG block.
1399 ** Allocate space for DMA stream.
1400 */
1401 vcontig_sg->dma_length = vcontig_len;
1402 dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1403 ASSERT(dma_len <= DMA_CHUNK_SIZE);
1404 idx = sba_alloc_range(ioc, dev, dma_len);
1405 if (idx < 0) {
1406 dma_sg->dma_length = 0;
1407 return -1;
1408 }
1409 dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
1410 | dma_offset);
1411 n_mappings++;
1412 }
1413
1414 return n_mappings;
1415}
1416
1417static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1418 int nents, enum dma_data_direction dir,
1419 unsigned long attrs);
1420/**
1421 * sba_map_sg - map Scatter/Gather list
1422 * @dev: instance of PCI owned by the driver that's asking.
1423 * @sglist: array of buffer/length pairs
1424 * @nents: number of entries in list
1425 * @dir: R/W or both.
1426 * @attrs: optional dma attributes
1427 *
1428 * See Documentation/DMA-API-HOWTO.txt
1429 */
1430static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1431 int nents, enum dma_data_direction dir,
1432 unsigned long attrs)
1433{
1434 struct ioc *ioc;
1435 int coalesced, filled = 0;
1436#ifdef ASSERT_PDIR_SANITY
1437 unsigned long flags;
1438#endif
1439#ifdef ALLOW_IOV_BYPASS_SG
1440 struct scatterlist *sg;
1441#endif
1442
1443 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
1444 ioc = GET_IOC(dev);
1445 ASSERT(ioc);
1446
1447#ifdef ALLOW_IOV_BYPASS_SG
1448 ASSERT(to_pci_dev(dev)->dma_mask);
1449 if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1450 for_each_sg(sglist, sg, nents, filled) {
1451 sg->dma_length = sg->length;
1452 sg->dma_address = virt_to_phys(sba_sg_address(sg));
1453 }
1454 return filled;
1455 }
1456#endif
1457 /* Fast path single entry scatterlists. */
1458 if (nents == 1) {
1459 sglist->dma_length = sglist->length;
1460 sglist->dma_address = sba_map_page(dev, sg_page(sglist),
1461 sglist->offset, sglist->length, dir, attrs);
1462 if (dma_mapping_error(dev, sglist->dma_address))
1463 return 0;
1464 return 1;
1465 }
1466
1467#ifdef ASSERT_PDIR_SANITY
1468 spin_lock_irqsave(&ioc->res_lock, flags);
1469 if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
1470 {
1471 sba_dump_sg(ioc, sglist, nents);
1472 panic("Check before sba_map_sg_attrs()");
1473 }
1474 spin_unlock_irqrestore(&ioc->res_lock, flags);
1475#endif
1476
1477 prefetch(ioc->res_hint);
1478
1479 /*
1480 ** First coalesce the chunks and allocate I/O pdir space
1481 **
1482 ** If this is one DMA stream, we can properly map using the
1483 ** correct virtual address associated with each DMA page.
1484 ** w/o this association, we wouldn't have coherent DMA!
1485 ** Access to the virtual address is what forces a two pass algorithm.
1486 */
1487 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1488 if (coalesced < 0) {
1489 sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
1490 return 0;
1491 }
1492
1493 /*
1494 ** Program the I/O Pdir
1495 **
1496 ** map the virtual addresses to the I/O Pdir
1497 ** o dma_address will contain the pdir index
1498 ** o dma_len will contain the number of bytes to map
1499 ** o address contains the virtual address.
1500 */
1501 filled = sba_fill_pdir(ioc, sglist, nents);
1502
1503#ifdef ASSERT_PDIR_SANITY
1504 spin_lock_irqsave(&ioc->res_lock, flags);
1505 if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
1506 {
1507 sba_dump_sg(ioc, sglist, nents);
1508 panic("Check after sba_map_sg_attrs()\n");
1509 }
1510 spin_unlock_irqrestore(&ioc->res_lock, flags);
1511#endif
1512
1513 ASSERT(coalesced == filled);
1514 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1515
1516 return filled;
1517}
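
/*
** Hypothetical caller's view (sketch only): drivers reach the routine above
** through dma_map_sg(), then walk the returned DMA segments with
** for_each_sg(). Fewer segments than "nents" may come back when chunks
** were coalesced, but the unmap always takes the original nents.
*/
static inline void
sba_example_map_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int count, i;

	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
	if (!count)
		return;			/* mapping failed */

	for_each_sg(sgl, sg, count, i) {
		/* program sg_dma_address(sg) / sg_dma_len(sg) into the device */
	}

	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
}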
1518
1519/**
1520 * sba_unmap_sg_attrs - unmap Scatter/Gather list
1521 * @dev: instance of PCI owned by the driver that's asking.
1522 * @sglist: array of buffer/length pairs
1523 * @nents: number of entries in list
1524 * @dir: R/W or both.
1525 * @attrs: optional dma attributes
1526 *
1527 * See Documentation/DMA-API-HOWTO.txt
1528 */
1529static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1530 int nents, enum dma_data_direction dir,
1531 unsigned long attrs)
1532{
1533#ifdef ASSERT_PDIR_SANITY
1534 struct ioc *ioc;
1535 unsigned long flags;
1536#endif
1537
1538 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1539 __func__, nents, sba_sg_address(sglist), sglist->length);
1540
1541#ifdef ASSERT_PDIR_SANITY
1542 ioc = GET_IOC(dev);
1543 ASSERT(ioc);
1544
1545 spin_lock_irqsave(&ioc->res_lock, flags);
1546 sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
1547 spin_unlock_irqrestore(&ioc->res_lock, flags);
1548#endif
1549
1550 while (nents && sglist->dma_length) {
1551
1552 sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
1553 dir, attrs);
1554 sglist = sg_next(sglist);
1555 nents--;
1556 }
1557
1558 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1559
1560#ifdef ASSERT_PDIR_SANITY
1561 spin_lock_irqsave(&ioc->res_lock, flags);
1562 sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
1563 spin_unlock_irqrestore(&ioc->res_lock, flags);
1564#endif
1565
1566}
1567
1568/**************************************************************
1569*
1570* Initialization and claim
1571*
1572***************************************************************/
1573
1574static void
1575ioc_iova_init(struct ioc *ioc)
1576{
1577 int tcnfg;
1578 int agp_found = 0;
1579 struct pci_dev *device = NULL;
1580#ifdef FULL_VALID_PDIR
1581 unsigned long index;
1582#endif
1583
1584 /*
1585 ** Firmware programs the base and size of a "safe IOVA space"
1586 ** (one that doesn't overlap memory or LMMIO space) in the
1587 ** IBASE and IMASK registers.
1588 */
1589 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1590 ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1591
1592 ioc->iov_size = ~ioc->imask + 1;
1593
1594 DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1595 __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1596 ioc->iov_size >> 20);
1597
1598 switch (iovp_size) {
1599 case 4*1024: tcnfg = 0; break;
1600 case 8*1024: tcnfg = 1; break;
1601 case 16*1024: tcnfg = 2; break;
1602 case 64*1024: tcnfg = 3; break;
1603 default:
1604 panic(PFX "Unsupported IOTLB page size %ldK",
1605 iovp_size >> 10);
1606 break;
1607 }
1608 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1609
1610 ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1611 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1612 get_order(ioc->pdir_size));
1613 if (!ioc->pdir_base)
1614 panic(PFX "Couldn't allocate I/O Page Table\n");
1615
1616 memset(ioc->pdir_base, 0, ioc->pdir_size);
1617
1618 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
1619 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1620
1621 ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1622 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1623
1624 /*
1625 ** If an AGP device is present, only use half of the IOV space
1626 ** for PCI DMA. Unfortunately we can't know ahead of time
1627 ** whether GART support will actually be used; for now we
1628 ** can just key on an AGP device found in the system.
1629 ** We program the next pdir index after we stop w/ a key for
1630 ** the GART code to handshake on.
1631 */
1632 for_each_pci_dev(device)
1633 agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1634
1635 if (agp_found && reserve_sba_gart) {
1636 printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
1637 ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1638 ioc->pdir_size /= 2;
1639 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1640 }
1641#ifdef FULL_VALID_PDIR
1642 /*
1643 ** Check to see if the spill page has been allocated; we don't need more than
1644 ** one across multiple SBAs.
1645 */
1646 if (!prefetch_spill_page) {
1647 char *spill_poison = "SBAIOMMU POISON";
1648 int poison_size = 16;
1649 void *poison_addr, *addr;
1650
1651 addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1652 if (!addr)
1653 panic(PFX "Couldn't allocate PDIR spill page\n");
1654
1655 poison_addr = addr;
1656 for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
1657 memcpy(poison_addr, spill_poison, poison_size);
1658
1659 prefetch_spill_page = virt_to_phys(addr);
1660
1661 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
1662 }
1663 /*
1664 ** Set all the PDIR entries valid w/ the spill page as the target
1665 */
1666 for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1667 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1668#endif
1669
1670 /* Clear I/O TLB of any possible entries */
1671 WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1672 READ_REG(ioc->ioc_hpa + IOC_PCOM);
1673
1674 /* Enable IOVA translation */
1675 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1676 READ_REG(ioc->ioc_hpa + IOC_IBASE);
1677}
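
/*
** Worked example of the sizing above (numbers are illustrative): if
** firmware programs a 1 GB IOVA window, the IMASK read yields
** imask = 0xFFFFFFFFC0000000, so iov_size = ~imask + 1 = 0x40000000.
** With 4 KB IOVP pages that is 256K pdir entries, i.e. a 2 MB pdir
** (256K * PDIR_ENTRY_SIZE), matching the pdir allocation just above.
*/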
1678
1679static void __init
1680ioc_resource_init(struct ioc *ioc)
1681{
1682 spin_lock_init(&ioc->res_lock);
1683#if DELAYED_RESOURCE_CNT > 0
1684 spin_lock_init(&ioc->saved_lock);
1685#endif
1686
1687 /* resource map size dictated by pdir_size */
1688 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1689 ioc->res_size >>= 3; /* convert bit count to byte count */
1690 DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1691
1692 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1693 get_order(ioc->res_size));
1694 if (!ioc->res_map)
1695 panic(PFX "Couldn't allocate resource map\n");
1696
1697 memset(ioc->res_map, 0, ioc->res_size);
1698 /* next available IOVP - circular search */
1699 ioc->res_hint = (unsigned long *) ioc->res_map;
1700
1701#ifdef ASSERT_PDIR_SANITY
1702 /* Mark first bit busy - ie no IOVA 0 */
1703 ioc->res_map[0] = 0x1;
1704 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1705#endif
1706#ifdef FULL_VALID_PDIR
1707 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1708 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1709 ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1710 | prefetch_spill_page);
1711#endif
1712
1713 DBG_INIT("%s() res_map %x %p\n", __func__,
1714 ioc->res_size, (void *) ioc->res_map);
1715}
1716
1717static void __init
1718ioc_sac_init(struct ioc *ioc)
1719{
1720 struct pci_dev *sac = NULL;
1721 struct pci_controller *controller = NULL;
1722
1723 /*
1724 * pci_alloc_coherent() must return a DMA address which is
1725 * SAC (single address cycle) addressable, so allocate a
1726 * pseudo-device to enforce that.
1727 */
1728 sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1729 if (!sac)
1730 panic(PFX "Couldn't allocate struct pci_dev");
1731
1732 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1733 if (!controller)
1734 panic(PFX "Couldn't allocate struct pci_controller");
1735
1736 controller->iommu = ioc;
1737 sac->sysdata = controller;
1738 sac->dma_mask = 0xFFFFFFFFUL;
1739 sac->dev.bus = &pci_bus_type;
1740 ioc->sac_only_dev = sac;
1741}
1742
1743static void __init
1744ioc_zx1_init(struct ioc *ioc)
1745{
1746 unsigned long rope_config;
1747 unsigned int i;
1748
1749 if (ioc->rev < 0x20)
1750 panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1751
1752 /* 38 bit memory controller + extra bit for range displaced by MMIO */
1753 ioc->dma_mask = (0x1UL << 39) - 1;
1754
1755 /*
1756 ** Clear ROPE(N)_CONFIG AO bit.
1757 ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1758 ** Overrides bit 1 in DMA Hint Sets.
1759 ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1760 */
1761 for (i=0; i<(8*8); i+=8) {
1762 rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1763 rope_config &= ~IOC_ROPE_AO;
1764 WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1765 }
1766}
1767
1768typedef void (initfunc)(struct ioc *);
1769
1770struct ioc_iommu {
1771 u32 func_id;
1772 char *name;
1773 initfunc *init;
1774};
1775
1776static struct ioc_iommu ioc_iommu_info[] __initdata = {
1777 { ZX1_IOC_ID, "zx1", ioc_zx1_init },
1778 { ZX2_IOC_ID, "zx2", NULL },
1779 { SX1000_IOC_ID, "sx1000", NULL },
1780 { SX2000_IOC_ID, "sx2000", NULL },
1781};
1782
1783static void __init ioc_init(unsigned long hpa, struct ioc *ioc)
1784{
1785 struct ioc_iommu *info;
1786
1787 ioc->next = ioc_list;
1788 ioc_list = ioc;
1789
1790 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1791
1792 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1793 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1794 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
1795
1796 for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1797 if (ioc->func_id == info->func_id) {
1798 ioc->name = info->name;
1799 if (info->init)
1800 (info->init)(ioc);
1801 }
1802 }
1803
1804 iovp_size = (1 << iovp_shift);
1805 iovp_mask = ~(iovp_size - 1);
1806
1807 DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
1808 PAGE_SIZE >> 10, iovp_size >> 10);
1809
1810 if (!ioc->name) {
1811 ioc->name = kmalloc(24, GFP_KERNEL);
1812 if (ioc->name)
1813 sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1814 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1815 else
1816 ioc->name = "Unknown";
1817 }
1818
1819 ioc_iova_init(ioc);
1820 ioc_resource_init(ioc);
1821 ioc_sac_init(ioc);
1822
1823 printk(KERN_INFO PFX
1824 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1825 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1826 hpa, ioc->iov_size >> 20, ioc->ibase);
1827}
1828
1829
1830
1831/**************************************************************************
1832**
1833** SBA initialization code (HW and SW)
1834**
1835** o identify SBA chip itself
1836** o FIXME: initialize DMA hints for reasonable defaults
1837**
1838**************************************************************************/
1839
1840#ifdef CONFIG_PROC_FS
static void *
ioc_start(struct seq_file *s, loff_t *pos)
{
	struct ioc *ioc;
	loff_t n = *pos;

	for (ioc = ioc_list; ioc; ioc = ioc->next)
		if (!n--)
			return ioc;

	return NULL;
}

static void *
ioc_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct ioc *ioc = v;

	++*pos;
	return ioc->next;
}

static void
ioc_stop(struct seq_file *s, void *v)
{
}

static int
ioc_show(struct seq_file *s, void *v)
{
	struct ioc *ioc = v;
	unsigned long *res_ptr = (unsigned long *)ioc->res_map;
	int i, used = 0;

	seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
		ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
#ifdef CONFIG_NUMA
	if (ioc->node != NUMA_NO_NODE)
		seq_printf(s, "NUMA node : %d\n", ioc->node);
#endif
	seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
	seq_printf(s, "IOVA page size : %ld KB\n", iovp_size/1024);

	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);

	seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		unsigned long i = 0, avg = 0, min, max;
		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
			min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}

static const struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next = ioc_next,
	.stop = ioc_stop,
	.show = ioc_show
};

static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;

	proc_create_seq(ioc_list->name, 0, dir, &ioc_seq_ops);
}
#endif

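/**
 * sba_connect_bus - associate a PCI bus with the IOC that serves it
 * @bus: PCI bus being added
 *
 * Starting from the bus's ACPI companion, walk up the namespace until a
 * handle matching one of the IOCs claimed earlier is found and cache the
 * result in the bus's pci_controller.
 */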
static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}

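/**
 * sba_map_ioc_to_node - record the NUMA node an IOC belongs to
 * @ioc: IOC being initialized
 * @handle: ACPI handle used to look up proximity information
 *
 * Only meaningful with CONFIG_NUMA; falls back to NUMA_NO_NODE when the
 * node reported by ACPI is not online.
 */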
static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
#ifdef CONFIG_NUMA
	unsigned int node;

	node = acpi_get_node(handle);
	if (node != NUMA_NO_NODE && !node_online(node))
		node = NUMA_NO_NODE;

	ioc->node = node;
#endif
}

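/*
** Finish setting up one IOC discovered during the ACPI scan: take it off
** the ioc_found list, locate its CSR space via hp_acpi_csr_space(), pick
** an IOMMU page size and hand it to ioc_init().
*/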
static void __init acpi_sba_ioc_add(struct ioc *ioc)
{
	acpi_handle handle = ioc->handle;
	acpi_status status;
	u64 hpa, length;
	struct acpi_device_info *adi;

	ioc_found = ioc->next;
	status = hp_acpi_csr_space(handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		goto err;

	status = acpi_get_object_info(handle, &adi);
	if (ACPI_FAILURE(status))
		goto err;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	kfree(adi);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc_init(hpa, ioc);
	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, handle);
	return;

 err:
	kfree(ioc);
}

static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
	{"HWP0001", 0},
	{"HWP0004", 0},
	{"", 0},
};

static int acpi_sba_ioc_attach(struct acpi_device *device,
			       const struct acpi_device_id *not_used)
{
	struct ioc *ioc;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->next = ioc_found;
	ioc_found = ioc;
	ioc->handle = device->handle;
	return 1;
}

static struct acpi_scan_handler acpi_sba_ioc_handler = {
	.ids = hp_ioc_iommu_device_ids,
	.attach = acpi_sba_ioc_attach,
};

static int __init acpi_sba_ioc_init_acpi(void)
{
	return acpi_scan_add_handler(&acpi_sba_ioc_handler);
}
/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);

static int sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}

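/*
** dma_map_ops exported to the DMA API once sba_init() installs them.
** Illustrative use from a driver's point of view (not taken from this
** file; pdev stands for the driver's struct pci_dev): a PCI driver just
** calls the generic DMA API, e.g.
**
**	dma_addr_t dma;
**	void *buf = dma_alloc_coherent(&pdev->dev, size, &dma, GFP_KERNEL);
**	...
**	dma_free_coherent(&pdev->dev, size, buf, dma);
**
** and ends up in sba_alloc_coherent()/sba_free_coherent() above.
*/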
static const struct dma_map_ops sba_dma_ops = {
	.alloc = sba_alloc_coherent,
	.free = sba_free_coherent,
	.map_page = sba_map_page,
	.unmap_page = sba_unmap_page,
	.map_sg = sba_map_sg_attrs,
	.unmap_sg = sba_unmap_sg_attrs,
	.dma_supported = sba_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
};

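/*
** Late (subsys_initcall) bring-up: adopt the IOCs found during the ACPI
** scan, attach them to their PCI buses, retire swiotlb and install the
** SBA dma_map_ops.
*/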
static int __init
sba_init(void)
{
	/*
	 * If we are booting a kdump kernel, the sba_iommu will cause devices
	 * that were not shutdown properly to MCA as soon as they are turned
	 * back on.  Our only option for a successful kdump kernel boot is to
	 * use swiotlb.
	 */
	if (is_kdump_kernel())
		return 0;

	/*
	 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
	 * routine, but that only happens if acpi_scan_init() has already run.
	 */
	while (ioc_found)
		acpi_sba_ioc_add(ioc_found);

	if (!ioc_list)
		return 0;

	{
		struct pci_bus *b = NULL;
		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}

	/* no need for swiotlb with the iommu */
	swiotlb_exit();
	dma_ops = &sba_dma_ops;

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */

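/*
** "nosbagart" boot option: don't reserve half of the IOVA space for the
** AGP GART driver (see the reserve_sba_gart handling in ioc_iova_init()).
*/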
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

__setup("nosbagart", nosbagart);

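/*
** "sbapagesize=" boot option: select the IOMMU page size, e.g.
** "sbapagesize=64k" for 64KB IOTLB pages.  Only 4k, 8k, 16k and 64k are
** accepted; anything else leaves iovp_shift untouched.
*/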
static int __init
sba_page_override(char *str)
{
	unsigned long page_size;

	page_size = memparse(str, &str);
	switch (page_size) {
		case 4096:
		case 8192:
		case 16384:
		case 65536:
			iovp_shift = ffs(page_size) - 1;
			break;
		default:
			printk(KERN_ERR "%s: unknown/unsupported iommu page size %ld\n",
			       __func__, page_size);
	}

	return 1;
}

__setup("sbapagesize=", sba_page_override);
1/*
2** IA64 System Bus Adapter (SBA) I/O MMU manager
3**
4** (c) Copyright 2002-2005 Alex Williamson
5** (c) Copyright 2002-2003 Grant Grundler
6** (c) Copyright 2002-2005 Hewlett-Packard Company
7**
8** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
9** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
10**
11** This program is free software; you can redistribute it and/or modify
12** it under the terms of the GNU General Public License as published by
13** the Free Software Foundation; either version 2 of the License, or
14** (at your option) any later version.
15**
16**
17** This module initializes the IOC (I/O Controller) found on HP
18** McKinley machines and their successors.
19**
20*/
21
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/init.h>
28#include <linux/mm.h>
29#include <linux/string.h>
30#include <linux/pci.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/acpi.h>
34#include <linux/efi.h>
35#include <linux/nodemask.h>
36#include <linux/bitops.h> /* hweight64() */
37#include <linux/crash_dump.h>
38#include <linux/iommu-helper.h>
39#include <linux/dma-mapping.h>
40#include <linux/prefetch.h>
41
42#include <asm/delay.h> /* ia64_get_itc() */
43#include <asm/io.h>
44#include <asm/page.h> /* PAGE_OFFSET */
45#include <asm/dma.h>
46#include <asm/system.h> /* wmb() */
47
48#include <asm/acpi-ext.h>
49
50extern int swiotlb_late_init_with_default_size (size_t size);
51
52#define PFX "IOC: "
53
54/*
55** Enabling timing search of the pdir resource map. Output in /proc.
56** Disabled by default to optimize performance.
57*/
58#undef PDIR_SEARCH_TIMING
59
60/*
61** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
62** not defined, all DMA will be 32bit and go through the TLB.
63** There's potentially a conflict in the bio merge code with us
64** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
65** appears to give more performance than bio-level virtual merging, we'll
66** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
67** completely restrict DMA to the IOMMU.
68*/
69#define ALLOW_IOV_BYPASS
70
71/*
72** This option specifically allows/disallows bypassing scatterlists with
73** multiple entries. Coalescing these entries can allow better DMA streaming
74** and in some cases shows better performance than entirely bypassing the
75** IOMMU. Performance increase on the order of 1-2% sequential output/input
76** using bonnie++ on a RAID0 MD device (sym2 & mpt).
77*/
78#undef ALLOW_IOV_BYPASS_SG
79
80/*
81** If a device prefetches beyond the end of a valid pdir entry, it will cause
82** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
83** disconnect on 4k boundaries and prevent such issues. If the device is
84** particularly aggressive, this option will keep the entire pdir valid such
85** that prefetching will hit a valid address. This could severely impact
86** error containment, and is therefore off by default. The page that is
87** used for spill-over is poisoned, so that should help debugging somewhat.
88*/
89#undef FULL_VALID_PDIR
90
91#define ENABLE_MARK_CLEAN
92
93/*
94** The number of debug flags is a clue - this code is fragile. NOTE: since
95** tightening the use of res_lock the resource bitmap and actual pdir are no
96** longer guaranteed to stay in sync. The sanity checking code isn't going to
97** like that.
98*/
99#undef DEBUG_SBA_INIT
100#undef DEBUG_SBA_RUN
101#undef DEBUG_SBA_RUN_SG
102#undef DEBUG_SBA_RESOURCE
103#undef ASSERT_PDIR_SANITY
104#undef DEBUG_LARGE_SG_ENTRIES
105#undef DEBUG_BYPASS
106
107#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
108#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
109#endif
110
111#define SBA_INLINE __inline__
112/* #define SBA_INLINE */
113
114#ifdef DEBUG_SBA_INIT
115#define DBG_INIT(x...) printk(x)
116#else
117#define DBG_INIT(x...)
118#endif
119
120#ifdef DEBUG_SBA_RUN
121#define DBG_RUN(x...) printk(x)
122#else
123#define DBG_RUN(x...)
124#endif
125
126#ifdef DEBUG_SBA_RUN_SG
127#define DBG_RUN_SG(x...) printk(x)
128#else
129#define DBG_RUN_SG(x...)
130#endif
131
132
133#ifdef DEBUG_SBA_RESOURCE
134#define DBG_RES(x...) printk(x)
135#else
136#define DBG_RES(x...)
137#endif
138
139#ifdef DEBUG_BYPASS
140#define DBG_BYPASS(x...) printk(x)
141#else
142#define DBG_BYPASS(x...)
143#endif
144
145#ifdef ASSERT_PDIR_SANITY
146#define ASSERT(expr) \
147 if(!(expr)) { \
148 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
149 panic(#expr); \
150 }
151#else
152#define ASSERT(expr)
153#endif
154
155/*
156** The number of pdir entries to "free" before issuing
157** a read to PCOM register to flush out PCOM writes.
158** Interacts with allocation granularity (ie 4 or 8 entries
159** allocated and free'd/purged at a time might make this
160** less interesting).
161*/
162#define DELAYED_RESOURCE_CNT 64
163
164#define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
165
166#define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
167#define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
168#define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
169#define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
170#define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
171
172#define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
173
174#define IOC_FUNC_ID 0x000
175#define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
176#define IOC_IBASE 0x300 /* IO TLB */
177#define IOC_IMASK 0x308
178#define IOC_PCOM 0x310
179#define IOC_TCNFG 0x318
180#define IOC_PDIR_BASE 0x320
181
182#define IOC_ROPE0_CFG 0x500
183#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
184
185
186/* AGP GART driver looks for this */
187#define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
188
189/*
190** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
191**
192** Some IOCs (sx1000) can run at the above pages sizes, but are
193** really only supported using the IOC at a 4k page size.
194**
195** iovp_size could only be greater than PAGE_SIZE if we are
196** confident the drivers really only touch the next physical
197** page iff that driver instance owns it.
198*/
199static unsigned long iovp_size;
200static unsigned long iovp_shift;
201static unsigned long iovp_mask;
202
203struct ioc {
204 void __iomem *ioc_hpa; /* I/O MMU base address */
205 char *res_map; /* resource map, bit == pdir entry */
206 u64 *pdir_base; /* physical base address */
207 unsigned long ibase; /* pdir IOV Space base */
208 unsigned long imask; /* pdir IOV Space mask */
209
210 unsigned long *res_hint; /* next avail IOVP - circular search */
211 unsigned long dma_mask;
212 spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
213 /* clearing pdir to prevent races with allocations. */
214 unsigned int res_bitshift; /* from the RIGHT! */
215 unsigned int res_size; /* size of resource map in bytes */
216#ifdef CONFIG_NUMA
217 unsigned int node; /* node where this IOC lives */
218#endif
219#if DELAYED_RESOURCE_CNT > 0
220 spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
221 /* than res_lock for bigger systems. */
222 int saved_cnt;
223 struct sba_dma_pair {
224 dma_addr_t iova;
225 size_t size;
226 } saved[DELAYED_RESOURCE_CNT];
227#endif
228
229#ifdef PDIR_SEARCH_TIMING
230#define SBA_SEARCH_SAMPLE 0x100
231 unsigned long avg_search[SBA_SEARCH_SAMPLE];
232 unsigned long avg_idx; /* current index into avg_search */
233#endif
234
235 /* Stuff we don't need in performance path */
236 struct ioc *next; /* list of IOC's in system */
237 acpi_handle handle; /* for multiple IOC's */
238 const char *name;
239 unsigned int func_id;
240 unsigned int rev; /* HW revision of chip */
241 u32 iov_size;
242 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
243 struct pci_dev *sac_only_dev;
244};
245
246static struct ioc *ioc_list;
247static int reserve_sba_gart = 1;
248
249static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
250static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
251
252#define sba_sg_address(sg) sg_virt((sg))
253
254#ifdef FULL_VALID_PDIR
255static u64 prefetch_spill_page;
256#endif
257
258#ifdef CONFIG_PCI
259# define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
260 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
261#else
262# define GET_IOC(dev) NULL
263#endif
264
265/*
266** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
267** (or rather not merge) DMAs into manageable chunks.
268** On parisc, this is more of the software/tuning constraint
269** rather than the HW. I/O MMU allocation algorithms can be
270** faster with smaller sizes (to some degree).
271*/
272#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
273
274#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
275
276/************************************
277** SBA register read and write support
278**
279** BE WARNED: register writes are posted.
280** (ie follow writes which must reach HW with a read)
281**
282*/
283#define READ_REG(addr) __raw_readq(addr)
284#define WRITE_REG(val, addr) __raw_writeq(val, addr)
285
286#ifdef DEBUG_SBA_INIT
287
288/**
289 * sba_dump_tlb - debugging only - print IOMMU operating parameters
290 * @hpa: base address of the IOMMU
291 *
292 * Print the size/location of the IO MMU PDIR.
293 */
294static void
295sba_dump_tlb(char *hpa)
296{
297 DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
298 DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
299 DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
300 DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
301 DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
302 DBG_INIT("\n");
303}
304#endif
305
306
307#ifdef ASSERT_PDIR_SANITY
308
309/**
310 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
311 * @ioc: IO MMU structure which owns the pdir we are interested in.
312 * @msg: text to print ont the output line.
313 * @pide: pdir index.
314 *
315 * Print one entry of the IO MMU PDIR in human readable form.
316 */
317static void
318sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
319{
320 /* start printing from lowest pde in rval */
321 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
322 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
323 uint rcnt;
324
325 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
326 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
327
328 rcnt = 0;
329 while (rcnt < BITS_PER_LONG) {
330 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
331 (rcnt == (pide & (BITS_PER_LONG - 1)))
332 ? " -->" : " ",
333 rcnt, ptr, (unsigned long long) *ptr );
334 rcnt++;
335 ptr++;
336 }
337 printk(KERN_DEBUG "%s", msg);
338}
339
340
341/**
342 * sba_check_pdir - debugging only - consistency checker
343 * @ioc: IO MMU structure which owns the pdir we are interested in.
344 * @msg: text to print ont the output line.
345 *
346 * Verify the resource map and pdir state is consistent
347 */
348static int
349sba_check_pdir(struct ioc *ioc, char *msg)
350{
351 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
352 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
353 u64 *pptr = ioc->pdir_base; /* pdir ptr */
354 uint pide = 0;
355
356 while (rptr < rptr_end) {
357 u64 rval;
358 int rcnt; /* number of bits we might check */
359
360 rval = *rptr;
361 rcnt = 64;
362
363 while (rcnt) {
364 /* Get last byte and highest bit from that */
365 u32 pde = ((u32)((*pptr >> (63)) & 0x1));
366 if ((rval & 0x1) ^ pde)
367 {
368 /*
369 ** BUMMER! -- res_map != pdir --
370 ** Dump rval and matching pdir entries
371 */
372 sba_dump_pdir_entry(ioc, msg, pide);
373 return(1);
374 }
375 rcnt--;
376 rval >>= 1; /* try the next bit */
377 pptr++;
378 pide++;
379 }
380 rptr++; /* look at next word of res_map */
381 }
382 /* It'd be nice if we always got here :^) */
383 return 0;
384}
385
386
387/**
388 * sba_dump_sg - debugging only - print Scatter-Gather list
389 * @ioc: IO MMU structure which owns the pdir we are interested in.
390 * @startsg: head of the SG list
391 * @nents: number of entries in SG list
392 *
393 * print the SG list so we can verify it's correct by hand.
394 */
395static void
396sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
397{
398 while (nents-- > 0) {
399 printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
400 startsg->dma_address, startsg->dma_length,
401 sba_sg_address(startsg));
402 startsg = sg_next(startsg);
403 }
404}
405
406static void
407sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
408{
409 struct scatterlist *the_sg = startsg;
410 int the_nents = nents;
411
412 while (the_nents-- > 0) {
413 if (sba_sg_address(the_sg) == 0x0UL)
414 sba_dump_sg(NULL, startsg, nents);
415 the_sg = sg_next(the_sg);
416 }
417}
418
419#endif /* ASSERT_PDIR_SANITY */
420
421
422
423
424/**************************************************************
425*
426* I/O Pdir Resource Management
427*
428* Bits set in the resource map are in use.
429* Each bit can represent a number of pages.
430* LSbs represent lower addresses (IOVA's).
431*
432***************************************************************/
433#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
434
435/* Convert from IOVP to IOVA and vice versa. */
436#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
437#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
438
439#define PDIR_ENTRY_SIZE sizeof(u64)
440
441#define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
442
443#define RESMAP_MASK(n) ~(~0UL << (n))
444#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
445
446
447/**
448 * For most cases the normal get_order is sufficient, however it limits us
449 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
450 * It only incurs about 1 clock cycle to use this one with the static variable
451 * and makes the code more intuitive.
452 */
453static SBA_INLINE int
454get_iovp_order (unsigned long size)
455{
456 long double d = size - 1;
457 long order;
458
459 order = ia64_getf_exp(d);
460 order = order - iovp_shift - 0xffff + 1;
461 if (order < 0)
462 order = 0;
463 return order;
464}
465
466static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
467 unsigned int bitshiftcnt)
468{
469 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
470 + bitshiftcnt;
471}
472
473/**
474 * sba_search_bitmap - find free space in IO PDIR resource bitmap
475 * @ioc: IO MMU structure which owns the pdir we are interested in.
476 * @bits_wanted: number of entries we need.
477 * @use_hint: use res_hint to indicate where to start looking
478 *
479 * Find consecutive free bits in resource bitmap.
480 * Each bit represents one entry in the IO Pdir.
481 * Cool perf optimization: search for log2(size) bits at a time.
482 */
483static SBA_INLINE unsigned long
484sba_search_bitmap(struct ioc *ioc, struct device *dev,
485 unsigned long bits_wanted, int use_hint)
486{
487 unsigned long *res_ptr;
488 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
489 unsigned long flags, pide = ~0UL, tpide;
490 unsigned long boundary_size;
491 unsigned long shift;
492 int ret;
493
494 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
495 ASSERT(res_ptr < res_end);
496
497 boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
498 boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
499
500 BUG_ON(ioc->ibase & ~iovp_mask);
501 shift = ioc->ibase >> iovp_shift;
502
503 spin_lock_irqsave(&ioc->res_lock, flags);
504
505 /* Allow caller to force a search through the entire resource space */
506 if (likely(use_hint)) {
507 res_ptr = ioc->res_hint;
508 } else {
509 res_ptr = (ulong *)ioc->res_map;
510 ioc->res_bitshift = 0;
511 }
512
513 /*
514 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
515 * if a TLB entry is purged while in use. sba_mark_invalid()
516 * purges IOTLB entries in power-of-two sizes, so we also
517 * allocate IOVA space in power-of-two sizes.
518 */
519 bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
520
521 if (likely(bits_wanted == 1)) {
522 unsigned int bitshiftcnt;
523 for(; res_ptr < res_end ; res_ptr++) {
524 if (likely(*res_ptr != ~0UL)) {
525 bitshiftcnt = ffz(*res_ptr);
526 *res_ptr |= (1UL << bitshiftcnt);
527 pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
528 ioc->res_bitshift = bitshiftcnt + bits_wanted;
529 goto found_it;
530 }
531 }
532 goto not_found;
533
534 }
535
536 if (likely(bits_wanted <= BITS_PER_LONG/2)) {
537 /*
538 ** Search the resource bit map on well-aligned values.
539 ** "o" is the alignment.
540 ** We need the alignment to invalidate I/O TLB using
541 ** SBA HW features in the unmap path.
542 */
543 unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
544 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
545 unsigned long mask, base_mask;
546
547 base_mask = RESMAP_MASK(bits_wanted);
548 mask = base_mask << bitshiftcnt;
549
550 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
551 for(; res_ptr < res_end ; res_ptr++)
552 {
553 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
554 ASSERT(0 != mask);
555 for (; mask ; mask <<= o, bitshiftcnt += o) {
556 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
557 ret = iommu_is_span_boundary(tpide, bits_wanted,
558 shift,
559 boundary_size);
560 if ((0 == ((*res_ptr) & mask)) && !ret) {
561 *res_ptr |= mask; /* mark resources busy! */
562 pide = tpide;
563 ioc->res_bitshift = bitshiftcnt + bits_wanted;
564 goto found_it;
565 }
566 }
567
568 bitshiftcnt = 0;
569 mask = base_mask;
570
571 }
572
573 } else {
574 int qwords, bits, i;
575 unsigned long *end;
576
577 qwords = bits_wanted >> 6; /* /64 */
578 bits = bits_wanted - (qwords * BITS_PER_LONG);
579
580 end = res_end - qwords;
581
582 for (; res_ptr < end; res_ptr++) {
583 tpide = ptr_to_pide(ioc, res_ptr, 0);
584 ret = iommu_is_span_boundary(tpide, bits_wanted,
585 shift, boundary_size);
586 if (ret)
587 goto next_ptr;
588 for (i = 0 ; i < qwords ; i++) {
589 if (res_ptr[i] != 0)
590 goto next_ptr;
591 }
592 if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
593 continue;
594
595 /* Found it, mark it */
596 for (i = 0 ; i < qwords ; i++)
597 res_ptr[i] = ~0UL;
598 res_ptr[i] |= RESMAP_MASK(bits);
599
600 pide = tpide;
601 res_ptr += qwords;
602 ioc->res_bitshift = bits;
603 goto found_it;
604next_ptr:
605 ;
606 }
607 }
608
609not_found:
610 prefetch(ioc->res_map);
611 ioc->res_hint = (unsigned long *) ioc->res_map;
612 ioc->res_bitshift = 0;
613 spin_unlock_irqrestore(&ioc->res_lock, flags);
614 return (pide);
615
616found_it:
617 ioc->res_hint = res_ptr;
618 spin_unlock_irqrestore(&ioc->res_lock, flags);
619 return (pide);
620}
621
622
623/**
624 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
625 * @ioc: IO MMU structure which owns the pdir we are interested in.
626 * @size: number of bytes to create a mapping for
627 *
628 * Given a size, find consecutive unmarked and then mark those bits in the
629 * resource bit map.
630 */
631static int
632sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
633{
634 unsigned int pages_needed = size >> iovp_shift;
635#ifdef PDIR_SEARCH_TIMING
636 unsigned long itc_start;
637#endif
638 unsigned long pide;
639
640 ASSERT(pages_needed);
641 ASSERT(0 == (size & ~iovp_mask));
642
643#ifdef PDIR_SEARCH_TIMING
644 itc_start = ia64_get_itc();
645#endif
646 /*
647 ** "seek and ye shall find"...praying never hurts either...
648 */
649 pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
650 if (unlikely(pide >= (ioc->res_size << 3))) {
651 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
652 if (unlikely(pide >= (ioc->res_size << 3))) {
653#if DELAYED_RESOURCE_CNT > 0
654 unsigned long flags;
655
656 /*
657 ** With delayed resource freeing, we can give this one more shot. We're
658 ** getting close to being in trouble here, so do what we can to make this
659 ** one count.
660 */
661 spin_lock_irqsave(&ioc->saved_lock, flags);
662 if (ioc->saved_cnt > 0) {
663 struct sba_dma_pair *d;
664 int cnt = ioc->saved_cnt;
665
666 d = &(ioc->saved[ioc->saved_cnt - 1]);
667
668 spin_lock(&ioc->res_lock);
669 while (cnt--) {
670 sba_mark_invalid(ioc, d->iova, d->size);
671 sba_free_range(ioc, d->iova, d->size);
672 d--;
673 }
674 ioc->saved_cnt = 0;
675 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
676 spin_unlock(&ioc->res_lock);
677 }
678 spin_unlock_irqrestore(&ioc->saved_lock, flags);
679
680 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
681 if (unlikely(pide >= (ioc->res_size << 3))) {
682 printk(KERN_WARNING "%s: I/O MMU @ %p is"
683 "out of mapping resources, %u %u %lx\n",
684 __func__, ioc->ioc_hpa, ioc->res_size,
685 pages_needed, dma_get_seg_boundary(dev));
686 return -1;
687 }
688#else
689 printk(KERN_WARNING "%s: I/O MMU @ %p is"
690 "out of mapping resources, %u %u %lx\n",
691 __func__, ioc->ioc_hpa, ioc->res_size,
692 pages_needed, dma_get_seg_boundary(dev));
693 return -1;
694#endif
695 }
696 }
697
698#ifdef PDIR_SEARCH_TIMING
699 ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
700 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
701#endif
702
703 prefetchw(&(ioc->pdir_base[pide]));
704
705#ifdef ASSERT_PDIR_SANITY
706 /* verify the first enable bit is clear */
707 if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
708 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
709 }
710#endif
711
712 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
713 __func__, size, pages_needed, pide,
714 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
715 ioc->res_bitshift );
716
717 return (pide);
718}
719
720
721/**
722 * sba_free_range - unmark bits in IO PDIR resource bitmap
723 * @ioc: IO MMU structure which owns the pdir we are interested in.
724 * @iova: IO virtual address which was previously allocated.
725 * @size: number of bytes to create a mapping for
726 *
727 * clear bits in the ioc's resource map
728 */
729static SBA_INLINE void
730sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
731{
732 unsigned long iovp = SBA_IOVP(ioc, iova);
733 unsigned int pide = PDIR_INDEX(iovp);
734 unsigned int ridx = pide >> 3; /* convert bit to byte address */
735 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
736 int bits_not_wanted = size >> iovp_shift;
737 unsigned long m;
738
739 /* Round up to power-of-two size: see AR2305 note above */
740 bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
741 for (; bits_not_wanted > 0 ; res_ptr++) {
742
743 if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
744
745 /* these mappings start 64bit aligned */
746 *res_ptr = 0UL;
747 bits_not_wanted -= BITS_PER_LONG;
748 pide += BITS_PER_LONG;
749
750 } else {
751
752 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
753 m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
754 bits_not_wanted = 0;
755
756 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
757 bits_not_wanted, m, pide, res_ptr, *res_ptr);
758
759 ASSERT(m != 0);
760 ASSERT(bits_not_wanted);
761 ASSERT((*res_ptr & m) == m); /* verify same bits are set */
762 *res_ptr &= ~m;
763 }
764 }
765}
766
767
768/**************************************************************
769*
770* "Dynamic DMA Mapping" support (aka "Coherent I/O")
771*
772***************************************************************/
773
774/**
775 * sba_io_pdir_entry - fill in one IO PDIR entry
776 * @pdir_ptr: pointer to IO PDIR entry
777 * @vba: Virtual CPU address of buffer to map
778 *
779 * SBA Mapping Routine
780 *
781 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
782 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
783 * Each IO Pdir entry consists of 8 bytes as shown below
784 * (LSB == bit 0):
785 *
786 * 63 40 11 7 0
787 * +-+---------------------+----------------------------------+----+--------+
788 * |V| U | PPN[39:12] | U | FF |
789 * +-+---------------------+----------------------------------+----+--------+
790 *
791 * V == Valid Bit
792 * U == Unused
793 * PPN == Physical Page Number
794 *
795 * The physical address fields are filled with the results of virt_to_phys()
796 * on the vba.
797 */
798
799#if 1
800#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
801 | 0x8000000000000000ULL)
802#else
803void SBA_INLINE
804sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
805{
806 *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
807}
808#endif
809
810#ifdef ENABLE_MARK_CLEAN
811/**
812 * Since DMA is i-cache coherent, any (complete) pages that were written via
813 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
814 * flush them when they get mapped into an executable vm-area.
815 */
816static void
817mark_clean (void *addr, size_t size)
818{
819 unsigned long pg_addr, end;
820
821 pg_addr = PAGE_ALIGN((unsigned long) addr);
822 end = (unsigned long) addr + size;
823 while (pg_addr + PAGE_SIZE <= end) {
824 struct page *page = virt_to_page((void *)pg_addr);
825 set_bit(PG_arch_1, &page->flags);
826 pg_addr += PAGE_SIZE;
827 }
828}
829#endif
830
831/**
832 * sba_mark_invalid - invalidate one or more IO PDIR entries
833 * @ioc: IO MMU structure which owns the pdir we are interested in.
834 * @iova: IO Virtual Address mapped earlier
835 * @byte_cnt: number of bytes this mapping covers.
836 *
837 * Marking the IO PDIR entry(ies) as Invalid and invalidate
838 * corresponding IO TLB entry. The PCOM (Purge Command Register)
839 * is to purge stale entries in the IO TLB when unmapping entries.
840 *
841 * The PCOM register supports purging of multiple pages, with a minium
842 * of 1 page and a maximum of 2GB. Hardware requires the address be
843 * aligned to the size of the range being purged. The size of the range
844 * must be a power of 2. The "Cool perf optimization" in the
845 * allocation routine helps keep that true.
846 */
847static SBA_INLINE void
848sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
849{
850 u32 iovp = (u32) SBA_IOVP(ioc,iova);
851
852 int off = PDIR_INDEX(iovp);
853
854 /* Must be non-zero and rounded up */
855 ASSERT(byte_cnt > 0);
856 ASSERT(0 == (byte_cnt & ~iovp_mask));
857
858#ifdef ASSERT_PDIR_SANITY
859 /* Assert first pdir entry is set */
860 if (!(ioc->pdir_base[off] >> 60)) {
861 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
862 }
863#endif
864
865 if (byte_cnt <= iovp_size)
866 {
867 ASSERT(off < ioc->pdir_size);
868
869 iovp |= iovp_shift; /* set "size" field for PCOM */
870
871#ifndef FULL_VALID_PDIR
872 /*
873 ** clear I/O PDIR entry "valid" bit
874 ** Do NOT clear the rest - save it for debugging.
875 ** We should only clear bits that have previously
876 ** been enabled.
877 */
878 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
879#else
880 /*
881 ** If we want to maintain the PDIR as valid, put in
882 ** the spill page so devices prefetching won't
883 ** cause a hard fail.
884 */
885 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
886#endif
887 } else {
888 u32 t = get_iovp_order(byte_cnt) + iovp_shift;
889
890 iovp |= t;
891 ASSERT(t <= 31); /* 2GB! Max value of "size" field */
892
893 do {
894 /* verify this pdir entry is enabled */
895 ASSERT(ioc->pdir_base[off] >> 63);
896#ifndef FULL_VALID_PDIR
897 /* clear I/O Pdir entry "valid" bit first */
898 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
899#else
900 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
901#endif
902 off++;
903 byte_cnt -= iovp_size;
904 } while (byte_cnt > 0);
905 }
906
907 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
908}
909
910/**
911 * sba_map_single_attrs - map one buffer and return IOVA for DMA
912 * @dev: instance of PCI owned by the driver that's asking.
913 * @addr: driver buffer to map.
914 * @size: number of bytes to map in driver buffer.
915 * @dir: R/W or both.
916 * @attrs: optional dma attributes
917 *
918 * See Documentation/PCI/PCI-DMA-mapping.txt
919 */
920static dma_addr_t sba_map_page(struct device *dev, struct page *page,
921 unsigned long poff, size_t size,
922 enum dma_data_direction dir,
923 struct dma_attrs *attrs)
924{
925 struct ioc *ioc;
926 void *addr = page_address(page) + poff;
927 dma_addr_t iovp;
928 dma_addr_t offset;
929 u64 *pdir_start;
930 int pide;
931#ifdef ASSERT_PDIR_SANITY
932 unsigned long flags;
933#endif
934#ifdef ALLOW_IOV_BYPASS
935 unsigned long pci_addr = virt_to_phys(addr);
936#endif
937
938#ifdef ALLOW_IOV_BYPASS
939 ASSERT(to_pci_dev(dev)->dma_mask);
940 /*
941 ** Check if the PCI device can DMA to ptr... if so, just return ptr
942 */
943 if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
944 /*
945 ** Device is bit capable of DMA'ing to the buffer...
946 ** just return the PCI address of ptr
947 */
948 DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
949 "0x%lx/0x%lx\n",
950 to_pci_dev(dev)->dma_mask, pci_addr);
951 return pci_addr;
952 }
953#endif
954 ioc = GET_IOC(dev);
955 ASSERT(ioc);
956
957 prefetch(ioc->res_hint);
958
959 ASSERT(size > 0);
960 ASSERT(size <= DMA_CHUNK_SIZE);
961
962 /* save offset bits */
963 offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
964
965 /* round up to nearest iovp_size */
966 size = (size + offset + ~iovp_mask) & iovp_mask;
967
968#ifdef ASSERT_PDIR_SANITY
969 spin_lock_irqsave(&ioc->res_lock, flags);
970 if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
971 panic("Sanity check failed");
972 spin_unlock_irqrestore(&ioc->res_lock, flags);
973#endif
974
975 pide = sba_alloc_range(ioc, dev, size);
976 if (pide < 0)
977 return 0;
978
979 iovp = (dma_addr_t) pide << iovp_shift;
980
981 DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
982
983 pdir_start = &(ioc->pdir_base[pide]);
984
985 while (size > 0) {
986 ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
987 sba_io_pdir_entry(pdir_start, (unsigned long) addr);
988
989 DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
990
991 addr += iovp_size;
992 size -= iovp_size;
993 pdir_start++;
994 }
995 /* force pdir update */
996 wmb();
997
998 /* form complete address */
999#ifdef ASSERT_PDIR_SANITY
1000 spin_lock_irqsave(&ioc->res_lock, flags);
1001 sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
1002 spin_unlock_irqrestore(&ioc->res_lock, flags);
1003#endif
1004 return SBA_IOVA(ioc, iovp, offset);
1005}
1006
1007static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
1008 size_t size, enum dma_data_direction dir,
1009 struct dma_attrs *attrs)
1010{
1011 return sba_map_page(dev, virt_to_page(addr),
1012 (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
1013}
1014
1015#ifdef ENABLE_MARK_CLEAN
1016static SBA_INLINE void
1017sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
1018{
1019 u32 iovp = (u32) SBA_IOVP(ioc,iova);
1020 int off = PDIR_INDEX(iovp);
1021 void *addr;
1022
1023 if (size <= iovp_size) {
1024 addr = phys_to_virt(ioc->pdir_base[off] &
1025 ~0xE000000000000FFFULL);
1026 mark_clean(addr, size);
1027 } else {
1028 do {
1029 addr = phys_to_virt(ioc->pdir_base[off] &
1030 ~0xE000000000000FFFULL);
1031 mark_clean(addr, min(size, iovp_size));
1032 off++;
1033 size -= iovp_size;
1034 } while (size > 0);
1035 }
1036}
1037#endif
1038
1039/**
1040 * sba_unmap_single_attrs - unmap one IOVA and free resources
1041 * @dev: instance of PCI owned by the driver that's asking.
1042 * @iova: IOVA of driver buffer previously mapped.
1043 * @size: number of bytes mapped in driver buffer.
1044 * @dir: R/W or both.
1045 * @attrs: optional dma attributes
1046 *
1047 * See Documentation/PCI/PCI-DMA-mapping.txt
1048 */
1049static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
1050 enum dma_data_direction dir, struct dma_attrs *attrs)
1051{
1052 struct ioc *ioc;
1053#if DELAYED_RESOURCE_CNT > 0
1054 struct sba_dma_pair *d;
1055#endif
1056 unsigned long flags;
1057 dma_addr_t offset;
1058
1059 ioc = GET_IOC(dev);
1060 ASSERT(ioc);
1061
1062#ifdef ALLOW_IOV_BYPASS
1063 if (likely((iova & ioc->imask) != ioc->ibase)) {
1064 /*
1065 ** Address does not fall w/in IOVA, must be bypassing
1066 */
1067 DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
1068 iova);
1069
1070#ifdef ENABLE_MARK_CLEAN
1071 if (dir == DMA_FROM_DEVICE) {
1072 mark_clean(phys_to_virt(iova), size);
1073 }
1074#endif
1075 return;
1076 }
1077#endif
1078 offset = iova & ~iovp_mask;
1079
1080 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
1081
1082 iova ^= offset; /* clear offset bits */
1083 size += offset;
1084 size = ROUNDUP(size, iovp_size);
1085
1086#ifdef ENABLE_MARK_CLEAN
1087 if (dir == DMA_FROM_DEVICE)
1088 sba_mark_clean(ioc, iova, size);
1089#endif
1090
1091#if DELAYED_RESOURCE_CNT > 0
1092 spin_lock_irqsave(&ioc->saved_lock, flags);
1093 d = &(ioc->saved[ioc->saved_cnt]);
1094 d->iova = iova;
1095 d->size = size;
1096 if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1097 int cnt = ioc->saved_cnt;
1098 spin_lock(&ioc->res_lock);
1099 while (cnt--) {
1100 sba_mark_invalid(ioc, d->iova, d->size);
1101 sba_free_range(ioc, d->iova, d->size);
1102 d--;
1103 }
1104 ioc->saved_cnt = 0;
1105 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1106 spin_unlock(&ioc->res_lock);
1107 }
1108 spin_unlock_irqrestore(&ioc->saved_lock, flags);
1109#else /* DELAYED_RESOURCE_CNT == 0 */
1110 spin_lock_irqsave(&ioc->res_lock, flags);
1111 sba_mark_invalid(ioc, iova, size);
1112 sba_free_range(ioc, iova, size);
1113 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1114 spin_unlock_irqrestore(&ioc->res_lock, flags);
1115#endif /* DELAYED_RESOURCE_CNT == 0 */
1116}
1117
1118void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
1119 enum dma_data_direction dir, struct dma_attrs *attrs)
1120{
1121 sba_unmap_page(dev, iova, size, dir, attrs);
1122}
1123
1124/**
1125 * sba_alloc_coherent - allocate/map shared mem for DMA
1126 * @dev: instance of PCI owned by the driver that's asking.
1127 * @size: number of bytes mapped in driver buffer.
1128 * @dma_handle: IOVA of new buffer.
1129 *
1130 * See Documentation/PCI/PCI-DMA-mapping.txt
1131 */
1132static void *
1133sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
1134{
1135 struct ioc *ioc;
1136 void *addr;
1137
1138 ioc = GET_IOC(dev);
1139 ASSERT(ioc);
1140
1141#ifdef CONFIG_NUMA
1142 {
1143 struct page *page;
1144 page = alloc_pages_exact_node(ioc->node == MAX_NUMNODES ?
1145 numa_node_id() : ioc->node, flags,
1146 get_order(size));
1147
1148 if (unlikely(!page))
1149 return NULL;
1150
1151 addr = page_address(page);
1152 }
1153#else
1154 addr = (void *) __get_free_pages(flags, get_order(size));
1155#endif
1156 if (unlikely(!addr))
1157 return NULL;
1158
1159 memset(addr, 0, size);
1160 *dma_handle = virt_to_phys(addr);
1161
1162#ifdef ALLOW_IOV_BYPASS
1163 ASSERT(dev->coherent_dma_mask);
1164 /*
1165 ** Check if the PCI device can DMA to ptr... if so, just return ptr
1166 */
1167 if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
1168 DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1169 dev->coherent_dma_mask, *dma_handle);
1170
1171 return addr;
1172 }
1173#endif
1174
1175 /*
1176 * If device can't bypass or bypass is disabled, pass the 32bit fake
1177 * device to map single to get an iova mapping.
1178 */
1179 *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
1180 size, 0, NULL);
1181
1182 return addr;
1183}
1184
1185
1186/**
1187 * sba_free_coherent - free/unmap shared mem for DMA
1188 * @dev: instance of PCI owned by the driver that's asking.
1189 * @size: number of bytes mapped in driver buffer.
1190 * @vaddr: virtual address IOVA of "consistent" buffer.
1191 * @dma_handler: IO virtual address of "consistent" buffer.
1192 *
1193 * See Documentation/PCI/PCI-DMA-mapping.txt
1194 */
1195static void sba_free_coherent (struct device *dev, size_t size, void *vaddr,
1196 dma_addr_t dma_handle)
1197{
1198 sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL);
1199 free_pages((unsigned long) vaddr, get_order(size));
1200}
1201
1202
1203/*
1204** Since 0 is a valid pdir_base index value, can't use that
1205** to determine if a value is valid or not. Use a flag to indicate
1206** the SG list entry contains a valid pdir index.
1207*/
1208#define PIDE_FLAG 0x1UL
1209
1210#ifdef DEBUG_LARGE_SG_ENTRIES
1211int dump_run_sg = 0;
1212#endif
1213
1214
1215/**
1216 * sba_fill_pdir - write allocated SG entries into IO PDIR
1217 * @ioc: IO MMU structure which owns the pdir we are interested in.
1218 * @startsg: list of IOVA/size pairs
1219 * @nents: number of entries in startsg list
1220 *
1221 * Take preprocessed SG list and write corresponding entries
1222 * in the IO PDIR.
1223 */
1224
1225static SBA_INLINE int
1226sba_fill_pdir(
1227 struct ioc *ioc,
1228 struct scatterlist *startsg,
1229 int nents)
1230{
1231 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
1232 int n_mappings = 0;
1233 u64 *pdirp = NULL;
1234 unsigned long dma_offset = 0;
1235
1236 while (nents-- > 0) {
1237 int cnt = startsg->dma_length;
1238 startsg->dma_length = 0;
1239
1240#ifdef DEBUG_LARGE_SG_ENTRIES
1241 if (dump_run_sg)
1242 printk(" %2d : %08lx/%05x %p\n",
1243 nents, startsg->dma_address, cnt,
1244 sba_sg_address(startsg));
1245#else
1246 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1247 nents, startsg->dma_address, cnt,
1248 sba_sg_address(startsg));
1249#endif
1250 /*
1251 ** Look for the start of a new DMA stream
1252 */
1253 if (startsg->dma_address & PIDE_FLAG) {
1254 u32 pide = startsg->dma_address & ~PIDE_FLAG;
1255 dma_offset = (unsigned long) pide & ~iovp_mask;
1256 startsg->dma_address = 0;
1257 if (n_mappings)
1258 dma_sg = sg_next(dma_sg);
1259 dma_sg->dma_address = pide | ioc->ibase;
1260 pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1261 n_mappings++;
1262 }
1263
1264 /*
1265 ** Look for a VCONTIG chunk
1266 */
1267 if (cnt) {
1268 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1269 ASSERT(pdirp);
1270
1271 /* Since multiple Vcontig blocks could make up
1272 ** one DMA stream, *add* cnt to dma_len.
1273 */
1274 dma_sg->dma_length += cnt;
1275 cnt += dma_offset;
1276 dma_offset=0; /* only want offset on first chunk */
1277 cnt = ROUNDUP(cnt, iovp_size);
1278 do {
1279 sba_io_pdir_entry(pdirp, vaddr);
1280 vaddr += iovp_size;
1281 cnt -= iovp_size;
1282 pdirp++;
1283 } while (cnt > 0);
1284 }
1285 startsg = sg_next(startsg);
1286 }
1287 /* force pdir update */
1288 wmb();
1289
1290#ifdef DEBUG_LARGE_SG_ENTRIES
1291 dump_run_sg = 0;
1292#endif
1293 return(n_mappings);
1294}
1295
1296
1297/*
1298** Two address ranges are DMA contiguous *iff* "end of prev" and
1299** "start of next" are both on an IOV page boundary.
1300**
1301** (shift left is a quick trick to mask off upper bits)
1302*/
1303#define DMA_CONTIG(__X, __Y) \
1304 (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
1305
1306
1307/**
1308 * sba_coalesce_chunks - preprocess the SG list
1309 * @ioc: IO MMU structure which owns the pdir we are interested in.
1310 * @startsg: list of IOVA/size pairs
1311 * @nents: number of entries in startsg list
1312 *
1313 * First pass is to walk the SG list and determine where the breaks are
1314 * in the DMA stream. Allocates PDIR entries but does not fill them.
1315 * Returns the number of DMA chunks.
1316 *
1317 * Doing the fill separate from the coalescing/allocation keeps the
1318 * code simpler. Future enhancement could make one pass through
1319 * the sglist do both.
1320 */
1321static SBA_INLINE int
1322sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1323 struct scatterlist *startsg,
1324 int nents)
1325{
1326 struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
1327 unsigned long vcontig_len; /* len of VCONTIG chunk */
1328 unsigned long vcontig_end;
1329 struct scatterlist *dma_sg; /* next DMA stream head */
1330 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1331 int n_mappings = 0;
1332 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1333 int idx;
1334
1335 while (nents > 0) {
1336 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1337
1338 /*
1339 ** Prepare for first/next DMA stream
1340 */
1341 dma_sg = vcontig_sg = startsg;
1342 dma_len = vcontig_len = vcontig_end = startsg->length;
1343 vcontig_end += vaddr;
1344 dma_offset = vaddr & ~iovp_mask;
1345
1346 /* PARANOID: clear entries */
1347 startsg->dma_address = startsg->dma_length = 0;
1348
1349 /*
1350 ** This loop terminates one iteration "early" since
1351 ** it's always looking one "ahead".
1352 */
1353 while (--nents > 0) {
1354 unsigned long vaddr; /* tmp */
1355
1356 startsg = sg_next(startsg);
1357
1358 /* PARANOID */
1359 startsg->dma_address = startsg->dma_length = 0;
1360
1361 /* catch brokenness in SCSI layer */
1362 ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1363
1364 /*
1365 ** First make sure current dma stream won't
1366 ** exceed DMA_CHUNK_SIZE if we coalesce the
1367 ** next entry.
1368 */
1369 if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
1370 > DMA_CHUNK_SIZE)
1371 break;
1372
1373 if (dma_len + startsg->length > max_seg_size)
1374 break;
1375
1376 /*
1377 ** Then look for virtually contiguous blocks.
1378 **
1379 ** append the next transaction?
1380 */
1381 vaddr = (unsigned long) sba_sg_address(startsg);
1382 if (vcontig_end == vaddr)
1383 {
1384 vcontig_len += startsg->length;
1385 vcontig_end += startsg->length;
1386 dma_len += startsg->length;
1387 continue;
1388 }
1389
1390#ifdef DEBUG_LARGE_SG_ENTRIES
1391 dump_run_sg = (vcontig_len > iovp_size);
1392#endif
1393
1394 /*
1395 ** Not virtually contiguous.
1396 ** Terminate prev chunk.
1397 ** Start a new chunk.
1398 **
1399 ** Once we start a new VCONTIG chunk, dma_offset
1400 ** can't change. And we need the offset from the first
1401 ** chunk - not the last one. Ergo Successive chunks
1402 ** must start on page boundaries and dove tail
1403 ** with it's predecessor.
1404 */
1405 vcontig_sg->dma_length = vcontig_len;
1406
1407 vcontig_sg = startsg;
1408 vcontig_len = startsg->length;
1409
1410 /*
1411 ** 3) do the entries end/start on page boundaries?
1412 ** Don't update vcontig_end until we've checked.
1413 */
1414 if (DMA_CONTIG(vcontig_end, vaddr))
1415 {
1416 vcontig_end = vcontig_len + vaddr;
1417 dma_len += vcontig_len;
1418 continue;
1419 } else {
1420 break;
1421 }
1422 }
1423
1424 /*
1425 ** End of DMA Stream
1426 ** Terminate last VCONTIG block.
1427 ** Allocate space for DMA stream.
1428 */
1429 vcontig_sg->dma_length = vcontig_len;
1430 dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1431 ASSERT(dma_len <= DMA_CHUNK_SIZE);
1432 idx = sba_alloc_range(ioc, dev, dma_len);
1433 if (idx < 0) {
1434 dma_sg->dma_length = 0;
1435 return -1;
1436 }
1437 dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
1438 | dma_offset);
1439 n_mappings++;
1440 }
1441
1442 return n_mappings;
1443}
1444
1445static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1446 int nents, enum dma_data_direction dir,
1447 struct dma_attrs *attrs);
1448/**
1449 * sba_map_sg - map Scatter/Gather list
1450 * @dev: instance of PCI owned by the driver that's asking.
1451 * @sglist: array of buffer/length pairs
1452 * @nents: number of entries in list
1453 * @dir: R/W or both.
1454 * @attrs: optional dma attributes
1455 *
1456 * See Documentation/PCI/PCI-DMA-mapping.txt
1457 */
1458static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1459 int nents, enum dma_data_direction dir,
1460 struct dma_attrs *attrs)
1461{
1462 struct ioc *ioc;
1463 int coalesced, filled = 0;
1464#ifdef ASSERT_PDIR_SANITY
1465 unsigned long flags;
1466#endif
1467#ifdef ALLOW_IOV_BYPASS_SG
1468 struct scatterlist *sg;
1469#endif
1470
1471 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
1472 ioc = GET_IOC(dev);
1473 ASSERT(ioc);
1474
1475#ifdef ALLOW_IOV_BYPASS_SG
1476 ASSERT(to_pci_dev(dev)->dma_mask);
1477 if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1478 for_each_sg(sglist, sg, nents, filled) {
1479 sg->dma_length = sg->length;
1480 sg->dma_address = virt_to_phys(sba_sg_address(sg));
1481 }
1482 return filled;
1483 }
1484#endif
1485 /* Fast path single entry scatterlists. */
1486 if (nents == 1) {
1487 sglist->dma_length = sglist->length;
1488 sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
1489 return 1;
1490 }
1491
1492#ifdef ASSERT_PDIR_SANITY
1493 spin_lock_irqsave(&ioc->res_lock, flags);
1494 if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
1495 {
1496 sba_dump_sg(ioc, sglist, nents);
1497 panic("Check before sba_map_sg_attrs()");
1498 }
1499 spin_unlock_irqrestore(&ioc->res_lock, flags);
1500#endif
1501
1502 prefetch(ioc->res_hint);
1503
1504 /*
1505 ** First coalesce the chunks and allocate I/O pdir space
1506 **
1507 ** If this is one DMA stream, we can properly map using the
1508 ** correct virtual address associated with each DMA page.
1509 ** w/o this association, we wouldn't have coherent DMA!
1510 ** Access to the virtual address is what forces a two pass algorithm.
1511 */
1512 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1513 if (coalesced < 0) {
1514 sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
1515 return 0;
1516 }
1517
1518 /*
1519 ** Program the I/O Pdir
1520 **
1521 ** map the virtual addresses to the I/O Pdir
1522 ** o dma_address will contain the pdir index
1523 ** o dma_len will contain the number of bytes to map
1524 ** o address contains the virtual address.
1525 */
1526 filled = sba_fill_pdir(ioc, sglist, nents);
1527
1528#ifdef ASSERT_PDIR_SANITY
1529 spin_lock_irqsave(&ioc->res_lock, flags);
1530 if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
1531 {
1532 sba_dump_sg(ioc, sglist, nents);
1533 panic("Check after sba_map_sg_attrs()\n");
1534 }
1535 spin_unlock_irqrestore(&ioc->res_lock, flags);
1536#endif
1537
1538 ASSERT(coalesced == filled);
1539 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1540
1541 return filled;
1542}
1543
1544/**
1545 * sba_unmap_sg_attrs - unmap Scatter/Gather list
1546 * @dev: instance of PCI owned by the driver that's asking.
1547 * @sglist: array of buffer/length pairs
1548 * @nents: number of entries in list
1549 * @dir: R/W or both.
1550 * @attrs: optional dma attributes
1551 *
1552 * See Documentation/PCI/PCI-DMA-mapping.txt
1553 */
1554static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1555 int nents, enum dma_data_direction dir,
1556 struct dma_attrs *attrs)
1557{
1558#ifdef ASSERT_PDIR_SANITY
1559 struct ioc *ioc;
1560 unsigned long flags;
1561#endif
1562
1563 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1564 __func__, nents, sba_sg_address(sglist), sglist->length);
1565
1566#ifdef ASSERT_PDIR_SANITY
1567 ioc = GET_IOC(dev);
1568 ASSERT(ioc);
1569
1570 spin_lock_irqsave(&ioc->res_lock, flags);
1571 sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
1572 spin_unlock_irqrestore(&ioc->res_lock, flags);
1573#endif
1574
1575 while (nents && sglist->dma_length) {
1576
1577 sba_unmap_single_attrs(dev, sglist->dma_address,
1578 sglist->dma_length, dir, attrs);
1579 sglist = sg_next(sglist);
1580 nents--;
1581 }
1582
1583 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1584
1585#ifdef ASSERT_PDIR_SANITY
1586 spin_lock_irqsave(&ioc->res_lock, flags);
1587 sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
1588 spin_unlock_irqrestore(&ioc->res_lock, flags);
1589#endif
1590
1591}
1592
1593/**************************************************************
1594*
1595* Initialization and claim
1596*
1597***************************************************************/
1598
1599static void __init
1600ioc_iova_init(struct ioc *ioc)
1601{
1602 int tcnfg;
1603 int agp_found = 0;
1604 struct pci_dev *device = NULL;
1605#ifdef FULL_VALID_PDIR
1606 unsigned long index;
1607#endif
1608
1609 /*
1610 ** Firmware programs the base and size of a "safe IOVA space"
1611 ** (one that doesn't overlap memory or LMMIO space) in the
1612 ** IBASE and IMASK registers.
1613 */
1614 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1615 ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1616
1617 ioc->iov_size = ~ioc->imask + 1;
1618
1619 DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1620 __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1621 ioc->iov_size >> 20);
1622
1623 switch (iovp_size) {
1624 case 4*1024: tcnfg = 0; break;
1625 case 8*1024: tcnfg = 1; break;
1626 case 16*1024: tcnfg = 2; break;
1627 case 64*1024: tcnfg = 3; break;
1628 default:
1629 panic(PFX "Unsupported IOTLB page size %ldK",
1630 iovp_size >> 10);
1631 break;
1632 }
1633 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1634
1635 ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1636 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1637 get_order(ioc->pdir_size));
1638 if (!ioc->pdir_base)
1639 panic(PFX "Couldn't allocate I/O Page Table\n");
1640
1641 memset(ioc->pdir_base, 0, ioc->pdir_size);
1642
1643 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
1644 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1645
1646 ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1647 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1648
1649 /*
1650 ** If an AGP device is present, only use half of the IOV space
1651 ** for PCI DMA. Unfortunately we can't know ahead of time
1652 ** whether GART support will actually be used; for now we
1653 ** can just key on an AGP device found in the system.
1654 ** We program the next pdir index after we stop with a key for
1655 ** the GART code to handshake on.
1656 */
1657 for_each_pci_dev(device)
1658 agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1659
1660 if (agp_found && reserve_sba_gart) {
1661 printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
1662 ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1663 ioc->pdir_size /= 2;
1664 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1665 }
1666#ifdef FULL_VALID_PDIR
1667 /*
1668 ** Check whether the spill page has already been allocated; we don't need
1669 ** more than one across multiple SBAs.
1670 */
1671 if (!prefetch_spill_page) {
1672 char *spill_poison = "SBAIOMMU POISON";
1673 int poison_size = 16;
1674 void *poison_addr, *addr;
1675
1676 addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1677 if (!addr)
1678 panic(PFX "Couldn't allocate PDIR spill page\n");
1679
1680 poison_addr = addr;
1681 for ( ; (u64) poison_addr < (u64) addr + iovp_size; poison_addr += poison_size)
1682 memcpy(poison_addr, spill_poison, poison_size);
1683
1684 prefetch_spill_page = virt_to_phys(addr);
1685
1686 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
1687 }
1688 /*
1689 ** Set all the PDIR entries valid w/ the spill page as the target
1690 */
1691 for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1692 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1693#endif
1694
1695 /* Clear I/O TLB of any possible entries */
1696 WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
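	/* read back, presumably to flush the posted write so the purge reaches
	   the IOC before translation is enabled */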
1697 READ_REG(ioc->ioc_hpa + IOC_PCOM);
1698
1699 /* Enable IOVA translation */
1700 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1701 READ_REG(ioc->ioc_hpa + IOC_IBASE);
1702}
1703
1704static void __init
1705ioc_resource_init(struct ioc *ioc)
1706{
1707 spin_lock_init(&ioc->res_lock);
1708#if DELAYED_RESOURCE_CNT > 0
1709 spin_lock_init(&ioc->saved_lock);
1710#endif
1711
1712 /* resource map size dictated by pdir_size */
1713 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1714 ioc->res_size >>= 3; /* convert bit count to byte count */
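	/* e.g. a 2MB pdir has 256K entries and so needs a 32KB bitmap */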
1715 DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
1716
1717 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1718 get_order(ioc->res_size));
1719 if (!ioc->res_map)
1720 panic(PFX "Couldn't allocate resource map\n");
1721
1722 memset(ioc->res_map, 0, ioc->res_size);
1723 /* next available IOVP - circular search */
1724 ioc->res_hint = (unsigned long *) ioc->res_map;
1725
1726#ifdef ASSERT_PDIR_SANITY
1727 /* Mark first bit busy - i.e. no IOVA 0 */
1728 ioc->res_map[0] = 0x1;
1729 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1730#endif
1731#ifdef FULL_VALID_PDIR
1732 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1733 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1734 ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1735 | prefetch_spill_page);
1736#endif
1737
1738 DBG_INIT("%s() res_map %x %p\n", __func__,
1739 ioc->res_size, (void *) ioc->res_map);
1740}
1741
1742static void __init
1743ioc_sac_init(struct ioc *ioc)
1744{
1745 struct pci_dev *sac = NULL;
1746 struct pci_controller *controller = NULL;
1747
1748 /*
1749 * pci_alloc_coherent() must return a DMA address which is
1750 * SAC (single address cycle) addressable, so allocate a
1751 * pseudo-device to enforce that.
1752 */
1753 sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1754 if (!sac)
1755 panic(PFX "Couldn't allocate struct pci_dev");
1756
1757 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1758 if (!controller)
1759 panic(PFX "Couldn't allocate struct pci_controller");
1760
1761 controller->iommu = ioc;
1762 sac->sysdata = controller;
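	/* a 32-bit mask keeps coherent allocations below 4GB, i.e. SAC-addressable */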
1763 sac->dma_mask = 0xFFFFFFFFUL;
1764#ifdef CONFIG_PCI
1765 sac->dev.bus = &pci_bus_type;
1766#endif
1767 ioc->sac_only_dev = sac;
1768}
1769
1770static void __init
1771ioc_zx1_init(struct ioc *ioc)
1772{
1773 unsigned long rope_config;
1774 unsigned int i;
1775
1776 if (ioc->rev < 0x20)
1777 panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1778
1779 /* 38 bit memory controller + extra bit for range displaced by MMIO */
1780 ioc->dma_mask = (0x1UL << 39) - 1;
1781
1782 /*
1783 ** Clear ROPE(N)_CONFIG AO bit.
1784 ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1785 ** Overrides bit 1 in DMA Hint Sets.
1786 ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1787 */
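	/* eight ropes, each with an 8-byte register stride off ROPE0_CFG */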
1788 for (i=0; i<(8*8); i+=8) {
1789 rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1790 rope_config &= ~IOC_ROPE_AO;
1791 WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1792 }
1793}
1794
1795typedef void (initfunc)(struct ioc *);
1796
1797struct ioc_iommu {
1798 u32 func_id;
1799 char *name;
1800 initfunc *init;
1801};
1802
1803static struct ioc_iommu ioc_iommu_info[] __initdata = {
1804 { ZX1_IOC_ID, "zx1", ioc_zx1_init },
1805 { ZX2_IOC_ID, "zx2", NULL },
1806 { SX1000_IOC_ID, "sx1000", NULL },
1807 { SX2000_IOC_ID, "sx2000", NULL },
1808};
1809
1810static struct ioc * __init
1811ioc_init(unsigned long hpa, void *handle)
1812{
1813 struct ioc *ioc;
1814 struct ioc_iommu *info;
1815
1816 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1817 if (!ioc)
1818 return NULL;
1819
1820 ioc->next = ioc_list;
1821 ioc_list = ioc;
1822
1823 ioc->handle = handle;
1824 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1825
1826 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1827 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1828 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
1829
1830 for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1831 if (ioc->func_id == info->func_id) {
1832 ioc->name = info->name;
1833 if (info->init)
1834 (info->init)(ioc);
1835 }
1836 }
1837
1838 iovp_size = (1 << iovp_shift);
1839 iovp_mask = ~(iovp_size - 1);
1840
1841 DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
1842 PAGE_SIZE >> 10, iovp_size >> 10);
1843
1844 if (!ioc->name) {
1845 ioc->name = kmalloc(24, GFP_KERNEL);
1846 if (ioc->name)
1847 sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1848 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1849 else
1850 ioc->name = "Unknown";
1851 }
1852
1853 ioc_iova_init(ioc);
1854 ioc_resource_init(ioc);
1855 ioc_sac_init(ioc);
1856
1857 if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
1858 ia64_max_iommu_merge_mask = ~iovp_mask;
1859
1860 printk(KERN_INFO PFX
1861 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1862 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1863 hpa, ioc->iov_size >> 20, ioc->ibase);
1864
1865 return ioc;
1866}
1867
1868
1869
1870/**************************************************************************
1871**
1872** SBA initialization code (HW and SW)
1873**
1874** o identify SBA chip itself
1875** o FIXME: initialize DMA hints for reasonable defaults
1876**
1877**************************************************************************/
1878
1879#ifdef CONFIG_PROC_FS
1880static void *
1881ioc_start(struct seq_file *s, loff_t *pos)
1882{
1883 struct ioc *ioc;
1884 loff_t n = *pos;
1885
1886 for (ioc = ioc_list; ioc; ioc = ioc->next)
1887 if (!n--)
1888 return ioc;
1889
1890 return NULL;
1891}
1892
1893static void *
1894ioc_next(struct seq_file *s, void *v, loff_t *pos)
1895{
1896 struct ioc *ioc = v;
1897
1898 ++*pos;
1899 return ioc->next;
1900}
1901
1902static void
1903ioc_stop(struct seq_file *s, void *v)
1904{
1905}
1906
1907static int
1908ioc_show(struct seq_file *s, void *v)
1909{
1910 struct ioc *ioc = v;
1911 unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1912 int i, used = 0;
1913
1914 seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1915 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1916#ifdef CONFIG_NUMA
1917 if (ioc->node != MAX_NUMNODES)
1918 seq_printf(s, "NUMA node : %d\n", ioc->node);
1919#endif
1920 seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1921 seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
1922
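	/* each set bit in the resource map is one allocated IOVA page */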
1923 for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1924 used += hweight64(*res_ptr);
1925
1926 seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
1927 seq_printf(s, "PDIR used : %d entries\n", used);
1928
1929#ifdef PDIR_SEARCH_TIMING
1930 {
1931 unsigned long i = 0, avg = 0, min, max;
1932 min = max = ioc->avg_search[0];
1933 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1934 avg += ioc->avg_search[i];
1935 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1936 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1937 }
1938 avg /= SBA_SEARCH_SAMPLE;
1939 seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1940 min, avg, max);
1941 }
1942#endif
1943#ifndef ALLOW_IOV_BYPASS
1944 seq_printf(s, "IOVA bypass disabled\n");
1945#endif
1946 return 0;
1947}
1948
1949static const struct seq_operations ioc_seq_ops = {
1950 .start = ioc_start,
1951 .next = ioc_next,
1952 .stop = ioc_stop,
1953 .show = ioc_show
1954};
1955
1956static int
1957ioc_open(struct inode *inode, struct file *file)
1958{
1959 return seq_open(file, &ioc_seq_ops);
1960}
1961
1962static const struct file_operations ioc_fops = {
1963 .open = ioc_open,
1964 .read = seq_read,
1965 .llseek = seq_lseek,
1966 .release = seq_release
1967};
1968
1969static void __init
1970ioc_proc_init(void)
1971{
1972 struct proc_dir_entry *dir;
1973
1974 dir = proc_mkdir("bus/mckinley", NULL);
1975 if (!dir)
1976 return;
1977
1978 proc_create(ioc_list->name, 0, dir, &ioc_fops);
1979}
1980#endif
1981
1982static void
1983sba_connect_bus(struct pci_bus *bus)
1984{
1985 acpi_handle handle, parent;
1986 acpi_status status;
1987 struct ioc *ioc;
1988
1989 if (!PCI_CONTROLLER(bus))
1990 panic(PFX "no sysdata on bus %d!\n", bus->number);
1991
1992 if (PCI_CONTROLLER(bus)->iommu)
1993 return;
1994
1995 handle = PCI_CONTROLLER(bus)->acpi_handle;
1996 if (!handle)
1997 return;
1998
1999 /*
2000 * The IOC scope encloses PCI root bridges in the ACPI
2001 * namespace, so work our way out until we find an IOC we
2002 * claimed previously.
2003 */
2004 do {
2005 for (ioc = ioc_list; ioc; ioc = ioc->next)
2006 if (ioc->handle == handle) {
2007 PCI_CONTROLLER(bus)->iommu = ioc;
2008 return;
2009 }
2010
2011 status = acpi_get_parent(handle, &parent);
2012 handle = parent;
2013 } while (ACPI_SUCCESS(status));
2014
2015 printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
2016}
2017
2018#ifdef CONFIG_NUMA
2019static void __init
2020sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
2021{
2022 unsigned int node;
2023 int pxm;
2024
2025 ioc->node = MAX_NUMNODES;
2026
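	/* translate the ACPI proximity domain of this IOC into a NUMA node */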
2027 pxm = acpi_get_pxm(handle);
2028
2029 if (pxm < 0)
2030 return;
2031
2032 node = pxm_to_node(pxm);
2033
2034 if (node >= MAX_NUMNODES || !node_online(node))
2035 return;
2036
2037 ioc->node = node;
2038 return;
2039}
2040#else
2041#define sba_map_ioc_to_node(ioc, handle)
2042#endif
2043
2044static int __init
2045acpi_sba_ioc_add(struct acpi_device *device)
2046{
2047 struct ioc *ioc;
2048 acpi_status status;
2049 u64 hpa, length;
2050 struct acpi_device_info *adi;
2051
2052 status = hp_acpi_csr_space(device->handle, &hpa, &length);
2053 if (ACPI_FAILURE(status))
2054 return 1;
2055
2056 status = acpi_get_object_info(device->handle, &adi);
2057 if (ACPI_FAILURE(status))
2058 return 1;
2059
2060 /*
2061 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
2062 * root bridges, and its CSR space includes the IOC function.
2063 */
2064 if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
2065 hpa += ZX1_IOC_OFFSET;
2066 /* zx1 based systems default to kernel page size iommu pages */
2067 if (!iovp_shift)
2068 iovp_shift = min(PAGE_SHIFT, 16);
2069 }
2070 kfree(adi);
2071
2072 /*
2073 * default anything not caught above or specified on cmdline to 4k
2074 * iommu page size
2075 */
2076 if (!iovp_shift)
2077 iovp_shift = 12;
2078
2079 ioc = ioc_init(hpa, device->handle);
2080 if (!ioc)
2081 return 1;
2082
2083 /* setup NUMA node association */
2084 sba_map_ioc_to_node(ioc, device->handle);
2085 return 0;
2086}
2087
2088static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2089 {"HWP0001", 0},
2090 {"HWP0004", 0},
2091 {"", 0},
2092};
2093static struct acpi_driver acpi_sba_ioc_driver = {
2094 .name = "IOC IOMMU Driver",
2095 .ids = hp_ioc_iommu_device_ids,
2096 .ops = {
2097 .add = acpi_sba_ioc_add,
2098 },
2099};
2100
2101extern struct dma_map_ops swiotlb_dma_ops;
2102
2103static int __init
2104sba_init(void)
2105{
2106 if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
2107 return 0;
2108
2109#if defined(CONFIG_IA64_GENERIC)
2110 /* If we are booting a kdump kernel, the sba_iommu will
2111 * cause devices that were not shut down properly to MCA
2112 * as soon as they are turned back on. Our only option for
2113 * a successful kdump kernel boot is to use the swiotlb.
2114 */
2115 if (is_kdump_kernel()) {
2116 dma_ops = &swiotlb_dma_ops;
2117 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2118 panic("Unable to initialize software I/O TLB:"
2119 " Try machvec=dig boot option");
2120 machvec_init("dig");
2121 return 0;
2122 }
2123#endif
2124
2125 acpi_bus_register_driver(&acpi_sba_ioc_driver);
2126 if (!ioc_list) {
2127#ifdef CONFIG_IA64_GENERIC
2128 /*
2129 * If we didn't find something sba_iommu can claim, we
2130 * need to setup the swiotlb and switch to the dig machvec.
2131 */
2132 dma_ops = &swiotlb_dma_ops;
2133 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2134 panic("Unable to find SBA IOMMU or initialize "
2135 "software I/O TLB: Try machvec=dig boot option");
2136 machvec_init("dig");
2137#else
2138 panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
2139#endif
2140 return 0;
2141 }
2142
2143#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
2144 /*
2145 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
2146 * buffer setup to support devices with smaller DMA masks than
2147 * sba_iommu can handle.
2148 */
2149 if (ia64_platform_is("hpzx1_swiotlb")) {
2150 extern void hwsw_init(void);
2151
2152 hwsw_init();
2153 }
2154#endif
2155
2156#ifdef CONFIG_PCI
2157 {
2158 struct pci_bus *b = NULL;
2159 while ((b = pci_find_next_bus(b)) != NULL)
2160 sba_connect_bus(b);
2161 }
2162#endif
2163
2164#ifdef CONFIG_PROC_FS
2165 ioc_proc_init();
2166#endif
2167 return 0;
2168}
2169
2170subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
2171
2172static int __init
2173nosbagart(char *str)
2174{
2175 reserve_sba_gart = 0;
2176 return 1;
2177}
2178
2179static int sba_dma_supported (struct device *dev, u64 mask)
2180{
2181 /* make sure it's at least 32bit capable */
2182 return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
2183}
2184
2185static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2186{
2187 return 0;
2188}
2189
2190__setup("nosbagart", nosbagart);
2191
2192static int __init
2193sba_page_override(char *str)
2194{
2195 unsigned long page_size;
2196
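	/*
	** memparse() accepts k/m/g suffixes; ffs(page_size) - 1 converts an
	** accepted power-of-two size to the matching shift (e.g. 65536 -> 16).
	*/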
2197 page_size = memparse(str, &str);
2198 switch (page_size) {
2199 case 4096:
2200 case 8192:
2201 case 16384:
2202 case 65536:
2203 iovp_shift = ffs(page_size) - 1;
2204 break;
2205 default:
2206 printk("%s: unknown/unsupported iommu page size %ld\n",
2207 __func__, page_size);
2208 }
2209
2210 return 1;
2211}
2212
2213__setup("sbapagesize=",sba_page_override);
2214
2215struct dma_map_ops sba_dma_ops = {
2216 .alloc_coherent = sba_alloc_coherent,
2217 .free_coherent = sba_free_coherent,
2218 .map_page = sba_map_page,
2219 .unmap_page = sba_unmap_page,
2220 .map_sg = sba_map_sg_attrs,
2221 .unmap_sg = sba_unmap_sg_attrs,
2222 .sync_single_for_cpu = machvec_dma_sync_single,
2223 .sync_sg_for_cpu = machvec_dma_sync_sg,
2224 .sync_single_for_device = machvec_dma_sync_single,
2225 .sync_sg_for_device = machvec_dma_sync_sg,
2226 .dma_supported = sba_dma_supported,
2227 .mapping_error = sba_dma_mapping_error,
2228};
2229
2230void sba_dma_init(void)
2231{
2232 dma_ops = &sba_dma_ops;
2233}