// SPDX-License-Identifier: GPL-2.0-or-later
/*
** IA64 System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2002-2005 Alex Williamson
** (c) Copyright 2002-2003 Grant Grundler
** (c) Copyright 2002-2005 Hewlett-Packard Company
**
** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
**
**
** This module initializes the IOC (I/O Controller) found on HP
** McKinley machines and their successors.
**
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <linux/bitops.h>		/* hweight64() */
#include <linux/crash_dump.h>
#include <linux/iommu-helper.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <linux/swiotlb.h>

#include <asm/delay.h>		/* ia64_get_itc() */
#include <asm/io.h>
#include <asm/page.h>		/* PAGE_OFFSET */
#include <asm/dma.h>

#include <asm/acpi-ext.h>

#define PFX "IOC: "

/*
** Enabling timing search of the pdir resource map. Output in /proc.
** Disabled by default to optimize performance.
*/
#undef PDIR_SEARCH_TIMING

/*
** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
** not defined, all DMA will be 32bit and go through the TLB.
** There's potentially a conflict in the bio merge code with us
** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
** appears to give more performance than bio-level virtual merging, we'll
** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
** completely restrict DMA to the IOMMU.
*/
#define ALLOW_IOV_BYPASS

/*
** This option specifically allows/disallows bypassing scatterlists with
** multiple entries. Coalescing these entries can allow better DMA streaming
** and in some cases shows better performance than entirely bypassing the
** IOMMU. Performance increase on the order of 1-2% sequential output/input
** using bonnie++ on a RAID0 MD device (sym2 & mpt).
*/
#undef ALLOW_IOV_BYPASS_SG

/*
** If a device prefetches beyond the end of a valid pdir entry, it will cause
** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
** disconnect on 4k boundaries and prevent such issues. If the device is
** particularly aggressive, this option will keep the entire pdir valid such
** that prefetching will hit a valid address. This could severely impact
** error containment, and is therefore off by default. The page that is
** used for spill-over is poisoned, so that should help debugging somewhat.
*/
#undef FULL_VALID_PDIR

#define ENABLE_MARK_CLEAN

/*
** The number of debug flags is a clue - this code is fragile. NOTE: since
** tightening the use of res_lock the resource bitmap and actual pdir are no
** longer guaranteed to stay in sync. The sanity checking code isn't going to
** like that.
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_BYPASS

#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
#endif

#define SBA_INLINE	__inline__
/* #define SBA_INLINE */

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#ifdef DEBUG_BYPASS
#define DBG_BYPASS(x...)	printk(x)
#else
#define DBG_BYPASS(x...)
#endif

#ifdef ASSERT_PDIR_SANITY
#define ASSERT(expr) \
	if (!(expr)) { \
		printk("\n" __FILE__ ":%d: Assertion " #expr " failed!\n", __LINE__); \
		panic(#expr); \
	}
#else
#define ASSERT(expr)
#endif

/*
** The number of pdir entries to "free" before issuing
** a read to PCOM register to flush out PCOM writes.
** Interacts with allocation granularity (ie 4 or 8 entries
** allocated and free'd/purged at a time might make this
** less interesting).
*/
#define DELAYED_RESOURCE_CNT	64
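
/*
** Example of the effect: with DELAYED_RESOURCE_CNT == 64,
** sba_unmap_page() below queues freed ranges in ioc->saved[] and only
** purges/frees all of them, followed by a single PCOM readback, once
** the queue fills, so the cost of flushing posted PCOM writes is
** amortized over 64 unmap operations.
*/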

#define PCI_DEVICE_ID_HP_SX2000_IOC	0x12ec

#define ZX1_IOC_ID	((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID	((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID	((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID	((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID	((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)

#define ZX1_IOC_OFFSET	0x1000	/* ACPI reports SBA, we want IOC */

#define IOC_FUNC_ID	0x000
#define IOC_FCLASS	0x008	/* function class, bist, header, rev... */
#define IOC_IBASE	0x300	/* IO TLB */
#define IOC_IMASK	0x308
#define IOC_PCOM	0x310
#define IOC_TCNFG	0x318
#define IOC_PDIR_BASE	0x320

#define IOC_ROPE0_CFG	0x500
#define IOC_ROPE_AO	0x10	/* Allow "Relaxed Ordering" */


/* AGP GART driver looks for this */
#define ZX1_SBA_IOMMU_COOKIE	0x0000badbadc0ffeeUL

/*
** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
**
** Some IOCs (sx1000) can run at the above page sizes, but are
** really only supported using the IOC at a 4k page size.
**
** iovp_size could only be greater than PAGE_SIZE if we are
** confident the drivers really only touch the next physical
** page iff that driver instance owns it.
*/
static unsigned long iovp_size;
static unsigned long iovp_shift;
static unsigned long iovp_mask;

struct ioc {
	void __iomem	*ioc_hpa;	/* I/O MMU base address */
	char		*res_map;	/* resource map, bit == pdir entry */
	u64		*pdir_base;	/* physical base address */
	unsigned long	ibase;		/* pdir IOV Space base */
	unsigned long	imask;		/* pdir IOV Space mask */

	unsigned long	*res_hint;	/* next avail IOVP - circular search */
	unsigned long	dma_mask;
	spinlock_t	res_lock;	/* protects the resource bitmap, but must be held when */
					/* clearing pdir to prevent races with allocations. */
	unsigned int	res_bitshift;	/* from the RIGHT! */
	unsigned int	res_size;	/* size of resource map in bytes */
#ifdef CONFIG_NUMA
	unsigned int	node;		/* node where this IOC lives */
#endif
#if DELAYED_RESOURCE_CNT > 0
	spinlock_t	saved_lock;	/* may want to try to get this on a separate cacheline */
					/* than res_lock for bigger systems. */
	int		saved_cnt;
	struct sba_dma_pair {
		dma_addr_t	iova;
		size_t		size;
	} saved[DELAYED_RESOURCE_CNT];
#endif

#ifdef PDIR_SEARCH_TIMING
#define SBA_SEARCH_SAMPLE	0x100
	unsigned long avg_search[SBA_SEARCH_SAMPLE];
	unsigned long avg_idx;		/* current index into avg_search */
#endif

	/* Stuff we don't need in performance path */
	struct ioc	*next;		/* list of IOC's in system */
	acpi_handle	handle;		/* for multiple IOC's */
	const char	*name;
	unsigned int	func_id;
	unsigned int	rev;		/* HW revision of chip */
	u32		iov_size;
	unsigned int	pdir_size;	/* in bytes, determined by IOV Space size */
	struct pci_dev	*sac_only_dev;
};

static struct ioc *ioc_list, *ioc_found;
static int reserve_sba_gart = 1;

static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);

#define sba_sg_address(sg)	sg_virt((sg))

#ifdef FULL_VALID_PDIR
static u64 prefetch_spill_page;
#endif

#define GET_IOC(dev)	((dev_is_pci(dev)) \
			 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)

/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of the software/tuning constraint
** rather than the HW. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE	(BITS_PER_LONG*iovp_size)
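
/*
** Worked numbers (assuming a 64-bit kernel and iovp_size == 4K):
** DMA_CHUNK_SIZE is 64 * 4K == 256K, i.e. one unsigned long worth of
** resource-map bits at one IOV page per bit.
*/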

#define ROUNDUP(x,y)	(((x) + ((y)-1)) & ~((y)-1))

/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
** (ie follow writes which must reach HW with a read)
**
*/
#define READ_REG(addr)		__raw_readq(addr)
#define WRITE_REG(val, addr)	__raw_writeq(val, addr)

#ifdef DEBUG_SBA_INIT

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void
sba_dump_tlb(char *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
	DBG_INIT("IOC_IBASE    : %016lx\n", READ_REG(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : %016lx\n", READ_REG(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : %016lx\n", READ_REG(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#endif


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
	unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >> 3) & -sizeof(unsigned long)];
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
	       msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
		       (rcnt == (pide & (BITS_PER_LONG - 1)))
		       ? "    -->" : "       ",
		       rcnt, ptr, (unsigned long long) *ptr);
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
	u64 *rptr = (u64 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;		/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u64 rval;
		int rcnt; /* number of bits we might check */

		rval = *rptr;
		rcnt = 64;

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32)((*pptr >> (63)) & 0x1));
			if ((rval & 0x1) ^ pde)
			{
				/*
				** BUMMER! -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval >>= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
		       startsg->dma_address, startsg->dma_length,
		       sba_sg_address(startsg));
		startsg = sg_next(startsg);
	}
}

static void
sba_check_sg(struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	struct scatterlist *the_sg = startsg;
	int the_nents = nents;

	while (the_nents-- > 0) {
		if (sba_sg_address(the_sg) == 0x0UL)
			sba_dump_sg(NULL, startsg, nents);
		the_sg = sg_next(the_sg);
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */
#define SBA_IOVA(ioc,iovp,offset)	((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova)	((iova) & ~(ioc->ibase))
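
/*
** Illustration (values assumed, not from the hardware spec): with
** ioc->ibase == 0x40000000, iovp == 0x3000 and a byte offset of 0x42,
** SBA_IOVA() forms 0x40003042; SBA_IOVP() recovers 0x3042 by clearing
** the ibase bits. This works because the IOV window base is aligned
** well beyond the window size, so ibase and iovp bits never overlap.
*/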

#define PDIR_ENTRY_SIZE	sizeof(u64)

#define PDIR_INDEX(iovp)	((iovp)>>iovp_shift)

#define RESMAP_MASK(n)		~(~0UL << (n))
#define RESMAP_IDX_MASK		(sizeof(unsigned long) - 1)
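
/*
** Example: RESMAP_MASK(3) == ~(~0UL << 3) == 0x7, i.e. n contiguous
** set bits starting at bit 0. On a 64-bit kernel RESMAP_IDX_MASK is 7,
** so "idx & ~RESMAP_IDX_MASK" rounds a res_map byte index down to an
** unsigned long boundary.
*/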


/**
 * For most cases the normal get_order is sufficient, however it limits us
 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
 * It only incurs about 1 clock cycle to use this one with the static variable
 * and makes the code more intuitive.
 */
static SBA_INLINE int
get_iovp_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - iovp_shift - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}
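
/*
** How the trick above works: ia64_getf_exp() uses the IA-64 getf.exp
** instruction to read the biased exponent of the FP register holding
** (size - 1); the register-format exponent bias is 0xffff. Worked
** example (assuming iovp_shift == 12, i.e. 4K IOTLB pages): size ==
** 0x3000 gives d == 0x2FFF with biased exponent 0xffff + 13, so
** order == 13 - 12 + 1 == 2, i.e. three pages round up to a
** power-of-two allocation of four.
*/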

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
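
/*
** Example: each res_map byte covers 8 pdir entries, hence the "<< 3".
** For res_ptr at byte 16 of res_map (the third 64-bit word) and
** bitshiftcnt == 5, the pide is (16 << 3) + 5 == 133.
*/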

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @bits_wanted: number of entries we need.
 * @use_hint: use res_hint to indicate where to start looking
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static SBA_INLINE unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted, int use_hint)
{
	unsigned long *res_ptr;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long flags, pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
	ASSERT(ioc->res_hint < res_end);	/* res_ptr isn't assigned yet; check the hint */

	boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
	boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;

	BUG_ON(ioc->ibase & ~iovp_mask);
	shift = ioc->ibase >> iovp_shift;

	spin_lock_irqsave(&ioc->res_lock, flags);

	/* Allow caller to force a search through the entire resource space */
	if (likely(use_hint)) {
		res_ptr = ioc->res_hint;
	} else {
		res_ptr = (ulong *)ioc->res_map;
		ioc->res_bitshift = 0;
	}

	/*
	 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
	 * if a TLB entry is purged while in use. sba_mark_invalid()
	 * purges IOTLB entries in power-of-two sizes, so we also
	 * allocate IOVA space in power-of-two sizes.
	 */
	bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);

	if (likely(bits_wanted == 1)) {
		unsigned int bitshiftcnt;
		for (; res_ptr < res_end ; res_ptr++) {
			if (likely(*res_ptr != ~0UL)) {
				bitshiftcnt = ffz(*res_ptr);
				*res_ptr |= (1UL << bitshiftcnt);
				pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ioc->res_bitshift = bitshiftcnt + bits_wanted;
				goto found_it;
			}
		}
		goto not_found;

	}

	if (likely(bits_wanted <= BITS_PER_LONG/2)) {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
		uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
		unsigned long mask, base_mask;

		base_mask = RESMAP_MASK(bits_wanted);
		mask = base_mask << bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		for (; res_ptr < res_end ; res_ptr++)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			ASSERT(0 != mask);
			for (; mask ; mask <<= o, bitshiftcnt += o) {
				tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
				ret = iommu_is_span_boundary(tpide, bits_wanted,
							     shift,
							     boundary_size);
				if ((0 == ((*res_ptr) & mask)) && !ret) {
					*res_ptr |= mask;	/* mark resources busy! */
					pide = tpide;
					ioc->res_bitshift = bitshiftcnt + bits_wanted;
					goto found_it;
				}
			}

			bitshiftcnt = 0;
			mask = base_mask;

		}

	} else {
		int qwords, bits, i;
		unsigned long *end;

		qwords = bits_wanted >> 6; /* /64 */
		bits = bits_wanted - (qwords * BITS_PER_LONG);

		end = res_end - qwords;

		for (; res_ptr < end; res_ptr++) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift, boundary_size);
			if (ret)
				goto next_ptr;
			for (i = 0 ; i < qwords ; i++) {
				if (res_ptr[i] != 0)
					goto next_ptr;
			}
			if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
				continue;

			/* Found it, mark it */
			for (i = 0 ; i < qwords ; i++)
				res_ptr[i] = ~0UL;
			res_ptr[i] |= RESMAP_MASK(bits);

			pide = tpide;
			res_ptr += qwords;
			ioc->res_bitshift = bits;
			goto found_it;
next_ptr:
			;
		}
	}

not_found:
	prefetch(ioc->res_map);
	ioc->res_hint = (unsigned long *) ioc->res_map;
	ioc->res_bitshift = 0;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);

found_it:
	ioc->res_hint = res_ptr;
	spin_unlock_irqrestore(&ioc->res_lock, flags);
	return (pide);
}
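
/*
** Note: on failure the not_found path returns pide still at its
** initial ~0UL, so callers (see sba_alloc_range() below) detect
** exhaustion by checking pide >= (ioc->res_size << 3), the total
** number of bits in the resource map.
*/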


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits and then mark those bits
 * in the resource bit map.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> iovp_shift;
#ifdef PDIR_SEARCH_TIMING
	unsigned long itc_start;
#endif
	unsigned long pide;

	ASSERT(pages_needed);
	ASSERT(0 == (size & ~iovp_mask));

#ifdef PDIR_SEARCH_TIMING
	itc_start = ia64_get_itc();
#endif
	/*
	** "seek and ye shall find"...praying never hurts either...
	*/
	pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
	if (unlikely(pide >= (ioc->res_size << 3))) {
		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
		if (unlikely(pide >= (ioc->res_size << 3))) {
#if DELAYED_RESOURCE_CNT > 0
			unsigned long flags;

			/*
			** With delayed resource freeing, we can give this one more shot. We're
			** getting close to being in trouble here, so do what we can to make this
			** one count.
			*/
			spin_lock_irqsave(&ioc->saved_lock, flags);
			if (ioc->saved_cnt > 0) {
				struct sba_dma_pair *d;
				int cnt = ioc->saved_cnt;

				d = &(ioc->saved[ioc->saved_cnt - 1]);

				spin_lock(&ioc->res_lock);
				while (cnt--) {
					sba_mark_invalid(ioc, d->iova, d->size);
					sba_free_range(ioc, d->iova, d->size);
					d--;
				}
				ioc->saved_cnt = 0;
				READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
				spin_unlock(&ioc->res_lock);
			}
			spin_unlock_irqrestore(&ioc->saved_lock, flags);

			pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
			if (unlikely(pide >= (ioc->res_size << 3))) {
				printk(KERN_WARNING "%s: I/O MMU @ %p is "
				       "out of mapping resources, %u %u %lx\n",
				       __func__, ioc->ioc_hpa, ioc->res_size,
				       pages_needed, dma_get_seg_boundary(dev));
				return -1;
			}
#else
			printk(KERN_WARNING "%s: I/O MMU @ %p is "
			       "out of mapping resources, %u %u %lx\n",
			       __func__, ioc->ioc_hpa, ioc->res_size,
			       pages_needed, dma_get_seg_boundary(dev));
			return -1;
#endif
		}
	}

#ifdef PDIR_SEARCH_TIMING
	ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
#endif

	prefetchw(&(ioc->pdir_base[pide]));

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if (0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift);

	return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static SBA_INLINE void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
	int bits_not_wanted = size >> iovp_shift;
	unsigned long m;

	/* Round up to power-of-two size: see AR2305 note above */
	bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
	for (; bits_not_wanted > 0 ; res_ptr++) {

		if (unlikely(bits_not_wanted > BITS_PER_LONG)) {

			/* these mappings start 64bit aligned */
			*res_ptr = 0UL;
			bits_not_wanted -= BITS_PER_LONG;
			pide += BITS_PER_LONG;

		} else {

			/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
			m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));

			DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
				bits_not_wanted, m, pide, res_ptr, *res_ptr);

			ASSERT(m != 0);
			ASSERT(bits_not_wanted);
			ASSERT((*res_ptr & m) == m); /* verify same bits are set */
			*res_ptr &= ~m;
			/* clear the count only after the debug output and
			** asserts above have seen the real value */
			bits_not_wanted = 0;
		}
	}
}


/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr: pointer to IO PDIR entry
 * @vba: Virtual CPU address of buffer to map
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
 * Each IO Pdir entry consists of 8 bytes as shown below
 * (LSB == bit 0):
 *
 *  63                    40                                 11    7        0
 * +-+---------------------+----------------------------------+----+--------+
 * |V|          U          |            PPN[39:12]            |  U |   FF   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 *  V  == Valid Bit
 *  U  == Unused
 * PPN == Physical Page Number
 *
 * The physical address fields are filled with the results of virt_to_phys()
 * on the vba.
 */

#if 1
#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
						      | 0x8000000000000000ULL)
#else
void SBA_INLINE
sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
{
	*pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
}
#endif
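
/*
** How the masking works (hedged reading of the constants): the mask
** ~0xE000000000000FFFULL clears bits 63-61 and 11-0 of vba. For ia64
** identity-mapped kernel addresses, stripping the top region bits is
** what virt_to_phys() does, and bits 11-0 drop the page offset;
** OR-ing in 0x8000000000000000ULL then sets the Valid bit. The
** disabled function variant also sets the low "FF" byte, matching the
** pattern sba_mark_invalid() clears with ~0x80000000000000FFULL.
*/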

#ifdef ENABLE_MARK_CLEAN
/**
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
static void
mark_clean (void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page((void *)pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}
#endif

/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO Virtual Address mapped earlier
 * @byte_cnt: number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as invalid and invalidate the
 * corresponding IO TLB entry. The PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static SBA_INLINE void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);

	int off = PDIR_INDEX(iovp);

	/* Must be non-zero and rounded up */
	ASSERT(byte_cnt > 0);
	ASSERT(0 == (byte_cnt & ~iovp_mask));

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set */
	if (!(ioc->pdir_base[off] >> 60)) {
		sba_dump_pdir_entry(ioc, "sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt <= iovp_size)
	{
		ASSERT(off < ioc->pdir_size);

		iovp |= iovp_shift;	/* set "size" field for PCOM */

#ifndef FULL_VALID_PDIR
		/*
		** clear I/O PDIR entry "valid" bit
		** Do NOT clear the rest - save it for debugging.
		** We should only clear bits that have previously
		** been enabled.
		*/
		ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
		/*
		** If we want to maintain the PDIR as valid, put in
		** the spill page so devices prefetching won't
		** cause a hard fail.
		*/
		ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
	} else {
		u32 t = get_iovp_order(byte_cnt) + iovp_shift;

		iovp |= t;
		ASSERT(t <= 31);	/* 2GB! Max value of "size" field */

		do {
			/* verify this pdir entry is enabled */
			ASSERT(ioc->pdir_base[off] >> 63);
#ifndef FULL_VALID_PDIR
			/* clear I/O Pdir entry "valid" bit first */
			ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
#else
			ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
#endif
			off++;
			byte_cnt -= iovp_size;
		} while (byte_cnt > 0);
	}

	WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
}

/**
 * sba_map_page - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @page: page to map
 * @poff: offset into page
 * @size: number of bytes to map
 * @dir: dma direction
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static dma_addr_t sba_map_page(struct device *dev, struct page *page,
			       unsigned long poff, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs)
{
	struct ioc *ioc;
	void *addr = page_address(page) + poff;
	dma_addr_t iovp;
	dma_addr_t offset;
	u64 *pdir_start;
	int pide;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS
	unsigned long pci_addr = virt_to_phys(addr);
#endif

#ifdef ALLOW_IOV_BYPASS
	ASSERT(to_pci_dev(dev)->dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
		/*
		** Device is capable of DMA'ing to the buffer...
		** just return the PCI address of ptr
		*/
		DBG_BYPASS("sba_map_page() bypass mask/addr: "
			   "0x%lx/0x%lx\n",
			   to_pci_dev(dev)->dma_mask, pci_addr);
		return pci_addr;
	}
#endif
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	prefetch(ioc->res_hint);

	ASSERT(size > 0);
	ASSERT(size <= DMA_CHUNK_SIZE);

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~iovp_mask;

	/* round up to nearest iovp_size */
	size = (size + offset + ~iovp_mask) & iovp_mask;

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc, "Check before sba_map_page()"))
		panic("Sanity check failed");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	pide = sba_alloc_range(ioc, dev, size);
	if (pide < 0)
		return DMA_MAPPING_ERROR;

	iovp = (dma_addr_t) pide << iovp_shift;

	DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
		sba_io_pdir_entry(pdir_start, (unsigned long) addr);

		DBG_RUN("     pdir 0x%p %lx\n", pdir_start, *pdir_start);

		addr += iovp_size;
		size -= iovp_size;
		pdir_start++;
	}
	/* force pdir update */
	wmb();

	/* form complete address */
#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_map_page()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif
	return SBA_IOVA(ioc, iovp, offset);
}

#ifdef ENABLE_MARK_CLEAN
static SBA_INLINE void
sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);
	int off = PDIR_INDEX(iovp);
	void *addr;

	if (size <= iovp_size) {
		addr = phys_to_virt(ioc->pdir_base[off] &
				    ~0xE000000000000FFFULL);
		mark_clean(addr, size);
	} else {
		do {
			addr = phys_to_virt(ioc->pdir_base[off] &
					    ~0xE000000000000FFFULL);
			mark_clean(addr, min(size, iovp_size));
			off++;
			size -= iovp_size;
		} while (size > 0);
	}
}
#endif

/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova: IOVA of driver buffer previously mapped.
 * @size: number of bytes mapped in driver buffer.
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
			   enum dma_data_direction dir, unsigned long attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS
	if (likely((iova & ioc->imask) != ioc->ibase)) {
		/*
		** Address does not fall w/in IOVA, must be bypassing
		*/
		DBG_BYPASS("sba_unmap_page() bypass addr: 0x%lx\n",
			   iova);

#ifdef ENABLE_MARK_CLEAN
		if (dir == DMA_FROM_DEVICE) {
			mark_clean(phys_to_virt(iova), size);
		}
#endif
		return;
	}
#endif
	offset = iova & ~iovp_mask;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ROUNDUP(size, iovp_size);

#ifdef ENABLE_MARK_CLEAN
	if (dir == DMA_FROM_DEVICE)
		sba_mark_clean(ioc, iova, size);
#endif

#if DELAYED_RESOURCE_CNT > 0
	spin_lock_irqsave(&ioc->saved_lock, flags);
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
		int cnt = ioc->saved_cnt;
		spin_lock(&ioc->res_lock);
		while (cnt--) {
			sba_mark_invalid(ioc, d->iova, d->size);
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;
		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
		spin_unlock(&ioc->res_lock);
	}
	spin_unlock_irqrestore(&ioc->saved_lock, flags);
#else /* DELAYED_RESOURCE_CNT == 0 */
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_mark_invalid(ioc, iova, size);
	sba_free_range(ioc, iova, size);
	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif /* DELAYED_RESOURCE_CNT == 0 */
}

/**
 * sba_alloc_coherent - allocate/map shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @dma_handle: IOVA of new buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void *
sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flags, unsigned long attrs)
{
	struct page *page;
	struct ioc *ioc;
	int node = -1;
	void *addr;

	ioc = GET_IOC(dev);
	ASSERT(ioc);
#ifdef CONFIG_NUMA
	node = ioc->node;
#endif

	page = alloc_pages_node(node, flags, get_order(size));
	if (unlikely(!page))
		return NULL;

	addr = page_address(page);
	memset(addr, 0, size);
	*dma_handle = page_to_phys(page);

#ifdef ALLOW_IOV_BYPASS
	ASSERT(dev->coherent_dma_mask);
	/*
	** Check if the PCI device can DMA to ptr... if so, just return ptr
	*/
	if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
		DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
			   dev->coherent_dma_mask, *dma_handle);

		return addr;
	}
#endif

	/*
	 * If device can't bypass or bypass is disabled, pass the 32bit fake
	 * device to map single to get an iova mapping.
	 */
	*dma_handle = sba_map_page(&ioc->sac_only_dev->dev, page, 0, size,
				   DMA_BIDIRECTIONAL, 0);
	if (dma_mapping_error(dev, *dma_handle))
		return NULL;
	return addr;
}


/**
 * sba_free_coherent - free/unmap shared mem for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @size: number of bytes mapped in driver buffer.
 * @vaddr: virtual address of the "consistent" buffer.
 * @dma_handle: IO virtual address of the "consistent" buffer.
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
			      dma_addr_t dma_handle, unsigned long attrs)
{
	sba_unmap_page(dev, dma_handle, size, 0, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x1UL

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_fill_pdir - write allocated SG entries into IO PDIR
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * Take preprocessed SG list and write corresponding entries
 * in the IO PDIR.
 */

static SBA_INLINE int
sba_fill_pdir(
	struct ioc *ioc,
	struct scatterlist *startsg,
	int nents)
{
	struct scatterlist *dma_sg = startsg;	/* pointer to current DMA */
	int n_mappings = 0;
	u64 *pdirp = NULL;
	unsigned long dma_offset = 0;

	while (nents-- > 0) {
		int cnt = startsg->dma_length;
		startsg->dma_length = 0;

#ifdef DEBUG_LARGE_SG_ENTRIES
		if (dump_run_sg)
			printk(" %2d : %08lx/%05x %p\n",
			       nents, startsg->dma_address, cnt,
			       sba_sg_address(startsg));
#else
		DBG_RUN_SG(" %d : %08lx/%05x %p\n",
			   nents, startsg->dma_address, cnt,
			   sba_sg_address(startsg));
#endif
		/*
		** Look for the start of a new DMA stream
		*/
		if (startsg->dma_address & PIDE_FLAG) {
			u32 pide = startsg->dma_address & ~PIDE_FLAG;
			dma_offset = (unsigned long) pide & ~iovp_mask;
			startsg->dma_address = 0;
			if (n_mappings)
				dma_sg = sg_next(dma_sg);
			dma_sg->dma_address = pide | ioc->ibase;
			pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
			n_mappings++;
		}

		/*
		** Look for a VCONTIG chunk
		*/
		if (cnt) {
			unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
			ASSERT(pdirp);

			/* Since multiple Vcontig blocks could make up
			** one DMA stream, *add* cnt to dma_len.
			*/
			dma_sg->dma_length += cnt;
			cnt += dma_offset;
			dma_offset = 0;	/* only want offset on first chunk */
			cnt = ROUNDUP(cnt, iovp_size);
			do {
				sba_io_pdir_entry(pdirp, vaddr);
				vaddr += iovp_size;
				cnt -= iovp_size;
				pdirp++;
			} while (cnt > 0);
		}
		startsg = sg_next(startsg);
	}
	/* force pdir update */
	wmb();

#ifdef DEBUG_LARGE_SG_ENTRIES
	dump_run_sg = 0;
#endif
	return (n_mappings);
}


/*
** Two address ranges are DMA contiguous *iff* "end of prev" and
** "start of next" are both on an IOV page boundary.
**
** (shift left is a quick trick to mask off upper bits)
*/
#define DMA_CONTIG(__X, __Y) \
	(((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
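
/*
** Example (assuming iovp_shift == 12 on a 64-bit kernel): the OR of
** both addresses is shifted left by 52 bits, so only their low 12
** bits survive; the result is 0 iff both addresses are 4K aligned,
** i.e. the two ranges may share one DMA stream.
*/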


/**
 * sba_coalesce_chunks - preprocess the SG list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: list of IOVA/size pairs
 * @nents: number of entries in startsg list
 *
 * First pass is to walk the SG list and determine where the breaks are
 * in the DMA stream. Allocates PDIR entries but does not fill them.
 * Returns the number of DMA chunks.
 *
 * Doing the fill separate from the coalescing/allocation keeps the
 * code simpler. Future enhancement could make one pass through
 * the sglist do both.
 */
static SBA_INLINE int
sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
		    struct scatterlist *startsg,
		    int nents)
{
	struct scatterlist *vcontig_sg;    /* VCONTIG chunk head */
	unsigned long vcontig_len;         /* len of VCONTIG chunk */
	unsigned long vcontig_end;
	struct scatterlist *dma_sg;        /* next DMA stream head */
	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
	int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);
	int idx;

	while (nents > 0) {
		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);

		/*
		** Prepare for first/next DMA stream
		*/
		dma_sg = vcontig_sg = startsg;
		dma_len = vcontig_len = vcontig_end = startsg->length;
		vcontig_end += vaddr;
		dma_offset = vaddr & ~iovp_mask;

		/* PARANOID: clear entries */
		startsg->dma_address = startsg->dma_length = 0;

		/*
		** This loop terminates one iteration "early" since
		** it's always looking one "ahead".
		*/
		while (--nents > 0) {
			unsigned long vaddr;	/* tmp */

			startsg = sg_next(startsg);

			/* PARANOID */
			startsg->dma_address = startsg->dma_length = 0;

			/* catch brokenness in SCSI layer */
			ASSERT(startsg->length <= DMA_CHUNK_SIZE);

			/*
			** First make sure current dma stream won't
			** exceed DMA_CHUNK_SIZE if we coalesce the
			** next entry.
			*/
			if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
			    > DMA_CHUNK_SIZE)
				break;

			if (dma_len + startsg->length > max_seg_size)
				break;

			/*
			** Then look for virtually contiguous blocks.
			**
			** append the next transaction?
			*/
			vaddr = (unsigned long) sba_sg_address(startsg);
			if (vcontig_end == vaddr)
			{
				vcontig_len += startsg->length;
				vcontig_end += startsg->length;
				dma_len += startsg->length;
				continue;
			}

#ifdef DEBUG_LARGE_SG_ENTRIES
			dump_run_sg = (vcontig_len > iovp_size);
#endif

			/*
			** Not virtually contiguous.
			** Terminate prev chunk.
			** Start a new chunk.
			**
			** Once we start a new VCONTIG chunk, dma_offset
			** can't change. And we need the offset from the first
			** chunk - not the last one. Ergo, successive chunks
			** must start on page boundaries and dovetail
			** with their predecessors.
			*/
			vcontig_sg->dma_length = vcontig_len;

			vcontig_sg = startsg;
			vcontig_len = startsg->length;

			/*
			** 3) do the entries end/start on page boundaries?
			**    Don't update vcontig_end until we've checked.
			*/
			if (DMA_CONTIG(vcontig_end, vaddr))
			{
				vcontig_end = vcontig_len + vaddr;
				dma_len += vcontig_len;
				continue;
			} else {
				break;
			}
		}

		/*
		** End of DMA Stream
		** Terminate last VCONTIG block.
		** Allocate space for DMA stream.
		*/
		vcontig_sg->dma_length = vcontig_len;
		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
		ASSERT(dma_len <= DMA_CHUNK_SIZE);
		idx = sba_alloc_range(ioc, dev, dma_len);
		if (idx < 0) {
			dma_sg->dma_length = 0;
			return -1;
		}
		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
						   | dma_offset);
		n_mappings++;
	}

	return n_mappings;
}

static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs);
/**
 * sba_map_sg_attrs - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
			    int nents, enum dma_data_direction dir,
			    unsigned long attrs)
{
	struct ioc *ioc;
	int coalesced, filled = 0;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif
#ifdef ALLOW_IOV_BYPASS_SG
	struct scatterlist *sg;
#endif

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
	ioc = GET_IOC(dev);
	ASSERT(ioc);

#ifdef ALLOW_IOV_BYPASS_SG
	ASSERT(to_pci_dev(dev)->dma_mask);
	if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
		for_each_sg(sglist, sg, nents, filled) {
			sg->dma_length = sg->length;
			sg->dma_address = virt_to_phys(sba_sg_address(sg));
		}
		return filled;
	}
#endif
	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sglist->dma_length = sglist->length;
		sglist->dma_address = sba_map_page(dev, sg_page(sglist),
				sglist->offset, sglist->length, dir, attrs);
		if (dma_mapping_error(dev, sglist->dma_address))
			return 0;
		return 1;
	}

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc, "Check before sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg_attrs()");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	prefetch(ioc->res_hint);

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
	if (coalesced < 0) {
		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
		return 0;
	}

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = sba_fill_pdir(ioc, sglist, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	if (sba_check_pdir(ioc, "Check after sba_map_sg_attrs()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg_attrs()\n");
	}
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	ASSERT(coalesced == filled);
	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}

/**
 * sba_unmap_sg_attrs - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist: array of buffer/length pairs
 * @nents: number of entries in list
 * @dir: R/W or both.
 * @attrs: optional dma attributes
 *
 * See Documentation/DMA-API-HOWTO.txt
 */
static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
			       int nents, enum dma_data_direction dir,
			       unsigned long attrs)
{
#ifdef ASSERT_PDIR_SANITY
	struct ioc *ioc;
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		   __func__, nents, sba_sg_address(sglist), sglist->length);

#ifdef ASSERT_PDIR_SANITY
	ioc = GET_IOC(dev);
	ASSERT(ioc);

	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check before sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sglist->dma_length) {

		sba_unmap_page(dev, sglist->dma_address, sglist->dma_length,
			       dir, attrs);
		sglist = sg_next(sglist);
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc, "Check after sba_unmap_sg_attrs()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

/**************************************************************
*
*   Initialization and claim
*
***************************************************************/

static void
ioc_iova_init(struct ioc *ioc)
{
	int tcnfg;
	int agp_found = 0;
	struct pci_dev *device = NULL;
#ifdef FULL_VALID_PDIR
	unsigned long index;
#endif

	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
	ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;

	ioc->iov_size = ~ioc->imask + 1;

	DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
		 __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
		 ioc->iov_size >> 20);

	switch (iovp_size) {
		case  4*1024: tcnfg = 0; break;
		case  8*1024: tcnfg = 1; break;
		case 16*1024: tcnfg = 2; break;
		case 64*1024: tcnfg = 3; break;
		default:
			panic(PFX "Unsupported IOTLB page size %ldK",
			      iovp_size >> 10);
			break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic(PFX "Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
		 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);

	ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA. Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on an AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	for_each_pci_dev(device)
		agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);

	if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
		       ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
		ioc->pdir_size /= 2;
		((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
	}
#ifdef FULL_VALID_PDIR
	/*
	** Check to see if the spill page has been allocated, we don't need more than
	** one across multiple SBAs.
	*/
	if (!prefetch_spill_page) {
		char *spill_poison = "SBAIOMMU POISON";
		int poison_size = 16;
		void *poison_addr, *addr;

		addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
		if (!addr)
			panic(PFX "Couldn't allocate PDIR spill page\n");

		poison_addr = addr;
		for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
			memcpy(poison_addr, spill_poison, poison_size);

		prefetch_spill_page = virt_to_phys(addr);

		DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
	}
	/*
	** Set all the PDIR entries valid w/ the spill page as the target
	*/
	for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
		((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
#endif

	/* Clear I/O TLB of any possible entries */
	WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
	READ_REG(ioc->ioc_hpa + IOC_PCOM);

	/* Enable IOVA translation */
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
	READ_REG(ioc->ioc_hpa + IOC_IBASE);
}

static void __init
ioc_resource_init(struct ioc *ioc)
{
	spin_lock_init(&ioc->res_lock);
#if DELAYED_RESOURCE_CNT > 0
	spin_lock_init(&ioc->saved_lock);
#endif

	/* resource map size dictated by pdir_size */
	ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
	ioc->res_size >>= 3;	/* convert bit count to byte count */
	DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);

	ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
						 get_order(ioc->res_size));
	if (!ioc->res_map)
		panic(PFX "Couldn't allocate resource map\n");

	memset(ioc->res_map, 0, ioc->res_size);
	/* next available IOVP - circular search */
	ioc->res_hint = (unsigned long *) ioc->res_map;

#ifdef ASSERT_PDIR_SANITY
	/* Mark first bit busy - ie no IOVA 0 */
	ioc->res_map[0] = 0x1;
	ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
#endif
#ifdef FULL_VALID_PDIR
	/* Mark the last resource used so we don't prefetch beyond IOVA space */
	ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
	ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
								  | prefetch_spill_page);
#endif

	DBG_INIT("%s() res_map %x %p\n", __func__,
		 ioc->res_size, (void *) ioc->res_map);
}

static void __init
ioc_sac_init(struct ioc *ioc)
{
	struct pci_dev *sac = NULL;
	struct pci_controller *controller = NULL;

	/*
	 * pci_alloc_coherent() must return a DMA address which is
	 * SAC (single address cycle) addressable, so allocate a
	 * pseudo-device to enforce that.
	 */
	sac = kzalloc(sizeof(*sac), GFP_KERNEL);
	if (!sac)
		panic(PFX "Couldn't allocate struct pci_dev");

	controller = kzalloc(sizeof(*controller), GFP_KERNEL);
	if (!controller)
		panic(PFX "Couldn't allocate struct pci_controller");

	controller->iommu = ioc;
	sac->sysdata = controller;
	sac->dma_mask = 0xFFFFFFFFUL;
	sac->dev.bus = &pci_bus_type;
	ioc->sac_only_dev = sac;
}

static void __init
ioc_zx1_init(struct ioc *ioc)
{
	unsigned long rope_config;
	unsigned int i;

	if (ioc->rev < 0x20)
		panic(PFX "IOC 2.0 or later required for IOMMU support\n");

	/* 38 bit memory controller + extra bit for range displaced by MMIO */
	ioc->dma_mask = (0x1UL << 39) - 1;

	/*
	** Clear ROPE(N)_CONFIG AO bit.
	** Disables "NT Ordering" (~= !"Relaxed Ordering")
	** Overrides bit 1 in DMA Hint Sets.
	** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
	*/
	for (i = 0; i < (8*8); i += 8) {
		rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
		rope_config &= ~IOC_ROPE_AO;
		WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
	}
}

typedef void (initfunc)(struct ioc *);

struct ioc_iommu {
	u32 func_id;
	char *name;
	initfunc *init;
};

static struct ioc_iommu ioc_iommu_info[] __initdata = {
	{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
	{ ZX2_IOC_ID, "zx2", NULL },
	{ SX1000_IOC_ID, "sx1000", NULL },
	{ SX2000_IOC_ID, "sx2000", NULL },
};

static void __init ioc_init(unsigned long hpa, struct ioc *ioc)
{
	struct ioc_iommu *info;

	ioc->next = ioc_list;
	ioc_list = ioc;

	ioc->ioc_hpa = ioremap(hpa, 0x1000);

	ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
	ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
	ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL;	/* conservative */

	for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
		if (ioc->func_id == info->func_id) {
			ioc->name = info->name;
			if (info->init)
				(info->init)(ioc);
		}
	}

	iovp_size = (1 << iovp_shift);
	iovp_mask = ~(iovp_size - 1);

	DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
		 PAGE_SIZE >> 10, iovp_size >> 10);

	if (!ioc->name) {
		ioc->name = kmalloc(24, GFP_KERNEL);
		if (ioc->name)
			sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
				ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
		else
			ioc->name = "Unknown";
	}

	ioc_iova_init(ioc);
	ioc_resource_init(ioc);
	ioc_sac_init(ioc);

	printk(KERN_INFO PFX
	       "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
	       ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
	       hpa, ioc->iov_size >> 20, ioc->ibase);
}
1828
1829
1830
1831/**************************************************************************
1832**
1833** SBA initialization code (HW and SW)
1834**
1835** o identify SBA chip itself
1836** o FIXME: initialize DMA hints for reasonable defaults
1837**
1838**************************************************************************/
1839
1840#ifdef CONFIG_PROC_FS
1841static void *
1842ioc_start(struct seq_file *s, loff_t *pos)
1843{
1844 struct ioc *ioc;
1845 loff_t n = *pos;
1846
1847 for (ioc = ioc_list; ioc; ioc = ioc->next)
1848 if (!n--)
1849 return ioc;
1850
1851 return NULL;
1852}
1853
1854static void *
1855ioc_next(struct seq_file *s, void *v, loff_t *pos)
1856{
1857 struct ioc *ioc = v;
1858
1859 ++*pos;
1860 return ioc->next;
1861}
1862
1863static void
1864ioc_stop(struct seq_file *s, void *v)
1865{
1866}
1867
1868static int
1869ioc_show(struct seq_file *s, void *v)
1870{
1871 struct ioc *ioc = v;
1872 unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1873 int i, used = 0;
1874
1875 seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1876 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1877#ifdef CONFIG_NUMA
1878 if (ioc->node != NUMA_NO_NODE)
1879 seq_printf(s, "NUMA node : %d\n", ioc->node);
1880#endif
1881 seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1882 seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
1883
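	/* Each set bit in the resource map marks one pdir entry in use. */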
	for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		used += hweight64(*res_ptr);

	seq_printf(s, "PDIR size       : %d entries\n", ioc->pdir_size >> 3);
	seq_printf(s, "PDIR used       : %d entries\n", used);

#ifdef PDIR_SEARCH_TIMING
	{
		unsigned long i = 0, avg = 0, min, max;
		min = max = ioc->avg_search[0];
		for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
			avg += ioc->avg_search[i];
			if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
			if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
		}
		avg /= SBA_SEARCH_SAMPLE;
		seq_printf(s, "Bitmap search   : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
			min, avg, max);
	}
#endif
#ifndef ALLOW_IOV_BYPASS
	seq_printf(s, "IOVA bypass disabled\n");
#endif
	return 0;
}

static const struct seq_operations ioc_seq_ops = {
	.start = ioc_start,
	.next = ioc_next,
	.stop = ioc_stop,
	.show = ioc_show
};

static void __init
ioc_proc_init(void)
{
	struct proc_dir_entry *dir;

	dir = proc_mkdir("bus/mckinley", NULL);
	if (!dir)
		return;

	proc_create_seq(ioc_list->name, 0, dir, &ioc_seq_ops);
}
#endif

static void
sba_connect_bus(struct pci_bus *bus)
{
	acpi_handle handle, parent;
	acpi_status status;
	struct ioc *ioc;

	if (!PCI_CONTROLLER(bus))
		panic(PFX "no sysdata on bus %d!\n", bus->number);

	if (PCI_CONTROLLER(bus)->iommu)
		return;

	handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
	if (!handle)
		return;

	/*
	 * The IOC scope encloses PCI root bridges in the ACPI
	 * namespace, so work our way out until we find an IOC we
	 * claimed previously.
	 */
	do {
		for (ioc = ioc_list; ioc; ioc = ioc->next)
			if (ioc->handle == handle) {
				PCI_CONTROLLER(bus)->iommu = ioc;
				return;
			}

		status = acpi_get_parent(handle, &parent);
		handle = parent;
	} while (ACPI_SUCCESS(status));

	printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
}

static void __init
sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
{
#ifdef CONFIG_NUMA
	unsigned int node;

	node = acpi_get_node(handle);
	if (node != NUMA_NO_NODE && !node_online(node))
		node = NUMA_NO_NODE;

	ioc->node = node;
#endif
}

static void __init acpi_sba_ioc_add(struct ioc *ioc)
{
	acpi_handle handle = ioc->handle;
	acpi_status status;
	u64 hpa, length;
	struct acpi_device_info *adi;

	ioc_found = ioc->next;
	status = hp_acpi_csr_space(handle, &hpa, &length);
	if (ACPI_FAILURE(status))
		goto err;

	status = acpi_get_object_info(handle, &adi);
	if (ACPI_FAILURE(status))
		goto err;

	/*
	 * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
	 * root bridges, and its CSR space includes the IOC function.
	 */
	if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
		hpa += ZX1_IOC_OFFSET;
		/* zx1 based systems default to kernel page size iommu pages */
		if (!iovp_shift)
			iovp_shift = min(PAGE_SHIFT, 16);
	}
	kfree(adi);

	/*
	 * default anything not caught above or specified on cmdline to 4k
	 * iommu page size
	 */
	if (!iovp_shift)
		iovp_shift = 12;

	ioc_init(hpa, ioc);
	/* setup NUMA node association */
	sba_map_ioc_to_node(ioc, handle);
	return;

 err:
	kfree(ioc);
}

static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
	{"HWP0001", 0},
	{"HWP0004", 0},
	{"", 0},
};

static int acpi_sba_ioc_attach(struct acpi_device *device,
			       const struct acpi_device_id *not_used)
{
	struct ioc *ioc;

	ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
	if (!ioc)
		return -ENOMEM;

	ioc->next = ioc_found;
	ioc_found = ioc;
	ioc->handle = device->handle;
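	/* A positive return tells the ACPI scan core this handler claimed the device. */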
	return 1;
}


static struct acpi_scan_handler acpi_sba_ioc_handler = {
	.ids = hp_ioc_iommu_device_ids,
	.attach = acpi_sba_ioc_attach,
};

static int __init acpi_sba_ioc_init_acpi(void)
{
	return acpi_scan_add_handler(&acpi_sba_ioc_handler);
}
/* This has to run before acpi_scan_init(). */
arch_initcall(acpi_sba_ioc_init_acpi);

static int sba_dma_supported (struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
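	/* e.g. a 24-bit mask (0x00FFFFFF) is rejected; 32-bit or wider masks pass. */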
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}

static const struct dma_map_ops sba_dma_ops = {
	.alloc = sba_alloc_coherent,
	.free = sba_free_coherent,
	.map_page = sba_map_page,
	.unmap_page = sba_unmap_page,
	.map_sg = sba_map_sg_attrs,
	.unmap_sg = sba_unmap_sg_attrs,
	.dma_supported = sba_dma_supported,
	.mmap = dma_common_mmap,
	.get_sgtable = dma_common_get_sgtable,
};

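/*
** A minimal sketch of how a driver lands here (hypothetical device "dev"
** and buffer "buf"): dma_map_single(dev, buf, len, DMA_TO_DEVICE) resolves
** through sba_dma_ops to sba_map_page(), which either bypasses the IOMMU
** (64-bit capable device with ALLOW_IOV_BYPASS) or allocates pdir entries
** and returns an IOVA.
*/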
static int __init
sba_init(void)
{
	/*
	 * If we are booting a kdump kernel, the sba_iommu will cause devices
	 * that were not shutdown properly to MCA as soon as they are turned
	 * back on.  Our only option for a successful kdump kernel boot is to
	 * use swiotlb.
	 */
	if (is_kdump_kernel())
		return 0;

	/*
	 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
	 * routine, but that only happens if acpi_scan_init() has already run.
	 */
	while (ioc_found)
		acpi_sba_ioc_add(ioc_found);

	if (!ioc_list)
		return 0;

	{
		struct pci_bus *b = NULL;
		while ((b = pci_find_next_bus(b)) != NULL)
			sba_connect_bus(b);
	}

	/* no need for swiotlb with the iommu */
	swiotlb_exit();
	dma_ops = &sba_dma_ops;

#ifdef CONFIG_PROC_FS
	ioc_proc_init();
#endif
	return 0;
}

subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */

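/* "nosbagart" boot option: don't reserve half the IOVA space for agpgart. */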
static int __init
nosbagart(char *str)
{
	reserve_sba_gart = 0;
	return 1;
}

__setup("nosbagart", nosbagart);

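/*
** "sbapagesize=" boot option: e.g. "sbapagesize=64k" selects 64KB IOMMU
** pages (iovp_shift == 16).  memparse() understands the usual k/M suffixes.
*/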
static int __init
sba_page_override(char *str)
{
	unsigned long page_size;

	page_size = memparse(str, &str);
	switch (page_size) {
	case 4096:
	case 8192:
	case 16384:
	case 65536:
		iovp_shift = ffs(page_size) - 1;
		break;
	default:
		printk("%s: unknown/unsupported iommu page size %ld\n",
		       __func__, page_size);
	}

	return 1;
}

__setup("sbapagesize=", sba_page_override);
1/*
2** IA64 System Bus Adapter (SBA) I/O MMU manager
3**
4** (c) Copyright 2002-2005 Alex Williamson
5** (c) Copyright 2002-2003 Grant Grundler
6** (c) Copyright 2002-2005 Hewlett-Packard Company
7**
8** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
9** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
10**
11** This program is free software; you can redistribute it and/or modify
12** it under the terms of the GNU General Public License as published by
13** the Free Software Foundation; either version 2 of the License, or
14** (at your option) any later version.
15**
16**
17** This module initializes the IOC (I/O Controller) found on HP
18** McKinley machines and their successors.
19**
20*/
21
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/init.h>
28#include <linux/mm.h>
29#include <linux/string.h>
30#include <linux/pci.h>
31#include <linux/proc_fs.h>
32#include <linux/seq_file.h>
33#include <linux/acpi.h>
34#include <linux/efi.h>
35#include <linux/nodemask.h>
36#include <linux/bitops.h> /* hweight64() */
37#include <linux/crash_dump.h>
38#include <linux/iommu-helper.h>
39#include <linux/dma-mapping.h>
40#include <linux/prefetch.h>
41
42#include <asm/delay.h> /* ia64_get_itc() */
43#include <asm/io.h>
44#include <asm/page.h> /* PAGE_OFFSET */
45#include <asm/dma.h>
46
47#include <asm/acpi-ext.h>
48
49extern int swiotlb_late_init_with_default_size (size_t size);
50
51#define PFX "IOC: "
52
53/*
54** Enabling timing search of the pdir resource map. Output in /proc.
55** Disabled by default to optimize performance.
56*/
57#undef PDIR_SEARCH_TIMING
58
59/*
60** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
61** not defined, all DMA will be 32bit and go through the TLB.
62** There's potentially a conflict in the bio merge code with us
63** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
64** appears to give more performance than bio-level virtual merging, we'll
65** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
66** completely restrict DMA to the IOMMU.
67*/
68#define ALLOW_IOV_BYPASS
69
70/*
71** This option specifically allows/disallows bypassing scatterlists with
72** multiple entries. Coalescing these entries can allow better DMA streaming
73** and in some cases shows better performance than entirely bypassing the
74** IOMMU. Performance increase on the order of 1-2% sequential output/input
75** using bonnie++ on a RAID0 MD device (sym2 & mpt).
76*/
77#undef ALLOW_IOV_BYPASS_SG
78
79/*
80** If a device prefetches beyond the end of a valid pdir entry, it will cause
81** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
82** disconnect on 4k boundaries and prevent such issues. If the device is
83** particularly aggressive, this option will keep the entire pdir valid such
84** that prefetching will hit a valid address. This could severely impact
85** error containment, and is therefore off by default. The page that is
86** used for spill-over is poisoned, so that should help debugging somewhat.
87*/
88#undef FULL_VALID_PDIR
89
90#define ENABLE_MARK_CLEAN
91
92/*
93** The number of debug flags is a clue - this code is fragile. NOTE: since
94** tightening the use of res_lock the resource bitmap and actual pdir are no
95** longer guaranteed to stay in sync. The sanity checking code isn't going to
96** like that.
97*/
98#undef DEBUG_SBA_INIT
99#undef DEBUG_SBA_RUN
100#undef DEBUG_SBA_RUN_SG
101#undef DEBUG_SBA_RESOURCE
102#undef ASSERT_PDIR_SANITY
103#undef DEBUG_LARGE_SG_ENTRIES
104#undef DEBUG_BYPASS
105
106#if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
107#error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
108#endif
109
110#define SBA_INLINE __inline__
111/* #define SBA_INLINE */
112
113#ifdef DEBUG_SBA_INIT
114#define DBG_INIT(x...) printk(x)
115#else
116#define DBG_INIT(x...)
117#endif
118
119#ifdef DEBUG_SBA_RUN
120#define DBG_RUN(x...) printk(x)
121#else
122#define DBG_RUN(x...)
123#endif
124
125#ifdef DEBUG_SBA_RUN_SG
126#define DBG_RUN_SG(x...) printk(x)
127#else
128#define DBG_RUN_SG(x...)
129#endif
130
131
132#ifdef DEBUG_SBA_RESOURCE
133#define DBG_RES(x...) printk(x)
134#else
135#define DBG_RES(x...)
136#endif
137
138#ifdef DEBUG_BYPASS
139#define DBG_BYPASS(x...) printk(x)
140#else
141#define DBG_BYPASS(x...)
142#endif
143
144#ifdef ASSERT_PDIR_SANITY
145#define ASSERT(expr) \
146 if(!(expr)) { \
147 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
148 panic(#expr); \
149 }
150#else
151#define ASSERT(expr)
152#endif
153
154/*
155** The number of pdir entries to "free" before issuing
156** a read to PCOM register to flush out PCOM writes.
157** Interacts with allocation granularity (ie 4 or 8 entries
158** allocated and free'd/purged at a time might make this
159** less interesting).
160*/
161#define DELAYED_RESOURCE_CNT 64
162
163#define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
164
165#define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
166#define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
167#define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
168#define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
169#define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
170
171#define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
172
173#define IOC_FUNC_ID 0x000
174#define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
175#define IOC_IBASE 0x300 /* IO TLB */
176#define IOC_IMASK 0x308
177#define IOC_PCOM 0x310
178#define IOC_TCNFG 0x318
179#define IOC_PDIR_BASE 0x320
180
181#define IOC_ROPE0_CFG 0x500
182#define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
183
184
185/* AGP GART driver looks for this */
186#define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
187
188/*
189** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
190**
191** Some IOCs (sx1000) can run at the above pages sizes, but are
192** really only supported using the IOC at a 4k page size.
193**
194** iovp_size could only be greater than PAGE_SIZE if we are
195** confident the drivers really only touch the next physical
196** page iff that driver instance owns it.
197*/
198static unsigned long iovp_size;
199static unsigned long iovp_shift;
200static unsigned long iovp_mask;
201
202struct ioc {
203 void __iomem *ioc_hpa; /* I/O MMU base address */
204 char *res_map; /* resource map, bit == pdir entry */
205 u64 *pdir_base; /* physical base address */
206 unsigned long ibase; /* pdir IOV Space base */
207 unsigned long imask; /* pdir IOV Space mask */
208
209 unsigned long *res_hint; /* next avail IOVP - circular search */
210 unsigned long dma_mask;
211 spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
212 /* clearing pdir to prevent races with allocations. */
213 unsigned int res_bitshift; /* from the RIGHT! */
214 unsigned int res_size; /* size of resource map in bytes */
215#ifdef CONFIG_NUMA
216 unsigned int node; /* node where this IOC lives */
217#endif
218#if DELAYED_RESOURCE_CNT > 0
219 spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
220 /* than res_lock for bigger systems. */
221 int saved_cnt;
222 struct sba_dma_pair {
223 dma_addr_t iova;
224 size_t size;
225 } saved[DELAYED_RESOURCE_CNT];
226#endif
227
228#ifdef PDIR_SEARCH_TIMING
229#define SBA_SEARCH_SAMPLE 0x100
230 unsigned long avg_search[SBA_SEARCH_SAMPLE];
231 unsigned long avg_idx; /* current index into avg_search */
232#endif
233
234 /* Stuff we don't need in performance path */
235 struct ioc *next; /* list of IOC's in system */
236 acpi_handle handle; /* for multiple IOC's */
237 const char *name;
238 unsigned int func_id;
239 unsigned int rev; /* HW revision of chip */
240 u32 iov_size;
241 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
242 struct pci_dev *sac_only_dev;
243};
244
245static struct ioc *ioc_list, *ioc_found;
246static int reserve_sba_gart = 1;
247
248static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
249static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
250
251#define sba_sg_address(sg) sg_virt((sg))
252
253#ifdef FULL_VALID_PDIR
254static u64 prefetch_spill_page;
255#endif
256
257#ifdef CONFIG_PCI
258# define GET_IOC(dev) ((dev_is_pci(dev)) \
259 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
260#else
261# define GET_IOC(dev) NULL
262#endif
263
264/*
265** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
266** (or rather not merge) DMAs into manageable chunks.
267** On parisc, this is more of the software/tuning constraint
268** rather than the HW. I/O MMU allocation algorithms can be
269** faster with smaller sizes (to some degree).
270*/
271#define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
272
273#define ROUNDUP(x,y) ((x + ((y)-1)) & ~((y)-1))
274
275/************************************
276** SBA register read and write support
277**
278** BE WARNED: register writes are posted.
279** (ie follow writes which must reach HW with a read)
280**
281*/
282#define READ_REG(addr) __raw_readq(addr)
283#define WRITE_REG(val, addr) __raw_writeq(val, addr)
284
285#ifdef DEBUG_SBA_INIT
286
287/**
288 * sba_dump_tlb - debugging only - print IOMMU operating parameters
289 * @hpa: base address of the IOMMU
290 *
291 * Print the size/location of the IO MMU PDIR.
292 */
293static void
294sba_dump_tlb(char *hpa)
295{
296 DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
297 DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
298 DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
299 DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
300 DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
301 DBG_INIT("\n");
302}
303#endif
304
305
306#ifdef ASSERT_PDIR_SANITY
307
308/**
309 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
310 * @ioc: IO MMU structure which owns the pdir we are interested in.
311 * @msg: text to print ont the output line.
312 * @pide: pdir index.
313 *
314 * Print one entry of the IO MMU PDIR in human readable form.
315 */
316static void
317sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
318{
319 /* start printing from lowest pde in rval */
320 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
321 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
322 uint rcnt;
323
324 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
325 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
326
327 rcnt = 0;
328 while (rcnt < BITS_PER_LONG) {
329 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
330 (rcnt == (pide & (BITS_PER_LONG - 1)))
331 ? " -->" : " ",
332 rcnt, ptr, (unsigned long long) *ptr );
333 rcnt++;
334 ptr++;
335 }
336 printk(KERN_DEBUG "%s", msg);
337}
338
339
340/**
341 * sba_check_pdir - debugging only - consistency checker
342 * @ioc: IO MMU structure which owns the pdir we are interested in.
343 * @msg: text to print ont the output line.
344 *
345 * Verify the resource map and pdir state is consistent
346 */
347static int
348sba_check_pdir(struct ioc *ioc, char *msg)
349{
350 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
351 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
352 u64 *pptr = ioc->pdir_base; /* pdir ptr */
353 uint pide = 0;
354
355 while (rptr < rptr_end) {
356 u64 rval;
357 int rcnt; /* number of bits we might check */
358
359 rval = *rptr;
360 rcnt = 64;
361
362 while (rcnt) {
363 /* Get last byte and highest bit from that */
364 u32 pde = ((u32)((*pptr >> (63)) & 0x1));
365 if ((rval & 0x1) ^ pde)
366 {
367 /*
368 ** BUMMER! -- res_map != pdir --
369 ** Dump rval and matching pdir entries
370 */
371 sba_dump_pdir_entry(ioc, msg, pide);
372 return(1);
373 }
374 rcnt--;
375 rval >>= 1; /* try the next bit */
376 pptr++;
377 pide++;
378 }
379 rptr++; /* look at next word of res_map */
380 }
381 /* It'd be nice if we always got here :^) */
382 return 0;
383}
384
385
386/**
387 * sba_dump_sg - debugging only - print Scatter-Gather list
388 * @ioc: IO MMU structure which owns the pdir we are interested in.
389 * @startsg: head of the SG list
390 * @nents: number of entries in SG list
391 *
392 * print the SG list so we can verify it's correct by hand.
393 */
394static void
395sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
396{
397 while (nents-- > 0) {
398 printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
399 startsg->dma_address, startsg->dma_length,
400 sba_sg_address(startsg));
401 startsg = sg_next(startsg);
402 }
403}
404
405static void
406sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
407{
408 struct scatterlist *the_sg = startsg;
409 int the_nents = nents;
410
411 while (the_nents-- > 0) {
412 if (sba_sg_address(the_sg) == 0x0UL)
413 sba_dump_sg(NULL, startsg, nents);
414 the_sg = sg_next(the_sg);
415 }
416}
417
418#endif /* ASSERT_PDIR_SANITY */
419
420
421
422
423/**************************************************************
424*
425* I/O Pdir Resource Management
426*
427* Bits set in the resource map are in use.
428* Each bit can represent a number of pages.
429* LSbs represent lower addresses (IOVA's).
430*
431***************************************************************/
432#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
433
434/* Convert from IOVP to IOVA and vice versa. */
435#define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
436#define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
437
438#define PDIR_ENTRY_SIZE sizeof(u64)
439
440#define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
441
442#define RESMAP_MASK(n) ~(~0UL << (n))
443#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
444
445
446/**
447 * For most cases the normal get_order is sufficient, however it limits us
448 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
449 * It only incurs about 1 clock cycle to use this one with the static variable
450 * and makes the code more intuitive.
451 */
452static SBA_INLINE int
453get_iovp_order (unsigned long size)
454{
455 long double d = size - 1;
456 long order;
457
458 order = ia64_getf_exp(d);
459 order = order - iovp_shift - 0xffff + 1;
460 if (order < 0)
461 order = 0;
462 return order;
463}
464
465static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
466 unsigned int bitshiftcnt)
467{
468 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
469 + bitshiftcnt;
470}
471
472/**
473 * sba_search_bitmap - find free space in IO PDIR resource bitmap
474 * @ioc: IO MMU structure which owns the pdir we are interested in.
475 * @bits_wanted: number of entries we need.
476 * @use_hint: use res_hint to indicate where to start looking
477 *
478 * Find consecutive free bits in resource bitmap.
479 * Each bit represents one entry in the IO Pdir.
480 * Cool perf optimization: search for log2(size) bits at a time.
481 */
482static SBA_INLINE unsigned long
483sba_search_bitmap(struct ioc *ioc, struct device *dev,
484 unsigned long bits_wanted, int use_hint)
485{
486 unsigned long *res_ptr;
487 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
488 unsigned long flags, pide = ~0UL, tpide;
489 unsigned long boundary_size;
490 unsigned long shift;
491 int ret;
492
493 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
494 ASSERT(res_ptr < res_end);
495
496 boundary_size = (unsigned long long)dma_get_seg_boundary(dev) + 1;
497 boundary_size = ALIGN(boundary_size, 1ULL << iovp_shift) >> iovp_shift;
498
499 BUG_ON(ioc->ibase & ~iovp_mask);
500 shift = ioc->ibase >> iovp_shift;
501
502 spin_lock_irqsave(&ioc->res_lock, flags);
503
504 /* Allow caller to force a search through the entire resource space */
505 if (likely(use_hint)) {
506 res_ptr = ioc->res_hint;
507 } else {
508 res_ptr = (ulong *)ioc->res_map;
509 ioc->res_bitshift = 0;
510 }
511
512 /*
513 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
514 * if a TLB entry is purged while in use. sba_mark_invalid()
515 * purges IOTLB entries in power-of-two sizes, so we also
516 * allocate IOVA space in power-of-two sizes.
517 */
518 bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
519
520 if (likely(bits_wanted == 1)) {
521 unsigned int bitshiftcnt;
522 for(; res_ptr < res_end ; res_ptr++) {
523 if (likely(*res_ptr != ~0UL)) {
524 bitshiftcnt = ffz(*res_ptr);
525 *res_ptr |= (1UL << bitshiftcnt);
526 pide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
527 ioc->res_bitshift = bitshiftcnt + bits_wanted;
528 goto found_it;
529 }
530 }
531 goto not_found;
532
533 }
534
535 if (likely(bits_wanted <= BITS_PER_LONG/2)) {
536 /*
537 ** Search the resource bit map on well-aligned values.
538 ** "o" is the alignment.
539 ** We need the alignment to invalidate I/O TLB using
540 ** SBA HW features in the unmap path.
541 */
542 unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
543 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
544 unsigned long mask, base_mask;
545
546 base_mask = RESMAP_MASK(bits_wanted);
547 mask = base_mask << bitshiftcnt;
548
549 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
550 for(; res_ptr < res_end ; res_ptr++)
551 {
552 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
553 ASSERT(0 != mask);
554 for (; mask ; mask <<= o, bitshiftcnt += o) {
555 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
556 ret = iommu_is_span_boundary(tpide, bits_wanted,
557 shift,
558 boundary_size);
559 if ((0 == ((*res_ptr) & mask)) && !ret) {
560 *res_ptr |= mask; /* mark resources busy! */
561 pide = tpide;
562 ioc->res_bitshift = bitshiftcnt + bits_wanted;
563 goto found_it;
564 }
565 }
566
567 bitshiftcnt = 0;
568 mask = base_mask;
569
570 }
571
572 } else {
573 int qwords, bits, i;
574 unsigned long *end;
575
576 qwords = bits_wanted >> 6; /* /64 */
577 bits = bits_wanted - (qwords * BITS_PER_LONG);
578
579 end = res_end - qwords;
580
581 for (; res_ptr < end; res_ptr++) {
582 tpide = ptr_to_pide(ioc, res_ptr, 0);
583 ret = iommu_is_span_boundary(tpide, bits_wanted,
584 shift, boundary_size);
585 if (ret)
586 goto next_ptr;
587 for (i = 0 ; i < qwords ; i++) {
588 if (res_ptr[i] != 0)
589 goto next_ptr;
590 }
591 if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
592 continue;
593
594 /* Found it, mark it */
595 for (i = 0 ; i < qwords ; i++)
596 res_ptr[i] = ~0UL;
597 res_ptr[i] |= RESMAP_MASK(bits);
598
599 pide = tpide;
600 res_ptr += qwords;
601 ioc->res_bitshift = bits;
602 goto found_it;
603next_ptr:
604 ;
605 }
606 }
607
608not_found:
609 prefetch(ioc->res_map);
610 ioc->res_hint = (unsigned long *) ioc->res_map;
611 ioc->res_bitshift = 0;
612 spin_unlock_irqrestore(&ioc->res_lock, flags);
613 return (pide);
614
615found_it:
616 ioc->res_hint = res_ptr;
617 spin_unlock_irqrestore(&ioc->res_lock, flags);
618 return (pide);
619}
620
621
622/**
623 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
624 * @ioc: IO MMU structure which owns the pdir we are interested in.
625 * @size: number of bytes to create a mapping for
626 *
627 * Given a size, find consecutive unmarked and then mark those bits in the
628 * resource bit map.
629 */
630static int
631sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
632{
633 unsigned int pages_needed = size >> iovp_shift;
634#ifdef PDIR_SEARCH_TIMING
635 unsigned long itc_start;
636#endif
637 unsigned long pide;
638
639 ASSERT(pages_needed);
640 ASSERT(0 == (size & ~iovp_mask));
641
642#ifdef PDIR_SEARCH_TIMING
643 itc_start = ia64_get_itc();
644#endif
645 /*
646 ** "seek and ye shall find"...praying never hurts either...
647 */
648 pide = sba_search_bitmap(ioc, dev, pages_needed, 1);
649 if (unlikely(pide >= (ioc->res_size << 3))) {
650 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
651 if (unlikely(pide >= (ioc->res_size << 3))) {
652#if DELAYED_RESOURCE_CNT > 0
653 unsigned long flags;
654
655 /*
656 ** With delayed resource freeing, we can give this one more shot. We're
657 ** getting close to being in trouble here, so do what we can to make this
658 ** one count.
659 */
660 spin_lock_irqsave(&ioc->saved_lock, flags);
661 if (ioc->saved_cnt > 0) {
662 struct sba_dma_pair *d;
663 int cnt = ioc->saved_cnt;
664
665 d = &(ioc->saved[ioc->saved_cnt - 1]);
666
667 spin_lock(&ioc->res_lock);
668 while (cnt--) {
669 sba_mark_invalid(ioc, d->iova, d->size);
670 sba_free_range(ioc, d->iova, d->size);
671 d--;
672 }
673 ioc->saved_cnt = 0;
674 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
675 spin_unlock(&ioc->res_lock);
676 }
677 spin_unlock_irqrestore(&ioc->saved_lock, flags);
678
679 pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
680 if (unlikely(pide >= (ioc->res_size << 3))) {
681 printk(KERN_WARNING "%s: I/O MMU @ %p is"
682 "out of mapping resources, %u %u %lx\n",
683 __func__, ioc->ioc_hpa, ioc->res_size,
684 pages_needed, dma_get_seg_boundary(dev));
685 return -1;
686 }
687#else
688 printk(KERN_WARNING "%s: I/O MMU @ %p is"
689 "out of mapping resources, %u %u %lx\n",
690 __func__, ioc->ioc_hpa, ioc->res_size,
691 pages_needed, dma_get_seg_boundary(dev));
692 return -1;
693#endif
694 }
695 }
696
697#ifdef PDIR_SEARCH_TIMING
698 ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
699 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
700#endif
701
702 prefetchw(&(ioc->pdir_base[pide]));
703
704#ifdef ASSERT_PDIR_SANITY
705 /* verify the first enable bit is clear */
706 if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
707 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
708 }
709#endif
710
711 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
712 __func__, size, pages_needed, pide,
713 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
714 ioc->res_bitshift );
715
716 return (pide);
717}
718
719
720/**
721 * sba_free_range - unmark bits in IO PDIR resource bitmap
722 * @ioc: IO MMU structure which owns the pdir we are interested in.
723 * @iova: IO virtual address which was previously allocated.
724 * @size: number of bytes to create a mapping for
725 *
726 * clear bits in the ioc's resource map
727 */
728static SBA_INLINE void
729sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
730{
731 unsigned long iovp = SBA_IOVP(ioc, iova);
732 unsigned int pide = PDIR_INDEX(iovp);
733 unsigned int ridx = pide >> 3; /* convert bit to byte address */
734 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
735 int bits_not_wanted = size >> iovp_shift;
736 unsigned long m;
737
738 /* Round up to power-of-two size: see AR2305 note above */
739 bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
740 for (; bits_not_wanted > 0 ; res_ptr++) {
741
742 if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
743
744 /* these mappings start 64bit aligned */
745 *res_ptr = 0UL;
746 bits_not_wanted -= BITS_PER_LONG;
747 pide += BITS_PER_LONG;
748
749 } else {
750
751 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
752 m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
753 bits_not_wanted = 0;
754
755 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __func__, (uint) iova, size,
756 bits_not_wanted, m, pide, res_ptr, *res_ptr);
757
758 ASSERT(m != 0);
759 ASSERT(bits_not_wanted);
760 ASSERT((*res_ptr & m) == m); /* verify same bits are set */
761 *res_ptr &= ~m;
762 }
763 }
764}
765
766
767/**************************************************************
768*
769* "Dynamic DMA Mapping" support (aka "Coherent I/O")
770*
771***************************************************************/
772
773/**
774 * sba_io_pdir_entry - fill in one IO PDIR entry
775 * @pdir_ptr: pointer to IO PDIR entry
776 * @vba: Virtual CPU address of buffer to map
777 *
778 * SBA Mapping Routine
779 *
780 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
781 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
782 * Each IO Pdir entry consists of 8 bytes as shown below
783 * (LSB == bit 0):
784 *
785 * 63 40 11 7 0
786 * +-+---------------------+----------------------------------+----+--------+
787 * |V| U | PPN[39:12] | U | FF |
788 * +-+---------------------+----------------------------------+----+--------+
789 *
790 * V == Valid Bit
791 * U == Unused
792 * PPN == Physical Page Number
793 *
794 * The physical address fields are filled with the results of virt_to_phys()
795 * on the vba.
796 */
797
798#if 1
799#define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
800 | 0x8000000000000000ULL)
801#else
802void SBA_INLINE
803sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
804{
805 *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
806}
807#endif
808
809#ifdef ENABLE_MARK_CLEAN
810/**
811 * Since DMA is i-cache coherent, any (complete) pages that were written via
812 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
813 * flush them when they get mapped into an executable vm-area.
814 */
815static void
816mark_clean (void *addr, size_t size)
817{
818 unsigned long pg_addr, end;
819
820 pg_addr = PAGE_ALIGN((unsigned long) addr);
821 end = (unsigned long) addr + size;
822 while (pg_addr + PAGE_SIZE <= end) {
823 struct page *page = virt_to_page((void *)pg_addr);
824 set_bit(PG_arch_1, &page->flags);
825 pg_addr += PAGE_SIZE;
826 }
827}
828#endif
829
830/**
831 * sba_mark_invalid - invalidate one or more IO PDIR entries
832 * @ioc: IO MMU structure which owns the pdir we are interested in.
833 * @iova: IO Virtual Address mapped earlier
834 * @byte_cnt: number of bytes this mapping covers.
835 *
836 * Marking the IO PDIR entry(ies) as Invalid and invalidate
837 * corresponding IO TLB entry. The PCOM (Purge Command Register)
838 * is to purge stale entries in the IO TLB when unmapping entries.
839 *
840 * The PCOM register supports purging of multiple pages, with a minium
841 * of 1 page and a maximum of 2GB. Hardware requires the address be
842 * aligned to the size of the range being purged. The size of the range
843 * must be a power of 2. The "Cool perf optimization" in the
844 * allocation routine helps keep that true.
845 */
846static SBA_INLINE void
847sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
848{
849 u32 iovp = (u32) SBA_IOVP(ioc,iova);
850
851 int off = PDIR_INDEX(iovp);
852
853 /* Must be non-zero and rounded up */
854 ASSERT(byte_cnt > 0);
855 ASSERT(0 == (byte_cnt & ~iovp_mask));
856
857#ifdef ASSERT_PDIR_SANITY
858 /* Assert first pdir entry is set */
859 if (!(ioc->pdir_base[off] >> 60)) {
860 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
861 }
862#endif
863
864 if (byte_cnt <= iovp_size)
865 {
866 ASSERT(off < ioc->pdir_size);
867
868 iovp |= iovp_shift; /* set "size" field for PCOM */
869
870#ifndef FULL_VALID_PDIR
871 /*
872 ** clear I/O PDIR entry "valid" bit
873 ** Do NOT clear the rest - save it for debugging.
874 ** We should only clear bits that have previously
875 ** been enabled.
876 */
877 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
878#else
879 /*
880 ** If we want to maintain the PDIR as valid, put in
881 ** the spill page so devices prefetching won't
882 ** cause a hard fail.
883 */
884 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
885#endif
886 } else {
887 u32 t = get_iovp_order(byte_cnt) + iovp_shift;
888
889 iovp |= t;
890 ASSERT(t <= 31); /* 2GB! Max value of "size" field */
891
892 do {
893 /* verify this pdir entry is enabled */
894 ASSERT(ioc->pdir_base[off] >> 63);
895#ifndef FULL_VALID_PDIR
896 /* clear I/O Pdir entry "valid" bit first */
897 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
898#else
899 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
900#endif
901 off++;
902 byte_cnt -= iovp_size;
903 } while (byte_cnt > 0);
904 }
905
906 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
907}
908
909/**
910 * sba_map_single_attrs - map one buffer and return IOVA for DMA
911 * @dev: instance of PCI owned by the driver that's asking.
912 * @addr: driver buffer to map.
913 * @size: number of bytes to map in driver buffer.
914 * @dir: R/W or both.
915 * @attrs: optional dma attributes
916 *
917 * See Documentation/DMA-API-HOWTO.txt
918 */
919static dma_addr_t sba_map_page(struct device *dev, struct page *page,
920 unsigned long poff, size_t size,
921 enum dma_data_direction dir,
922 unsigned long attrs)
923{
924 struct ioc *ioc;
925 void *addr = page_address(page) + poff;
926 dma_addr_t iovp;
927 dma_addr_t offset;
928 u64 *pdir_start;
929 int pide;
930#ifdef ASSERT_PDIR_SANITY
931 unsigned long flags;
932#endif
933#ifdef ALLOW_IOV_BYPASS
934 unsigned long pci_addr = virt_to_phys(addr);
935#endif
936
937#ifdef ALLOW_IOV_BYPASS
938 ASSERT(to_pci_dev(dev)->dma_mask);
939 /*
940 ** Check if the PCI device can DMA to ptr... if so, just return ptr
941 */
942 if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
943 /*
944 ** Device is bit capable of DMA'ing to the buffer...
945 ** just return the PCI address of ptr
946 */
947 DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: "
948 "0x%lx/0x%lx\n",
949 to_pci_dev(dev)->dma_mask, pci_addr);
950 return pci_addr;
951 }
952#endif
953 ioc = GET_IOC(dev);
954 ASSERT(ioc);
955
956 prefetch(ioc->res_hint);
957
958 ASSERT(size > 0);
959 ASSERT(size <= DMA_CHUNK_SIZE);
960
961 /* save offset bits */
962 offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
963
964 /* round up to nearest iovp_size */
965 size = (size + offset + ~iovp_mask) & iovp_mask;
966
967#ifdef ASSERT_PDIR_SANITY
968 spin_lock_irqsave(&ioc->res_lock, flags);
969 if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()"))
970 panic("Sanity check failed");
971 spin_unlock_irqrestore(&ioc->res_lock, flags);
972#endif
973
974 pide = sba_alloc_range(ioc, dev, size);
975 if (pide < 0)
976 return 0;
977
978 iovp = (dma_addr_t) pide << iovp_shift;
979
980 DBG_RUN("%s() 0x%p -> 0x%lx\n", __func__, addr, (long) iovp | offset);
981
982 pdir_start = &(ioc->pdir_base[pide]);
983
984 while (size > 0) {
985 ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
986 sba_io_pdir_entry(pdir_start, (unsigned long) addr);
987
988 DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
989
990 addr += iovp_size;
991 size -= iovp_size;
992 pdir_start++;
993 }
994 /* force pdir update */
995 wmb();
996
997 /* form complete address */
998#ifdef ASSERT_PDIR_SANITY
999 spin_lock_irqsave(&ioc->res_lock, flags);
1000 sba_check_pdir(ioc,"Check after sba_map_single_attrs()");
1001 spin_unlock_irqrestore(&ioc->res_lock, flags);
1002#endif
1003 return SBA_IOVA(ioc, iovp, offset);
1004}
1005
1006static dma_addr_t sba_map_single_attrs(struct device *dev, void *addr,
1007 size_t size, enum dma_data_direction dir,
1008 unsigned long attrs)
1009{
1010 return sba_map_page(dev, virt_to_page(addr),
1011 (unsigned long)addr & ~PAGE_MASK, size, dir, attrs);
1012}
1013
1014#ifdef ENABLE_MARK_CLEAN
1015static SBA_INLINE void
1016sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
1017{
1018 u32 iovp = (u32) SBA_IOVP(ioc,iova);
1019 int off = PDIR_INDEX(iovp);
1020 void *addr;
1021
1022 if (size <= iovp_size) {
1023 addr = phys_to_virt(ioc->pdir_base[off] &
1024 ~0xE000000000000FFFULL);
1025 mark_clean(addr, size);
1026 } else {
1027 do {
1028 addr = phys_to_virt(ioc->pdir_base[off] &
1029 ~0xE000000000000FFFULL);
1030 mark_clean(addr, min(size, iovp_size));
1031 off++;
1032 size -= iovp_size;
1033 } while (size > 0);
1034 }
1035}
1036#endif
1037
1038/**
1039 * sba_unmap_single_attrs - unmap one IOVA and free resources
1040 * @dev: instance of PCI owned by the driver that's asking.
1041 * @iova: IOVA of driver buffer previously mapped.
1042 * @size: number of bytes mapped in driver buffer.
1043 * @dir: R/W or both.
1044 * @attrs: optional dma attributes
1045 *
1046 * See Documentation/DMA-API-HOWTO.txt
1047 */
1048static void sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
1049 enum dma_data_direction dir, unsigned long attrs)
1050{
1051 struct ioc *ioc;
1052#if DELAYED_RESOURCE_CNT > 0
1053 struct sba_dma_pair *d;
1054#endif
1055 unsigned long flags;
1056 dma_addr_t offset;
1057
1058 ioc = GET_IOC(dev);
1059 ASSERT(ioc);
1060
1061#ifdef ALLOW_IOV_BYPASS
1062 if (likely((iova & ioc->imask) != ioc->ibase)) {
1063 /*
1064 ** Address does not fall w/in IOVA, must be bypassing
1065 */
1066 DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
1067 iova);
1068
1069#ifdef ENABLE_MARK_CLEAN
1070 if (dir == DMA_FROM_DEVICE) {
1071 mark_clean(phys_to_virt(iova), size);
1072 }
1073#endif
1074 return;
1075 }
1076#endif
1077 offset = iova & ~iovp_mask;
1078
1079 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
1080
1081 iova ^= offset; /* clear offset bits */
1082 size += offset;
1083 size = ROUNDUP(size, iovp_size);
1084
1085#ifdef ENABLE_MARK_CLEAN
1086 if (dir == DMA_FROM_DEVICE)
1087 sba_mark_clean(ioc, iova, size);
1088#endif
1089
1090#if DELAYED_RESOURCE_CNT > 0
1091 spin_lock_irqsave(&ioc->saved_lock, flags);
1092 d = &(ioc->saved[ioc->saved_cnt]);
1093 d->iova = iova;
1094 d->size = size;
1095 if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1096 int cnt = ioc->saved_cnt;
1097 spin_lock(&ioc->res_lock);
1098 while (cnt--) {
1099 sba_mark_invalid(ioc, d->iova, d->size);
1100 sba_free_range(ioc, d->iova, d->size);
1101 d--;
1102 }
1103 ioc->saved_cnt = 0;
1104 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1105 spin_unlock(&ioc->res_lock);
1106 }
1107 spin_unlock_irqrestore(&ioc->saved_lock, flags);
1108#else /* DELAYED_RESOURCE_CNT == 0 */
1109 spin_lock_irqsave(&ioc->res_lock, flags);
1110 sba_mark_invalid(ioc, iova, size);
1111 sba_free_range(ioc, iova, size);
1112 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1113 spin_unlock_irqrestore(&ioc->res_lock, flags);
1114#endif /* DELAYED_RESOURCE_CNT == 0 */
1115}
1116
1117void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size,
1118 enum dma_data_direction dir, unsigned long attrs)
1119{
1120 sba_unmap_page(dev, iova, size, dir, attrs);
1121}
1122
1123/**
1124 * sba_alloc_coherent - allocate/map shared mem for DMA
1125 * @dev: instance of PCI owned by the driver that's asking.
1126 * @size: number of bytes mapped in driver buffer.
1127 * @dma_handle: IOVA of new buffer.
1128 *
1129 * See Documentation/DMA-API-HOWTO.txt
1130 */
1131static void *
1132sba_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
1133 gfp_t flags, unsigned long attrs)
1134{
1135 struct ioc *ioc;
1136 void *addr;
1137
1138 ioc = GET_IOC(dev);
1139 ASSERT(ioc);
1140
1141#ifdef CONFIG_NUMA
1142 {
1143 struct page *page;
1144
1145 page = alloc_pages_node(ioc->node, flags, get_order(size));
1146 if (unlikely(!page))
1147 return NULL;
1148
1149 addr = page_address(page);
1150 }
1151#else
1152 addr = (void *) __get_free_pages(flags, get_order(size));
1153#endif
1154 if (unlikely(!addr))
1155 return NULL;
1156
1157 memset(addr, 0, size);
1158 *dma_handle = virt_to_phys(addr);
1159
1160#ifdef ALLOW_IOV_BYPASS
1161 ASSERT(dev->coherent_dma_mask);
1162 /*
1163 ** Check if the PCI device can DMA to ptr... if so, just return ptr
1164 */
1165 if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
1166 DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1167 dev->coherent_dma_mask, *dma_handle);
1168
1169 return addr;
1170 }
1171#endif
1172
1173 /*
1174 * If device can't bypass or bypass is disabled, pass the 32bit fake
1175 * device to map single to get an iova mapping.
1176 */
1177 *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr,
1178 size, 0, 0);
1179
1180 return addr;
1181}
1182
1183
1184/**
1185 * sba_free_coherent - free/unmap shared mem for DMA
1186 * @dev: instance of PCI owned by the driver that's asking.
1187 * @size: number of bytes mapped in driver buffer.
1188 * @vaddr: virtual address IOVA of "consistent" buffer.
1189 * @dma_handler: IO virtual address of "consistent" buffer.
1190 *
1191 * See Documentation/DMA-API-HOWTO.txt
1192 */
1193static void sba_free_coherent(struct device *dev, size_t size, void *vaddr,
1194 dma_addr_t dma_handle, unsigned long attrs)
1195{
1196 sba_unmap_single_attrs(dev, dma_handle, size, 0, 0);
1197 free_pages((unsigned long) vaddr, get_order(size));
1198}
1199
1200
1201/*
1202** Since 0 is a valid pdir_base index value, can't use that
1203** to determine if a value is valid or not. Use a flag to indicate
1204** the SG list entry contains a valid pdir index.
1205*/
1206#define PIDE_FLAG 0x1UL
1207
1208#ifdef DEBUG_LARGE_SG_ENTRIES
1209int dump_run_sg = 0;
1210#endif
1211
1212
1213/**
1214 * sba_fill_pdir - write allocated SG entries into IO PDIR
1215 * @ioc: IO MMU structure which owns the pdir we are interested in.
1216 * @startsg: list of IOVA/size pairs
1217 * @nents: number of entries in startsg list
1218 *
1219 * Take preprocessed SG list and write corresponding entries
1220 * in the IO PDIR.
1221 */
1222
1223static SBA_INLINE int
1224sba_fill_pdir(
1225 struct ioc *ioc,
1226 struct scatterlist *startsg,
1227 int nents)
1228{
1229 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
1230 int n_mappings = 0;
1231 u64 *pdirp = NULL;
1232 unsigned long dma_offset = 0;
1233
1234 while (nents-- > 0) {
1235 int cnt = startsg->dma_length;
1236 startsg->dma_length = 0;
1237
1238#ifdef DEBUG_LARGE_SG_ENTRIES
1239 if (dump_run_sg)
1240 printk(" %2d : %08lx/%05x %p\n",
1241 nents, startsg->dma_address, cnt,
1242 sba_sg_address(startsg));
1243#else
1244 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1245 nents, startsg->dma_address, cnt,
1246 sba_sg_address(startsg));
1247#endif
1248 /*
1249 ** Look for the start of a new DMA stream
1250 */
1251 if (startsg->dma_address & PIDE_FLAG) {
1252 u32 pide = startsg->dma_address & ~PIDE_FLAG;
1253 dma_offset = (unsigned long) pide & ~iovp_mask;
1254 startsg->dma_address = 0;
1255 if (n_mappings)
1256 dma_sg = sg_next(dma_sg);
1257 dma_sg->dma_address = pide | ioc->ibase;
1258 pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1259 n_mappings++;
1260 }
1261
1262 /*
1263 ** Look for a VCONTIG chunk
1264 */
1265 if (cnt) {
1266 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1267 ASSERT(pdirp);
1268
1269 /* Since multiple Vcontig blocks could make up
1270 ** one DMA stream, *add* cnt to dma_len.
1271 */
1272 dma_sg->dma_length += cnt;
1273 cnt += dma_offset;
1274 dma_offset=0; /* only want offset on first chunk */
1275 cnt = ROUNDUP(cnt, iovp_size);
1276 do {
1277 sba_io_pdir_entry(pdirp, vaddr);
1278 vaddr += iovp_size;
1279 cnt -= iovp_size;
1280 pdirp++;
1281 } while (cnt > 0);
1282 }
1283 startsg = sg_next(startsg);
1284 }
1285 /* force pdir update */
1286 wmb();
1287
1288#ifdef DEBUG_LARGE_SG_ENTRIES
1289 dump_run_sg = 0;
1290#endif
1291 return(n_mappings);
1292}
1293
1294
1295/*
1296** Two address ranges are DMA contiguous *iff* "end of prev" and
1297** "start of next" are both on an IOV page boundary.
1298**
1299** (shift left is a quick trick to mask off upper bits)
1300*/
1301#define DMA_CONTIG(__X, __Y) \
1302 (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
1303
1304
1305/**
1306 * sba_coalesce_chunks - preprocess the SG list
1307 * @ioc: IO MMU structure which owns the pdir we are interested in.
1308 * @startsg: list of IOVA/size pairs
1309 * @nents: number of entries in startsg list
1310 *
1311 * First pass is to walk the SG list and determine where the breaks are
1312 * in the DMA stream. Allocates PDIR entries but does not fill them.
1313 * Returns the number of DMA chunks.
1314 *
1315 * Doing the fill separate from the coalescing/allocation keeps the
1316 * code simpler. Future enhancement could make one pass through
1317 * the sglist do both.
1318 */
1319static SBA_INLINE int
1320sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1321 struct scatterlist *startsg,
1322 int nents)
1323{
1324 struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
1325 unsigned long vcontig_len; /* len of VCONTIG chunk */
1326 unsigned long vcontig_end;
1327 struct scatterlist *dma_sg; /* next DMA stream head */
1328 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1329 int n_mappings = 0;
1330 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1331 int idx;
1332
1333 while (nents > 0) {
1334 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1335
1336 /*
1337 ** Prepare for first/next DMA stream
1338 */
1339 dma_sg = vcontig_sg = startsg;
1340 dma_len = vcontig_len = vcontig_end = startsg->length;
1341 vcontig_end += vaddr;
1342 dma_offset = vaddr & ~iovp_mask;
1343
1344 /* PARANOID: clear entries */
1345 startsg->dma_address = startsg->dma_length = 0;
1346
1347 /*
1348 ** This loop terminates one iteration "early" since
1349 ** it's always looking one "ahead".
1350 */
1351 while (--nents > 0) {
1352 unsigned long vaddr; /* tmp */
1353
1354 startsg = sg_next(startsg);
1355
1356 /* PARANOID */
1357 startsg->dma_address = startsg->dma_length = 0;
1358
1359 /* catch brokenness in SCSI layer */
1360 ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1361
1362 /*
1363 ** First make sure current dma stream won't
1364 ** exceed DMA_CHUNK_SIZE if we coalesce the
1365 ** next entry.
1366 */
1367 if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
1368 > DMA_CHUNK_SIZE)
1369 break;
1370
1371 if (dma_len + startsg->length > max_seg_size)
1372 break;
1373
1374 /*
1375 ** Then look for virtually contiguous blocks.
1376 **
1377 ** append the next transaction?
1378 */
1379 vaddr = (unsigned long) sba_sg_address(startsg);
1380 if (vcontig_end == vaddr)
1381 {
1382 vcontig_len += startsg->length;
1383 vcontig_end += startsg->length;
1384 dma_len += startsg->length;
1385 continue;
1386 }
1387
1388#ifdef DEBUG_LARGE_SG_ENTRIES
1389 dump_run_sg = (vcontig_len > iovp_size);
1390#endif
1391
1392 /*
1393 ** Not virtually contiguous.
1394 ** Terminate prev chunk.
1395 ** Start a new chunk.
1396 **
1397 ** Once we start a new VCONTIG chunk, dma_offset
1398 ** can't change. And we need the offset from the first
1399 ** chunk - not the last one. Ergo Successive chunks
1400 ** must start on page boundaries and dove tail
1401 ** with it's predecessor.
1402 */
1403 vcontig_sg->dma_length = vcontig_len;
1404
1405 vcontig_sg = startsg;
1406 vcontig_len = startsg->length;
1407
1408 /*
1409 ** 3) do the entries end/start on page boundaries?
1410 ** Don't update vcontig_end until we've checked.
1411 */
1412 if (DMA_CONTIG(vcontig_end, vaddr))
1413 {
1414 vcontig_end = vcontig_len + vaddr;
1415 dma_len += vcontig_len;
1416 continue;
1417 } else {
1418 break;
1419 }
1420 }
1421
1422 /*
1423 ** End of DMA Stream
1424 ** Terminate last VCONTIG block.
1425 ** Allocate space for DMA stream.
1426 */
1427 vcontig_sg->dma_length = vcontig_len;
1428 dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1429 ASSERT(dma_len <= DMA_CHUNK_SIZE);
1430 idx = sba_alloc_range(ioc, dev, dma_len);
1431 if (idx < 0) {
1432 dma_sg->dma_length = 0;
1433 return -1;
1434 }
1435 dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
1436 | dma_offset);
1437 n_mappings++;
1438 }
1439
1440 return n_mappings;
1441}
1442
1443static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1444 int nents, enum dma_data_direction dir,
1445 unsigned long attrs);
1446/**
1447 * sba_map_sg - map Scatter/Gather list
1448 * @dev: instance of PCI owned by the driver that's asking.
1449 * @sglist: array of buffer/length pairs
1450 * @nents: number of entries in list
1451 * @dir: R/W or both.
1452 * @attrs: optional dma attributes
1453 *
1454 * See Documentation/DMA-API-HOWTO.txt
1455 */
1456static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
1457 int nents, enum dma_data_direction dir,
1458 unsigned long attrs)
1459{
1460 struct ioc *ioc;
1461 int coalesced, filled = 0;
1462#ifdef ASSERT_PDIR_SANITY
1463 unsigned long flags;
1464#endif
1465#ifdef ALLOW_IOV_BYPASS_SG
1466 struct scatterlist *sg;
1467#endif
1468
1469 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
1470 ioc = GET_IOC(dev);
1471 ASSERT(ioc);
1472
1473#ifdef ALLOW_IOV_BYPASS_SG
1474 ASSERT(to_pci_dev(dev)->dma_mask);
1475 if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1476 for_each_sg(sglist, sg, nents, filled) {
1477 sg->dma_length = sg->length;
1478 sg->dma_address = virt_to_phys(sba_sg_address(sg));
1479 }
1480 return filled;
1481 }
1482#endif
1483 /* Fast path single entry scatterlists. */
1484 if (nents == 1) {
1485 sglist->dma_length = sglist->length;
1486 sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs);
1487 return 1;
1488 }
1489
1490#ifdef ASSERT_PDIR_SANITY
1491 spin_lock_irqsave(&ioc->res_lock, flags);
1492 if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()"))
1493 {
1494 sba_dump_sg(ioc, sglist, nents);
1495 panic("Check before sba_map_sg_attrs()");
1496 }
1497 spin_unlock_irqrestore(&ioc->res_lock, flags);
1498#endif
1499
1500 prefetch(ioc->res_hint);
1501
1502 /*
1503 ** First coalesce the chunks and allocate I/O pdir space
1504 **
1505 ** If this is one DMA stream, we can properly map using the
1506 ** correct virtual address associated with each DMA page.
1507 ** w/o this association, we wouldn't have coherent DMA!
1508 ** Access to the virtual address is what forces a two pass algorithm.
1509 */
1510 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1511 if (coalesced < 0) {
1512 sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
1513 return 0;
1514 }
1515
1516 /*
1517 ** Program the I/O Pdir
1518 **
1519 ** map the virtual addresses to the I/O Pdir
1520 ** o dma_address will contain the pdir index
1521 ** o dma_len will contain the number of bytes to map
1522 ** o address contains the virtual address.
1523 */
1524 filled = sba_fill_pdir(ioc, sglist, nents);
1525
1526#ifdef ASSERT_PDIR_SANITY
1527 spin_lock_irqsave(&ioc->res_lock, flags);
1528 if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()"))
1529 {
1530 sba_dump_sg(ioc, sglist, nents);
1531 panic("Check after sba_map_sg_attrs()\n");
1532 }
1533 spin_unlock_irqrestore(&ioc->res_lock, flags);
1534#endif
1535
1536 ASSERT(coalesced == filled);
1537 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1538
1539 return filled;
1540}
1541
1542/**
1543 * sba_unmap_sg_attrs - unmap Scatter/Gather list
1544 * @dev: instance of PCI owned by the driver that's asking.
1545 * @sglist: array of buffer/length pairs
1546 * @nents: number of entries in list
1547 * @dir: R/W or both.
1548 * @attrs: optional dma attributes
1549 *
1550 * See Documentation/DMA-API-HOWTO.txt
1551 */
1552static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
1553 int nents, enum dma_data_direction dir,
1554 unsigned long attrs)
1555{
1556#ifdef ASSERT_PDIR_SANITY
1557 struct ioc *ioc;
1558 unsigned long flags;
1559#endif
1560
1561 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1562 __func__, nents, sba_sg_address(sglist), sglist->length);
1563
1564#ifdef ASSERT_PDIR_SANITY
1565 ioc = GET_IOC(dev);
1566 ASSERT(ioc);
1567
1568 spin_lock_irqsave(&ioc->res_lock, flags);
1569 sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()");
1570 spin_unlock_irqrestore(&ioc->res_lock, flags);
1571#endif
1572
1573 while (nents && sglist->dma_length) {
1574
1575 sba_unmap_single_attrs(dev, sglist->dma_address,
1576 sglist->dma_length, dir, attrs);
1577 sglist = sg_next(sglist);
1578 nents--;
1579 }
1580
1581 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1582
1583#ifdef ASSERT_PDIR_SANITY
1584 spin_lock_irqsave(&ioc->res_lock, flags);
1585 sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()");
1586 spin_unlock_irqrestore(&ioc->res_lock, flags);
1587#endif
1588
1589}
1590
1591/**************************************************************
1592*
1593* Initialization and claim
1594*
1595***************************************************************/
1596
1597static void
1598ioc_iova_init(struct ioc *ioc)
1599{
1600 int tcnfg;
1601 int agp_found = 0;
1602 struct pci_dev *device = NULL;
1603#ifdef FULL_VALID_PDIR
1604 unsigned long index;
1605#endif
1606
1607 /*
1608 ** Firmware programs the base and size of a "safe IOVA space"
1609 ** (one that doesn't overlap memory or LMMIO space) in the
1610 ** IBASE and IMASK registers.
1611 */
1612 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1613 ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1614
1615 ioc->iov_size = ~ioc->imask + 1;
1616
1617 DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1618 __func__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1619 ioc->iov_size >> 20);
1620
1621 switch (iovp_size) {
1622 case 4*1024: tcnfg = 0; break;
1623 case 8*1024: tcnfg = 1; break;
1624 case 16*1024: tcnfg = 2; break;
1625 case 64*1024: tcnfg = 3; break;
1626 default:
1627 panic(PFX "Unsupported IOTLB page size %ldK",
1628 iovp_size >> 10);
1629 break;
1630 }
1631 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1632
1633 ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1634 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1635 get_order(ioc->pdir_size));
1636 if (!ioc->pdir_base)
1637 panic(PFX "Couldn't allocate I/O Page Table\n");
1638
1639 memset(ioc->pdir_base, 0, ioc->pdir_size);
1640
1641 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __func__,
1642 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1643
1644 ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1645 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1646
1647 /*
1648 ** If an AGP device is present, only use half of the IOV space
1649 ** for PCI DMA. Unfortunately we can't know ahead of time
1650 ** whether GART support will actually be used, for now we
1651 ** can just key on an AGP device found in the system.
1652 ** We program the next pdir index after we stop w/ a key for
1653 ** the GART code to handshake on.
1654 */
1655 for_each_pci_dev(device)
1656 agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1657
1658 if (agp_found && reserve_sba_gart) {
		printk(KERN_INFO PFX "reserving %dMB of IOVA space at 0x%lx for agpgart\n",
		       ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1661 ioc->pdir_size /= 2;
1662 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1663 }
1664#ifdef FULL_VALID_PDIR
1665 /*
1666 ** Check to see if the spill page has been allocated, we don't need more than
1667 ** one across multiple SBAs.
1668 */
1669 if (!prefetch_spill_page) {
1670 char *spill_poison = "SBAIOMMU POISON";
1671 int poison_size = 16;
1672 void *poison_addr, *addr;
1673
1674 addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1675 if (!addr)
1676 panic(PFX "Couldn't allocate PDIR spill page\n");
1677
1678 poison_addr = addr;
		for ( ; poison_addr < addr + iovp_size; poison_addr += poison_size)
1680 memcpy(poison_addr, spill_poison, poison_size);
1681
1682 prefetch_spill_page = virt_to_phys(addr);
1683
1684 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __func__, prefetch_spill_page);
1685 }
1686 /*
1687 ** Set all the PDIR entries valid w/ the spill page as the target
1688 */
1689 for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1690 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
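	/*
	** Sketch of the resulting entry, assuming the zx1 pdir layout
	** used by sba_io_pdir_entry() (bit 63 = valid, low byte =
	** hint/coherency bits): a spill page at physical 0x4000000
	** would yield entries of 0x80000000040000FF.
	*/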
1691#endif
1692
1693 /* Clear I/O TLB of any possible entries */
1694 WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1695 READ_REG(ioc->ioc_hpa + IOC_PCOM);
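	/*
	** The write above purges the whole window in one shot: PCOM
	** takes (base | log2(range)), so e.g. a 1GB window at ibase
	** purges with ibase | 30. The READ_REG forces the posted write
	** to complete before translation is enabled below.
	*/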
1696
1697 /* Enable IOVA translation */
1698 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1699 READ_REG(ioc->ioc_hpa + IOC_IBASE);
1700}
1701
1702static void __init
1703ioc_resource_init(struct ioc *ioc)
1704{
1705 spin_lock_init(&ioc->res_lock);
1706#if DELAYED_RESOURCE_CNT > 0
1707 spin_lock_init(&ioc->saved_lock);
1708#endif
1709
1710 /* resource map size dictated by pdir_size */
1711 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1712 ioc->res_size >>= 3; /* convert bit count to byte count */
1713 DBG_INIT("%s() res_size 0x%x\n", __func__, ioc->res_size);
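	/*
	** Worked example (illustrative only): a 1GB IOVA window with 4k
	** IOMMU pages has 256Ki pdir entries; at one bitmap bit per
	** entry, res_size works out to 256Ki/8 = 32KB.
	*/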
1714
1715 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1716 get_order(ioc->res_size));
1717 if (!ioc->res_map)
1718 panic(PFX "Couldn't allocate resource map\n");
1719
1720 memset(ioc->res_map, 0, ioc->res_size);
1721 /* next available IOVP - circular search */
1722 ioc->res_hint = (unsigned long *) ioc->res_map;
1723
1724#ifdef ASSERT_PDIR_SANITY
1725 /* Mark first bit busy - ie no IOVA 0 */
1726 ioc->res_map[0] = 0x1;
1727 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1728#endif
1729#ifdef FULL_VALID_PDIR
1730 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1731 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1732 ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1733 | prefetch_spill_page);
1734#endif
1735
1736 DBG_INIT("%s() res_map %x %p\n", __func__,
1737 ioc->res_size, (void *) ioc->res_map);
1738}
1739
1740static void __init
1741ioc_sac_init(struct ioc *ioc)
1742{
1743 struct pci_dev *sac = NULL;
1744 struct pci_controller *controller = NULL;
1745
1746 /*
1747 * pci_alloc_coherent() must return a DMA address which is
1748 * SAC (single address cycle) addressable, so allocate a
1749 * pseudo-device to enforce that.
1750 */
1751 sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1752 if (!sac)
1753 panic(PFX "Couldn't allocate struct pci_dev");
1754
1755 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1756 if (!controller)
1757 panic(PFX "Couldn't allocate struct pci_controller");
1758
1759 controller->iommu = ioc;
1760 sac->sysdata = controller;
1761 sac->dma_mask = 0xFFFFFFFFUL;
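	/*
	** Capping dma_mask at 32 bits means any IOVA handed out against
	** this pseudo-device (e.g. when sba_alloc_coherent() maps
	** through ioc->sac_only_dev) lands below 4GB and is therefore
	** SAC addressable.
	*/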
1762#ifdef CONFIG_PCI
1763 sac->dev.bus = &pci_bus_type;
1764#endif
1765 ioc->sac_only_dev = sac;
1766}
1767
1768static void __init
1769ioc_zx1_init(struct ioc *ioc)
1770{
1771 unsigned long rope_config;
1772 unsigned int i;
1773
1774 if (ioc->rev < 0x20)
1775 panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1776
1777 /* 38 bit memory controller + extra bit for range displaced by MMIO */
1778 ioc->dma_mask = (0x1UL << 39) - 1;
1779
1780 /*
1781 ** Clear ROPE(N)_CONFIG AO bit.
1782 ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1783 ** Overrides bit 1 in DMA Hint Sets.
1784 ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1785 */
1786 for (i=0; i<(8*8); i+=8) {
1787 rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1788 rope_config &= ~IOC_ROPE_AO;
1789 WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1790 }
1791}
1792
1793typedef void (initfunc)(struct ioc *);
1794
1795struct ioc_iommu {
1796 u32 func_id;
1797 char *name;
1798 initfunc *init;
1799};
1800
1801static struct ioc_iommu ioc_iommu_info[] __initdata = {
1802 { ZX1_IOC_ID, "zx1", ioc_zx1_init },
1803 { ZX2_IOC_ID, "zx2", NULL },
1804 { SX1000_IOC_ID, "sx1000", NULL },
1805 { SX2000_IOC_ID, "sx2000", NULL },
1806};
1807
1808static void ioc_init(unsigned long hpa, struct ioc *ioc)
1809{
1810 struct ioc_iommu *info;
1811
1812 ioc->next = ioc_list;
1813 ioc_list = ioc;
1814
1815 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1816
1817 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1818 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1819 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
1820
1821 for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1822 if (ioc->func_id == info->func_id) {
1823 ioc->name = info->name;
1824 if (info->init)
1825 (info->init)(ioc);
1826 }
1827 }
1828
1829 iovp_size = (1 << iovp_shift);
1830 iovp_mask = ~(iovp_size - 1);
1831
1832 DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __func__,
1833 PAGE_SIZE >> 10, iovp_size >> 10);
1834
1835 if (!ioc->name) {
1836 ioc->name = kmalloc(24, GFP_KERNEL);
1837 if (ioc->name)
1838 sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1839 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1840 else
1841 ioc->name = "Unknown";
1842 }
1843
1844 ioc_iova_init(ioc);
1845 ioc_resource_init(ioc);
1846 ioc_sac_init(ioc);
1847
1848 if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
1849 ia64_max_iommu_merge_mask = ~iovp_mask;
1850
1851 printk(KERN_INFO PFX
1852 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1853 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1854 hpa, ioc->iov_size >> 20, ioc->ibase);
1855}
1856
1857
1858
1859/**************************************************************************
1860**
1861** SBA initialization code (HW and SW)
1862**
1863** o identify SBA chip itself
1864** o FIXME: initialize DMA hints for reasonable defaults
1865**
1866**************************************************************************/
1867
1868#ifdef CONFIG_PROC_FS
1869static void *
1870ioc_start(struct seq_file *s, loff_t *pos)
1871{
1872 struct ioc *ioc;
1873 loff_t n = *pos;
1874
1875 for (ioc = ioc_list; ioc; ioc = ioc->next)
1876 if (!n--)
1877 return ioc;
1878
1879 return NULL;
1880}
1881
1882static void *
1883ioc_next(struct seq_file *s, void *v, loff_t *pos)
1884{
1885 struct ioc *ioc = v;
1886
1887 ++*pos;
1888 return ioc->next;
1889}
1890
1891static void
1892ioc_stop(struct seq_file *s, void *v)
1893{
1894}
1895
1896static int
1897ioc_show(struct seq_file *s, void *v)
1898{
1899 struct ioc *ioc = v;
1900 unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1901 int i, used = 0;
1902
1903 seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1904 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1905#ifdef CONFIG_NUMA
1906 if (ioc->node != NUMA_NO_NODE)
1907 seq_printf(s, "NUMA node : %d\n", ioc->node);
1908#endif
1909 seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1910 seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
1911
1912 for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1913 used += hweight64(*res_ptr);
1914
1915 seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
1916 seq_printf(s, "PDIR used : %d entries\n", used);
1917
1918#ifdef PDIR_SEARCH_TIMING
1919 {
1920 unsigned long i = 0, avg = 0, min, max;
1921 min = max = ioc->avg_search[0];
1922 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1923 avg += ioc->avg_search[i];
1924 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1925 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1926 }
1927 avg /= SBA_SEARCH_SAMPLE;
1928 seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1929 min, avg, max);
1930 }
1931#endif
1932#ifndef ALLOW_IOV_BYPASS
1933 seq_printf(s, "IOVA bypass disabled\n");
1934#endif
1935 return 0;
1936}
1937
1938static const struct seq_operations ioc_seq_ops = {
1939 .start = ioc_start,
1940 .next = ioc_next,
1941 .stop = ioc_stop,
1942 .show = ioc_show
1943};
1944
1945static int
1946ioc_open(struct inode *inode, struct file *file)
1947{
1948 return seq_open(file, &ioc_seq_ops);
1949}
1950
1951static const struct file_operations ioc_fops = {
1952 .open = ioc_open,
1953 .read = seq_read,
1954 .llseek = seq_lseek,
1955 .release = seq_release
1956};
1957
1958static void __init
1959ioc_proc_init(void)
1960{
1961 struct proc_dir_entry *dir;
1962
1963 dir = proc_mkdir("bus/mckinley", NULL);
1964 if (!dir)
1965 return;
1966
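	/*
	** ioc_list is a LIFO, so the proc entry is named after the most
	** recently initialized IOC; ioc_show() walks the whole list
	** regardless of which name appears here.
	*/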
1967 proc_create(ioc_list->name, 0, dir, &ioc_fops);
1968}
1969#endif
1970
1971static void
1972sba_connect_bus(struct pci_bus *bus)
1973{
1974 acpi_handle handle, parent;
1975 acpi_status status;
1976 struct ioc *ioc;
1977
1978 if (!PCI_CONTROLLER(bus))
1979 panic(PFX "no sysdata on bus %d!\n", bus->number);
1980
1981 if (PCI_CONTROLLER(bus)->iommu)
1982 return;
1983
1984 handle = acpi_device_handle(PCI_CONTROLLER(bus)->companion);
1985 if (!handle)
1986 return;
1987
1988 /*
1989 * The IOC scope encloses PCI root bridges in the ACPI
1990 * namespace, so work our way out until we find an IOC we
1991 * claimed previously.
1992 */
1993 do {
1994 for (ioc = ioc_list; ioc; ioc = ioc->next)
1995 if (ioc->handle == handle) {
1996 PCI_CONTROLLER(bus)->iommu = ioc;
1997 return;
1998 }
1999
2000 status = acpi_get_parent(handle, &parent);
2001 handle = parent;
2002 } while (ACPI_SUCCESS(status));
2003
2004 printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
2005}
2006
2007static void __init
2008sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
2009{
2010#ifdef CONFIG_NUMA
2011 unsigned int node;
2012
2013 node = acpi_get_node(handle);
2014 if (node != NUMA_NO_NODE && !node_online(node))
2015 node = NUMA_NO_NODE;
2016
2017 ioc->node = node;
2018#endif
2019}
2020
2021static void acpi_sba_ioc_add(struct ioc *ioc)
2022{
2023 acpi_handle handle = ioc->handle;
2024 acpi_status status;
2025 u64 hpa, length;
2026 struct acpi_device_info *adi;
2027
2028 ioc_found = ioc->next;
2029 status = hp_acpi_csr_space(handle, &hpa, &length);
2030 if (ACPI_FAILURE(status))
2031 goto err;
2032
2033 status = acpi_get_object_info(handle, &adi);
2034 if (ACPI_FAILURE(status))
2035 goto err;
2036
2037 /*
2038 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
2039 * root bridges, and its CSR space includes the IOC function.
2040 */
2041 if (strncmp("HWP0001", adi->hardware_id.string, 7) == 0) {
2042 hpa += ZX1_IOC_OFFSET;
2043 /* zx1 based systems default to kernel page size iommu pages */
2044 if (!iovp_shift)
2045 iovp_shift = min(PAGE_SHIFT, 16);
2046 }
2047 kfree(adi);
2048
2049 /*
2050 * default anything not caught above or specified on cmdline to 4k
2051 * iommu page size
2052 */
2053 if (!iovp_shift)
2054 iovp_shift = 12;
2055
2056 ioc_init(hpa, ioc);
2057 /* setup NUMA node association */
2058 sba_map_ioc_to_node(ioc, handle);
2059 return;
2060
2061 err:
2062 kfree(ioc);
2063}
2064
2065static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2066 {"HWP0001", 0},
2067 {"HWP0004", 0},
2068 {"", 0},
2069};
2070
2071static int acpi_sba_ioc_attach(struct acpi_device *device,
2072 const struct acpi_device_id *not_used)
2073{
2074 struct ioc *ioc;
2075
2076 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
2077 if (!ioc)
2078 return -ENOMEM;
2079
2080 ioc->next = ioc_found;
2081 ioc_found = ioc;
2082 ioc->handle = device->handle;
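	/* a positive return tells the ACPI scan core we claimed the device */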
2083 return 1;
2084}
2085
2086
2087static struct acpi_scan_handler acpi_sba_ioc_handler = {
2088 .ids = hp_ioc_iommu_device_ids,
2089 .attach = acpi_sba_ioc_attach,
2090};
2091
2092static int __init acpi_sba_ioc_init_acpi(void)
2093{
2094 return acpi_scan_add_handler(&acpi_sba_ioc_handler);
2095}
2096/* This has to run before acpi_scan_init(). */
2097arch_initcall(acpi_sba_ioc_init_acpi);
2098
2099extern const struct dma_map_ops swiotlb_dma_ops;
2100
2101static int __init
2102sba_init(void)
2103{
2104 if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
2105 return 0;
2106
2107#if defined(CONFIG_IA64_GENERIC)
2108 /* If we are booting a kdump kernel, the sba_iommu will
2109 * cause devices that were not shutdown properly to MCA
2110 * as soon as they are turned back on. Our only option for
2111 * a successful kdump kernel boot is to use the swiotlb.
2112 */
2113 if (is_kdump_kernel()) {
2114 dma_ops = &swiotlb_dma_ops;
2115 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2116 panic("Unable to initialize software I/O TLB:"
2117 " Try machvec=dig boot option");
2118 machvec_init("dig");
2119 return 0;
2120 }
2121#endif
2122
2123 /*
2124 * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
2125 * routine, but that only happens if acpi_scan_init() has already run.
2126 */
2127 while (ioc_found)
2128 acpi_sba_ioc_add(ioc_found);
2129
2130 if (!ioc_list) {
2131#ifdef CONFIG_IA64_GENERIC
2132 /*
2133 * If we didn't find something sba_iommu can claim, we
2134 * need to setup the swiotlb and switch to the dig machvec.
2135 */
2136 dma_ops = &swiotlb_dma_ops;
2137 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2138 panic("Unable to find SBA IOMMU or initialize "
2139 "software I/O TLB: Try machvec=dig boot option");
2140 machvec_init("dig");
2141#else
2142 panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
2143#endif
2144 return 0;
2145 }
2146
2147#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
2148 /*
2149 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
2150 * buffer setup to support devices with smaller DMA masks than
2151 * sba_iommu can handle.
2152 */
2153 if (ia64_platform_is("hpzx1_swiotlb")) {
2154 extern void hwsw_init(void);
2155
2156 hwsw_init();
2157 }
2158#endif
2159
2160#ifdef CONFIG_PCI
2161 {
2162 struct pci_bus *b = NULL;
2163 while ((b = pci_find_next_bus(b)) != NULL)
2164 sba_connect_bus(b);
2165 }
2166#endif
2167
2168#ifdef CONFIG_PROC_FS
2169 ioc_proc_init();
2170#endif
2171 return 0;
2172}
2173
2174subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
2175
2176static int __init
2177nosbagart(char *str)
2178{
2179 reserve_sba_gart = 0;
2180 return 1;
2181}
2182
__setup("nosbagart", nosbagart);

static int sba_dma_supported(struct device *dev, u64 mask)
{
	/* make sure it's at least 32bit capable */
	return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
}

static int sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}
2195
2196static int __init
2197sba_page_override(char *str)
2198{
2199 unsigned long page_size;
2200
2201 page_size = memparse(str, &str);
2202 switch (page_size) {
2203 case 4096:
2204 case 8192:
2205 case 16384:
2206 case 65536:
2207 iovp_shift = ffs(page_size) - 1;
2208 break;
2209 default:
2210 printk("%s: unknown/unsupported iommu page size %ld\n",
2211 __func__, page_size);
2212 }
2213
2214 return 1;
2215}
2216
2217__setup("sbapagesize=",sba_page_override);
2218
2219const struct dma_map_ops sba_dma_ops = {
2220 .alloc = sba_alloc_coherent,
2221 .free = sba_free_coherent,
2222 .map_page = sba_map_page,
2223 .unmap_page = sba_unmap_page,
2224 .map_sg = sba_map_sg_attrs,
2225 .unmap_sg = sba_unmap_sg_attrs,
2226 .sync_single_for_cpu = machvec_dma_sync_single,
2227 .sync_sg_for_cpu = machvec_dma_sync_sg,
2228 .sync_single_for_device = machvec_dma_sync_single,
2229 .sync_sg_for_device = machvec_dma_sync_sg,
2230 .dma_supported = sba_dma_supported,
2231 .mapping_error = sba_dma_mapping_error,
2232};
2233
2234void sba_dma_init(void)
2235{
2236 dma_ops = &sba_dma_ops;
2237}