// SPDX-License-Identifier: GPL-2.0-or-later
/*
** System Bus Adapter (SBA) I/O MMU manager
**
** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
** (c) Copyright 2000-2004 Hewlett-Packard Company
**
** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
**
**
**
** This module initializes the IOC (I/O Controller) found on B1000/C3000/
** J5000/J7000/N-class/L-class machines and their successors.
**
** FIXME: add DMA hint support programming in both sba and lba modules.
*/

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>
#include <linux/iommu-helper.h>
/*
 * The semantics of 64-bit register access on 32-bit systems can't be
 * guaranteed by the C standard; we hope the _lo_hi() macros defining
 * readq and writeq here will behave as expected.
 */
#include <linux/io-64-nonatomic-lo-hi.h>

#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/dma.h>		/* for DMA_CHUNK_SIZE */

#include <asm/hardware.h>	/* for register_parisc_driver() stuff */

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include <asm/ropes.h>
#include <asm/page.h>		/* for PAGE0 */
#include <asm/pdc.h>		/* for PDC_MODEL_* */
#include <asm/pdcpat.h>		/* for is_pdc_pat() */
#include <asm/parisc-device.h>

#include "iommu.h"

#define MODULE_NAME "SBA"

/*
** The number of debug flags is a clue - this code is fragile.
** Don't even think about messing with it unless you have
** plenty of 710's to sacrifice to the computer gods. :^)
*/
#undef DEBUG_SBA_INIT
#undef DEBUG_SBA_RUN
#undef DEBUG_SBA_RUN_SG
#undef DEBUG_SBA_RESOURCE
#undef ASSERT_PDIR_SANITY
#undef DEBUG_LARGE_SG_ENTRIES
#undef DEBUG_DMB_TRAP

#ifdef DEBUG_SBA_INIT
#define DBG_INIT(x...)	printk(x)
#else
#define DBG_INIT(x...)
#endif

#ifdef DEBUG_SBA_RUN
#define DBG_RUN(x...)	printk(x)
#else
#define DBG_RUN(x...)
#endif

#ifdef DEBUG_SBA_RUN_SG
#define DBG_RUN_SG(x...)	printk(x)
#else
#define DBG_RUN_SG(x...)
#endif


#ifdef DEBUG_SBA_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif

#define DEFAULT_DMA_HINT_REG	0

struct sba_device *sba_list;
EXPORT_SYMBOL_GPL(sba_list);

static unsigned long ioc_needs_fdc = 0;

/* global count of IOMMUs in the system */
static unsigned int global_ioc_cnt = 0;

/* PA8700 (Piranha 2.2) bug workaround */
static unsigned long piranha_bad_128k = 0;

/* Looks nice and keeps the compiler happy */
#define SBA_DEV(d) ((struct sba_device *) (d))

#ifdef CONFIG_AGP_PARISC
#define SBA_AGP_SUPPORT
#endif /*CONFIG_AGP_PARISC*/

#ifdef SBA_AGP_SUPPORT
static int sba_reserve_agpgart = 1;
module_param(sba_reserve_agpgart, int, 0444);
MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
#endif

static struct proc_dir_entry *proc_runway_root __ro_after_init;
static struct proc_dir_entry *proc_mckinley_root __ro_after_init;
/************************************
** SBA register read and write support
**
** BE WARNED: register writes are posted.
**  (i.e., follow writes which must reach HW with a read)
**
** Superdome (in particular, REO) allows only 64-bit CSR accesses.
*/
#define READ_REG32(addr)	readl(addr)
#define READ_REG64(addr)	readq(addr)
#define WRITE_REG32(val, addr)	writel((val), (addr))
#define WRITE_REG64(val, addr)	writeq((val), (addr))

#ifdef CONFIG_64BIT
#define READ_REG(addr)		READ_REG64(addr)
#define WRITE_REG(value, addr)	WRITE_REG64(value, addr)
#else
#define READ_REG(addr)		READ_REG32(addr)
#define WRITE_REG(value, addr)	WRITE_REG32(value, addr)
#endif
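
/*
** Illustrative sketch of the posted-write rule above (not part of the
** driver; the real call sites are in sba_unmap_page() and the IOC init
** paths below): a write that must reach the IOC before we proceed is
** followed by a read from the same register space.
*/
#if 0
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);	/* posted */
	READ_REG(ioc->ioc_hpa + IOC_PCOM);	/* read pushes the write to HW */
#endif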

#ifdef DEBUG_SBA_INIT

/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */

/**
 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
 * @hpa: base address of the sba
 *
 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
 * IO Adapter (aka Bus Converter).
 */
static void
sba_dump_ranges(void __iomem *hpa)
{
	DBG_INIT("SBA at 0x%p\n", hpa);
	DBG_INIT("IOS_DIST_BASE   : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
	DBG_INIT("IOS_DIST_MASK   : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
	DBG_INIT("IOS_DIST_ROUTE  : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
	DBG_INIT("\n");
	DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
	DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
	DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
}

/**
 * sba_dump_tlb - debugging only - print IOMMU operating parameters
 * @hpa: base address of the IOMMU
 *
 * Print the size/location of the IO MMU PDIR.
 */
static void sba_dump_tlb(void __iomem *hpa)
{
	DBG_INIT("IO TLB at 0x%p\n", hpa);
	DBG_INIT("IOC_IBASE    : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
	DBG_INIT("IOC_IMASK    : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
	DBG_INIT("IOC_TCNFG    : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
	DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
	DBG_INIT("\n");
}
#else
#define sba_dump_ranges(x)
#define sba_dump_tlb(x)
#endif	/* DEBUG_SBA_INIT */


#ifdef ASSERT_PDIR_SANITY

/**
 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 * @pide: pdir index.
 *
 * Print one entry of the IO MMU PDIR in human readable form.
 */
static void
sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
{
	/* start printing from lowest pde in rval */
	__le64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
	unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
	uint rcnt;

	printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
		 msg,
		 rptr, pide & (BITS_PER_LONG - 1), *rptr);

	rcnt = 0;
	while (rcnt < BITS_PER_LONG) {
		printk(KERN_DEBUG "%s %2d %p %016Lx\n",
			(rcnt == (pide & (BITS_PER_LONG - 1)))
				? "    -->" : "       ",
			rcnt, ptr, *ptr );
		rcnt++;
		ptr++;
	}
	printk(KERN_DEBUG "%s", msg);
}


/**
 * sba_check_pdir - debugging only - consistency checker
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @msg: text to print on the output line.
 *
 * Verify the resource map and pdir state is consistent
 */
static int
sba_check_pdir(struct ioc *ioc, char *msg)
{
	u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
	u32 *rptr = (u32 *) ioc->res_map;	/* resource map ptr */
	u64 *pptr = ioc->pdir_base;	/* pdir ptr */
	uint pide = 0;

	while (rptr < rptr_end) {
		u32 rval = *rptr;
		int rcnt = 32;	/* number of bits we might check */

		while (rcnt) {
			/* Get last byte and highest bit from that */
			u32 pde = ((u32) (((char *)pptr)[7])) << 24;
			if ((rval ^ pde) & 0x80000000)
			{
				/*
				** BUMMER!  -- res_map != pdir --
				** Dump rval and matching pdir entries
				*/
				sba_dump_pdir_entry(ioc, msg, pide);
				return(1);
			}
			rcnt--;
			rval <<= 1;	/* try the next bit */
			pptr++;
			pide++;
		}
		rptr++;	/* look at next word of res_map */
	}
	/* It'd be nice if we always got here :^) */
	return 0;
}


/**
 * sba_dump_sg - debugging only - print Scatter-Gather list
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @startsg: head of the SG list
 * @nents: number of entries in SG list
 *
 * print the SG list so we can verify it's correct by hand.
 */
static void
sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
{
	while (nents-- > 0) {
		printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
				nents,
				(unsigned long) sg_dma_address(startsg),
				sg_dma_len(startsg),
				sg_virt(startsg), startsg->length);
		startsg++;
	}
}

#endif /* ASSERT_PDIR_SANITY */




/**************************************************************
*
*   I/O Pdir Resource Management
*
*   Bits set in the resource map are in use.
*   Each bit can represent a number of pages.
*   LSbs represent lower addresses (IOVA's).
*
***************************************************************/
#define PAGES_PER_RANGE 1	/* could increase this to 4 or 8 if needed */

/* Convert from IOVP to IOVA and vice versa. */

#ifdef ZX1_SUPPORT
/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
#else
/* only support Astro and ancestors. Saves a few cycles in key places */
#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
#define SBA_IOVP(ioc,iova) (iova)
#endif

#define PDIR_INDEX(iovp)   ((iovp)>>IOVP_SHIFT)

#define RESMAP_MASK(n)    (~0UL << (BITS_PER_LONG - (n)))
#define RESMAP_IDX_MASK   (sizeof(unsigned long) - 1)

static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
				 unsigned int bitshiftcnt)
{
	return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
		+ bitshiftcnt;
}
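
/*
** Worked example (illustrative only): on a 64-bit kernel, if res_ptr
** points at the third unsigned long of res_map, the byte delta is 16,
** so ptr_to_pide() returns (16 << 3) + bitshiftcnt -- i.e. resource-map
** bit N stands for pdir entry N.
*/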

/**
 * sba_search_bitmap - find free space in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device to query the bitmap for
 * @bits_wanted: number of entries we need.
 *
 * Find consecutive free bits in resource bitmap.
 * Each bit represents one entry in the IO Pdir.
 * Cool perf optimization: search for log2(size) bits at a time.
 */
static unsigned long
sba_search_bitmap(struct ioc *ioc, struct device *dev,
		  unsigned long bits_wanted)
{
	unsigned long *res_ptr = ioc->res_hint;
	unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
	unsigned long pide = ~0UL, tpide;
	unsigned long boundary_size;
	unsigned long shift;
	int ret;

	boundary_size = dma_get_seg_boundary_nr_pages(dev, IOVP_SHIFT);

#if defined(ZX1_SUPPORT)
	BUG_ON(ioc->ibase & ~IOVP_MASK);
	shift = ioc->ibase >> IOVP_SHIFT;
#else
	shift = 0;
#endif

	if (bits_wanted > (BITS_PER_LONG/2)) {
		/* Search word at a time - no mask needed */
		for(; res_ptr < res_end; ++res_ptr) {
			tpide = ptr_to_pide(ioc, res_ptr, 0);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((*res_ptr == 0) && !ret) {
				*res_ptr = RESMAP_MASK(bits_wanted);
				pide = tpide;
				break;
			}
		}
		/* point to the next word on next pass */
		res_ptr++;
		ioc->res_bitshift = 0;
	} else {
		/*
		** Search the resource bit map on well-aligned values.
		** "o" is the alignment.
		** We need the alignment to invalidate I/O TLB using
		** SBA HW features in the unmap path.
		*/
		unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
		uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
		unsigned long mask;

		if (bitshiftcnt >= BITS_PER_LONG) {
			bitshiftcnt = 0;
			res_ptr++;
		}
		mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;

		DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
		while(res_ptr < res_end)
		{
			DBG_RES("    %p %lx %lx\n", res_ptr, mask, *res_ptr);
			WARN_ON(mask == 0);
			tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
			ret = iommu_is_span_boundary(tpide, bits_wanted,
						     shift,
						     boundary_size);
			if ((((*res_ptr) & mask) == 0) && !ret) {
				*res_ptr |= mask;	/* mark resources busy! */
				pide = tpide;
				break;
			}
			mask >>= o;
			bitshiftcnt += o;
			if (mask == 0) {
				mask = RESMAP_MASK(bits_wanted);
				bitshiftcnt = 0;
				res_ptr++;
			}
		}
		/* look in the same word on the next pass */
		ioc->res_bitshift = bitshiftcnt + bits_wanted;
	}

	/* wrapped ? */
	if (res_end <= res_ptr) {
		ioc->res_hint = (unsigned long *) ioc->res_map;
		ioc->res_bitshift = 0;
	} else {
		ioc->res_hint = res_ptr;
	}
	return (pide);
}
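
/*
** Worked example of the aligned search above (illustrative only):
** bits_wanted = 3 pages gives o = 1 << get_order(3 << PAGE_SHIFT) = 4,
** so candidate positions advance four bits at a time and the returned
** pide is 4-page aligned -- exactly what sba_mark_invalid() needs to
** purge the whole range with one size-encoded PCOM write.
** RESMAP_MASK(3) on a 64-bit kernel is 0xe000000000000000, i.e. the
** three most significant bits, shifted right as the search walks each
** word.
*/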


/**
 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @dev: device for which pages should be alloced
 * @size: number of bytes to create a mapping for
 *
 * Given a size, find consecutive unmarked bits in the resource bit map
 * and then mark them.
 */
static int
sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
{
	unsigned int pages_needed = size >> IOVP_SHIFT;
#ifdef SBA_COLLECT_STATS
	unsigned long cr_start = mfctl(16);
#endif
	unsigned long pide;

	pide = sba_search_bitmap(ioc, dev, pages_needed);
	if (pide >= (ioc->res_size << 3)) {
		pide = sba_search_bitmap(ioc, dev, pages_needed);
		if (pide >= (ioc->res_size << 3))
			panic("%s: I/O MMU @ %p is out of mapping resources\n",
			      __FILE__, ioc->ioc_hpa);
	}

#ifdef ASSERT_PDIR_SANITY
	/* verify the first enable bit is clear */
	if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
		sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
	}
#endif

	DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
		__func__, size, pages_needed, pide,
		(uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
		ioc->res_bitshift );

#ifdef SBA_COLLECT_STATS
	{
		unsigned long cr_end = mfctl(16);
		unsigned long tmp = cr_end - cr_start;
		/* check for roll over */
		cr_start = (cr_end < cr_start) ?  -(tmp) : (tmp);
	}
	ioc->avg_search[ioc->avg_idx++] = cr_start;
	ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;

	ioc->used_pages += pages_needed;
#endif

	return (pide);
}


/**
 * sba_free_range - unmark bits in IO PDIR resource bitmap
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova: IO virtual address which was previously allocated.
 * @size: number of bytes to create a mapping for
 *
 * clear bits in the ioc's resource map
 */
static void
sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
{
	unsigned long iovp = SBA_IOVP(ioc, iova);
	unsigned int pide = PDIR_INDEX(iovp);
	unsigned int ridx = pide >> 3;	/* convert bit to byte address */
	unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);

	int bits_not_wanted = size >> IOVP_SHIFT;

	/* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
	unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));

	DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
		__func__, (uint) iova, size,
		bits_not_wanted, m, pide, res_ptr, *res_ptr);

#ifdef SBA_COLLECT_STATS
	ioc->used_pages -= bits_not_wanted;
#endif

	*res_ptr &= ~m;
}


/**************************************************************
*
* "Dynamic DMA Mapping" support (aka "Coherent I/O")
*
***************************************************************/

#ifdef SBA_HINT_SUPPORT
#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
#endif

typedef unsigned long space_t;
#define KERNEL_SPACE 0

/**
 * sba_io_pdir_entry - fill in one IO PDIR entry
 * @pdir_ptr:  pointer to IO PDIR entry
 * @sid: process Space ID - currently only support KERNEL_SPACE
 * @vba: Virtual CPU address of buffer to map
 * @hint: DMA hint set to use for this mapping
 *
 * SBA Mapping Routine
 *
 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
 * pdir_ptr (arg0).
 * Using the bass-ackwards HP bit numbering, each IO Pdir entry
 * for Astro/Ike looks like:
 *
 *
 *  0                    19                                 51   55       63
 * +-+---------------------+----------------------------------+----+--------+
 * |V|        U            |            PPN[43:12]            | U  |   VI   |
 * +-+---------------------+----------------------------------+----+--------+
 *
 * Pluto is basically identical, but supports fewer physical address bits:
 *
 *  0                       23                              51   55       63
 * +-+------------------------+-------------------------------+----+--------+
 * |V|        U               |         PPN[39:12]            | U  |   VI   |
 * +-+------------------------+-------------------------------+----+--------+
 *
 *  V  == Valid Bit  (Most Significant Bit is bit 0)
 *  U  == Unused
 * PPN == Physical Page Number
 * VI  == Virtual Index (aka Coherent Index)
 *
 * LPA instruction output is put into PPN field.
 * LCI (Load Coherence Index) instruction provides the "VI" bits.
 *
 * We pre-swap the bytes since PCX-W is Big Endian and the
 * IOMMU uses little endian for the pdir.
 */

static void
sba_io_pdir_entry(__le64 *pdir_ptr, space_t sid, unsigned long vba,
		  unsigned long hint)
{
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	pa = lpa(vba);
	pa &= IOVP_MASK;

	asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));
	pa |= (ci >> PAGE_SHIFT) & 0xff;  /* move CI (8 bits) into lowest byte */

	pa |= SBA_PDIR_VALID_BIT;	/* set "valid" bit */
	*pdir_ptr = cpu_to_le64(pa);	/* swap and store into I/O Pdir */

	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	asm_io_fdc(pdir_ptr);
}
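
/*
** Worked example of the entry layout above (illustrative values only):
** with 4KB pages, a page at physical 0x12345000 whose LCI coherence
** index is 0xab000 is assembled as
**
**	pa  = 0x12345000		(lpa() output, masked by IOVP_MASK)
**	pa |= (0xab000 >> 12) & 0xff	(VI byte: 0xab)
**	pa |= SBA_PDIR_VALID_BIT	(MSB: the "V" bit)
**
** giving 0x80000000123450ab before the cpu_to_le64() byte swap.
*/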


/**
 * sba_mark_invalid - invalidate one or more IO PDIR entries
 * @ioc: IO MMU structure which owns the pdir we are interested in.
 * @iova:  IO Virtual Address mapped earlier
 * @byte_cnt:  number of bytes this mapping covers.
 *
 * Mark the IO PDIR entry(ies) as Invalid and invalidate the
 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
 * is used to purge stale entries in the IO TLB when unmapping entries.
 *
 * The PCOM register supports purging of multiple pages, with a minimum
 * of 1 page and a maximum of 2GB. Hardware requires the address be
 * aligned to the size of the range being purged. The size of the range
 * must be a power of 2. The "Cool perf optimization" in the
 * allocation routine helps keep that true.
 */
static void
sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
{
	u32 iovp = (u32) SBA_IOVP(ioc, iova);
	__le64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];

#ifdef ASSERT_PDIR_SANITY
	/* Assert first pdir entry is set.
	**
	** Even though this is a big-endian machine, the entries
	** in the iopdir are little endian. That's why we look at
	** the byte at +7 instead of at +0.
	*/
	if (0x80 != (((u8 *) pdir_ptr)[7])) {
		sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
	}
#endif

	if (byte_cnt > IOVP_SIZE)
	{
#if 0
		unsigned long entries_per_cacheline = ioc_needs_fdc ?
				L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
					- (unsigned long) pdir_ptr;
				: 262144;
#endif

		/* set "size" field for PCOM */
		iovp |= get_order(byte_cnt) + PAGE_SHIFT;

		do {
			/* clear I/O Pdir entry "valid" bit first */
			((u8 *) pdir_ptr)[7] = 0;
			asm_io_fdc(pdir_ptr);
			if (ioc_needs_fdc) {
#if 0
				entries_per_cacheline = L1_CACHE_SHIFT - 3;
#endif
			}
			pdir_ptr++;
			byte_cnt -= IOVP_SIZE;
		} while (byte_cnt > IOVP_SIZE);
	} else
		iovp |= IOVP_SHIFT;	/* set "size" field for PCOM */

	/*
	** clear I/O PDIR entry "valid" bit.
	** We have to R/M/W the cacheline regardless how much of the
	** pdir entry that we clobber.
	** The rest of the entry would be useful for debugging if we
	** could dump core on HPMC.
	*/
	((u8 *) pdir_ptr)[7] = 0;
	asm_io_fdc(pdir_ptr);

	WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
}
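
/*
** Worked example of the PCOM size encoding (illustrative only):
** purging a 16KB mapping at iovp 0x8000 with 4KB pages sets
** iovp |= get_order(16384) + PAGE_SHIFT, i.e. 2 + 12 = 14, and
** 0x8000 | 14 == 0x800e is written to IOC_PCOM -- address and log2 of
** the purge size packed into a single register write.
*/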

/**
 * sba_dma_supported - PCI driver can query DMA support
 * @dev: instance of PCI owned by the driver that's asking
 * @mask: number of address bits this PCI device can handle
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static int sba_dma_supported( struct device *dev, u64 mask)
{
	struct ioc *ioc;

	if (dev == NULL) {
		printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
		BUG();
		return(0);
	}

	ioc = GET_IOC(dev);
	if (!ioc)
		return 0;

	/*
	 * check if mask is >= the current max IO Virt Address.
	 * The max IO Virt address will *always* be < 30 bits.
	 */
	return((int)(mask >= (ioc->ibase - 1 +
			(ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
}


/**
 * sba_map_single - map one buffer and return IOVA for DMA
 * @dev: instance of PCI owned by the driver that's asking.
 * @addr:  driver buffer to map.
 * @size:  number of bytes to map in driver buffer.
 * @direction:  R/W or both.
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static dma_addr_t
sba_map_single(struct device *dev, void *addr, size_t size,
	       enum dma_data_direction direction)
{
	struct ioc *ioc;
	unsigned long flags;
	dma_addr_t iovp;
	dma_addr_t offset;
	__le64 *pdir_start;
	int pide;

	ioc = GET_IOC(dev);
	if (!ioc)
		return DMA_MAPPING_ERROR;

	/* save offset bits */
	offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;

	/* round up to nearest IOVP_SIZE */
	size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
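	/*
	** Worked example (illustrative values, 4KB I/O pages): for an
	** addr ending in 0x234 and size = 0x100, offset = 0x234 and
	** size becomes (0x100 + 0x234 + 0xfff) & ~0xfff = 0x1000, so
	** exactly one pdir entry is programmed below.
	*/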

	spin_lock_irqsave(&ioc->res_lock, flags);
#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check before sba_map_single()");
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msingle_calls++;
	ioc->msingle_pages += size >> IOVP_SHIFT;
#endif
	pide = sba_alloc_range(ioc, dev, size);
	iovp = (dma_addr_t) pide << IOVP_SHIFT;

	DBG_RUN("%s() 0x%p -> 0x%lx\n",
		__func__, addr, (long) iovp | offset);

	pdir_start = &(ioc->pdir_base[pide]);

	while (size > 0) {
		sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);

		DBG_RUN("	pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
			pdir_start,
			(u8) (((u8 *) pdir_start)[7]),
			(u8) (((u8 *) pdir_start)[6]),
			(u8) (((u8 *) pdir_start)[5]),
			(u8) (((u8 *) pdir_start)[4]),
			(u8) (((u8 *) pdir_start)[3]),
			(u8) (((u8 *) pdir_start)[2]),
			(u8) (((u8 *) pdir_start)[1]),
			(u8) (((u8 *) pdir_start)[0])
			);

		addr += IOVP_SIZE;
		size -= IOVP_SIZE;
		pdir_start++;
	}

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	sba_check_pdir(ioc,"Check after sba_map_single()");
#endif
	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* form complete address */
	return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
}


static dma_addr_t
sba_map_page(struct device *dev, struct page *page, unsigned long offset,
	     size_t size, enum dma_data_direction direction,
	     unsigned long attrs)
{
	return sba_map_single(dev, page_address(page) + offset, size,
			      direction);
}


/**
 * sba_unmap_page - unmap one IOVA and free resources
 * @dev: instance of PCI owned by the driver that's asking.
 * @iova:  IOVA of driver buffer previously mapped.
 * @size:  number of bytes mapped in driver buffer.
 * @direction:  R/W or both.
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static void
sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
	       enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#if DELAYED_RESOURCE_CNT > 0
	struct sba_dma_pair *d;
#endif
	unsigned long flags;
	dma_addr_t offset;

	DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}
	offset = iova & ~IOVP_MASK;
	iova ^= offset;	/* clear offset bits */
	size += offset;
	size = ALIGN(size, IOVP_SIZE);

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef SBA_COLLECT_STATS
	ioc->usingle_calls++;
	ioc->usingle_pages += size >> IOVP_SHIFT;
#endif

	sba_mark_invalid(ioc, iova, size);

#if DELAYED_RESOURCE_CNT > 0
	/* Delaying when we re-use an IO Pdir entry reduces the number
	 * of MMIO reads needed to flush writes to the PCOM register.
	 */
	d = &(ioc->saved[ioc->saved_cnt]);
	d->iova = iova;
	d->size = size;
	if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
		int cnt = ioc->saved_cnt;
		while (cnt--) {
			sba_free_range(ioc, d->iova, d->size);
			d--;
		}
		ioc->saved_cnt = 0;

		READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
	}
#else /* DELAYED_RESOURCE_CNT == 0 */
	sba_free_range(ioc, iova, size);

	/* If fdc's were issued, force fdc's to be visible now */
	asm_io_sync();

	READ_REG(ioc->ioc_hpa+IOC_PCOM);	/* flush purges */
#endif /* DELAYED_RESOURCE_CNT == 0 */

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	/* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
	** For Astro based systems this isn't a big deal WRT performance.
	** As long as 2.4 kernels copyin/copyout data from/to userspace,
	** we don't need the syncdma. The issue here is I/O MMU cachelines
	** are *not* coherent in all cases.  May be hwrev dependent.
	** Need to investigate more.
	asm volatile("syncdma");
	*/
}


/**
 * sba_alloc - allocate/map shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @dma_handle:  IOVA of new buffer.
 * @gfp: allocation flags
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	if (!hwdev) {
		/* only support PCI */
		*dma_handle = 0;
		return NULL;
	}

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = sba_map_single(hwdev, ret, size, 0);
	}

	return ret;
}


/**
 * sba_free - free/unmap shared mem for DMA
 * @hwdev: instance of PCI owned by the driver that's asking.
 * @size:  number of bytes mapped in driver buffer.
 * @vaddr:  virtual address IOVA of "consistent" buffer.
 * @dma_handle:  IO virtual address of "consistent" buffer.
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static void
sba_free(struct device *hwdev, size_t size, void *vaddr,
		    dma_addr_t dma_handle, unsigned long attrs)
{
	sba_unmap_page(hwdev, dma_handle, size, 0, 0);
	free_pages((unsigned long) vaddr, get_order(size));
}


/*
** Since 0 is a valid pdir_base index value, we can't use that
** to determine if a value is valid or not. Use a flag to indicate
** the SG list entry contains a valid pdir index.
*/
#define PIDE_FLAG 0x80000000UL

#ifdef SBA_COLLECT_STATS
#define IOMMU_MAP_STATS
#endif
#include "iommu-helpers.h"

#ifdef DEBUG_LARGE_SG_ENTRIES
int dump_run_sg = 0;
#endif


/**
 * sba_map_sg - map Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static int
sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
	   enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
	int filled = 0;
	unsigned long flags;

	DBG_RUN_SG("%s() START %d entries\n", __func__, nents);

	ioc = GET_IOC(dev);
	if (!ioc)
		return -EINVAL;

	/* Fast path single entry scatterlists. */
	if (nents == 1) {
		sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
						sglist->length, direction);
		sg_dma_len(sglist)     = sglist->length;
		return 1;
	}

	spin_lock_irqsave(&ioc->res_lock, flags);

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check before sba_map_sg()");
	}
#endif

#ifdef SBA_COLLECT_STATS
	ioc->msg_calls++;
#endif

	/*
	** First coalesce the chunks and allocate I/O pdir space
	**
	** If this is one DMA stream, we can properly map using the
	** correct virtual address associated with each DMA page.
	** w/o this association, we wouldn't have coherent DMA!
	** Access to the virtual address is what forces a two pass algorithm.
	*/
	iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);

	/*
	** Program the I/O Pdir
	**
	** map the virtual addresses to the I/O Pdir
	** o dma_address will contain the pdir index
	** o dma_len will contain the number of bytes to map
	** o address contains the virtual address.
	*/
	filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);

	/* force FDC ops in io_pdir_entry() to be visible to IOMMU */
	asm_io_sync();

#ifdef ASSERT_PDIR_SANITY
	if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
	{
		sba_dump_sg(ioc, sglist, nents);
		panic("Check after sba_map_sg()\n");
	}
#endif

	spin_unlock_irqrestore(&ioc->res_lock, flags);

	DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);

	return filled;
}


/**
 * sba_unmap_sg - unmap Scatter/Gather list
 * @dev: instance of PCI owned by the driver that's asking.
 * @sglist:  array of buffer/length pairs
 * @nents:  number of entries in list
 * @direction:  R/W or both.
 * @attrs: attributes
 *
 * See Documentation/core-api/dma-api-howto.rst
 */
static void
sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
	     enum dma_data_direction direction, unsigned long attrs)
{
	struct ioc *ioc;
#ifdef ASSERT_PDIR_SANITY
	unsigned long flags;
#endif

	DBG_RUN_SG("%s() START %d entries, %p,%x\n",
		__func__, nents, sg_virt(sglist), sglist->length);

	ioc = GET_IOC(dev);
	if (!ioc) {
		WARN_ON(!ioc);
		return;
	}

#ifdef SBA_COLLECT_STATS
	ioc->usg_calls++;
#endif

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check before sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

	while (nents && sg_dma_len(sglist)) {

		sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
				direction, 0);
#ifdef SBA_COLLECT_STATS
		ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
		ioc->usingle_calls--;	/* kluge since call is unmap_sg() */
#endif
		++sglist;
		nents--;
	}

	DBG_RUN_SG("%s() DONE (nents %d)\n", __func__,  nents);

#ifdef ASSERT_PDIR_SANITY
	spin_lock_irqsave(&ioc->res_lock, flags);
	sba_check_pdir(ioc,"Check after sba_unmap_sg()");
	spin_unlock_irqrestore(&ioc->res_lock, flags);
#endif

}

static const struct dma_map_ops sba_ops = {
	.dma_supported =	sba_dma_supported,
	.alloc =		sba_alloc,
	.free =			sba_free,
	.map_page =		sba_map_page,
	.unmap_page =		sba_unmap_page,
	.map_sg =		sba_map_sg,
	.unmap_sg =		sba_unmap_sg,
	.get_sgtable =		dma_common_get_sgtable,
	.alloc_pages_op =	dma_common_alloc_pages,
	.free_pages =		dma_common_free_pages,
};
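
/*
** A hedged sketch of how these ops get exercised (illustrative only --
** "pdev", "buf" and "len" are hypothetical): drivers never call
** sba_map_page() and friends directly; they go through the generic DMA
** API, which dispatches to the dma_map_ops installed for the device.
*/
#if 0
	dma_addr_t handle = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, handle))
		return -ENOMEM;
	/* ... device performs DMA against "handle" ... */
	dma_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
#endif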


/**************************************************************************
**
**   SBA PAT PDC support
**
**   o call pdc_pat_cell_module()
**   o store ranges in PCI "resource" structures
**
**************************************************************************/

static void
sba_get_pat_resources(struct sba_device *sba_dev)
{
#if 0
/*
** TODO/REVISIT/FIXME: support for directed ranges requires calls to
**      PAT PDC to program the SBA/LBA directed range registers...this
**      burden may fall on the LBA code since it directly supports the
**      PCI subsystem. It's not clear yet. - ggg
*/
PAT_MOD(mod)->mod_info.mod_pages   = PAT_GET_MOD_PAGES(temp);
	FIXME : ???
PAT_MOD(mod)->mod_info.dvi         = PAT_GET_DVI(temp);
	Tells where the dvi bits are located in the address.
PAT_MOD(mod)->mod_info.ioc         = PAT_GET_IOC(temp);
	FIXME : ???
#endif
}


/**************************************************************
*
* Initialization and claim
*
***************************************************************/
#define PIRANHA_ADDR_MASK	0x00160000UL /* bit 17,18,20 */
#define PIRANHA_ADDR_VAL	0x00060000UL /* bit 17,18 on */
static void *
sba_alloc_pdir(unsigned int pdir_size)
{
	unsigned long pdir_base;
	unsigned long pdir_order = get_order(pdir_size);

	pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
	if (NULL == (void *) pdir_base)	{
		panic("%s() could not allocate I/O Page Table\n",
			__func__);
	}

	/* If this is not PA8700 (PCX-W2)
	**	OR newer than ver 2.2
	**	OR in a system that doesn't need VINDEX bits from SBA,
	**
	** then we aren't exposed to the HW bug.
	*/
	if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
			|| (boot_cpu_data.pdc.versions > 0x202)
			|| (boot_cpu_data.pdc.capabilities & 0x08L) )
		return (void *) pdir_base;

	/*
	 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
	 *
	 * An interaction between PA8700 CPU (Ver 2.2 or older) and
	 * Ike/Astro can cause silent data corruption. This is only
	 * a problem if the I/O PDIR is located in memory such that
	 * (little-endian) bits 17 and 18 are on and bit 20 is off.
	 *
	 * Since the max IO Pdir size is 2MB, by cleverly allocating the
	 * right physical address, we can either avoid (IOPDIR <= 1MB)
	 * or minimize (2MB IO Pdir) the problem if we restrict the
	 * IO Pdir to a maximum size of 2MB-128K (1920K).
	 *
	 * Because we always allocate 2^N sized IO pdirs, either of the
	 * "bad" regions will be the last 128K if at all. That's easy
	 * to test for.
	 *
	 */
	if (pdir_order <= (19-12)) {
		if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
			/* allocate a new one on 512k alignment */
			unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
			/* release original */
			free_pages(pdir_base, pdir_order);

			pdir_base = new_pdir;

			/* release excess */
			while (pdir_order < (19-12)) {
				new_pdir += pdir_size;
				free_pages(new_pdir, pdir_order);
				pdir_order +=1;
				pdir_size <<= 1;
			}
		}
	} else {
		/*
		** 1MB or 2MB Pdir
		** Needs to be aligned on an "odd" 1MB boundary.
		*/
		unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */

		/* release original */
		free_pages( pdir_base, pdir_order);

		/* release first 1MB */
		free_pages(new_pdir, 20-12);

		pdir_base = new_pdir + 1024*1024;

		if (pdir_order > (20-12)) {
			/*
			** 2MB Pdir.
			**
			** Flag tells init_bitmap() to mark bad 128k as used
			** and to reduce the size by 128k.
			*/
			piranha_bad_128k = 1;

			new_pdir += 3*1024*1024;
			/* release last 1MB */
			free_pages(new_pdir, 20-12);

			/* release unusable 128KB */
			free_pages(new_pdir - 128*1024 , 17-12);

			pdir_size -= 128*1024;
		}
	}

	memset((void *) pdir_base, 0, pdir_size);
	return (void *) pdir_base;
}
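
/*
** Worked example of the piranha address check (illustrative only):
** PIRANHA_ADDR_MASK selects physical address bits 17, 18 and 20.  A
** pdir whose last byte sits at physical 0x0007ffff has bits 17 and 18
** set and bit 20 clear (0x0007ffff & 0x00160000 == 0x00060000 ==
** PIRANHA_ADDR_VAL), so it would trip the bug and gets reallocated;
** one ending at 0x0017ffff also has bit 20 set and is safe.
*/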

struct ibase_data_struct {
	struct ioc *ioc;
	int ioc_num;
};

static int setup_ibase_imask_callback(struct device *dev, void *data)
{
	struct parisc_device *lba = to_parisc_device(dev);
	struct ibase_data_struct *ibd = data;
	int rope_num = (lba->hpa.start >> 13) & 0xf;
	if (rope_num >> 3 == ibd->ioc_num)
		lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
	return 0;
}

/* setup Mercury or Elroy IBASE/IMASK registers. */
static void
setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	struct ibase_data_struct ibase_data = {
		.ioc		= ioc,
		.ioc_num	= ioc_num,
	};

	device_for_each_child(&sba->dev, &ibase_data,
			      setup_ibase_imask_callback);
}

#ifdef SBA_AGP_SUPPORT
static int
sba_ioc_find_quicksilver(struct device *dev, void *data)
{
	int *agp_found = data;
	struct parisc_device *lba = to_parisc_device(dev);

	if (IS_QUICKSILVER(lba))
		*agp_found = 1;
	return 0;
}
#endif

static void
sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_mask;
	u32 iova_space_size;
	int iov_order, tcnfg;
#ifdef SBA_AGP_SUPPORT
	int agp_found = 0;
#endif
	/*
	** Firmware programs the base and size of a "safe IOVA space"
	** (one that doesn't overlap memory or LMMIO space) in the
	** IBASE and IMASK registers.
	*/
	ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1fffffULL;
	iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;

	if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
		printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
		iova_space_size /= 2;
	}

	/*
	** iov_order is always based on a 1GB IOVA space since we want to
	** turn on the other half for AGP GART.
	*/
	iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
	ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
		__func__, ioc->ioc_hpa, iova_space_size >> 20,
		iov_order + PAGE_SHIFT);

	ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
						   get_order(ioc->pdir_size));
	if (!ioc->pdir_base)
		panic("Couldn't allocate I/O Page Table\n");

	memset(ioc->pdir_base, 0, ioc->pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, ioc->pdir_size);

#ifdef SBA_HINT_SUPPORT
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
		ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
	WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);
	ioc->imask = iova_space_mask;
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif
	sba_dump_tlb(ioc->ioc_hpa);

	setup_ibase_imask(sba, ioc, ioc_num);

	WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);

#ifdef CONFIG_64BIT
	/*
	** Setting the upper bits makes checking for bypass addresses
	** a little faster later on.
	*/
	ioc->imask |= 0xFFFFFFFF00000000UL;
#endif

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
	case 12: tcnfg = 0; break;	/*  4K */
	case 13: tcnfg = 1; break;	/*  8K */
	case 14: tcnfg = 2; break;	/* 16K */
	case 16: tcnfg = 3; break;	/* 64K */
	default:
		panic(__FILE__ ": Unsupported system page size %d",
			1 << PAGE_SHIFT);
		break;
	}
	WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);

	/*
	** Program the IOC's ibase and enable IOVA translation
	** Bit zero == enable bit.
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);

#ifdef SBA_AGP_SUPPORT

	/*
	** If an AGP device is present, only use half of the IOV space
	** for PCI DMA.  Unfortunately we can't know ahead of time
	** whether GART support will actually be used, for now we
	** can just key on any AGP device found in the system.
	** We program the next pdir index after we stop w/ a key for
	** the GART code to handshake on.
	*/
	device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);

	if (agp_found && sba_reserve_agpgart) {
		printk(KERN_INFO "%s: reserving %dMB of IOVA space for agpgart\n",
		       __func__, (iova_space_size/2) >> 20);
		ioc->pdir_size /= 2;
		ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
	}
#endif /*SBA_AGP_SUPPORT*/
}

static void
sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
{
	u32 iova_space_size, iova_space_mask;
	unsigned int pdir_size, iov_order, tcnfg;

	/*
	** Determine IOVA Space size from memory size.
	**
	** Ideally, PCI drivers would register the maximum number
	** of DMA they can have outstanding for each device they
	** own.  Next best thing would be to guess how much DMA
	** can be outstanding based on PCI Class/sub-class. Both
	** methods still require some "extra" to support PCI
	** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
	**
	** While we have 32 bits of "IOVA" space, the top two bits
	** are used for DMA hints - ergo only 30 bits max.
	*/

	iova_space_size = (u32) (totalram_pages()/global_ioc_cnt);

	/* limit IOVA space size to 1MB-1GB */
	if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
		iova_space_size = 1 << (20 - PAGE_SHIFT);
	}
	else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
		iova_space_size = 1 << (30 - PAGE_SHIFT);
	}

	/*
	** iova space must be log2() in size.
	** thus, pdir/res_map will also be log2().
	** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
	*/
	iov_order = get_order(iova_space_size << PAGE_SHIFT);

	/* iova_space_size is now bytes, not pages */
	iova_space_size = 1 << (iov_order + PAGE_SHIFT);
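	/*
	** Illustrative arithmetic (assumed config: one IOC, 512MB RAM,
	** 4KB pages): totalram_pages() = 131072, inside the 1MB-1GB
	** clamp; iov_order = get_order(131072 << 12) = 17, so
	** iova_space_size = 1 << 29 bytes = 512MB and pdir_size below
	** works out to (512MB / 4KB) * 8 = 1MB -- within the 2MB pdir
	** maximum assumed by the piranha workaround above.
	*/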

	ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);

	DBG_INIT("%s() hpa %px mem %ldMB IOV %dMB (%d bits)\n",
			__func__,
			ioc->ioc_hpa,
			(unsigned long) totalram_pages() >> (20 - PAGE_SHIFT),
			iova_space_size>>20,
			iov_order + PAGE_SHIFT);

	ioc->pdir_base = sba_alloc_pdir(pdir_size);

	DBG_INIT("%s() pdir %p size %x\n",
			__func__, ioc->pdir_base, pdir_size);

#ifdef SBA_HINT_SUPPORT
	/* FIXME : DMA HINTs not used */
	ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
	ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));

	DBG_INIT("	hint_shift_pdir %x hint_mask_pdir %lx\n",
			ioc->hint_shift_pdir, ioc->hint_mask_pdir);
#endif

	WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);

	/* build IMASK for IOC and Elroy */
	iova_space_mask =  0xffffffff;
	iova_space_mask <<= (iov_order + PAGE_SHIFT);

	/*
	** On C3000 w/512MB mem, HP-UX 10.20 reports:
	** ibase=0, imask=0xFE000000, size=0x2000000.
	*/
	ioc->ibase = 0;
	ioc->imask = iova_space_mask;	/* save it */
#ifdef ZX1_SUPPORT
	ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
#endif

	DBG_INIT("%s() IOV base %#lx mask %#0lx\n",
		__func__, ioc->ibase, ioc->imask);

	/*
	** FIXME: Hint registers are programmed with default hint
	** values during boot, so hints should be sane even if we
	** can't reprogram them the way drivers want.
	*/

	setup_ibase_imask(sba, ioc, ioc_num);

	/*
	** Program the IOC's ibase and enable IOVA translation
	*/
	WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
	WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);

	/* Set I/O PDIR Page size to system page size */
	switch (PAGE_SHIFT) {
	case 12: tcnfg = 0; break;	/*  4K */
	case 13: tcnfg = 1; break;	/*  8K */
	case 14: tcnfg = 2; break;	/* 16K */
	case 16: tcnfg = 3; break;	/* 64K */
	default:
		panic(__FILE__ ": Unsupported system page size %d",
			1 << PAGE_SHIFT);
		break;
	}
	/* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */
	WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);

	/*
	** Clear I/O TLB of any possible entries.
	** (Yes. This is a bit paranoid...but so what)
	*/
	WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);

	ioc->ibase = 0; /* used by SBA_IOVA and related macros */

	DBG_INIT("%s() DONE\n", __func__);
}



/**************************************************************************
**
**   SBA initialization code (HW and SW)
**
**   o identify SBA chip itself
**   o initialize SBA chip modes (HardFail)
**   o FIXME: initialize DMA hints for reasonable defaults
**
**************************************************************************/

static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
{
	return ioremap(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
}

static void sba_hw_init(struct sba_device *sba_dev)
{
	int i;
	int num_ioc;
	u64 ioc_ctl;

	if (!is_pdc_pat()) {
		/* Shutdown the USB controller on Astro-based workstations.
		** Once we reprogram the IOMMU, the next DMA performed by
		** USB will HPMC the box. USB is only enabled if a
		** keyboard is present and found.
		**
		** With serial console, j6k v5.0 firmware says:
		**    mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
		**
		** FIXME: Using GFX+USB console at power up but direct
		**	linux to serial console is still broken.
		**	USB could generate DMA so we must reset USB.
		**	The proper sequence would be:
		**	o block console output
		**	o reset USB device
		**	o reprogram serial port
		**	o unblock console output
		*/
		if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
			pdc_io_reset_devices();
		}

	}


#if 0
printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
	PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);

	/*
	** Need to deal with DMA from LAN.
	**	Maybe use page zero boot device as a handle to talk
	**	to PDC about which device to shutdown.
	**
	** Netbooting, j6k v5.0 firmware says:
	**	mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002
	** ARGH! invalid class.
	*/
	if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
		&& (PAGE0->mem_boot.cl_class != CL_SEQU)) {
			pdc_io_reset();
	}
#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa %px ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
			/* j6700 v1.6 firmware sets 0x294f */
			/* A500 firmware sets 0x4d */

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1) ;
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end   = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* IKE, REO */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}
	/* XXX: What about Reo Grande? */

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

		for (j=0; j < sizeof(u64) * ROPES_PER_IOC; j+=sizeof(u64)) {

			/*
			 * Clear ROPE(N)_CONFIG AO bit.
			 * Disables "NT Ordering" (~= !"Relaxed Ordering")
			 * Overrides bit 1 in DMA Hint Sets.
			 * Improves netperf UDP_STREAM by ~10% for bcm5701.
			 */
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/*
			** Make sure the box crashes on rope errors.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT("	ioc[%d] ROPE_CFG %#lx  ROPE_DBG %lx\n",
				i,
				(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
				(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
			);
		DBG_INIT("	STATUS_CONTROL %#lx  FLUSH_CTRL %#lx\n",
				(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
				(unsigned long) READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
			);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for(i=0; i< sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long , unsigned long ,
					  void (*)(pte_t * , unsigned long),
					  unsigned long );
		void set_data_memory_break(pte_t * , unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;  /* convert bit count to byte count */
		DBG_INIT("%s() res_size 0x%x\n",
			__func__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (NULL == sba_dev->ioc[i].res_map)
		{
			panic("%s:%s() could not allocate resource map\n",
			      __FILE__, __func__ );
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = (__force __le64) 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408K to +1536K is un-usable. */

			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end   = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the io pdir busy */
			while (p_start < p_end)
				*p_start++ = -1;

		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages( sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
	 * (bit #61, big endian), we have to flush and sync every time
	 * IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i;

	seq_printf(m, "%s rev %d.%d\n",
		sba_dev->name,
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3);
	seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
		(int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
		total_pages);

	seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		ioc->res_size, ioc->res_size << 3);	/* 8 bits per byte */

	seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	for (i = 0; i < 4; i++)
		seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
			i,
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	seq_printf(m, "IO PDIR entries : %ld free  %ld used (%d%%)\n",
		total_pages - ioc->used_pages, ioc->used_pages,
		(int)(ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max)
			max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min)
			min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		min, avg, max);

	seq_printf(m, "pci_map_single(): %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->msingle_calls, ioc->msingle_pages,
		(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	seq_printf(m, "pci_unmap_single: %12ld calls  %12ld pages (avg %d/1000)\n",
		min, max, (int)((max * 1000)/min));

	seq_printf(m, "pci_map_sg()    : %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->msg_calls, ioc->msg_pages,
		(int)((ioc->msg_pages * 1000)/ioc->msg_calls));

	seq_printf(m, "pci_unmap_sg()  : %12ld calls  %12ld pages (avg %d/1000)\n",
		ioc->usg_calls, ioc->usg_pages,
		(int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

	return 0;
}

static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */

	seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
		     ioc->res_size, false);
	seq_putc(m, '\n');

	return 0;
}
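
/*
** Reading the dump above: each bit covers one IO pdir entry (one
** IOVP_SIZE page of IOVA space), so every 32-bit word of output
** accounts for 32 pages -- 128KB of IOVA space with 4KB IO pages.
*/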
#endif /* CONFIG_PROC_FS */

static const struct parisc_device_id sba_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

static int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver __refdata = {
	.name =		MODULE_NAME,
	.id_table =	sba_tbl,
	.probe =	sba_driver_callback,
};

/*
** Determine if sba should claim this chip (return 0) or not (nonzero).
** If so, initialize the chip and tell other partners in crime they
** have work to do.
*/
static int __init sba_driver_callback(struct parisc_device *dev)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
	char *version;
	void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
	struct proc_dir_entry *root __maybe_unused;

	sba_dump_ranges(sba_addr);

	/* Read HW Rev First */
	func_class = READ_REG(sba_addr + SBA_FCLASS);

	if (IS_ASTRO(dev)) {
		unsigned long fclass;
		static char astro_rev[] = "Astro ?.?";

		/* Astro is broken...Read HW Rev First */
		fclass = READ_REG(sba_addr);

		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;

	} else if (IS_IKE(dev)) {
		static char ike_rev[] = "Ike rev ?";
		ike_rev[8] = '0' + (char) (func_class & 0xff);
		version = ike_rev;
	} else if (IS_PLUTO(dev)) {
		static char pluto_rev[] = "Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
		pluto_rev[8] = '0' + (char) (func_class & 0x0f);
		version = pluto_rev;
	} else {
		static char reo_rev[] = "REO rev ?";
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}
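	/*
	** Example of the decoding above: an Astro whose fclass register
	** reads 0 reports itself as "Astro 1.0" -- '1' + (0 & 0x7) gives
	** '1' and '0' + ((0 & 0x18) >> 3) gives '0'.
	*/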

	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

		/* Astro and Pluto have one IOC per SBA; Ike and REO have two */
		if (!IS_ASTRO(dev) && !IS_PLUTO(dev))
			global_ioc_cnt *= 2;
	}

	printk(KERN_INFO "%s found %s at 0x%llx\n",
		MODULE_NAME, version, (unsigned long long)dev->hpa.start);

	sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (!sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;
	}

	parisc_set_drvdata(dev, sba_dev);

	for (i = 0; i < MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

	switch (dev->id.hversion) {
	case PLUTO_MCKINLEY_PORT:
		if (!proc_mckinley_root)
			proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
		root = proc_mckinley_root;
		break;
	case ASTRO_RUNWAY_PORT:
	case IKE_MERCED_PORT:
	default:
		if (!proc_runway_root)
			proc_runway_root = proc_mkdir("bus/runway", NULL);
		root = proc_runway_root;
		break;
	}

	proc_create_single("sba_iommu", 0, root, sba_proc_info);
	proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
	return 0;
}

/*
** One time initialization to let the world know the SBA was found.
** Must be called exactly once before pci_init().
*/
static int __init sba_init(void)
{
	return register_parisc_driver(&sba_driver);
}
arch_initcall(sba_init);


/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc device.
 *
 * Returns the appropriate IOMMU data for the given parisc PCI controller.
 * This is cached and used later for PCI DMA Mapping.
 */
void *sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* IOC # */

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}
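
/*
** A minimal usage sketch (illustrative, not compiled): a PCI host bus
** adapter driver such as lba_pci would typically resolve and cache the
** owning IOC once at probe time and consult it on every DMA mapping.
** The "example_hba" structure and its "iommu" member are hypothetical
** names, not part of this driver.
*/
#if 0
static int example_lba_probe(struct parisc_device *dev)
{
	struct example_hba *hba = kzalloc(sizeof(*hba), GFP_KERNEL);

	if (!hba)
		return -ENOMEM;

	hba->iommu = sba_get_iommu(dev);	/* cache the IOC pointer */
	parisc_set_drvdata(dev, hba);
	return 0;
}
#endif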


/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: The parisc device.
 * @r: resource structure whose start/end fields the PCI host controller
 *     wants assigned.
 *
 * For the given parisc PCI controller, determine if any direct ranges
 * are routed down the corresponding rope.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC - 1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	/* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
	for (i = 0; i < 4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC - 1)) != rope)
			continue;	/* directed down different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~READ_REG32(reg + LMMIO_DIRECT0_MASK);
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}
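
/*
** Worked example with hypothetical register contents: if
** LMMIO_DIRECT0_BASE reads 0xf8000001, the low bit marks the range
** enabled and the base is 0xf8000000 (widened with PCI_F_EXTEND). If
** LMMIO_DIRECT0_MASK reads 0xff000000, then size = ~0xff000000 =
** 0x00ffffff, so the range spans 16MB and r->end lands at 0xf8ffffff.
** The low bits of LMMIO_DIRECT0_ROUTE name the rope the range is
** directed down.
*/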


/**
 * sba_distributed_lmmio - return portion of distributed LMMIO range
 * @pci_hba: The parisc device.
 * @r: resource structure whose start/end fields the PCI host controller
 *     wants assigned.
 *
 * For the given parisc PCI controller, return the portion of the
 * distributed LMMIO range assigned to its rope. The distributed LMMIO
 * range is always present; it's just a question of the base address and
 * size of the range.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC - 1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* Gah! Distributed range wasn't enabled! */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}
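
/*
** Worked example, assuming the usual ROPES_PER_IOC of 8 and a
** hypothetical LMMIO_DIST_MASK of 0xfe000000: the whole distributed
** window is ~0xfe000000 + 1 = 32MB, each rope owns size + 1 = 0x400000
** (4MB) of it, and rope N's slice starts at base + N * 0x400000.
*/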
1/*
2** System Bus Adapter (SBA) I/O MMU manager
3**
4** (c) Copyright 2000-2004 Grant Grundler <grundler @ parisc-linux x org>
5** (c) Copyright 2004 Naresh Kumar Inna <knaresh at india x hp x com>
6** (c) Copyright 2000-2004 Hewlett-Packard Company
7**
8** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
9**
10** This program is free software; you can redistribute it and/or modify
11** it under the terms of the GNU General Public License as published by
12** the Free Software Foundation; either version 2 of the License, or
13** (at your option) any later version.
14**
15**
16** This module initializes the IOC (I/O Controller) found on B1000/C3000/
17** J5000/J7000/N-class/L-class machines and their successors.
18**
19** FIXME: add DMA hint support programming in both sba and lba modules.
20*/
21
22#include <linux/types.h>
23#include <linux/kernel.h>
24#include <linux/spinlock.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27
28#include <linux/mm.h>
29#include <linux/string.h>
30#include <linux/pci.h>
31#include <linux/scatterlist.h>
32#include <linux/iommu-helper.h>
33
34#include <asm/byteorder.h>
35#include <asm/io.h>
36#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
37
38#include <asm/hardware.h> /* for register_parisc_driver() stuff */
39
40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
42#include <linux/module.h>
43
44#include <asm/ropes.h>
45#include <asm/mckinley.h> /* for proc_mckinley_root */
46#include <asm/runway.h> /* for proc_runway_root */
47#include <asm/page.h> /* for PAGE0 */
48#include <asm/pdc.h> /* for PDC_MODEL_* */
49#include <asm/pdcpat.h> /* for is_pdc_pat() */
50#include <asm/parisc-device.h>
51
52#define MODULE_NAME "SBA"
53
54/*
55** The number of debug flags is a clue - this code is fragile.
56** Don't even think about messing with it unless you have
57** plenty of 710's to sacrifice to the computer gods. :^)
58*/
59#undef DEBUG_SBA_INIT
60#undef DEBUG_SBA_RUN
61#undef DEBUG_SBA_RUN_SG
62#undef DEBUG_SBA_RESOURCE
63#undef ASSERT_PDIR_SANITY
64#undef DEBUG_LARGE_SG_ENTRIES
65#undef DEBUG_DMB_TRAP
66
67#ifdef DEBUG_SBA_INIT
68#define DBG_INIT(x...) printk(x)
69#else
70#define DBG_INIT(x...)
71#endif
72
73#ifdef DEBUG_SBA_RUN
74#define DBG_RUN(x...) printk(x)
75#else
76#define DBG_RUN(x...)
77#endif
78
79#ifdef DEBUG_SBA_RUN_SG
80#define DBG_RUN_SG(x...) printk(x)
81#else
82#define DBG_RUN_SG(x...)
83#endif
84
85
86#ifdef DEBUG_SBA_RESOURCE
87#define DBG_RES(x...) printk(x)
88#else
89#define DBG_RES(x...)
90#endif
91
92#define SBA_INLINE __inline__
93
94#define DEFAULT_DMA_HINT_REG 0
95
96#define SBA_MAPPING_ERROR (~(dma_addr_t)0)
97
98struct sba_device *sba_list;
99EXPORT_SYMBOL_GPL(sba_list);
100
101static unsigned long ioc_needs_fdc = 0;
102
103/* global count of IOMMUs in the system */
104static unsigned int global_ioc_cnt = 0;
105
106/* PA8700 (Piranha 2.2) bug workaround */
107static unsigned long piranha_bad_128k = 0;
108
109/* Looks nice and keeps the compiler happy */
110#define SBA_DEV(d) ((struct sba_device *) (d))
111
112#ifdef CONFIG_AGP_PARISC
113#define SBA_AGP_SUPPORT
114#endif /*CONFIG_AGP_PARISC*/
115
116#ifdef SBA_AGP_SUPPORT
117static int sba_reserve_agpgart = 1;
118module_param(sba_reserve_agpgart, int, 0444);
119MODULE_PARM_DESC(sba_reserve_agpgart, "Reserve half of IO pdir as AGPGART");
120#endif
121
122
123/************************************
124** SBA register read and write support
125**
126** BE WARNED: register writes are posted.
127** (ie follow writes which must reach HW with a read)
128**
129** Superdome (in particular, REO) allows only 64-bit CSR accesses.
130*/
131#define READ_REG32(addr) readl(addr)
132#define READ_REG64(addr) readq(addr)
133#define WRITE_REG32(val, addr) writel((val), (addr))
134#define WRITE_REG64(val, addr) writeq((val), (addr))
135
136#ifdef CONFIG_64BIT
137#define READ_REG(addr) READ_REG64(addr)
138#define WRITE_REG(value, addr) WRITE_REG64(value, addr)
139#else
140#define READ_REG(addr) READ_REG32(addr)
141#define WRITE_REG(value, addr) WRITE_REG32(value, addr)
142#endif
143
144#ifdef DEBUG_SBA_INIT
145
146/* NOTE: When CONFIG_64BIT isn't defined, READ_REG64() is two 32-bit reads */
147
148/**
149 * sba_dump_ranges - debugging only - print ranges assigned to this IOA
150 * @hpa: base address of the sba
151 *
152 * Print the MMIO and IO Port address ranges forwarded by an Astro/Ike/RIO
153 * IO Adapter (aka Bus Converter).
154 */
155static void
156sba_dump_ranges(void __iomem *hpa)
157{
158 DBG_INIT("SBA at 0x%p\n", hpa);
159 DBG_INIT("IOS_DIST_BASE : %Lx\n", READ_REG64(hpa+IOS_DIST_BASE));
160 DBG_INIT("IOS_DIST_MASK : %Lx\n", READ_REG64(hpa+IOS_DIST_MASK));
161 DBG_INIT("IOS_DIST_ROUTE : %Lx\n", READ_REG64(hpa+IOS_DIST_ROUTE));
162 DBG_INIT("\n");
163 DBG_INIT("IOS_DIRECT_BASE : %Lx\n", READ_REG64(hpa+IOS_DIRECT_BASE));
164 DBG_INIT("IOS_DIRECT_MASK : %Lx\n", READ_REG64(hpa+IOS_DIRECT_MASK));
165 DBG_INIT("IOS_DIRECT_ROUTE: %Lx\n", READ_REG64(hpa+IOS_DIRECT_ROUTE));
166}
167
168/**
169 * sba_dump_tlb - debugging only - print IOMMU operating parameters
170 * @hpa: base address of the IOMMU
171 *
172 * Print the size/location of the IO MMU PDIR.
173 */
174static void sba_dump_tlb(void __iomem *hpa)
175{
176 DBG_INIT("IO TLB at 0x%p\n", hpa);
177 DBG_INIT("IOC_IBASE : 0x%Lx\n", READ_REG64(hpa+IOC_IBASE));
178 DBG_INIT("IOC_IMASK : 0x%Lx\n", READ_REG64(hpa+IOC_IMASK));
179 DBG_INIT("IOC_TCNFG : 0x%Lx\n", READ_REG64(hpa+IOC_TCNFG));
180 DBG_INIT("IOC_PDIR_BASE: 0x%Lx\n", READ_REG64(hpa+IOC_PDIR_BASE));
181 DBG_INIT("\n");
182}
183#else
184#define sba_dump_ranges(x)
185#define sba_dump_tlb(x)
186#endif /* DEBUG_SBA_INIT */
187
188
189#ifdef ASSERT_PDIR_SANITY
190
191/**
192 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
193 * @ioc: IO MMU structure which owns the pdir we are interested in.
194 * @msg: text to print ont the output line.
195 * @pide: pdir index.
196 *
197 * Print one entry of the IO MMU PDIR in human readable form.
198 */
199static void
200sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
201{
202 /* start printing from lowest pde in rval */
203 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
204 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
205 uint rcnt;
206
207 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
208 msg,
209 rptr, pide & (BITS_PER_LONG - 1), *rptr);
210
211 rcnt = 0;
212 while (rcnt < BITS_PER_LONG) {
213 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
214 (rcnt == (pide & (BITS_PER_LONG - 1)))
215 ? " -->" : " ",
216 rcnt, ptr, *ptr );
217 rcnt++;
218 ptr++;
219 }
220 printk(KERN_DEBUG "%s", msg);
221}
222
223
224/**
225 * sba_check_pdir - debugging only - consistency checker
226 * @ioc: IO MMU structure which owns the pdir we are interested in.
227 * @msg: text to print ont the output line.
228 *
229 * Verify the resource map and pdir state is consistent
230 */
231static int
232sba_check_pdir(struct ioc *ioc, char *msg)
233{
234 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
235 u32 *rptr = (u32 *) ioc->res_map; /* resource map ptr */
236 u64 *pptr = ioc->pdir_base; /* pdir ptr */
237 uint pide = 0;
238
239 while (rptr < rptr_end) {
240 u32 rval = *rptr;
241 int rcnt = 32; /* number of bits we might check */
242
243 while (rcnt) {
244 /* Get last byte and highest bit from that */
245 u32 pde = ((u32) (((char *)pptr)[7])) << 24;
246 if ((rval ^ pde) & 0x80000000)
247 {
248 /*
249 ** BUMMER! -- res_map != pdir --
250 ** Dump rval and matching pdir entries
251 */
252 sba_dump_pdir_entry(ioc, msg, pide);
253 return(1);
254 }
255 rcnt--;
256 rval <<= 1; /* try the next bit */
257 pptr++;
258 pide++;
259 }
260 rptr++; /* look at next word of res_map */
261 }
262 /* It'd be nice if we always got here :^) */
263 return 0;
264}
265
266
267/**
268 * sba_dump_sg - debugging only - print Scatter-Gather list
269 * @ioc: IO MMU structure which owns the pdir we are interested in.
270 * @startsg: head of the SG list
271 * @nents: number of entries in SG list
272 *
273 * print the SG list so we can verify it's correct by hand.
274 */
275static void
276sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
277{
278 while (nents-- > 0) {
279 printk(KERN_DEBUG " %d : %08lx/%05x %p/%05x\n",
280 nents,
281 (unsigned long) sg_dma_address(startsg),
282 sg_dma_len(startsg),
283 sg_virt(startsg), startsg->length);
284 startsg++;
285 }
286}
287
288#endif /* ASSERT_PDIR_SANITY */
289
290
291
292
293/**************************************************************
294*
295* I/O Pdir Resource Management
296*
297* Bits set in the resource map are in use.
298* Each bit can represent a number of pages.
299* LSbs represent lower addresses (IOVA's).
300*
301***************************************************************/
302#define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
303
304/* Convert from IOVP to IOVA and vice versa. */
305
306#ifdef ZX1_SUPPORT
307/* Pluto (aka ZX1) boxes need to set or clear the ibase bits appropriately */
308#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
309#define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
310#else
311/* only support Astro and ancestors. Saves a few cycles in key places */
312#define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
313#define SBA_IOVP(ioc,iova) (iova)
314#endif
315
316#define PDIR_INDEX(iovp) ((iovp)>>IOVP_SHIFT)
317
318#define RESMAP_MASK(n) (~0UL << (BITS_PER_LONG - (n)))
319#define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
320
321static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
322 unsigned int bitshiftcnt)
323{
324 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
325 + bitshiftcnt;
326}
327
328/**
329 * sba_search_bitmap - find free space in IO PDIR resource bitmap
330 * @ioc: IO MMU structure which owns the pdir we are interested in.
331 * @bits_wanted: number of entries we need.
332 *
333 * Find consecutive free bits in resource bitmap.
334 * Each bit represents one entry in the IO Pdir.
335 * Cool perf optimization: search for log2(size) bits at a time.
336 */
337static SBA_INLINE unsigned long
338sba_search_bitmap(struct ioc *ioc, struct device *dev,
339 unsigned long bits_wanted)
340{
341 unsigned long *res_ptr = ioc->res_hint;
342 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
343 unsigned long pide = ~0UL, tpide;
344 unsigned long boundary_size;
345 unsigned long shift;
346 int ret;
347
348 boundary_size = ALIGN((unsigned long long)dma_get_seg_boundary(dev) + 1,
349 1ULL << IOVP_SHIFT) >> IOVP_SHIFT;
350
351#if defined(ZX1_SUPPORT)
352 BUG_ON(ioc->ibase & ~IOVP_MASK);
353 shift = ioc->ibase >> IOVP_SHIFT;
354#else
355 shift = 0;
356#endif
357
358 if (bits_wanted > (BITS_PER_LONG/2)) {
359 /* Search word at a time - no mask needed */
360 for(; res_ptr < res_end; ++res_ptr) {
361 tpide = ptr_to_pide(ioc, res_ptr, 0);
362 ret = iommu_is_span_boundary(tpide, bits_wanted,
363 shift,
364 boundary_size);
365 if ((*res_ptr == 0) && !ret) {
366 *res_ptr = RESMAP_MASK(bits_wanted);
367 pide = tpide;
368 break;
369 }
370 }
371 /* point to the next word on next pass */
372 res_ptr++;
373 ioc->res_bitshift = 0;
374 } else {
375 /*
376 ** Search the resource bit map on well-aligned values.
377 ** "o" is the alignment.
378 ** We need the alignment to invalidate I/O TLB using
379 ** SBA HW features in the unmap path.
380 */
381 unsigned long o = 1 << get_order(bits_wanted << PAGE_SHIFT);
382 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
383 unsigned long mask;
384
385 if (bitshiftcnt >= BITS_PER_LONG) {
386 bitshiftcnt = 0;
387 res_ptr++;
388 }
389 mask = RESMAP_MASK(bits_wanted) >> bitshiftcnt;
390
391 DBG_RES("%s() o %ld %p", __func__, o, res_ptr);
392 while(res_ptr < res_end)
393 {
394 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
395 WARN_ON(mask == 0);
396 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
397 ret = iommu_is_span_boundary(tpide, bits_wanted,
398 shift,
399 boundary_size);
400 if ((((*res_ptr) & mask) == 0) && !ret) {
401 *res_ptr |= mask; /* mark resources busy! */
402 pide = tpide;
403 break;
404 }
405 mask >>= o;
406 bitshiftcnt += o;
407 if (mask == 0) {
408 mask = RESMAP_MASK(bits_wanted);
409 bitshiftcnt=0;
410 res_ptr++;
411 }
412 }
413 /* look in the same word on the next pass */
414 ioc->res_bitshift = bitshiftcnt + bits_wanted;
415 }
416
417 /* wrapped ? */
418 if (res_end <= res_ptr) {
419 ioc->res_hint = (unsigned long *) ioc->res_map;
420 ioc->res_bitshift = 0;
421 } else {
422 ioc->res_hint = res_ptr;
423 }
424 return (pide);
425}
426
427
428/**
429 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
430 * @ioc: IO MMU structure which owns the pdir we are interested in.
431 * @size: number of bytes to create a mapping for
432 *
433 * Given a size, find consecutive unmarked and then mark those bits in the
434 * resource bit map.
435 */
436static int
437sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
438{
439 unsigned int pages_needed = size >> IOVP_SHIFT;
440#ifdef SBA_COLLECT_STATS
441 unsigned long cr_start = mfctl(16);
442#endif
443 unsigned long pide;
444
445 pide = sba_search_bitmap(ioc, dev, pages_needed);
446 if (pide >= (ioc->res_size << 3)) {
447 pide = sba_search_bitmap(ioc, dev, pages_needed);
448 if (pide >= (ioc->res_size << 3))
449 panic("%s: I/O MMU @ %p is out of mapping resources\n",
450 __FILE__, ioc->ioc_hpa);
451 }
452
453#ifdef ASSERT_PDIR_SANITY
454 /* verify the first enable bit is clear */
455 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
456 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
457 }
458#endif
459
460 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
461 __func__, size, pages_needed, pide,
462 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
463 ioc->res_bitshift );
464
465#ifdef SBA_COLLECT_STATS
466 {
467 unsigned long cr_end = mfctl(16);
468 unsigned long tmp = cr_end - cr_start;
469 /* check for roll over */
470 cr_start = (cr_end < cr_start) ? -(tmp) : (tmp);
471 }
472 ioc->avg_search[ioc->avg_idx++] = cr_start;
473 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
474
475 ioc->used_pages += pages_needed;
476#endif
477
478 return (pide);
479}
480
481
482/**
483 * sba_free_range - unmark bits in IO PDIR resource bitmap
484 * @ioc: IO MMU structure which owns the pdir we are interested in.
485 * @iova: IO virtual address which was previously allocated.
486 * @size: number of bytes to create a mapping for
487 *
488 * clear bits in the ioc's resource map
489 */
490static SBA_INLINE void
491sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
492{
493 unsigned long iovp = SBA_IOVP(ioc, iova);
494 unsigned int pide = PDIR_INDEX(iovp);
495 unsigned int ridx = pide >> 3; /* convert bit to byte address */
496 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
497
498 int bits_not_wanted = size >> IOVP_SHIFT;
499
500 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
501 unsigned long m = RESMAP_MASK(bits_not_wanted) >> (pide & (BITS_PER_LONG - 1));
502
503 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n",
504 __func__, (uint) iova, size,
505 bits_not_wanted, m, pide, res_ptr, *res_ptr);
506
507#ifdef SBA_COLLECT_STATS
508 ioc->used_pages -= bits_not_wanted;
509#endif
510
511 *res_ptr &= ~m;
512}
513
514
515/**************************************************************
516*
517* "Dynamic DMA Mapping" support (aka "Coherent I/O")
518*
519***************************************************************/
520
521#ifdef SBA_HINT_SUPPORT
522#define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
523#endif
524
525typedef unsigned long space_t;
526#define KERNEL_SPACE 0
527
528/**
529 * sba_io_pdir_entry - fill in one IO PDIR entry
530 * @pdir_ptr: pointer to IO PDIR entry
531 * @sid: process Space ID - currently only support KERNEL_SPACE
532 * @vba: Virtual CPU address of buffer to map
533 * @hint: DMA hint set to use for this mapping
534 *
535 * SBA Mapping Routine
536 *
537 * Given a virtual address (vba, arg2) and space id, (sid, arg1)
538 * sba_io_pdir_entry() loads the I/O PDIR entry pointed to by
539 * pdir_ptr (arg0).
540 * Using the bass-ackwards HP bit numbering, Each IO Pdir entry
541 * for Astro/Ike looks like:
542 *
543 *
544 * 0 19 51 55 63
545 * +-+---------------------+----------------------------------+----+--------+
546 * |V| U | PPN[43:12] | U | VI |
547 * +-+---------------------+----------------------------------+----+--------+
548 *
549 * Pluto is basically identical, supports fewer physical address bits:
550 *
551 * 0 23 51 55 63
552 * +-+------------------------+-------------------------------+----+--------+
553 * |V| U | PPN[39:12] | U | VI |
554 * +-+------------------------+-------------------------------+----+--------+
555 *
556 * V == Valid Bit (Most Significant Bit is bit 0)
557 * U == Unused
558 * PPN == Physical Page Number
559 * VI == Virtual Index (aka Coherent Index)
560 *
561 * LPA instruction output is put into PPN field.
562 * LCI (Load Coherence Index) instruction provides the "VI" bits.
563 *
564 * We pre-swap the bytes since PCX-W is Big Endian and the
565 * IOMMU uses little endian for the pdir.
566 */
567
568static void SBA_INLINE
569sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
570 unsigned long hint)
571{
572 u64 pa; /* physical address */
573 register unsigned ci; /* coherent index */
574
575 pa = virt_to_phys(vba);
576 pa &= IOVP_MASK;
577
578 mtsp(sid,1);
579 asm("lci 0(%%sr1, %1), %0" : "=r" (ci) : "r" (vba));
580 pa |= (ci >> PAGE_SHIFT) & 0xff; /* move CI (8 bits) into lowest byte */
581
582 pa |= SBA_PDIR_VALID_BIT; /* set "valid" bit */
583 *pdir_ptr = cpu_to_le64(pa); /* swap and store into I/O Pdir */
584
585 /*
586 * If the PDC_MODEL capabilities has Non-coherent IO-PDIR bit set
587 * (bit #61, big endian), we have to flush and sync every time
588 * IO-PDIR is changed in Ike/Astro.
589 */
590 if (ioc_needs_fdc)
591 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
592}
593
594
595/**
596 * sba_mark_invalid - invalidate one or more IO PDIR entries
597 * @ioc: IO MMU structure which owns the pdir we are interested in.
598 * @iova: IO Virtual Address mapped earlier
599 * @byte_cnt: number of bytes this mapping covers.
600 *
601 * Marking the IO PDIR entry(ies) as Invalid and invalidate
602 * corresponding IO TLB entry. The Ike PCOM (Purge Command Register)
603 * is to purge stale entries in the IO TLB when unmapping entries.
604 *
605 * The PCOM register supports purging of multiple pages, with a minium
606 * of 1 page and a maximum of 2GB. Hardware requires the address be
607 * aligned to the size of the range being purged. The size of the range
608 * must be a power of 2. The "Cool perf optimization" in the
609 * allocation routine helps keep that true.
610 */
611static SBA_INLINE void
612sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
613{
614 u32 iovp = (u32) SBA_IOVP(ioc,iova);
615 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
616
617#ifdef ASSERT_PDIR_SANITY
618 /* Assert first pdir entry is set.
619 **
620 ** Even though this is a big-endian machine, the entries
621 ** in the iopdir are little endian. That's why we look at
622 ** the byte at +7 instead of at +0.
623 */
624 if (0x80 != (((u8 *) pdir_ptr)[7])) {
625 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
626 }
627#endif
628
629 if (byte_cnt > IOVP_SIZE)
630 {
631#if 0
632 unsigned long entries_per_cacheline = ioc_needs_fdc ?
633 L1_CACHE_ALIGN(((unsigned long) pdir_ptr))
634 - (unsigned long) pdir_ptr;
635 : 262144;
636#endif
637
638 /* set "size" field for PCOM */
639 iovp |= get_order(byte_cnt) + PAGE_SHIFT;
640
641 do {
642 /* clear I/O Pdir entry "valid" bit first */
643 ((u8 *) pdir_ptr)[7] = 0;
644 if (ioc_needs_fdc) {
645 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
646#if 0
647 entries_per_cacheline = L1_CACHE_SHIFT - 3;
648#endif
649 }
650 pdir_ptr++;
651 byte_cnt -= IOVP_SIZE;
652 } while (byte_cnt > IOVP_SIZE);
653 } else
654 iovp |= IOVP_SHIFT; /* set "size" field for PCOM */
655
656 /*
657 ** clear I/O PDIR entry "valid" bit.
658 ** We have to R/M/W the cacheline regardless how much of the
659 ** pdir entry that we clobber.
660 ** The rest of the entry would be useful for debugging if we
661 ** could dump core on HPMC.
662 */
663 ((u8 *) pdir_ptr)[7] = 0;
664 if (ioc_needs_fdc)
665 asm volatile("fdc %%r0(%0)" : : "r" (pdir_ptr));
666
667 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
668}
669
670/**
671 * sba_dma_supported - PCI driver can query DMA support
672 * @dev: instance of PCI owned by the driver that's asking
673 * @mask: number of address bits this PCI device can handle
674 *
675 * See Documentation/DMA-API-HOWTO.txt
676 */
677static int sba_dma_supported( struct device *dev, u64 mask)
678{
679 struct ioc *ioc;
680
681 if (dev == NULL) {
682 printk(KERN_ERR MODULE_NAME ": EISA/ISA/et al not supported\n");
683 BUG();
684 return(0);
685 }
686
687 /* Documentation/DMA-API-HOWTO.txt tells drivers to try 64-bit
688 * first, then fall back to 32-bit if that fails.
689 * We are just "encouraging" 32-bit DMA masks here since we can
690 * never allow IOMMU bypass unless we add special support for ZX1.
691 */
692 if (mask > ~0U)
693 return 0;
694
695 ioc = GET_IOC(dev);
696 if (!ioc)
697 return 0;
698
699 /*
700 * check if mask is >= than the current max IO Virt Address
701 * The max IO Virt address will *always* < 30 bits.
702 */
703 return((int)(mask >= (ioc->ibase - 1 +
704 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
705}
706
707
708/**
709 * sba_map_single - map one buffer and return IOVA for DMA
710 * @dev: instance of PCI owned by the driver that's asking.
711 * @addr: driver buffer to map.
712 * @size: number of bytes to map in driver buffer.
713 * @direction: R/W or both.
714 *
715 * See Documentation/DMA-API-HOWTO.txt
716 */
717static dma_addr_t
718sba_map_single(struct device *dev, void *addr, size_t size,
719 enum dma_data_direction direction)
720{
721 struct ioc *ioc;
722 unsigned long flags;
723 dma_addr_t iovp;
724 dma_addr_t offset;
725 u64 *pdir_start;
726 int pide;
727
728 ioc = GET_IOC(dev);
729 if (!ioc)
730 return SBA_MAPPING_ERROR;
731
732 /* save offset bits */
733 offset = ((dma_addr_t) (long) addr) & ~IOVP_MASK;
734
735 /* round up to nearest IOVP_SIZE */
736 size = (size + offset + ~IOVP_MASK) & IOVP_MASK;
737
738 spin_lock_irqsave(&ioc->res_lock, flags);
739#ifdef ASSERT_PDIR_SANITY
740 sba_check_pdir(ioc,"Check before sba_map_single()");
741#endif
742
743#ifdef SBA_COLLECT_STATS
744 ioc->msingle_calls++;
745 ioc->msingle_pages += size >> IOVP_SHIFT;
746#endif
747 pide = sba_alloc_range(ioc, dev, size);
748 iovp = (dma_addr_t) pide << IOVP_SHIFT;
749
750 DBG_RUN("%s() 0x%p -> 0x%lx\n",
751 __func__, addr, (long) iovp | offset);
752
753 pdir_start = &(ioc->pdir_base[pide]);
754
755 while (size > 0) {
756 sba_io_pdir_entry(pdir_start, KERNEL_SPACE, (unsigned long) addr, 0);
757
758 DBG_RUN(" pdir 0x%p %02x%02x%02x%02x%02x%02x%02x%02x\n",
759 pdir_start,
760 (u8) (((u8 *) pdir_start)[7]),
761 (u8) (((u8 *) pdir_start)[6]),
762 (u8) (((u8 *) pdir_start)[5]),
763 (u8) (((u8 *) pdir_start)[4]),
764 (u8) (((u8 *) pdir_start)[3]),
765 (u8) (((u8 *) pdir_start)[2]),
766 (u8) (((u8 *) pdir_start)[1]),
767 (u8) (((u8 *) pdir_start)[0])
768 );
769
770 addr += IOVP_SIZE;
771 size -= IOVP_SIZE;
772 pdir_start++;
773 }
774
775 /* force FDC ops in io_pdir_entry() to be visible to IOMMU */
776 if (ioc_needs_fdc)
777 asm volatile("sync" : : );
778
779#ifdef ASSERT_PDIR_SANITY
780 sba_check_pdir(ioc,"Check after sba_map_single()");
781#endif
782 spin_unlock_irqrestore(&ioc->res_lock, flags);
783
784 /* form complete address */
785 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
786}
787
788
789static dma_addr_t
790sba_map_page(struct device *dev, struct page *page, unsigned long offset,
791 size_t size, enum dma_data_direction direction,
792 unsigned long attrs)
793{
794 return sba_map_single(dev, page_address(page) + offset, size,
795 direction);
796}
797
798
799/**
800 * sba_unmap_page - unmap one IOVA and free resources
801 * @dev: instance of PCI owned by the driver that's asking.
802 * @iova: IOVA of driver buffer previously mapped.
803 * @size: number of bytes mapped in driver buffer.
804 * @direction: R/W or both.
805 *
806 * See Documentation/DMA-API-HOWTO.txt
807 */
808static void
809sba_unmap_page(struct device *dev, dma_addr_t iova, size_t size,
810 enum dma_data_direction direction, unsigned long attrs)
811{
812 struct ioc *ioc;
813#if DELAYED_RESOURCE_CNT > 0
814 struct sba_dma_pair *d;
815#endif
816 unsigned long flags;
817 dma_addr_t offset;
818
819 DBG_RUN("%s() iovp 0x%lx/%x\n", __func__, (long) iova, size);
820
821 ioc = GET_IOC(dev);
822 if (!ioc) {
823 WARN_ON(!ioc);
824 return;
825 }
826 offset = iova & ~IOVP_MASK;
827 iova ^= offset; /* clear offset bits */
828 size += offset;
829 size = ALIGN(size, IOVP_SIZE);
830
831 spin_lock_irqsave(&ioc->res_lock, flags);
832
833#ifdef SBA_COLLECT_STATS
834 ioc->usingle_calls++;
835 ioc->usingle_pages += size >> IOVP_SHIFT;
836#endif
837
838 sba_mark_invalid(ioc, iova, size);
839
840#if DELAYED_RESOURCE_CNT > 0
841 /* Delaying when we re-use a IO Pdir entry reduces the number
842 * of MMIO reads needed to flush writes to the PCOM register.
843 */
844 d = &(ioc->saved[ioc->saved_cnt]);
845 d->iova = iova;
846 d->size = size;
847 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
848 int cnt = ioc->saved_cnt;
849 while (cnt--) {
850 sba_free_range(ioc, d->iova, d->size);
851 d--;
852 }
853 ioc->saved_cnt = 0;
854
855 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
856 }
857#else /* DELAYED_RESOURCE_CNT == 0 */
858 sba_free_range(ioc, iova, size);
859
860 /* If fdc's were issued, force fdc's to be visible now */
861 if (ioc_needs_fdc)
862 asm volatile("sync" : : );
863
864 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
865#endif /* DELAYED_RESOURCE_CNT == 0 */
866
867 spin_unlock_irqrestore(&ioc->res_lock, flags);
868
869 /* XXX REVISIT for 2.5 Linux - need syncdma for zero-copy support.
870 ** For Astro based systems this isn't a big deal WRT performance.
871 ** As long as 2.4 kernels copyin/copyout data from/to userspace,
872 ** we don't need the syncdma. The issue here is I/O MMU cachelines
873 ** are *not* coherent in all cases. May be hwrev dependent.
874 ** Need to investigate more.
875 asm volatile("syncdma");
876 */
877}
878
879
880/**
881 * sba_alloc - allocate/map shared mem for DMA
882 * @hwdev: instance of PCI owned by the driver that's asking.
883 * @size: number of bytes mapped in driver buffer.
884 * @dma_handle: IOVA of new buffer.
885 *
886 * See Documentation/DMA-API-HOWTO.txt
887 */
888static void *sba_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle,
889 gfp_t gfp, unsigned long attrs)
890{
891 void *ret;
892
893 if (!hwdev) {
894 /* only support PCI */
895 *dma_handle = 0;
896 return NULL;
897 }
898
899 ret = (void *) __get_free_pages(gfp, get_order(size));
900
901 if (ret) {
902 memset(ret, 0, size);
903 *dma_handle = sba_map_single(hwdev, ret, size, 0);
904 }
905
906 return ret;
907}
908
909
910/**
911 * sba_free - free/unmap shared mem for DMA
912 * @hwdev: instance of PCI owned by the driver that's asking.
913 * @size: number of bytes mapped in driver buffer.
914 * @vaddr: virtual address IOVA of "consistent" buffer.
915 * @dma_handler: IO virtual address of "consistent" buffer.
916 *
917 * See Documentation/DMA-API-HOWTO.txt
918 */
919static void
920sba_free(struct device *hwdev, size_t size, void *vaddr,
921 dma_addr_t dma_handle, unsigned long attrs)
922{
923 sba_unmap_page(hwdev, dma_handle, size, 0, 0);
924 free_pages((unsigned long) vaddr, get_order(size));
925}
926
927
928/*
929** Since 0 is a valid pdir_base index value, can't use that
930** to determine if a value is valid or not. Use a flag to indicate
931** the SG list entry contains a valid pdir index.
932*/
933#define PIDE_FLAG 0x80000000UL
934
935#ifdef SBA_COLLECT_STATS
936#define IOMMU_MAP_STATS
937#endif
938#include "iommu-helpers.h"
939
940#ifdef DEBUG_LARGE_SG_ENTRIES
941int dump_run_sg = 0;
942#endif
943
944
945/**
946 * sba_map_sg - map Scatter/Gather list
947 * @dev: instance of PCI owned by the driver that's asking.
948 * @sglist: array of buffer/length pairs
949 * @nents: number of entries in list
950 * @direction: R/W or both.
951 *
952 * See Documentation/DMA-API-HOWTO.txt
953 */
954static int
955sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents,
956 enum dma_data_direction direction, unsigned long attrs)
957{
958 struct ioc *ioc;
959 int coalesced, filled = 0;
960 unsigned long flags;
961
962 DBG_RUN_SG("%s() START %d entries\n", __func__, nents);
963
964 ioc = GET_IOC(dev);
965 if (!ioc)
966 return 0;
967
968 /* Fast path single entry scatterlists. */
969 if (nents == 1) {
970 sg_dma_address(sglist) = sba_map_single(dev, sg_virt(sglist),
971 sglist->length, direction);
972 sg_dma_len(sglist) = sglist->length;
973 return 1;
974 }
975
976 spin_lock_irqsave(&ioc->res_lock, flags);
977
978#ifdef ASSERT_PDIR_SANITY
979 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
980 {
981 sba_dump_sg(ioc, sglist, nents);
982 panic("Check before sba_map_sg()");
983 }
984#endif
985
986#ifdef SBA_COLLECT_STATS
987 ioc->msg_calls++;
988#endif
989
990 /*
991 ** First coalesce the chunks and allocate I/O pdir space
992 **
993 ** If this is one DMA stream, we can properly map using the
994 ** correct virtual address associated with each DMA page.
995 ** w/o this association, we wouldn't have coherent DMA!
996 ** Access to the virtual address is what forces a two pass algorithm.
997 */
998 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
999
1000 /*
1001 ** Program the I/O Pdir
1002 **
1003 ** map the virtual addresses to the I/O Pdir
1004 ** o dma_address will contain the pdir index
1005 ** o dma_len will contain the number of bytes to map
1006 ** o address contains the virtual address.
1007 */
1008 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
1009
1010 /* force FDC ops in io_pdir_entry() to be visible to IOMMU */
1011 if (ioc_needs_fdc)
1012 asm volatile("sync" : : );
1013
1014#ifdef ASSERT_PDIR_SANITY
1015 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
1016 {
1017 sba_dump_sg(ioc, sglist, nents);
1018 panic("Check after sba_map_sg()\n");
1019 }
1020#endif
1021
1022 spin_unlock_irqrestore(&ioc->res_lock, flags);
1023
1024 DBG_RUN_SG("%s() DONE %d mappings\n", __func__, filled);
1025
1026 return filled;
1027}
1028
1029
1030/**
1031 * sba_unmap_sg - unmap Scatter/Gather list
1032 * @dev: instance of PCI owned by the driver that's asking.
1033 * @sglist: array of buffer/length pairs
1034 * @nents: number of entries in list
1035 * @direction: R/W or both.
1036 *
1037 * See Documentation/DMA-API-HOWTO.txt
1038 */
1039static void
1040sba_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents,
1041 enum dma_data_direction direction, unsigned long attrs)
1042{
1043 struct ioc *ioc;
1044#ifdef ASSERT_PDIR_SANITY
1045 unsigned long flags;
1046#endif
1047
1048 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1049 __func__, nents, sg_virt(sglist), sglist->length);
1050
1051 ioc = GET_IOC(dev);
1052 if (!ioc) {
1053 WARN_ON(!ioc);
1054 return;
1055 }
1056
1057#ifdef SBA_COLLECT_STATS
1058 ioc->usg_calls++;
1059#endif
1060
1061#ifdef ASSERT_PDIR_SANITY
1062 spin_lock_irqsave(&ioc->res_lock, flags);
1063 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1064 spin_unlock_irqrestore(&ioc->res_lock, flags);
1065#endif
1066
1067 while (sg_dma_len(sglist) && nents--) {
1068
1069 sba_unmap_page(dev, sg_dma_address(sglist), sg_dma_len(sglist),
1070 direction, 0);
1071#ifdef SBA_COLLECT_STATS
1072 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1073 ioc->usingle_calls--; /* kluge since call is unmap_sg() */
1074#endif
1075 ++sglist;
1076 }
1077
1078 DBG_RUN_SG("%s() DONE (nents %d)\n", __func__, nents);
1079
1080#ifdef ASSERT_PDIR_SANITY
1081 spin_lock_irqsave(&ioc->res_lock, flags);
1082 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1083 spin_unlock_irqrestore(&ioc->res_lock, flags);
1084#endif
1085
1086}
1087
1088static int sba_mapping_error(struct device *dev, dma_addr_t dma_addr)
1089{
1090 return dma_addr == SBA_MAPPING_ERROR;
1091}
1092
1093static const struct dma_map_ops sba_ops = {
1094 .dma_supported = sba_dma_supported,
1095 .alloc = sba_alloc,
1096 .free = sba_free,
1097 .map_page = sba_map_page,
1098 .unmap_page = sba_unmap_page,
1099 .map_sg = sba_map_sg,
1100 .unmap_sg = sba_unmap_sg,
1101 .mapping_error = sba_mapping_error,
1102};
1103
1104
1105/**************************************************************************
1106**
1107** SBA PAT PDC support
1108**
1109** o call pdc_pat_cell_module()
1110** o store ranges in PCI "resource" structures
1111**
1112**************************************************************************/
1113
1114static void
1115sba_get_pat_resources(struct sba_device *sba_dev)
1116{
1117#if 0
1118/*
1119** TODO/REVISIT/FIXME: support for directed ranges requires calls to
1120** PAT PDC to program the SBA/LBA directed range registers...this
1121** burden may fall on the LBA code since it directly supports the
1122** PCI subsystem. It's not clear yet. - ggg
1123*/
1124PAT_MOD(mod)->mod_info.mod_pages = PAT_GET_MOD_PAGES(temp);
1125 FIXME : ???
1126PAT_MOD(mod)->mod_info.dvi = PAT_GET_DVI(temp);
1127 Tells where the dvi bits are located in the address.
1128PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1129 FIXME : ???
1130#endif
1131}
1132
1133
1134/**************************************************************
1135*
1136* Initialization and claim
1137*
1138***************************************************************/
1139#define PIRANHA_ADDR_MASK 0x00160000UL /* bit 17,18,20 */
1140#define PIRANHA_ADDR_VAL 0x00060000UL /* bit 17,18 on */
1141static void *
1142sba_alloc_pdir(unsigned int pdir_size)
1143{
1144 unsigned long pdir_base;
1145 unsigned long pdir_order = get_order(pdir_size);
1146
1147 pdir_base = __get_free_pages(GFP_KERNEL, pdir_order);
1148 if (NULL == (void *) pdir_base) {
1149 panic("%s() could not allocate I/O Page Table\n",
1150 __func__);
1151 }
1152
1153 /* If this is not PA8700 (PCX-W2)
1154 ** OR newer than ver 2.2
1155 ** OR in a system that doesn't need VINDEX bits from SBA,
1156 **
1157 ** then we aren't exposed to the HW bug.
1158 */
1159 if ( ((boot_cpu_data.pdc.cpuid >> 5) & 0x7f) != 0x13
1160 || (boot_cpu_data.pdc.versions > 0x202)
1161 || (boot_cpu_data.pdc.capabilities & 0x08L) )
1162 return (void *) pdir_base;
1163
1164 /*
1165 * PA8700 (PCX-W2, aka piranha) silent data corruption fix
1166 *
1167 * An interaction between PA8700 CPU (Ver 2.2 or older) and
1168 * Ike/Astro can cause silent data corruption. This is only
1169 * a problem if the I/O PDIR is located in memory such that
1170 * (little-endian) bits 17 and 18 are on and bit 20 is off.
1171 *
1172 * Since the max IO Pdir size is 2MB, by cleverly allocating the
1173 * right physical address, we can either avoid (IOPDIR <= 1MB)
1174 * or minimize (2MB IO Pdir) the problem if we restrict the
1175 * IO Pdir to a maximum size of 2MB-128K (1902K).
1176 *
1177 * Because we always allocate 2^N sized IO pdirs, either of the
1178 * "bad" regions will be the last 128K if at all. That's easy
1179 * to test for.
1180 *
1181 */
1182 if (pdir_order <= (19-12)) {
1183 if (((virt_to_phys(pdir_base)+pdir_size-1) & PIRANHA_ADDR_MASK) == PIRANHA_ADDR_VAL) {
1184 /* allocate a new one on 512k alignment */
1185 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, (19-12));
1186 /* release original */
1187 free_pages(pdir_base, pdir_order);
1188
1189 pdir_base = new_pdir;
1190
1191 /* release excess */
1192 while (pdir_order < (19-12)) {
1193 new_pdir += pdir_size;
1194 free_pages(new_pdir, pdir_order);
1195 pdir_order +=1;
1196 pdir_size <<=1;
1197 }
1198 }
1199 } else {
1200 /*
1201 ** 1MB or 2MB Pdir
1202 ** Needs to be aligned on an "odd" 1MB boundary.
1203 */
1204 unsigned long new_pdir = __get_free_pages(GFP_KERNEL, pdir_order+1); /* 2 or 4MB */
1205
1206 /* release original */
1207 free_pages( pdir_base, pdir_order);
1208
1209 /* release first 1MB */
1210 free_pages(new_pdir, 20-12);
1211
1212 pdir_base = new_pdir + 1024*1024;
1213
1214 if (pdir_order > (20-12)) {
1215 /*
1216 ** 2MB Pdir.
1217 **
1218 ** Flag tells init_bitmap() to mark bad 128k as used
1219 ** and to reduce the size by 128k.
1220 */
1221 piranha_bad_128k = 1;
1222
1223 new_pdir += 3*1024*1024;
1224 /* release last 1MB */
1225 free_pages(new_pdir, 20-12);
1226
1227 /* release unusable 128KB */
1228 free_pages(new_pdir - 128*1024 , 17-12);
1229
1230 pdir_size -= 128*1024;
1231 }
1232 }
1233
1234 memset((void *) pdir_base, 0, pdir_size);
1235 return (void *) pdir_base;
1236}
1237
1238struct ibase_data_struct {
1239 struct ioc *ioc;
1240 int ioc_num;
1241};
1242
1243static int setup_ibase_imask_callback(struct device *dev, void *data)
1244{
1245 /* lba_set_iregs() is in drivers/parisc/lba_pci.c */
1246 extern void lba_set_iregs(struct parisc_device *, u32, u32);
1247 struct parisc_device *lba = to_parisc_device(dev);
1248 struct ibase_data_struct *ibd = data;
1249 int rope_num = (lba->hpa.start >> 13) & 0xf;
1250 if (rope_num >> 3 == ibd->ioc_num)
1251 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
1252 return 0;
1253}
1254
1255/* setup Mercury or Elroy IBASE/IMASK registers. */
1256static void
1257setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1258{
1259 struct ibase_data_struct ibase_data = {
1260 .ioc = ioc,
1261 .ioc_num = ioc_num,
1262 };
1263
1264 device_for_each_child(&sba->dev, &ibase_data,
1265 setup_ibase_imask_callback);
1266}
1267
1268#ifdef SBA_AGP_SUPPORT
1269static int
1270sba_ioc_find_quicksilver(struct device *dev, void *data)
1271{
1272 int *agp_found = data;
1273 struct parisc_device *lba = to_parisc_device(dev);
1274
1275 if (IS_QUICKSILVER(lba))
1276 *agp_found = 1;
1277 return 0;
1278}
1279#endif
1280
1281static void
1282sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1283{
1284 u32 iova_space_mask;
1285 u32 iova_space_size;
1286 int iov_order, tcnfg;
1287#ifdef SBA_AGP_SUPPORT
1288 int agp_found = 0;
1289#endif
1290 /*
1291 ** Firmware programs the base and size of a "safe IOVA space"
1292 ** (one that doesn't overlap memory or LMMIO space) in the
1293 ** IBASE and IMASK registers.
1294 */
1295 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
1296 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1297
1298 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1299 printk("WARNING: IOV space overlaps local config and interrupt message, truncating\n");
1300 iova_space_size /= 2;
1301 }
1302
1303 /*
1304 ** iov_order is always based on a 1GB IOVA space since we want to
1305 ** turn on the other half for AGP GART.
1306 */
1307 iov_order = get_order(iova_space_size >> (IOVP_SHIFT - PAGE_SHIFT));
1308 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1309
1310 DBG_INIT("%s() hpa 0x%p IOV %dMB (%d bits)\n",
1311 __func__, ioc->ioc_hpa, iova_space_size >> 20,
1312 iov_order + PAGE_SHIFT);
1313
1314 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1315 get_order(ioc->pdir_size));
1316 if (!ioc->pdir_base)
1317 panic("Couldn't allocate I/O Page Table\n");
1318
1319 memset(ioc->pdir_base, 0, ioc->pdir_size);
1320
1321 DBG_INIT("%s() pdir %p size %x\n",
1322 __func__, ioc->pdir_base, ioc->pdir_size);
1323
1324#ifdef SBA_HINT_SUPPORT
1325 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1326 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1327
1328 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1329 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1330#endif
1331
1332 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1333 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1334
1335 /* build IMASK for IOC and Elroy */
1336 iova_space_mask = 0xffffffff;
1337 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1338 ioc->imask = iova_space_mask;
1339#ifdef ZX1_SUPPORT
1340 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1341#endif
1342 sba_dump_tlb(ioc->ioc_hpa);
1343
1344 setup_ibase_imask(sba, ioc, ioc_num);
1345
1346 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1347
1348#ifdef CONFIG_64BIT
1349 /*
1350 ** Setting the upper bits makes checking for bypass addresses
1351 ** a little faster later on.
1352 */
1353 ioc->imask |= 0xFFFFFFFF00000000UL;
1354#endif
1355
1356 /* Set I/O PDIR Page size to system page size */
1357 switch (PAGE_SHIFT) {
1358 case 12: tcnfg = 0; break; /* 4K */
1359 case 13: tcnfg = 1; break; /* 8K */
1360 case 14: tcnfg = 2; break; /* 16K */
1361 case 16: tcnfg = 3; break; /* 64K */
1362 default:
1363 panic(__FILE__ "Unsupported system page size %d",
1364 1 << PAGE_SHIFT);
1365 break;
1366 }
1367 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1368
1369 /*
1370 ** Program the IOC's ibase and enable IOVA translation
1371 ** Bit zero == enable bit.
1372 */
1373 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1374
1375 /*
1376 ** Clear I/O TLB of any possible entries.
1377 ** (Yes. This is a bit paranoid...but so what)
1378 */
1379 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1380
1381#ifdef SBA_AGP_SUPPORT
1382
1383 /*
1384 ** If an AGP device is present, only use half of the IOV space
1385 ** for PCI DMA. Unfortunately we can't know ahead of time
1386 ** whether GART support will actually be used, for now we
1387 ** can just key on any AGP device found in the system.
1388 ** We program the next pdir index after we stop w/ a key for
1389 ** the GART code to handshake on.
1390 */
1391 device_for_each_child(&sba->dev, &agp_found, sba_ioc_find_quicksilver);
1392
1393 if (agp_found && sba_reserve_agpgart) {
1394 printk(KERN_INFO "%s: reserving %dMb of IOVA space for agpgart\n",
1395 __func__, (iova_space_size/2) >> 20);
1396 ioc->pdir_size /= 2;
1397 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
1398 }
1399#endif /*SBA_AGP_SUPPORT*/
1400}
1401
1402static void
1403sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1404{
1405 u32 iova_space_size, iova_space_mask;
1406 unsigned int pdir_size, iov_order, tcnfg;
1407
1408 /*
1409 ** Determine IOVA Space size from memory size.
1410 **
1411 ** Ideally, PCI drivers would register the maximum number
1412 ** of DMA they can have outstanding for each device they
1413 ** own. Next best thing would be to guess how much DMA
1414 ** can be outstanding based on PCI Class/sub-class. Both
1415 ** methods still require some "extra" to support PCI
1416 ** Hot-Plug/Removal of PCI cards. (aka PCI OLARD).
1417 **
1418 ** While we have 32-bits "IOVA" space, top two 2 bits are used
1419 ** for DMA hints - ergo only 30 bits max.
1420 */
1421
1422 iova_space_size = (u32) (totalram_pages/global_ioc_cnt);
1423
1424 /* limit IOVA space size to 1MB-1GB */
1425 if (iova_space_size < (1 << (20 - PAGE_SHIFT))) {
1426 iova_space_size = 1 << (20 - PAGE_SHIFT);
1427 }
1428 else if (iova_space_size > (1 << (30 - PAGE_SHIFT))) {
1429 iova_space_size = 1 << (30 - PAGE_SHIFT);
1430 }
1431
1432 /*
1433 ** iova space must be log2() in size.
1434 ** thus, pdir/res_map will also be log2().
1435 ** PIRANHA BUG: Exception is when IO Pdir is 2MB (gets reduced)
1436 */
1437 iov_order = get_order(iova_space_size << PAGE_SHIFT);
1438
1439 /* iova_space_size is now bytes, not pages */
1440 iova_space_size = 1 << (iov_order + PAGE_SHIFT);
1441
1442 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1443
1444 DBG_INIT("%s() hpa 0x%lx mem %ldMB IOV %dMB (%d bits)\n",
1445 __func__,
1446 ioc->ioc_hpa,
1447 (unsigned long) totalram_pages >> (20 - PAGE_SHIFT),
1448 iova_space_size>>20,
1449 iov_order + PAGE_SHIFT);
1450
1451 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1452
1453 DBG_INIT("%s() pdir %p size %x\n",
1454 __func__, ioc->pdir_base, pdir_size);
1455
1456#ifdef SBA_HINT_SUPPORT
1457 /* FIXME : DMA HINTs not used */
1458 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1459 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1460
1461 DBG_INIT(" hint_shift_pdir %x hint_mask_pdir %lx\n",
1462 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1463#endif
1464
1465 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1466
1467 /* build IMASK for IOC and Elroy */
1468 iova_space_mask = 0xffffffff;
1469 iova_space_mask <<= (iov_order + PAGE_SHIFT);
1470
1471 /*
1472 ** On C3000 w/512MB mem, HP-UX 10.20 reports:
1473 ** ibase=0, imask=0xFE000000, size=0x2000000.
1474 */
1475 ioc->ibase = 0;
1476 ioc->imask = iova_space_mask; /* save it */
1477#ifdef ZX1_SUPPORT
1478 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1479#endif
1480
1481 DBG_INIT("%s() IOV base 0x%lx mask 0x%0lx\n",
1482 __func__, ioc->ibase, ioc->imask);
1483
1484 /*
1485 ** FIXME: Hint registers are programmed with default hint
1486 ** values during boot, so hints should be sane even if we
1487 ** can't reprogram them the way drivers want.
1488 */
1489
1490 setup_ibase_imask(sba, ioc, ioc_num);
1491
1492 /*
1493 ** Program the IOC's ibase and enable IOVA translation
1494 */
1495 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1496 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1497
1498 /* Set I/O PDIR Page size to system page size */
1499 switch (PAGE_SHIFT) {
1500 case 12: tcnfg = 0; break; /* 4K */
1501 case 13: tcnfg = 1; break; /* 8K */
1502 case 14: tcnfg = 2; break; /* 16K */
1503 case 16: tcnfg = 3; break; /* 64K */
1504 default:
1505 panic(__FILE__ "Unsupported system page size %d",
1506 1 << PAGE_SHIFT);
1507 break;
1508 }
1509 /* Set I/O PDIR Page size to PAGE_SIZE (4k/16k/...) */
1510 WRITE_REG(tcnfg, ioc->ioc_hpa+IOC_TCNFG);
1511
1512 /*
1513 ** Clear I/O TLB of any possible entries.
1514 ** (Yes. This is a bit paranoid...but so what)
1515 */
1516 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1517
1518 ioc->ibase = 0; /* used by SBA_IOVA and related macros */
1519
1520 DBG_INIT("%s() DONE\n", __func__);
1521}
1522
1523
1524
1525/**************************************************************************
1526**
1527** SBA initialization code (HW and SW)
1528**
1529** o identify SBA chip itself
1530** o initialize SBA chip modes (HardFail)
1531** o initialize SBA chip modes (HardFail)
1532** o FIXME: initialize DMA hints for reasonable defaults
1533**
1534**************************************************************************/
1535
1536static void __iomem *ioc_remap(struct sba_device *sba_dev, unsigned int offset)
1537{
1538 return ioremap_nocache(sba_dev->dev->hpa.start + offset, SBA_FUNC_SIZE);
1539}
1540
1541static void sba_hw_init(struct sba_device *sba_dev)
1542{
1543 int i;
1544 int num_ioc;
1545 u64 ioc_ctl;
1546
1547 if (!is_pdc_pat()) {
1548 /* Shutdown the USB controller on Astro-based workstations.
1549 ** Once we reprogram the IOMMU, the next DMA performed by
1550 ** USB will HPMC the box. USB is only enabled if a
1551 ** keyboard is present and found.
1552 **
1553 ** With serial console, j6k v5.0 firmware says:
1554 ** mem_kbd hpa 0xfee003f8 sba 0x0 pad 0x0 cl_class 0x7
1555 **
1556 ** FIXME: Using GFX+USB console at power up but direct
1557 ** linux to serial console is still broken.
1558 ** USB could generate DMA so we must reset USB.
1559 ** The proper sequence would be:
1560 ** o block console output
1561 ** o reset USB device
1562 ** o reprogram serial port
1563 ** o unblock console output
1564 */
1565 if (PAGE0->mem_kbd.cl_class == CL_KEYBD) {
1566 pdc_io_reset_devices();
1567 }
1568
1569 }
1570
1571
1572#if 0
1573printk("sba_hw_init(): mem_boot 0x%x 0x%x 0x%x 0x%x\n", PAGE0->mem_boot.hpa,
1574 PAGE0->mem_boot.spa, PAGE0->mem_boot.pad, PAGE0->mem_boot.cl_class);
1575
1576 /*
1577 ** Need to deal with DMA from LAN.
1578 ** Maybe use page zero boot device as a handle to talk
1579 ** to PDC about which device to shutdown.
1580 **
1581 ** Netbooting, j6k v5.0 firmware says:
1582 ** mem_boot hpa 0xf4008000 sba 0x0 pad 0x0 cl_class 0x1002
1583 ** ARGH! invalid class.
1584 */
1585 if ((PAGE0->mem_boot.cl_class != CL_RANDOM)
1586 && (PAGE0->mem_boot.cl_class != CL_SEQU)) {
1587 pdc_io_reset();
1588 }
1589#endif

	if (!IS_PLUTO(sba_dev->dev)) {
		ioc_ctl = READ_REG(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT("%s() hpa 0x%lx ioc_ctl 0x%Lx ->",
			__func__, sba_dev->sba_hpa, ioc_ctl);
		ioc_ctl &= ~(IOC_CTRL_RM | IOC_CTRL_NC | IOC_CTRL_CE);
		ioc_ctl |= IOC_CTRL_DD | IOC_CTRL_D4 | IOC_CTRL_TC;
			/* j6700 v1.6 firmware sets 0x294f */
			/* A500 firmware sets 0x4d */

		WRITE_REG(ioc_ctl, sba_dev->sba_hpa+IOC_CTRL);

#ifdef DEBUG_SBA_INIT
		ioc_ctl = READ_REG64(sba_dev->sba_hpa+IOC_CTRL);
		DBG_INIT(" 0x%Lx\n", ioc_ctl);
#endif
	} /* if !PLUTO */

	if (IS_ASTRO(sba_dev->dev)) {
		int err;
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Astro Intr Ack";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfef00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff000000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		BUG_ON(err < 0);

	} else if (IS_PLUTO(sba_dev->dev)) {
		int err;

		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
		num_ioc = 1;

		sba_dev->chip_resv.name = "Pluto Intr/PIOP/VGA";
		sba_dev->chip_resv.start = PCI_F_EXTEND | 0xfee00000UL;
		sba_dev->chip_resv.end = PCI_F_EXTEND | (0xff200000UL - 1);
		err = request_resource(&iomem_resource, &(sba_dev->chip_resv));
		WARN_ON(err < 0);

		sba_dev->iommu_resv.name = "IOVA Space";
		sba_dev->iommu_resv.start = 0x40000000UL;
		sba_dev->iommu_resv.end = 0x50000000UL - 1;
		err = request_resource(&iomem_resource, &(sba_dev->iommu_resv));
		WARN_ON(err < 0);
	} else {
		/* IKE, REO */
		sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
		sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
		num_ioc = 2;

		/* TODO - LOOKUP Ike/Stretch chipset mem map */
	}
	/* XXX: What about Reo Grande? */

	sba_dev->num_ioc = num_ioc;
	for (i = 0; i < num_ioc; i++) {
		void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
		unsigned int j;

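		/*
		** Rope config/control registers are laid out one u64 per
		** rope, so j steps in sizeof(u64) increments across all
		** ROPES_PER_IOC ropes.
		*/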
		for (j = 0; j < sizeof(u64) * ROPES_PER_IOC; j += sizeof(u64)) {

			/*
			 * Clear ROPE(N)_CONFIG AO bit.
			 * Disables "NT Ordering" (~= !"Relaxed Ordering")
			 * Overrides bit 1 in DMA Hint Sets.
			 * Improves netperf UDP_STREAM by ~10% for bcm5701.
			 */
			if (IS_PLUTO(sba_dev->dev)) {
				void __iomem *rope_cfg;
				unsigned long cfg_val;

				rope_cfg = ioc_hpa + IOC_ROPE0_CFG + j;
				cfg_val = READ_REG(rope_cfg);
				cfg_val &= ~IOC_ROPE_AO;
				WRITE_REG(cfg_val, rope_cfg);
			}

			/*
			** Make sure the box crashes on rope errors.
			*/
			WRITE_REG(HF_ENABLE, ioc_hpa + ROPE0_CTL + j);
		}

		/* flush out the last writes */
		READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);

		DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
			i,
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
		);
		DBG_INIT(" STATUS_CONTROL 0x%Lx FLUSH_CTRL 0x%Lx\n",
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
			READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
		);

		if (IS_PLUTO(sba_dev->dev)) {
			sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
		} else {
			sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
		}
	}
}

static void
sba_common_init(struct sba_device *sba_dev)
{
	int i;

	/* add this one to the head of the list (order doesn't matter)
	** This will be useful for debugging - especially if we get coredumps
	*/
	sba_dev->next = sba_list;
	sba_list = sba_dev;

	for (i = 0; i < sba_dev->num_ioc; i++) {
		int res_size;
#ifdef DEBUG_DMB_TRAP
		extern void iterate_pages(unsigned long, unsigned long,
					void (*)(pte_t *, unsigned long),
					unsigned long);
		void set_data_memory_break(pte_t *, unsigned long);
#endif
		/* resource map size dictated by pdir_size */
		res_size = sba_dev->ioc[i].pdir_size / sizeof(u64); /* entries */

		/* Second part of PIRANHA BUG */
		if (piranha_bad_128k) {
			res_size -= (128*1024)/sizeof(u64);
		}

		res_size >>= 3;	/* convert bit count to byte count */
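		/*
		** Illustrative example: a 2 MB pdir holds 256K u64 entries;
		** at one bitmap bit per entry, res_size is 256K/8 = 32 KB.
		*/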
		DBG_INIT("%s() res_size 0x%x\n", __func__, res_size);

		sba_dev->ioc[i].res_size = res_size;
		sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));

#ifdef DEBUG_DMB_TRAP
		iterate_pages(sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
#endif

		if (!sba_dev->ioc[i].res_map) {
			panic("%s:%s() could not allocate resource map\n",
				__FILE__, __func__);
		}

		memset(sba_dev->ioc[i].res_map, 0, res_size);
		/* next available IOVP - circular search */
		sba_dev->ioc[i].res_hint = (unsigned long *)
				&(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
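		/* i.e. the circular search starts L1_CACHE_BYTES * 8
		** entries into the bitmap rather than at entry 0. */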

#ifdef ASSERT_PDIR_SANITY
		/* Mark first bit busy - ie no IOVA 0 */
		sba_dev->ioc[i].res_map[0] = 0x80;
		sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
#endif

		/* Third (and last) part of PIRANHA BUG */
		if (piranha_bad_128k) {
			/* region from +1408K to +1536K is unusable. */

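			/*
			** 1408K of pdir = 180224 u64 entries; at 8 entries
			** per res_map byte, that is byte offset 22528
			** (and 24576 for the 1536K end).
			*/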
			int idx_start = (1408*1024/sizeof(u64)) >> 3;
			int idx_end   = (1536*1024/sizeof(u64)) >> 3;
			long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
			long *p_end   = (long *) &(sba_dev->ioc[i].res_map[idx_end]);

			/* mark that part of the io pdir busy */
			while (p_start < p_end)
				*p_start++ = -1;
		}

#ifdef DEBUG_DMB_TRAP
		iterate_pages(sba_dev->ioc[i].res_map, res_size,
				set_data_memory_break, 0);
		iterate_pages(sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
				set_data_memory_break, 0);
#endif

		DBG_INIT("%s() %d res_map %x %p\n",
			__func__, i, res_size, sba_dev->ioc[i].res_map);
	}

	spin_lock_init(&sba_dev->sba_lock);
	ioc_needs_fdc = boot_cpu_data.pdc.capabilities & PDC_MODEL_IOPDIR_FDC;

#ifdef DEBUG_SBA_INIT
	/*
	 * If the PDC_MODEL capabilities word has the Non-coherent IO-PDIR
	 * bit set (bit #61, big endian), we have to flush and sync every
	 * time the IO-PDIR is changed in Ike/Astro.
	 */
	if (ioc_needs_fdc) {
		printk(KERN_INFO MODULE_NAME " FDC/SYNC required.\n");
	} else {
		printk(KERN_INFO MODULE_NAME " IOC has cache coherent PDIR.\n");
	}
#endif
}

#ifdef CONFIG_PROC_FS
static int sba_proc_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
	int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
#ifdef SBA_COLLECT_STATS
	unsigned long avg = 0, min, max;
#endif
	int i;

	seq_printf(m, "%s rev %d.%d\n",
		sba_dev->name,
		(sba_dev->hw_rev & 0x7) + 1,
		(sba_dev->hw_rev & 0x18) >> 3);
	seq_printf(m, "IO PDIR size    : %d bytes (%d entries)\n",
		(int)((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
		total_pages);

	seq_printf(m, "Resource bitmap : %d bytes (%d pages)\n",
		ioc->res_size, ioc->res_size << 3);	/* 8 bits per byte */

	seq_printf(m, "LMMIO_BASE/MASK/ROUTE %08x %08x %08x\n",
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_BASE),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_MASK),
		READ_REG32(sba_dev->sba_hpa + LMMIO_DIST_ROUTE));

	for (i = 0; i < 4; i++)
		seq_printf(m, "DIR%d_BASE/MASK/ROUTE %08x %08x %08x\n",
			i,
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_BASE  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_MASK  + i*0x18),
			READ_REG32(sba_dev->sba_hpa + LMMIO_DIRECT0_ROUTE + i*0x18));

#ifdef SBA_COLLECT_STATS
	seq_printf(m, "IO PDIR entries : %ld free %ld used (%d%%)\n",
		total_pages - ioc->used_pages, ioc->used_pages,
		(int)(ioc->used_pages * 100 / total_pages));

	min = max = ioc->avg_search[0];
	for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
		avg += ioc->avg_search[i];
		if (ioc->avg_search[i] > max)
			max = ioc->avg_search[i];
		if (ioc->avg_search[i] < min)
			min = ioc->avg_search[i];
	}
	avg /= SBA_SEARCH_SAMPLE;
	seq_printf(m, "  Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles)\n",
		min, avg, max);

	seq_printf(m, "pci_map_single(): %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->msingle_calls, ioc->msingle_pages,
		(int)((ioc->msingle_pages * 1000)/ioc->msingle_calls));

	/* KLUGE - unmap_sg calls unmap_single for each mapped page */
	min = ioc->usingle_calls;
	max = ioc->usingle_pages - ioc->usg_pages;
	seq_printf(m, "pci_unmap_single: %12ld calls %12ld pages (avg %d/1000)\n",
		min, max, (int)((max * 1000)/min));

	seq_printf(m, "pci_map_sg()    : %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->msg_calls, ioc->msg_pages,
		(int)((ioc->msg_pages * 1000)/ioc->msg_calls));

	seq_printf(m, "pci_unmap_sg()  : %12ld calls %12ld pages (avg %d/1000)\n",
		ioc->usg_calls, ioc->usg_pages,
		(int)((ioc->usg_pages * 1000)/ioc->usg_calls));
#endif

	return 0;
}

static int
sba_proc_bitmap_info(struct seq_file *m, void *p)
{
	struct sba_device *sba_dev = sba_list;
	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */

	seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
		     ioc->res_size, false);
	seq_putc(m, '\n');

	return 0;
}
#endif /* CONFIG_PROC_FS */

static const struct parisc_device_id sba_tbl[] __initconst = {
	{ HPHW_IOA, HVERSION_REV_ANY_ID, ASTRO_RUNWAY_PORT, 0xb },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, IKE_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REO_MERCED_PORT, 0xc },
	{ HPHW_BCPORT, HVERSION_REV_ANY_ID, REOG_MERCED_PORT, 0xc },
	{ HPHW_IOA, HVERSION_REV_ANY_ID, PLUTO_MCKINLEY_PORT, 0xc },
	{ 0, }
};

static int sba_driver_callback(struct parisc_device *);

static struct parisc_driver sba_driver __refdata = {
	.name = MODULE_NAME,
	.id_table = sba_tbl,
	.probe = sba_driver_callback,
};

/*
** Determine if sba should claim this chip (return 0) or not (return a
** negative errno). If so, initialize the chip and tell other partners
** in crime they have work to do.
*/
static int __init sba_driver_callback(struct parisc_device *dev)
{
	struct sba_device *sba_dev;
	u32 func_class;
	int i;
	char *version;
	void __iomem *sba_addr = ioremap(dev->hpa.start, SBA_FUNC_SIZE);
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *root;
#endif

	sba_dump_ranges(sba_addr);

	/* Read HW Rev First */
	func_class = READ_REG(sba_addr + SBA_FCLASS);

	if (IS_ASTRO(dev)) {
		unsigned long fclass;
		static char astro_rev[] = "Astro ?.?";

		/* Astro is broken...Read HW Rev First */
		fclass = READ_REG(sba_addr);

		astro_rev[6] = '1' + (char) (fclass & 0x7);
		astro_rev[8] = '0' + (char) ((fclass & 0x18) >> 3);
		version = astro_rev;

	} else if (IS_IKE(dev)) {
		static char ike_rev[] = "Ike rev ?";
		ike_rev[8] = '0' + (char) (func_class & 0xff);
		version = ike_rev;
	} else if (IS_PLUTO(dev)) {
		static char pluto_rev[] = "Pluto ?.?";
		pluto_rev[6] = '0' + (char) ((func_class & 0xf0) >> 4);
		pluto_rev[8] = '0' + (char) (func_class & 0x0f);
		version = pluto_rev;
	} else {
		static char reo_rev[] = "REO rev ?";
		reo_rev[8] = '0' + (char) (func_class & 0xff);
		version = reo_rev;
	}

	if (!global_ioc_cnt) {
		global_ioc_cnt = count_parisc_driver(&sba_driver);

		/* Astro and Pluto have one IOC per SBA; Ike and REO have two */
		if (!IS_ASTRO(dev) && !IS_PLUTO(dev))
			global_ioc_cnt *= 2;
	}

	printk(KERN_INFO "%s found %s at 0x%llx\n",
		MODULE_NAME, version, (unsigned long long)dev->hpa.start);

	sba_dev = kzalloc(sizeof(struct sba_device), GFP_KERNEL);
	if (!sba_dev) {
		printk(KERN_ERR MODULE_NAME " - couldn't alloc sba_device\n");
		return -ENOMEM;
	}

	parisc_set_drvdata(dev, sba_dev);

	for (i = 0; i < MAX_IOC; i++)
		spin_lock_init(&(sba_dev->ioc[i].res_lock));

	sba_dev->dev = dev;
	sba_dev->hw_rev = func_class;
	sba_dev->name = dev->name;
	sba_dev->sba_hpa = sba_addr;

	sba_get_pat_resources(sba_dev);
	sba_hw_init(sba_dev);
	sba_common_init(sba_dev);

	hppa_dma_ops = &sba_ops;

#ifdef CONFIG_PROC_FS
	switch (dev->id.hversion) {
	case PLUTO_MCKINLEY_PORT:
		if (!proc_mckinley_root)
			proc_mckinley_root = proc_mkdir("bus/mckinley", NULL);
		root = proc_mckinley_root;
		break;
	case ASTRO_RUNWAY_PORT:
	case IKE_MERCED_PORT:
	default:
		if (!proc_runway_root)
			proc_runway_root = proc_mkdir("bus/runway", NULL);
		root = proc_runway_root;
		break;
	}

	proc_create_single("sba_iommu", 0, root, sba_proc_info);
	proc_create_single("sba_iommu-bitmap", 0, root, sba_proc_bitmap_info);
#endif

	parisc_has_iommu();
	return 0;
}

/*
** One time initialization to let the world know the SBA was found.
** This is the only routine which is NOT static.
** Must be called exactly once before pci_init().
*/
void __init sba_init(void)
{
	register_parisc_driver(&sba_driver);
}


/**
 * sba_get_iommu - Assign the iommu pointer for the pci bus controller.
 * @pci_hba: The parisc PCI host bridge device.
 *
 * Returns the appropriate IOMMU data for the given parisc PCI controller.
 * This is cached and used later for PCI DMA Mapping.
 */
void *sba_get_iommu(struct parisc_device *pci_hba)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int iocnum = (pci_hba->hw_path >> 3);	/* IOC # (8 ropes per IOC) */
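	/* e.g. on a two-IOC Ike, hw_path 0-7 selects ioc[0] and
	** hw_path 8-15 selects ioc[1]. */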

	WARN_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	return &(sba->ioc[iocnum]);
}


/**
 * sba_directed_lmmio - return first directed LMMIO range routed to rope
 * @pci_hba: The parisc PCI host bridge device.
 * @r: resource in which to return the range's start/end.
 *
 * For the given parisc PCI controller, determine if any direct ranges
 * are routed down the corresponding rope.
 */
void sba_directed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int i;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	/* Astro has 4 directed ranges. Not sure about Ike/Pluto/et al */
	for (i = 0; i < 4; i++) {
		int base, size;
		void __iomem *reg = sba->sba_hpa + i*0x18;

		base = READ_REG32(reg + LMMIO_DIRECT0_BASE);
		if ((base & 1) == 0)
			continue;	/* not enabled */

		size = READ_REG32(reg + LMMIO_DIRECT0_ROUTE);

		if ((size & (ROPES_PER_IOC-1)) != rope)
			continue;	/* directed down different rope */

		r->start = (base & ~1UL) | PCI_F_EXTEND;
		size = ~READ_REG32(reg + LMMIO_DIRECT0_MASK);
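		/*
		** MASK has 1s in the don't-care address bits, so ~MASK is
		** the window size minus one; e.g. a mask of 0xff000000
		** yields a 16 MB window, making start + ~MASK the
		** inclusive end.
		*/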
		r->end = r->start + size;
		r->flags = IORESOURCE_MEM;
	}
}


/**
 * sba_distributed_lmmio - return portion of distributed LMMIO range
 * @pci_hba: The parisc PCI host bridge device.
 * @r: resource in which to return the range's start/end.
 *
 * For the given parisc PCI controller, return portion of distributed LMMIO
 * range. The distributed LMMIO is always present and it's just a question
 * of the base address and size of the range.
 */
void sba_distributed_lmmio(struct parisc_device *pci_hba, struct resource *r)
{
	struct parisc_device *sba_dev = parisc_parent(pci_hba);
	struct sba_device *sba = dev_get_drvdata(&sba_dev->dev);
	char t = sba_dev->id.hw_type;
	int base, size;
	int rope = (pci_hba->hw_path & (ROPES_PER_IOC-1));	/* rope # */

	BUG_ON((t != HPHW_IOA) && (t != HPHW_BCPORT));

	r->start = r->end = 0;

	base = READ_REG32(sba->sba_hpa + LMMIO_DIST_BASE);
	if ((base & 1) == 0) {
		BUG();	/* Gah! Distr Range wasn't enabled! */
		return;
	}

	r->start = (base & ~1UL) | PCI_F_EXTEND;

	size = (~READ_REG32(sba->sba_hpa + LMMIO_DIST_MASK)) / ROPES_PER_IOC;
	r->start += rope * (size + 1);	/* adjust base for this rope */
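	/*
	** e.g. a 256 MB distributed window split across 8 ropes gives
	** each rope a 32 MB slice; rope N starts N * 32 MB into the
	** window.
	*/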
	r->end = r->start + size;
	r->flags = IORESOURCE_MEM;
}