v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3** PARISC 1.1 Dynamic DMA mapping support.
  4** This implementation is for PA-RISC platforms that do not support
  5** I/O TLBs (aka DMA address translation hardware).
  6** See Documentation/DMA-API-HOWTO.txt for interface definitions.
  7**
  8**      (c) Copyright 1999,2000 Hewlett-Packard Company
  9**      (c) Copyright 2000 Grant Grundler
 10**	(c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
 11**      (c) Copyright 2000 John Marvin
 12**
 13** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
 14** (I assume it's from David Mosberger-Tang but there was no Copyright)
 15**
 16** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
 17**
 18** - ggg
 19*/
 20
 21#include <linux/init.h>
 22#include <linux/gfp.h>
 23#include <linux/mm.h>
 24#include <linux/proc_fs.h>
 25#include <linux/seq_file.h>
 26#include <linux/string.h>
 27#include <linux/types.h>
 28#include <linux/dma-direct.h>
 29#include <linux/dma-noncoherent.h>
 30
 31#include <asm/cacheflush.h>
 32#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
 33#include <asm/io.h>
 34#include <asm/page.h>	/* get_order */
 35#include <asm/pgalloc.h>
 36#include <linux/uaccess.h>
 37#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */
 38
 39static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
 40static unsigned long pcxl_used_bytes __read_mostly = 0;
 41static unsigned long pcxl_used_pages __read_mostly = 0;
 42
 43extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
 44static DEFINE_SPINLOCK(pcxl_res_lock);
 45static char    *pcxl_res_map;
 46static int     pcxl_res_hint;
 47static int     pcxl_res_size;
 48
 49#ifdef DEBUG_PCXL_RESOURCE
 50#define DBG_RES(x...)	printk(x)
 51#else
 52#define DBG_RES(x...)
 53#endif
 54
 55
 56/*
 57** Dump a hex representation of the resource map.
 58*/
 59
 60#ifdef DUMP_RESMAP
 61static
 62void dump_resmap(void)
 63{
 64	u_long *res_ptr = (unsigned long *)pcxl_res_map;
 65	u_long i = 0;
 66
 67	printk("res_map: ");
 68	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
 69		printk("%08lx ", *res_ptr);
 70
 71	printk("\n");
 72}
 73#else
 74static inline void dump_resmap(void) {;}
 75#endif
 76
 77static inline int map_pte_uncached(pte_t * pte,
 78		unsigned long vaddr,
 79		unsigned long size, unsigned long *paddr_ptr)
 80{
 81	unsigned long end;
 82	unsigned long orig_vaddr = vaddr;
 83
 84	vaddr &= ~PMD_MASK;
 85	end = vaddr + size;
 86	if (end > PMD_SIZE)
 87		end = PMD_SIZE;
 88	do {
 89		unsigned long flags;
 90
 91		if (!pte_none(*pte))
 92			printk(KERN_ERR "map_pte_uncached: page already exists\n");
 93		purge_tlb_start(flags);
 94		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
 95		pdtlb_kernel(orig_vaddr);
 96		purge_tlb_end(flags);
 97		vaddr += PAGE_SIZE;
 98		orig_vaddr += PAGE_SIZE;
 99		(*paddr_ptr) += PAGE_SIZE;
100		pte++;
101	} while (vaddr < end);
102	return 0;
103}
104
105static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
106		unsigned long size, unsigned long *paddr_ptr)
107{
108	unsigned long end;
109	unsigned long orig_vaddr = vaddr;
110
111	vaddr &= ~PGDIR_MASK;
112	end = vaddr + size;
113	if (end > PGDIR_SIZE)
114		end = PGDIR_SIZE;
115	do {
116		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
117		if (!pte)
118			return -ENOMEM;
119		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
120			return -ENOMEM;
121		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
122		orig_vaddr += PMD_SIZE;
123		pmd++;
124	} while (vaddr < end);
125	return 0;
126}
127
128static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
129		unsigned long paddr)
130{
131	pgd_t * dir;
132	unsigned long end = vaddr + size;
133
134	dir = pgd_offset_k(vaddr);
135	do {
136		pmd_t *pmd;
137		
138		pmd = pmd_alloc(NULL, dir, vaddr);
139		if (!pmd)
140			return -ENOMEM;
141		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
142			return -ENOMEM;
143		vaddr = vaddr + PGDIR_SIZE;
144		dir++;
145	} while (vaddr && (vaddr < end));
146	return 0;
147}
148
149static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
150		unsigned long size)
151{
152	pte_t * pte;
153	unsigned long end;
154	unsigned long orig_vaddr = vaddr;
155
156	if (pmd_none(*pmd))
157		return;
158	if (pmd_bad(*pmd)) {
159		pmd_ERROR(*pmd);
160		pmd_clear(pmd);
161		return;
162	}
163	pte = pte_offset_map(pmd, vaddr);
164	vaddr &= ~PMD_MASK;
165	end = vaddr + size;
166	if (end > PMD_SIZE)
167		end = PMD_SIZE;
168	do {
169		unsigned long flags;
170		pte_t page = *pte;
171
172		pte_clear(&init_mm, vaddr, pte);
173		purge_tlb_start(flags);
174		pdtlb_kernel(orig_vaddr);
175		purge_tlb_end(flags);
176		vaddr += PAGE_SIZE;
177		orig_vaddr += PAGE_SIZE;
178		pte++;
179		if (pte_none(page) || pte_present(page))
180			continue;
181		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
182	} while (vaddr < end);
183}
184
185static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
186		unsigned long size)
187{
188	pmd_t * pmd;
189	unsigned long end;
190	unsigned long orig_vaddr = vaddr;
191
192	if (pgd_none(*dir))
193		return;
194	if (pgd_bad(*dir)) {
195		pgd_ERROR(*dir);
196		pgd_clear(dir);
197		return;
198	}
199	pmd = pmd_offset(dir, vaddr);
200	vaddr &= ~PGDIR_MASK;
201	end = vaddr + size;
202	if (end > PGDIR_SIZE)
203		end = PGDIR_SIZE;
204	do {
205		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
206		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
207		orig_vaddr += PMD_SIZE;
208		pmd++;
209	} while (vaddr < end);
210}
211
212static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
213{
214	pgd_t * dir;
215	unsigned long end = vaddr + size;
216
217	dir = pgd_offset_k(vaddr);
218	do {
219		unmap_uncached_pmd(dir, vaddr, end - vaddr);
220		vaddr = vaddr + PGDIR_SIZE;
221		dir++;
222	} while (vaddr && (vaddr < end));
223}
224
225#define PCXL_SEARCH_LOOP(idx, mask, size)  \
226       for(; res_ptr < res_end; ++res_ptr) \
227       { \
228               if(0 == ((*res_ptr) & mask)) { \
229                       *res_ptr |= mask; \
230		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
231		       pcxl_res_hint = idx + (size >> 3); \
232                       goto resource_found; \
233               } \
234       }
235
236#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
237       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
238       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
239       PCXL_SEARCH_LOOP(idx, mask, size); \
240       res_ptr = (u##size *)&pcxl_res_map[0]; \
241       PCXL_SEARCH_LOOP(idx, mask, size); \
242}
243
244unsigned long
245pcxl_alloc_range(size_t size)
246{
247	int res_idx;
248	u_long mask, flags;
249	unsigned int pages_needed = size >> PAGE_SHIFT;
250
251	mask = (u_long) -1L;
252 	mask >>= BITS_PER_LONG - pages_needed;
253
254	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n", 
255		size, pages_needed, mask);
256
257	spin_lock_irqsave(&pcxl_res_lock, flags);
258
259	if(pages_needed <= 8) {
260		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
261	} else if(pages_needed <= 16) {
262		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
263	} else if(pages_needed <= 32) {
264		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
265	} else {
266		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
267		      __FILE__);
268	}
269
270	dump_resmap();
271	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
272	      __FILE__);
273	
274resource_found:
275	
276	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
277		res_idx, mask, pcxl_res_hint);
278
279	pcxl_used_pages += pages_needed;
280	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
281
282	spin_unlock_irqrestore(&pcxl_res_lock, flags);
283
284	dump_resmap();
285
286	/* 
287	** return the corresponding vaddr in the pcxl dma map
288	*/
289	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
290}
291
292#define PCXL_FREE_MAPPINGS(idx, m, size) \
293		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
294		/* BUG_ON((*res_ptr & m) != m); */ \
295		*res_ptr &= ~m;
296
297/*
298** clear bits in the pcxl resource map
299*/
300static void
301pcxl_free_range(unsigned long vaddr, size_t size)
302{
303	u_long mask, flags;
304	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
305	unsigned int pages_mapped = size >> PAGE_SHIFT;
306
307	mask = (u_long) -1L;
308 	mask >>= BITS_PER_LONG - pages_mapped;
309
310	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n", 
311		res_idx, size, pages_mapped, mask);
312
313	spin_lock_irqsave(&pcxl_res_lock, flags);
314
315	if(pages_mapped <= 8) {
316		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
317	} else if(pages_mapped <= 16) {
318		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
319	} else if(pages_mapped <= 32) {
320		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
321	} else {
322		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
323		      __FILE__);
324	}
325	
326	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
327	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
328
329	spin_unlock_irqrestore(&pcxl_res_lock, flags);
330
331	dump_resmap();
332}
333
334static int proc_pcxl_dma_show(struct seq_file *m, void *v)
335{
336#if 0
337	u_long i = 0;
338	unsigned long *res_ptr = (u_long *)pcxl_res_map;
339#endif
340	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */
341
342	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
343		PCXL_DMA_MAP_SIZE, total_pages);
344
345	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);
346
347	seq_puts(m,  "     	  total:    free:    used:   % used:\n");
348	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
349		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
350		(pcxl_used_bytes * 100) / pcxl_res_size);
351
352	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
353		total_pages - pcxl_used_pages, pcxl_used_pages,
354		(pcxl_used_pages * 100 / total_pages));
355
356#if 0
357	seq_puts(m, "\nResource bitmap:");
358
359	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
360		if ((i & 7) == 0)
361		    seq_puts(m,"\n   ");
362		seq_printf(m, "%s %08lx", buf, *res_ptr);
363	}
364#endif
365	seq_putc(m, '\n');
366	return 0;
367}
368
369static int __init
370pcxl_dma_init(void)
371{
372	if (pcxl_dma_start == 0)
373		return 0;
374
375	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
376	pcxl_res_hint = 0;
377	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
378					    get_order(pcxl_res_size));
379	memset(pcxl_res_map, 0, pcxl_res_size);
380	proc_gsc_root = proc_mkdir("gsc", NULL);
381	if (!proc_gsc_root)
382    		printk(KERN_WARNING
383			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
384	else {
385		struct proc_dir_entry* ent;
386		ent = proc_create_single("pcxl_dma", 0, proc_gsc_root,
387				proc_pcxl_dma_show);
388		if (!ent)
389			printk(KERN_WARNING
390				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
391	}
392	return 0;
393}
394
395__initcall(pcxl_dma_init);
396
397void *arch_dma_alloc(struct device *dev, size_t size,
398		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
399{
400	unsigned long vaddr;
401	unsigned long paddr;
402	int order;
403
404	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
405		return NULL;
406
407	order = get_order(size);
408	size = 1 << (order + PAGE_SHIFT);
409	vaddr = pcxl_alloc_range(size);
410	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
411	flush_kernel_dcache_range(paddr, size);
412	paddr = __pa(paddr);
413	map_uncached_pages(vaddr, size, paddr);
414	*dma_handle = (dma_addr_t) paddr;
415
416#if 0
417/* This probably isn't needed to support EISA cards.
418** ISA cards will certainly only support 24-bit DMA addressing.
419** Not clear if we can, want, or need to support ISA.
420*/
421	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
422		gfp |= GFP_DMA;
423#endif
424	return (void *)vaddr;
425}
426
427void arch_dma_free(struct device *dev, size_t size, void *vaddr,
428		dma_addr_t dma_handle, unsigned long attrs)
429{
430	int order = get_order(size);
431
432	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
433		     boot_cpu_data.cpu_type != pcxl);
434
435	size = 1 << (order + PAGE_SHIFT);
436	unmap_uncached_pages((unsigned long)vaddr, size);
437	pcxl_free_range((unsigned long)vaddr, size);
438
439	free_pages((unsigned long)__va(dma_handle), order);
440}
441
442void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
443		size_t size, enum dma_data_direction dir)
444{
445	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
446}
447
448void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
449		size_t size, enum dma_data_direction dir)
450{
451	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
452}
453
454void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
455	       enum dma_data_direction direction)
456{
457	flush_kernel_dcache_range((unsigned long)vaddr, size);
458}
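
A minimal usage sketch, not part of the file above: on PA7100LC/PA7300LC (pcxl/pcxl2) machines, coherent allocations made through the generic DMA API reach arch_dma_alloc()/arch_dma_free() in the v5.4 code, which back the buffer with an uncached mapping obtained from pcxl_alloc_range(). The device pointer, buffer size and error handling below are illustrative assumptions, not taken from this source.

#include <linux/dma-mapping.h>

/* Hypothetical driver helper: allocate and release one coherent DMA page. */
static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Ends up in arch_dma_alloc(): uncached kernel mapping plus physical DMA handle. */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device here ... */

	/* Ends up in arch_dma_free(): unmaps the uncached range and frees the pages. */
	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
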
v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3** PARISC 1.1 Dynamic DMA mapping support.
  4** This implementation is for PA-RISC platforms that do not support
  5** I/O TLBs (aka DMA address translation hardware).
  6** See Documentation/DMA-API-HOWTO.txt for interface definitions.
  7**
  8**      (c) Copyright 1999,2000 Hewlett-Packard Company
  9**      (c) Copyright 2000 Grant Grundler
 10**	(c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
 11**      (c) Copyright 2000 John Marvin
 12**
 13** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
 14** (I assume it's from David Mosberger-Tang but there was no Copyright)
 15**
 16** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
 17**
 18** - ggg
 19*/
 20
 21#include <linux/init.h>
 22#include <linux/gfp.h>
 23#include <linux/mm.h>
 24#include <linux/pci.h>
 25#include <linux/proc_fs.h>
 26#include <linux/seq_file.h>
 27#include <linux/string.h>
 28#include <linux/types.h>
 29#include <linux/scatterlist.h>
 30#include <linux/export.h>
 31
 32#include <asm/cacheflush.h>
 33#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
 34#include <asm/io.h>
 35#include <asm/page.h>	/* get_order */
 36#include <asm/pgalloc.h>
 37#include <linux/uaccess.h>
 38#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */
 39
 40static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
 41static unsigned long pcxl_used_bytes __read_mostly = 0;
 42static unsigned long pcxl_used_pages __read_mostly = 0;
 43
 44extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
 45static DEFINE_SPINLOCK(pcxl_res_lock);
 46static char    *pcxl_res_map;
 47static int     pcxl_res_hint;
 48static int     pcxl_res_size;
 49
 50#ifdef DEBUG_PCXL_RESOURCE
 51#define DBG_RES(x...)	printk(x)
 52#else
 53#define DBG_RES(x...)
 54#endif
 55
 56
 57/*
 58** Dump a hex representation of the resource map.
 59*/
 60
 61#ifdef DUMP_RESMAP
 62static
 63void dump_resmap(void)
 64{
 65	u_long *res_ptr = (unsigned long *)pcxl_res_map;
 66	u_long i = 0;
 67
 68	printk("res_map: ");
 69	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
 70		printk("%08lx ", *res_ptr);
 71
 72	printk("\n");
 73}
 74#else
 75static inline void dump_resmap(void) {;}
 76#endif
 77
 78static inline int map_pte_uncached(pte_t * pte,
 79		unsigned long vaddr,
 80		unsigned long size, unsigned long *paddr_ptr)
 81{
 82	unsigned long end;
 83	unsigned long orig_vaddr = vaddr;
 84
 85	vaddr &= ~PMD_MASK;
 86	end = vaddr + size;
 87	if (end > PMD_SIZE)
 88		end = PMD_SIZE;
 89	do {
 90		unsigned long flags;
 91
 92		if (!pte_none(*pte))
 93			printk(KERN_ERR "map_pte_uncached: page already exists\n");
 94		purge_tlb_start(flags);
 95		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
 96		pdtlb_kernel(orig_vaddr);
 97		purge_tlb_end(flags);
 98		vaddr += PAGE_SIZE;
 99		orig_vaddr += PAGE_SIZE;
100		(*paddr_ptr) += PAGE_SIZE;
101		pte++;
102	} while (vaddr < end);
103	return 0;
104}
105
106static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
107		unsigned long size, unsigned long *paddr_ptr)
108{
109	unsigned long end;
110	unsigned long orig_vaddr = vaddr;
111
112	vaddr &= ~PGDIR_MASK;
113	end = vaddr + size;
114	if (end > PGDIR_SIZE)
115		end = PGDIR_SIZE;
116	do {
117		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
118		if (!pte)
119			return -ENOMEM;
120		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
121			return -ENOMEM;
122		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
123		orig_vaddr += PMD_SIZE;
124		pmd++;
125	} while (vaddr < end);
126	return 0;
127}
128
129static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
130		unsigned long paddr)
131{
132	pgd_t * dir;
133	unsigned long end = vaddr + size;
134
135	dir = pgd_offset_k(vaddr);
136	do {
137		pmd_t *pmd;
138		
139		pmd = pmd_alloc(NULL, dir, vaddr);
140		if (!pmd)
141			return -ENOMEM;
142		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
143			return -ENOMEM;
144		vaddr = vaddr + PGDIR_SIZE;
145		dir++;
146	} while (vaddr && (vaddr < end));
147	return 0;
148}
149
150static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
151		unsigned long size)
152{
153	pte_t * pte;
154	unsigned long end;
155	unsigned long orig_vaddr = vaddr;
156
157	if (pmd_none(*pmd))
158		return;
159	if (pmd_bad(*pmd)) {
160		pmd_ERROR(*pmd);
161		pmd_clear(pmd);
162		return;
163	}
164	pte = pte_offset_map(pmd, vaddr);
165	vaddr &= ~PMD_MASK;
166	end = vaddr + size;
167	if (end > PMD_SIZE)
168		end = PMD_SIZE;
169	do {
170		unsigned long flags;
171		pte_t page = *pte;
172
173		pte_clear(&init_mm, vaddr, pte);
174		purge_tlb_start(flags);
175		pdtlb_kernel(orig_vaddr);
176		purge_tlb_end(flags);
177		vaddr += PAGE_SIZE;
178		orig_vaddr += PAGE_SIZE;
179		pte++;
180		if (pte_none(page) || pte_present(page))
181			continue;
182		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
183	} while (vaddr < end);
184}
185
186static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
187		unsigned long size)
188{
189	pmd_t * pmd;
190	unsigned long end;
191	unsigned long orig_vaddr = vaddr;
192
193	if (pgd_none(*dir))
194		return;
195	if (pgd_bad(*dir)) {
196		pgd_ERROR(*dir);
197		pgd_clear(dir);
198		return;
199	}
200	pmd = pmd_offset(dir, vaddr);
201	vaddr &= ~PGDIR_MASK;
202	end = vaddr + size;
203	if (end > PGDIR_SIZE)
204		end = PGDIR_SIZE;
205	do {
206		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
207		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
208		orig_vaddr += PMD_SIZE;
209		pmd++;
210	} while (vaddr < end);
211}
212
213static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
214{
215	pgd_t * dir;
216	unsigned long end = vaddr + size;
217
218	dir = pgd_offset_k(vaddr);
219	do {
220		unmap_uncached_pmd(dir, vaddr, end - vaddr);
221		vaddr = vaddr + PGDIR_SIZE;
222		dir++;
223	} while (vaddr && (vaddr < end));
224}
225
226#define PCXL_SEARCH_LOOP(idx, mask, size)  \
227       for(; res_ptr < res_end; ++res_ptr) \
228       { \
229               if(0 == ((*res_ptr) & mask)) { \
230                       *res_ptr |= mask; \
231		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
232		       pcxl_res_hint = idx + (size >> 3); \
233                       goto resource_found; \
234               } \
235       }
236
237#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
238       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
239       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
240       PCXL_SEARCH_LOOP(idx, mask, size); \
241       res_ptr = (u##size *)&pcxl_res_map[0]; \
242       PCXL_SEARCH_LOOP(idx, mask, size); \
243}
244
245unsigned long
246pcxl_alloc_range(size_t size)
247{
248	int res_idx;
249	u_long mask, flags;
250	unsigned int pages_needed = size >> PAGE_SHIFT;
251
252	mask = (u_long) -1L;
253 	mask >>= BITS_PER_LONG - pages_needed;
254
255	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n", 
256		size, pages_needed, mask);
257
258	spin_lock_irqsave(&pcxl_res_lock, flags);
259
260	if(pages_needed <= 8) {
261		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
262	} else if(pages_needed <= 16) {
263		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
264	} else if(pages_needed <= 32) {
265		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
266	} else {
267		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
268		      __FILE__);
269	}
270
271	dump_resmap();
272	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
273	      __FILE__);
274	
275resource_found:
276	
277	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
278		res_idx, mask, pcxl_res_hint);
279
280	pcxl_used_pages += pages_needed;
281	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);
282
283	spin_unlock_irqrestore(&pcxl_res_lock, flags);
284
285	dump_resmap();
286
287	/* 
288	** return the corresponding vaddr in the pcxl dma map
289	*/
290	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
291}
292
293#define PCXL_FREE_MAPPINGS(idx, m, size) \
294		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
295		/* BUG_ON((*res_ptr & m) != m); */ \
296		*res_ptr &= ~m;
297
298/*
299** clear bits in the pcxl resource map
300*/
301static void
302pcxl_free_range(unsigned long vaddr, size_t size)
303{
304	u_long mask, flags;
305	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
306	unsigned int pages_mapped = size >> PAGE_SHIFT;
307
308	mask = (u_long) -1L;
309 	mask >>= BITS_PER_LONG - pages_mapped;
310
311	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n", 
312		res_idx, size, pages_mapped, mask);
313
314	spin_lock_irqsave(&pcxl_res_lock, flags);
315
316	if(pages_mapped <= 8) {
317		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
318	} else if(pages_mapped <= 16) {
319		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
320	} else if(pages_mapped <= 32) {
321		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
322	} else {
323		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
324		      __FILE__);
325	}
326	
327	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
328	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);
329
330	spin_unlock_irqrestore(&pcxl_res_lock, flags);
331
332	dump_resmap();
333}
334
335static int proc_pcxl_dma_show(struct seq_file *m, void *v)
336{
337#if 0
338	u_long i = 0;
339	unsigned long *res_ptr = (u_long *)pcxl_res_map;
340#endif
341	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */
342
343	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
344		PCXL_DMA_MAP_SIZE, total_pages);
345
346	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);
347
348	seq_puts(m,  "     	  total:    free:    used:   % used:\n");
349	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
350		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
351		(pcxl_used_bytes * 100) / pcxl_res_size);
352
353	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
354		total_pages - pcxl_used_pages, pcxl_used_pages,
355		(pcxl_used_pages * 100 / total_pages));
356
357#if 0
358	seq_puts(m, "\nResource bitmap:");
359
360	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
361		if ((i & 7) == 0)
362		    seq_puts(m,"\n   ");
363		seq_printf(m, "%s %08lx", buf, *res_ptr);
364	}
365#endif
366	seq_putc(m, '\n');
367	return 0;
368}
369
370static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
371{
372	return single_open(file, proc_pcxl_dma_show, NULL);
373}
374
375static const struct file_operations proc_pcxl_dma_ops = {
376	.owner		= THIS_MODULE,
377	.open		= proc_pcxl_dma_open,
378	.read		= seq_read,
379	.llseek		= seq_lseek,
380	.release	= single_release,
381};
382
383static int __init
384pcxl_dma_init(void)
385{
386	if (pcxl_dma_start == 0)
387		return 0;
388
389	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
390	pcxl_res_hint = 0;
391	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
392					    get_order(pcxl_res_size));
393	memset(pcxl_res_map, 0, pcxl_res_size);
394	proc_gsc_root = proc_mkdir("gsc", NULL);
395	if (!proc_gsc_root)
396    		printk(KERN_WARNING
397			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
398	else {
399		struct proc_dir_entry* ent;
400		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
401				  &proc_pcxl_dma_ops);
402		if (!ent)
403			printk(KERN_WARNING
404				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
405	}
406	return 0;
407}
408
409__initcall(pcxl_dma_init);
410
411static void *pa11_dma_alloc(struct device *dev, size_t size,
412		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
413{
414	unsigned long vaddr;
415	unsigned long paddr;
416	int order;
417
418	order = get_order(size);
419	size = 1 << (order + PAGE_SHIFT);
420	vaddr = pcxl_alloc_range(size);
421	paddr = __get_free_pages(flag, order);
422	flush_kernel_dcache_range(paddr, size);
423	paddr = __pa(paddr);
424	map_uncached_pages(vaddr, size, paddr);
425	*dma_handle = (dma_addr_t) paddr;
426
427#if 0
428/* This probably isn't needed to support EISA cards.
429** ISA cards will certainly only support 24-bit DMA addressing.
430** Not clear if we can, want, or need to support ISA.
431*/
432	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
433		gfp |= GFP_DMA;
434#endif
435	return (void *)vaddr;
436}
437
438static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
439		dma_addr_t dma_handle, unsigned long attrs)
440{
441	int order;
442
443	order = get_order(size);
444	size = 1 << (order + PAGE_SHIFT);
445	unmap_uncached_pages((unsigned long)vaddr, size);
446	pcxl_free_range((unsigned long)vaddr, size);
447	free_pages((unsigned long)__va(dma_handle), order);
448}
449
450static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
451		unsigned long offset, size_t size,
452		enum dma_data_direction direction, unsigned long attrs)
453{
454	void *addr = page_address(page) + offset;
455	BUG_ON(direction == DMA_NONE);
456
457	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
458		flush_kernel_dcache_range((unsigned long) addr, size);
459
460	return virt_to_phys(addr);
461}
462
463static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
464		size_t size, enum dma_data_direction direction,
465		unsigned long attrs)
466{
467	BUG_ON(direction == DMA_NONE);
468
469	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
470		return;
471
472	if (direction == DMA_TO_DEVICE)
473		return;
474
475	/*
476	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
477	 * simple map/unmap case. However, it IS necessary if
478	 * pci_dma_sync_single_* has been called and the buffer reused.
479	 */
480
481	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
482}
483
484static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
485		int nents, enum dma_data_direction direction,
486		unsigned long attrs)
487{
488	int i;
489	struct scatterlist *sg;
490
491	BUG_ON(direction == DMA_NONE);
492
493	for_each_sg(sglist, sg, nents, i) {
494		unsigned long vaddr = (unsigned long)sg_virt(sg);
495
496		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
497		sg_dma_len(sg) = sg->length;
498
499		if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
500			continue;
501
502		flush_kernel_dcache_range(vaddr, sg->length);
503	}
504	return nents;
505}
506
507static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
508		int nents, enum dma_data_direction direction,
509		unsigned long attrs)
510{
511	int i;
512	struct scatterlist *sg;
513
514	BUG_ON(direction == DMA_NONE);
515
516	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
517		return;
518
519	if (direction == DMA_TO_DEVICE)
520		return;
521
522	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
523
524	for_each_sg(sglist, sg, nents, i)
525		flush_kernel_vmap_range(sg_virt(sg), sg->length);
526}
527
528static void pa11_dma_sync_single_for_cpu(struct device *dev,
529		dma_addr_t dma_handle, size_t size,
530		enum dma_data_direction direction)
531{
532	BUG_ON(direction == DMA_NONE);
533
534	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
535			size);
536}
537
538static void pa11_dma_sync_single_for_device(struct device *dev,
539		dma_addr_t dma_handle, size_t size,
540		enum dma_data_direction direction)
541{
542	BUG_ON(direction == DMA_NONE);
543
544	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
545			size);
546}
547
548static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
549{
550	int i;
551	struct scatterlist *sg;
552
553	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
554
555	for_each_sg(sglist, sg, nents, i)
556		flush_kernel_vmap_range(sg_virt(sg), sg->length);
557}
558
559static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
560{
561	int i;
562	struct scatterlist *sg;
563
564	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */
565
566	for_each_sg(sglist, sg, nents, i)
567		flush_kernel_vmap_range(sg_virt(sg), sg->length);
568}
569
570static void pa11_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
571	       enum dma_data_direction direction)
572{
573	flush_kernel_dcache_range((unsigned long)vaddr, size);
574}
575
576const struct dma_map_ops pcxl_dma_ops = {
577	.alloc =		pa11_dma_alloc,
578	.free =			pa11_dma_free,
579	.map_page =		pa11_dma_map_page,
580	.unmap_page =		pa11_dma_unmap_page,
581	.map_sg =		pa11_dma_map_sg,
582	.unmap_sg =		pa11_dma_unmap_sg,
583	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
584	.sync_single_for_device = pa11_dma_sync_single_for_device,
585	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
586	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
587	.cache_sync =		pa11_dma_cache_sync,
588};
589
590static void *pcx_dma_alloc(struct device *dev, size_t size,
591		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
592{
593	void *addr;
594
595	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0)
596		return NULL;
597
598	addr = (void *)__get_free_pages(flag, get_order(size));
599	if (addr)
600		*dma_handle = (dma_addr_t)virt_to_phys(addr);
601
602	return addr;
603}
604
605static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
606		dma_addr_t iova, unsigned long attrs)
607{
608	free_pages((unsigned long)vaddr, get_order(size));
609	return;
610}
611
612const struct dma_map_ops pcx_dma_ops = {
613	.alloc =		pcx_dma_alloc,
614	.free =			pcx_dma_free,
615	.map_page =		pa11_dma_map_page,
616	.unmap_page =		pa11_dma_unmap_page,
617	.map_sg =		pa11_dma_map_sg,
618	.unmap_sg =		pa11_dma_unmap_sg,
619	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
620	.sync_single_for_device = pa11_dma_sync_single_for_device,
621	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
622	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
623	.cache_sync =		pa11_dma_cache_sync,
624};
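
For the v4.17 streaming path, a hedged sketch of how a driver exercises pcxl_dma_ops/pcx_dma_ops: dma_map_single() and dma_unmap_single() resolve to pa11_dma_map_page() and pa11_dma_unmap_page() above, which flush the data cache because PA 1.1 DMA is not cache-coherent. The buffer, length and direction below are assumptions made only for the example.

#include <linux/dma-mapping.h>

/* Hypothetical transmit helper using the streaming DMA API. */
static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* pa11_dma_map_page() flushes the CPU cache and returns the physical address. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the device transfer from 'handle' and wait for it to finish ... */

	/* For DMA_TO_DEVICE, pa11_dma_unmap_page() returns without another flush. */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
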