v4.6 (arch/parisc/kernel/pci-dma.c)
 
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/DMA-API-HOWTO.txt for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**	(c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>	/* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;

extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t   pcxl_res_lock;
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif


/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

static int pa11_dma_supported( struct device *dev, u64 mask)
{
	return 1;
}

static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		pmd_t *pmd;

		pmd = pmd_alloc(NULL, dir, vaddr);
		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
		       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}

unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "     	  total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
		    seq_puts(m,"\n   ");
		seq_printf(m, "%s %08lx", buf, *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}

static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);

static void *pa11_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}

static void pa11_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);
	free_pages((unsigned long)__va(dma_handle), order);
}

static dma_addr_t pa11_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction, struct dma_attrs *attrs)
{
	void *addr = page_address(page) + offset;
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}

static void pa11_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
	    return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}

static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sglist, sg, nents, i) {
		unsigned long vaddr = (unsigned long)sg_virt(sg);

		sg_dma_address(sg) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sg) = sg->length;
		flush_kernel_dcache_range(vaddr, sg->length);
	}
	return nents;
}

static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
	    return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
	return;
}

static void pa11_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle),
			size);
}

static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for_each_sg(sglist, sg, nents, i)
		flush_kernel_vmap_range(sg_virt(sg), sg->length);
}

struct dma_map_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc =		pa11_dma_alloc,
	.free =			pa11_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};

static void *pcx_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, struct dma_attrs *attrs)
{
	void *addr;

	if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		return NULL;

	addr = (void *)__get_free_pages(flag, get_order(size));
	if (addr)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}

static void pcx_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t iova, struct dma_attrs *attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
	return;
}

struct dma_map_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc =		pcx_dma_alloc,
	.free =			pcx_dma_free,
	.map_page =		pa11_dma_map_page,
	.unmap_page =		pa11_dma_unmap_page,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.sync_single_for_device = pa11_dma_sync_single_for_device,
	.sync_sg_for_cpu =	pa11_dma_sync_sg_for_cpu,
	.sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
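
Note on the v4.6 listing above: it publishes two dma_map_ops tables. pcxl_dma_ops serves PA7100LC/PA7300LC ("pcxl") machines and builds uncached kernel mappings out of the pcxl resource map; pcx_dma_ops serves the remaining PA1.1 CPUs and can only hand out non-consistent memory. The sketch below is illustrative only and not part of the file; example_probe() is a hypothetical driver hook showing how these ops are reached through the generic DMA API.

/* Illustrative sketch (hypothetical example_probe(), assumed names):
 * dma_alloc_coherent() resolves to pa11_dma_alloc() on pcxl machines,
 * which reserves pages via pcxl_alloc_range() and remaps them uncached.
 * On pcx machines it fails, since pcx_dma_alloc() insists on
 * DMA_ATTR_NON_CONSISTENT.
 */
#include <linux/dma-mapping.h>

static int example_probe(struct device *dev)
{
	dma_addr_t handle;
	void *buf;

	buf = dma_alloc_coherent(dev, 4 * PAGE_SIZE, &handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* hand "handle" to the device; the CPU uses "buf" uncached */
	dma_free_coherent(dev, 4 * PAGE_SIZE, buf, handle); /* pa11_dma_free() */
	return 0;
}
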
v6.13.7 (arch/parisc/kernel/pci-dma.c)
// SPDX-License-Identifier: GPL-2.0
/*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/core-api/dma-api-howto.rst for interface definitions.
**
**      (c) Copyright 1999,2000 Hewlett-Packard Company
**      (c) Copyright 2000 Grant Grundler
**	(c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
**      (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/

#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>

#include <asm/cacheflush.h>
#include <asm/dma.h>    /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h>	/* get_order */
#include <linux/uaccess.h>
#include <asm/tlbflush.h>	/* for purge_tlb_*() macros */

static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly;
static unsigned long pcxl_used_pages __read_mostly;

unsigned long pcxl_dma_start __ro_after_init; /* pcxl dma mapping area start */
static DEFINE_SPINLOCK(pcxl_res_lock);
static char    *pcxl_res_map;
static int     pcxl_res_hint;
static int     pcxl_res_size;

#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...)	printk(x)
#else
#define DBG_RES(x...)
#endif


/*
** Dump a hex representation of the resource map.
*/

#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
	u_long *res_ptr = (unsigned long *)pcxl_res_map;
	u_long i = 0;

	printk("res_map: ");
	for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
		printk("%08lx ", *res_ptr);

	printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif

static inline int map_pte_uncached(pte_t * pte,
		unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;

		if (!pte_none(*pte))
			printk(KERN_ERR "map_pte_uncached: page already exists\n");
		purge_tlb_start(flags);
		set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
		pdtlb(SR_KERNEL, orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		(*paddr_ptr) += PAGE_SIZE;
		pte++;
	} while (vaddr < end);
	return 0;
}

static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
		unsigned long size, unsigned long *paddr_ptr)
{
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		pte_t * pte = pte_alloc_kernel(pmd, vaddr);
		if (!pte)
			return -ENOMEM;
		if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
			return -ENOMEM;
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
	return 0;
}

static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
		unsigned long paddr)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		p4d_t *p4d;
		pud_t *pud;
		pmd_t *pmd;

		p4d = p4d_offset(dir, vaddr);
		pud = pud_offset(p4d, vaddr);
		pmd = pmd_alloc(NULL, pud, vaddr);

		if (!pmd)
			return -ENOMEM;
		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
			return -ENOMEM;
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
	return 0;
}

static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb(SR_KERNEL, orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}

static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(pud_offset(p4d_offset(dir, vaddr), vaddr), vaddr);
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}

static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));
}

#define PCXL_SEARCH_LOOP(idx, mask, size)  \
       for(; res_ptr < res_end; ++res_ptr) \
       { \
               if(0 == ((*res_ptr) & mask)) { \
                       *res_ptr |= mask; \
		       idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
		       pcxl_res_hint = idx + (size >> 3); \
                       goto resource_found; \
               } \
       }

#define PCXL_FIND_FREE_MAPPING(idx, mask, size)  { \
       u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
       u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
       res_ptr = (u##size *)&pcxl_res_map[0]; \
       PCXL_SEARCH_LOOP(idx, mask, size); \
}

static unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:

	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}

#define PCXL_FREE_MAPPINGS(idx, m, size) \
		u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
		/* BUG_ON((*res_ptr & m) != m); */ \
		*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}

static int __maybe_unused proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "     	  total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
		    seq_puts(m,"\n   ");
		seq_printf(m, "%s %08lx", buf, *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}

static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("bus/gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create_single("pcxl_dma", 0, proc_gsc_root,
				proc_pcxl_dma_show);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);

void *arch_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	if (boot_cpu_data.cpu_type != pcxl2 && boot_cpu_data.cpu_type != pcxl)
		return NULL;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(gfp | __GFP_ZERO, order);
	flush_kernel_dcache_range(paddr, size);
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

	return (void *)vaddr;
}

void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	int order = get_order(size);

	WARN_ON_ONCE(boot_cpu_data.cpu_type != pcxl2 &&
		     boot_cpu_data.cpu_type != pcxl);

	size = 1 << (order + PAGE_SHIFT);
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);

	free_pages((unsigned long)__va(dma_handle), order);
}

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	/*
	 * fdc: The data cache line is written back to memory, if and only if
	 * it is dirty, and then invalidated from the data cache.
	 */
	flush_kernel_dcache_range((unsigned long)phys_to_virt(paddr), size);
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long addr = (unsigned long) phys_to_virt(paddr);

	switch (dir) {
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		flush_kernel_dcache_range(addr, size);
		return;
	case DMA_FROM_DEVICE:
		purge_kernel_dcache_range_asm(addr, addr + size);
		return;
	default:
		BUG();
	}
}
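
Note on the v6.13.7 listing above: the per-architecture dma_map_ops tables are gone; the file now implements only the hooks consumed by the generic kernel/dma code. arch_dma_alloc()/arch_dma_free() back coherent allocations on pcxl machines, and arch_sync_dma_for_device()/arch_sync_dma_for_cpu() do the cache maintenance around streaming mappings. Below is a minimal sketch of the resulting call path, assuming a non-coherent PA1.1 machine served by dma-direct; example_map() is hypothetical, not part of the file.

/* Illustrative sketch (hypothetical example_map()): a streaming mapping
 * through dma-direct.  dma_map_single() ends up calling
 * arch_sync_dma_for_device() above (the fdc flush), and the matching
 * dma_unmap_single(..., DMA_FROM_DEVICE) ends in arch_sync_dma_for_cpu().
 */
#include <linux/dma-mapping.h>

static dma_addr_t example_map(struct device *dev, void *cpu_buf, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, cpu_buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return DMA_MAPPING_ERROR;
	return handle; /* bus address the device can DMA to/from */
}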