Source listing: arch/sparc/mm/io-unit.c from the Linux kernel, shown in two
versions for comparison (site navigation chrome removed). First version:
v3.1
  1/*
  2 * io-unit.c:  IO-UNIT specific routines for memory management.
  3 *
  4 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  5 */
  6 
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/slab.h>
 10#include <linux/spinlock.h>
 11#include <linux/mm.h>
 12#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
 13#include <linux/bitops.h>
 14#include <linux/scatterlist.h>
 15#include <linux/of.h>
 16#include <linux/of_device.h>
 17
 18#include <asm/pgalloc.h>
 19#include <asm/pgtable.h>
 20#include <asm/io.h>
 21#include <asm/io-unit.h>
 22#include <asm/mxcc.h>
 23#include <asm/cacheflush.h>
 24#include <asm/tlbflush.h>
 25#include <asm/dma.h>
 26#include <asm/oplib.h>
 27
 
 
 28/* #define IOUNIT_DEBUG */
 29#ifdef IOUNIT_DEBUG
 30#define IOD(x) printk(x)
 31#else
 32#define IOD(x) do { } while (0)
 33#endif
 34
 35#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
 36#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
 37
/*
 * Allocate and initialise the per-SBI iounit state for one "sbi" device:
 * set up the three bitmap allocation regions, map the External Page Table
 * (XPT) from the device's third resource, and invalidate every iopte in it.
 * Any failure here is fatal, so we report through the PROM and halt.
 */
static void __init iounit_iommu_init(struct platform_device *op)
{
	struct iounit_struct *iounit;
	iopte_t *xpt, *xptend;

	/* GFP_ATOMIC: runs during early init where sleeping is not allowed. */
	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	/*
	 * limit[0..3] bound three allocation regions of the bitmap;
	 * rotor[] holds the next-fit cursor for each region (rotor[0]
	 * stays 0 from kzalloc, matching IOUNIT_BMAP1_START presumably
	 * being 0 -- TODO confirm against asm/io-unit.h).
	 */
	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	/* Map the 16-page External Page Table of this IO-UNIT. */
	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}
	
	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);
	
	/*
	 * Invalidate every iopte.  NOTE(review): these are plain stores to
	 * ioremapped memory; later kernels use the sbus_writel() accessor.
	 */
	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
	     	iopte_val(*xpt++) = 0;
}
 70
 71static int __init iounit_init(void)
 72{
 73	extern void sun4d_init_sbi_irq(void);
 74	struct device_node *dp;
 75
 76	for_each_node_by_name(dp, "sbi") {
 77		struct platform_device *op = of_find_device_by_node(dp);
 78
 79		iounit_iommu_init(op);
 80		of_propagate_archdata(op);
 81	}
 82
 83	sun4d_init_sbi_irq();
 84
 85	return 0;
 86}
 87
 88subsys_initcall(iounit_init);
 89
/* One has to hold iounit->lock to call this */
/*
 * Allocate a run of contiguous iopte slots covering [vaddr, vaddr+size)
 * and program them, returning the DVMA address through which the device
 * reaches the buffer.  The bitmap is split into three regions; 'i' packs
 * the order in which they are tried, one nibble per region (least
 * significant nibble first).  Each region is searched next-fit from its
 * rotor with a single wrap-around; if every region is exhausted we panic.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	/* Pages needed, counting the sub-page offset of vaddr. */
        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredience :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}
	
	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
	
	/* j is the current region (1-based index into limit[]/rotor[]). */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap once: rescan from region start up to rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Region exhausted - advance to the next nibble/region. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* Found a zero bit; make sure the next npages-1 bits are free too. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	/*
	 * Claim the slots and program the XPT.  Adding 0x100 advances the
	 * iopte's page-frame field by one page (MKIOPTE stores phys >> 4,
	 * and 4096 >> 4 == 0x100).  NOTE(review): plain store to the
	 * ioremapped XPT; later kernels use sbus_writel() here.
	 */
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
138
139static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
140{
141	struct iounit_struct *iounit = dev->archdata.iommu;
142	unsigned long ret, flags;
143	
144	spin_lock_irqsave(&iounit->lock, flags);
145	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
146	spin_unlock_irqrestore(&iounit->lock, flags);
147	return ret;
148}
149
150static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
151{
152	struct iounit_struct *iounit = dev->archdata.iommu;
153	unsigned long flags;
154
155	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
156	spin_lock_irqsave(&iounit->lock, flags);
157	while (sz != 0) {
158		--sz;
159		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
160		sg->dma_length = sg->length;
161		sg = sg_next(sg);
162	}
163	spin_unlock_irqrestore(&iounit->lock, flags);
164}
165
166static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
167{
168	struct iounit_struct *iounit = dev->archdata.iommu;
169	unsigned long flags;
170	
171	spin_lock_irqsave(&iounit->lock, flags);
172	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
173	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
174	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
175	for (len += vaddr; vaddr < len; vaddr++)
176		clear_bit(vaddr, iounit->bmap);
177	spin_unlock_irqrestore(&iounit->lock, flags);
178}
179
180static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
181{
182	struct iounit_struct *iounit = dev->archdata.iommu;
183	unsigned long flags;
184	unsigned long vaddr, len;
185
186	spin_lock_irqsave(&iounit->lock, flags);
187	while (sz != 0) {
188		--sz;
189		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
190		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
191		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
192		for (len += vaddr; vaddr < len; vaddr++)
193			clear_bit(vaddr, iounit->bmap);
194		sg = sg_next(sg);
195	}
196	spin_unlock_irqrestore(&iounit->lock, flags);
197}
198
199#ifdef CONFIG_SBUS
/*
 * Map the kernel buffer at virtual address 'va' so the device can reach
 * it at DVMA address 'addr' (returned to the caller via *pba).  For each
 * page this (1) installs a kernel pte so the CPU can also access the
 * buffer at the DVMA address, and (2) programs the matching XPT entry.
 * Always returns 0.
 */
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			/* Walk init_mm down to the pte for the DVMA address. */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);
			/* NOTE(review): no matching pte_unmap(); presumably a
			 * no-op on sparc32 without highmem -- TODO confirm. */

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
			
			/* XPT slot index for this DVMA page. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = (iopte_t *)(iounit->page_table + i);
			*iopte = MKIOPTE(__pa(page));
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* Make the new ptes/ioptes visible before any device access. */
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
238
/* Tear-down counterpart of iounit_map_dma_area; currently a no-op, so
 * the kernel ptes and XPT entries set up there are left in place. */
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
243#endif
244
/* Stub (see FIXME): returns its argument unchanged; patched to a
 * return-zero call site via BTFIXUPCALL_RETO0 in ld_mmu_iounit(). */
static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}
250
/* Stub (see FIXME): nothing to undo since iounit_lockarea does nothing;
 * patched out entirely via BTFIXUPCALL_NOP in ld_mmu_iounit(). */
static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}
255
/*
 * Patch the BTFIXUP mmu_* call sites to the IO-UNIT implementations.
 * The no-op lock/unlock entry points are patched away completely
 * (BTFIXUPCALL_RETO0 returns 0, BTFIXUPCALL_NOP removes the call).
 */
void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
v4.6
  1/*
  2 * io-unit.c:  IO-UNIT specific routines for memory management.
  3 *
  4 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  5 */
  6 
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/slab.h>
 10#include <linux/spinlock.h>
 11#include <linux/mm.h>
 12#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
 13#include <linux/bitops.h>
 14#include <linux/scatterlist.h>
 15#include <linux/of.h>
 16#include <linux/of_device.h>
 17
 18#include <asm/pgalloc.h>
 19#include <asm/pgtable.h>
 20#include <asm/io.h>
 21#include <asm/io-unit.h>
 22#include <asm/mxcc.h>
 23#include <asm/cacheflush.h>
 24#include <asm/tlbflush.h>
 25#include <asm/dma.h>
 26#include <asm/oplib.h>
 27
 28#include "mm_32.h"
 29
 30/* #define IOUNIT_DEBUG */
 31#ifdef IOUNIT_DEBUG
 32#define IOD(x) printk(x)
 33#else
 34#define IOD(x) do { } while (0)
 35#endif
 36
 37#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
 38#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
 39
 40static void __init iounit_iommu_init(struct platform_device *op)
 41{
 42	struct iounit_struct *iounit;
 43	iopte_t __iomem *xpt;
 44	iopte_t __iomem *xptend;
 45
 46	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
 47	if (!iounit) {
 48		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
 49		prom_halt();
 50	}
 51
 52	iounit->limit[0] = IOUNIT_BMAP1_START;
 53	iounit->limit[1] = IOUNIT_BMAP2_START;
 54	iounit->limit[2] = IOUNIT_BMAPM_START;
 55	iounit->limit[3] = IOUNIT_BMAPM_END;
 56	iounit->rotor[1] = IOUNIT_BMAP2_START;
 57	iounit->rotor[2] = IOUNIT_BMAPM_START;
 58
 59	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
 60	if (!xpt) {
 61		prom_printf("SUN4D: Cannot map External Page Table.");
 62		prom_halt();
 63	}
 64	
 65	op->dev.archdata.iommu = iounit;
 66	iounit->page_table = xpt;
 67	spin_lock_init(&iounit->lock);
 68
 69	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
 70	for (; xpt < xptend; xpt++)
 71		sbus_writel(0, xpt);
 72}
 73
 74static int __init iounit_init(void)
 75{
 76	extern void sun4d_init_sbi_irq(void);
 77	struct device_node *dp;
 78
 79	for_each_node_by_name(dp, "sbi") {
 80		struct platform_device *op = of_find_device_by_node(dp);
 81
 82		iounit_iommu_init(op);
 83		of_propagate_archdata(op);
 84	}
 85
 86	sun4d_init_sbi_irq();
 87
 88	return 0;
 89}
 90
 91subsys_initcall(iounit_init);
 92
 93/* One has to hold iounit->lock to call this */
 94static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
 95{
 96	int i, j, k, npages;
 97	unsigned long rotor, scan, limit;
 98	iopte_t iopte;
 99
100        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
101
102	/* A tiny bit of magic ingredience :) */
103	switch (npages) {
104	case 1: i = 0x0231; break;
105	case 2: i = 0x0132; break;
106	default: i = 0x0213; break;
107	}
108	
109	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
110	
111next:	j = (i & 15);
112	rotor = iounit->rotor[j - 1];
113	limit = iounit->limit[j];
114	scan = rotor;
115nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
116	if (scan + npages > limit) {
117		if (limit != rotor) {
118			limit = rotor;
119			scan = iounit->limit[j - 1];
120			goto nexti;
121		}
122		i >>= 4;
123		if (!(i & 15))
124			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
125		goto next;
126	}
127	for (k = 1, scan++; k < npages; k++)
128		if (test_bit(scan++, iounit->bmap))
129			goto nexti;
130	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
131	scan -= npages;
132	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
133	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
134	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
135		set_bit(scan, iounit->bmap);
136		sbus_writel(iopte, &iounit->page_table[scan]);
137	}
138	IOD(("%08lx\n", vaddr));
139	return vaddr;
140}
141
142static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
143{
144	struct iounit_struct *iounit = dev->archdata.iommu;
145	unsigned long ret, flags;
146	
147	spin_lock_irqsave(&iounit->lock, flags);
148	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
149	spin_unlock_irqrestore(&iounit->lock, flags);
150	return ret;
151}
152
153static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
154{
155	struct iounit_struct *iounit = dev->archdata.iommu;
156	unsigned long flags;
157
158	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
159	spin_lock_irqsave(&iounit->lock, flags);
160	while (sz != 0) {
161		--sz;
162		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
163		sg->dma_length = sg->length;
164		sg = sg_next(sg);
165	}
166	spin_unlock_irqrestore(&iounit->lock, flags);
167}
168
169static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
170{
171	struct iounit_struct *iounit = dev->archdata.iommu;
172	unsigned long flags;
173	
174	spin_lock_irqsave(&iounit->lock, flags);
175	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
176	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
177	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
178	for (len += vaddr; vaddr < len; vaddr++)
179		clear_bit(vaddr, iounit->bmap);
180	spin_unlock_irqrestore(&iounit->lock, flags);
181}
182
183static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
184{
185	struct iounit_struct *iounit = dev->archdata.iommu;
186	unsigned long flags;
187	unsigned long vaddr, len;
188
189	spin_lock_irqsave(&iounit->lock, flags);
190	while (sz != 0) {
191		--sz;
192		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
193		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
194		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
195		for (len += vaddr; vaddr < len; vaddr++)
196			clear_bit(vaddr, iounit->bmap);
197		sg = sg_next(sg);
198	}
199	spin_unlock_irqrestore(&iounit->lock, flags);
200}
201
202#ifdef CONFIG_SBUS
203static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
204{
205	struct iounit_struct *iounit = dev->archdata.iommu;
206	unsigned long page, end;
207	pgprot_t dvma_prot;
208	iopte_t __iomem *iopte;
209
210	*pba = addr;
211
212	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
213	end = PAGE_ALIGN((addr + len));
214	while(addr < end) {
215		page = va;
216		{
217			pgd_t *pgdp;
218			pmd_t *pmdp;
219			pte_t *ptep;
220			long i;
221
222			pgdp = pgd_offset(&init_mm, addr);
223			pmdp = pmd_offset(pgdp, addr);
224			ptep = pte_offset_map(pmdp, addr);
225
226			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
227			
228			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
229
230			iopte = iounit->page_table + i;
231			sbus_writel(MKIOPTE(__pa(page)), iopte);
232		}
233		addr += PAGE_SIZE;
234		va += PAGE_SIZE;
235	}
236	flush_cache_all();
237	flush_tlb_all();
238
239	return 0;
240}
241
/* Tear-down counterpart of iounit_map_dma_area; currently a no-op, so
 * the kernel ptes and XPT entries set up there are left in place. */
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
246#endif
247
/* IO-UNIT implementation of the sparc32 DMA operations vector,
 * installed by ld_mmu_iounit() below. */
static const struct sparc32_dma_ops iounit_dma_ops = {
	.get_scsi_one		= iounit_get_scsi_one,
	.get_scsi_sgl		= iounit_get_scsi_sgl,
	.release_scsi_one	= iounit_release_scsi_one,
	.release_scsi_sgl	= iounit_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iounit_map_dma_area,
	.unmap_dma_area		= iounit_unmap_dma_area,
#endif
};
258
259void __init ld_mmu_iounit(void)
260{
261	sparc32_dma_ops = &iounit_dma_ops;
 
 
 
 
 
 
 
 
 
 
 
262}