/*
 * NOTE(review): web-scrape navigation residue removed here ("Linux Audio",
 * course ad, "Loading..."). The code below is arch/sparc/mm/io-unit.c as of
 * Linux v4.17, per the page's version marker — confirm against kernel.org.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * io-unit.c:  IO-UNIT specific routines for memory management.
  4 *
  5 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  6 */
  7 
  8#include <linux/kernel.h>
  9#include <linux/init.h>
 10#include <linux/slab.h>
 11#include <linux/spinlock.h>
 12#include <linux/mm.h>
 13#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
 14#include <linux/bitops.h>
 15#include <linux/scatterlist.h>
 16#include <linux/of.h>
 17#include <linux/of_device.h>
 18
 19#include <asm/pgalloc.h>
 20#include <asm/pgtable.h>
 21#include <asm/io.h>
 22#include <asm/io-unit.h>
 23#include <asm/mxcc.h>
 24#include <asm/cacheflush.h>
 25#include <asm/tlbflush.h>
 26#include <asm/dma.h>
 27#include <asm/oplib.h>
 28
 29#include "mm_32.h"
 30
 31/* #define IOUNIT_DEBUG */
 32#ifdef IOUNIT_DEBUG
 33#define IOD(x) printk(x)
 34#else
 35#define IOD(x) do { } while (0)
 36#endif
 37
 38#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
 39#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
 40
 41static void __init iounit_iommu_init(struct platform_device *op)
 42{
 43	struct iounit_struct *iounit;
 44	iopte_t __iomem *xpt;
 45	iopte_t __iomem *xptend;
 46
 47	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
 48	if (!iounit) {
 49		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
 50		prom_halt();
 51	}
 52
 53	iounit->limit[0] = IOUNIT_BMAP1_START;
 54	iounit->limit[1] = IOUNIT_BMAP2_START;
 55	iounit->limit[2] = IOUNIT_BMAPM_START;
 56	iounit->limit[3] = IOUNIT_BMAPM_END;
 57	iounit->rotor[1] = IOUNIT_BMAP2_START;
 58	iounit->rotor[2] = IOUNIT_BMAPM_START;
 59
 60	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
 61	if (!xpt) {
 62		prom_printf("SUN4D: Cannot map External Page Table.");
 63		prom_halt();
 64	}
 65	
 66	op->dev.archdata.iommu = iounit;
 67	iounit->page_table = xpt;
 68	spin_lock_init(&iounit->lock);
 69
 70	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
 71	for (; xpt < xptend; xpt++)
 72		sbus_writel(0, xpt);
 73}
 74
 75static int __init iounit_init(void)
 76{
 77	extern void sun4d_init_sbi_irq(void);
 78	struct device_node *dp;
 79
 80	for_each_node_by_name(dp, "sbi") {
 81		struct platform_device *op = of_find_device_by_node(dp);
 82
 83		iounit_iommu_init(op);
 84		of_propagate_archdata(op);
 85	}
 86
 87	sun4d_init_sbi_irq();
 88
 89	return 0;
 90}
 91
 92subsys_initcall(iounit_init);
 93
/* One has to hold iounit->lock to call this */
/*
 * Map the [vaddr, vaddr+size) kernel buffer through the IO-UNIT and
 * return the matching DVMA address (sub-page offset preserved).
 * Panics if no contiguous run of free IOPTE slots exists in any area.
 */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

	/* Pages needed, accounting for the buffer's sub-page offset. */
        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredience :) */
	/*
	 * 'i' packs the order in which bitmap areas are tried, one 1-based
	 * area index per nibble, consumed low nibble first (0x0231 tries
	 * area 1, then 3, then 2).  A zero nibble ends the search.
	 */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}
	
	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
	
	/* Select the next area from the low nibble of 'i'. */
next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
	/* Find the next free slot at or after 'scan' in this area. */
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			/* Wrap: rescan from the area start up to the rotor. */
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		/* Area exhausted; advance to the next nibble / area. */
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	/* Verify the following npages-1 slots are free as well. */
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	/* Next-fit: remember where to resume scanning next time. */
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	/* Claim the slots and program the IOPTEs; +0x100 steps one page
	 * since MKIOPTE stores the physical address shifted right by 4. */
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}
142
143static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
144{
145	struct iounit_struct *iounit = dev->archdata.iommu;
146	unsigned long ret, flags;
147	
148	spin_lock_irqsave(&iounit->lock, flags);
149	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
150	spin_unlock_irqrestore(&iounit->lock, flags);
151	return ret;
152}
153
154static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
155{
156	struct iounit_struct *iounit = dev->archdata.iommu;
157	unsigned long flags;
158
159	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
160	spin_lock_irqsave(&iounit->lock, flags);
161	while (sz != 0) {
162		--sz;
163		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
164		sg->dma_length = sg->length;
165		sg = sg_next(sg);
166	}
167	spin_unlock_irqrestore(&iounit->lock, flags);
168}
169
170static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
171{
172	struct iounit_struct *iounit = dev->archdata.iommu;
173	unsigned long flags;
174	
175	spin_lock_irqsave(&iounit->lock, flags);
176	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
177	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
178	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
179	for (len += vaddr; vaddr < len; vaddr++)
180		clear_bit(vaddr, iounit->bmap);
181	spin_unlock_irqrestore(&iounit->lock, flags);
182}
183
184static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
185{
186	struct iounit_struct *iounit = dev->archdata.iommu;
187	unsigned long flags;
188	unsigned long vaddr, len;
189
190	spin_lock_irqsave(&iounit->lock, flags);
191	while (sz != 0) {
192		--sz;
193		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
194		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
195		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
196		for (len += vaddr; vaddr < len; vaddr++)
197			clear_bit(vaddr, iounit->bmap);
198		sg = sg_next(sg);
199	}
200	spin_unlock_irqrestore(&iounit->lock, flags);
201}
202
203#ifdef CONFIG_SBUS
/*
 * Map [va, va+len) of kernel memory at DVMA address 'addr': installs a
 * kernel PTE for each page of the DVMA alias and mirrors it into the
 * IO-UNIT external page table.  Stores the DMA base in *pba; returns 0.
 */
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t __iomem *iopte;

	*pba = addr;

	/* Cacheable, privileged SRMMU protection for the kernel-side alias. */
	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			/* Walk init_mm's page tables down to the PTE for 'addr'. */
			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
			
			/* Index of this DVMA page in the external page table. */
			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = iounit->page_table + i;
			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	/* Make the new mappings visible before any device access. */
	flush_cache_all();
	flush_tlb_all();

	return 0;
}
242
/*
 * Tear down a DVMA mapping made by iounit_map_dma_area.
 * NOTE(review): intentionally a no-op — mappings are never released here.
 */
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
247#endif
248
/* DMA operations installed for SUN4D IO-UNIT based machines. */
static const struct sparc32_dma_ops iounit_dma_ops = {
	.get_scsi_one		= iounit_get_scsi_one,
	.get_scsi_sgl		= iounit_get_scsi_sgl,
	.release_scsi_one	= iounit_release_scsi_one,
	.release_scsi_sgl	= iounit_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iounit_map_dma_area,
	.unmap_dma_area		= iounit_unmap_dma_area,
#endif
};
259
/* Select the IO-UNIT implementation as the sparc32 DMA backend. */
void __init ld_mmu_iounit(void)
{
	sparc32_dma_ops = &iounit_dma_ops;
}
/*
 * NOTE(review): a second scraped copy of the same file follows, as of
 * Linux v4.6 per the page's version marker — confirm against kernel.org.
 */
  1/*
  2 * io-unit.c:  IO-UNIT specific routines for memory management.
  3 *
  4 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
  5 */
  6 
  7#include <linux/kernel.h>
  8#include <linux/init.h>
  9#include <linux/slab.h>
 10#include <linux/spinlock.h>
 11#include <linux/mm.h>
 12#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
 13#include <linux/bitops.h>
 14#include <linux/scatterlist.h>
 15#include <linux/of.h>
 16#include <linux/of_device.h>
 17
 18#include <asm/pgalloc.h>
 19#include <asm/pgtable.h>
 20#include <asm/io.h>
 21#include <asm/io-unit.h>
 22#include <asm/mxcc.h>
 23#include <asm/cacheflush.h>
 24#include <asm/tlbflush.h>
 25#include <asm/dma.h>
 26#include <asm/oplib.h>
 27
 28#include "mm_32.h"
 29
 30/* #define IOUNIT_DEBUG */
 31#ifdef IOUNIT_DEBUG
 32#define IOD(x) printk(x)
 33#else
 34#define IOD(x) do { } while (0)
 35#endif
 36
 37#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
 38#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)
 39
 40static void __init iounit_iommu_init(struct platform_device *op)
 41{
 42	struct iounit_struct *iounit;
 43	iopte_t __iomem *xpt;
 44	iopte_t __iomem *xptend;
 45
 46	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
 47	if (!iounit) {
 48		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
 49		prom_halt();
 50	}
 51
 52	iounit->limit[0] = IOUNIT_BMAP1_START;
 53	iounit->limit[1] = IOUNIT_BMAP2_START;
 54	iounit->limit[2] = IOUNIT_BMAPM_START;
 55	iounit->limit[3] = IOUNIT_BMAPM_END;
 56	iounit->rotor[1] = IOUNIT_BMAP2_START;
 57	iounit->rotor[2] = IOUNIT_BMAPM_START;
 58
 59	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
 60	if (!xpt) {
 61		prom_printf("SUN4D: Cannot map External Page Table.");
 62		prom_halt();
 63	}
 64	
 65	op->dev.archdata.iommu = iounit;
 66	iounit->page_table = xpt;
 67	spin_lock_init(&iounit->lock);
 68
 69	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
 70	for (; xpt < xptend; xpt++)
 71		sbus_writel(0, xpt);
 72}
 73
 74static int __init iounit_init(void)
 75{
 76	extern void sun4d_init_sbi_irq(void);
 77	struct device_node *dp;
 78
 79	for_each_node_by_name(dp, "sbi") {
 80		struct platform_device *op = of_find_device_by_node(dp);
 81
 82		iounit_iommu_init(op);
 83		of_propagate_archdata(op);
 84	}
 85
 86	sun4d_init_sbi_irq();
 87
 88	return 0;
 89}
 90
 91subsys_initcall(iounit_init);
 92
 93/* One has to hold iounit->lock to call this */
 94static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
 95{
 96	int i, j, k, npages;
 97	unsigned long rotor, scan, limit;
 98	iopte_t iopte;
 99
100        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
101
102	/* A tiny bit of magic ingredience :) */
103	switch (npages) {
104	case 1: i = 0x0231; break;
105	case 2: i = 0x0132; break;
106	default: i = 0x0213; break;
107	}
108	
109	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
110	
111next:	j = (i & 15);
112	rotor = iounit->rotor[j - 1];
113	limit = iounit->limit[j];
114	scan = rotor;
115nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
116	if (scan + npages > limit) {
117		if (limit != rotor) {
118			limit = rotor;
119			scan = iounit->limit[j - 1];
120			goto nexti;
121		}
122		i >>= 4;
123		if (!(i & 15))
124			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
125		goto next;
126	}
127	for (k = 1, scan++; k < npages; k++)
128		if (test_bit(scan++, iounit->bmap))
129			goto nexti;
130	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
131	scan -= npages;
132	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
133	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
134	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
135		set_bit(scan, iounit->bmap);
136		sbus_writel(iopte, &iounit->page_table[scan]);
137	}
138	IOD(("%08lx\n", vaddr));
139	return vaddr;
140}
141
142static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
143{
144	struct iounit_struct *iounit = dev->archdata.iommu;
145	unsigned long ret, flags;
146	
147	spin_lock_irqsave(&iounit->lock, flags);
148	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
149	spin_unlock_irqrestore(&iounit->lock, flags);
150	return ret;
151}
152
153static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
154{
155	struct iounit_struct *iounit = dev->archdata.iommu;
156	unsigned long flags;
157
158	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
159	spin_lock_irqsave(&iounit->lock, flags);
160	while (sz != 0) {
161		--sz;
162		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
163		sg->dma_length = sg->length;
164		sg = sg_next(sg);
165	}
166	spin_unlock_irqrestore(&iounit->lock, flags);
167}
168
169static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
170{
171	struct iounit_struct *iounit = dev->archdata.iommu;
172	unsigned long flags;
173	
174	spin_lock_irqsave(&iounit->lock, flags);
175	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
176	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
177	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
178	for (len += vaddr; vaddr < len; vaddr++)
179		clear_bit(vaddr, iounit->bmap);
180	spin_unlock_irqrestore(&iounit->lock, flags);
181}
182
183static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
184{
185	struct iounit_struct *iounit = dev->archdata.iommu;
186	unsigned long flags;
187	unsigned long vaddr, len;
188
189	spin_lock_irqsave(&iounit->lock, flags);
190	while (sz != 0) {
191		--sz;
192		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
193		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
194		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
195		for (len += vaddr; vaddr < len; vaddr++)
196			clear_bit(vaddr, iounit->bmap);
197		sg = sg_next(sg);
198	}
199	spin_unlock_irqrestore(&iounit->lock, flags);
200}
201
202#ifdef CONFIG_SBUS
203static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, unsigned long addr, int len)
204{
205	struct iounit_struct *iounit = dev->archdata.iommu;
206	unsigned long page, end;
207	pgprot_t dvma_prot;
208	iopte_t __iomem *iopte;
209
210	*pba = addr;
211
212	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
213	end = PAGE_ALIGN((addr + len));
214	while(addr < end) {
215		page = va;
216		{
217			pgd_t *pgdp;
218			pmd_t *pmdp;
219			pte_t *ptep;
220			long i;
221
222			pgdp = pgd_offset(&init_mm, addr);
223			pmdp = pmd_offset(pgdp, addr);
224			ptep = pte_offset_map(pmdp, addr);
225
226			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));
227			
228			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
229
230			iopte = iounit->page_table + i;
231			sbus_writel(MKIOPTE(__pa(page)), iopte);
232		}
233		addr += PAGE_SIZE;
234		va += PAGE_SIZE;
235	}
236	flush_cache_all();
237	flush_tlb_all();
238
239	return 0;
240}
241
/*
 * Tear down a DVMA mapping made by iounit_map_dma_area.
 * NOTE(review): intentionally a no-op — mappings are never released here.
 */
static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
246#endif
247
/* DMA operations installed for SUN4D IO-UNIT based machines. */
static const struct sparc32_dma_ops iounit_dma_ops = {
	.get_scsi_one		= iounit_get_scsi_one,
	.get_scsi_sgl		= iounit_get_scsi_sgl,
	.release_scsi_one	= iounit_release_scsi_one,
	.release_scsi_sgl	= iounit_release_scsi_sgl,
#ifdef CONFIG_SBUS
	.map_dma_area		= iounit_map_dma_area,
	.unmap_dma_area		= iounit_unmap_dma_area,
#endif
};
258
/* Select the IO-UNIT implementation as the sparc32 DMA backend. */
void __init ld_mmu_iounit(void)
{
	sparc32_dma_ops = &iounit_dma_ops;
}