v3.1
 
/*
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

static void __init iounit_iommu_init(struct platform_device *op)
{
	struct iounit_struct *iounit;
	iopte_t *xpt, *xptend;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}

	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
	     	iopte_val(*xpt++) = 0;
}

static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

	for_each_node_by_name(dp, "sbi") {
		struct platform_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);

/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredience :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long ret, flags;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;
	unsigned long vaddr, len;

	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
		sg = sg_next(sg);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = (iopte_t *)(iounit->page_table + i);
			*iopte = MKIOPTE(__pa(page));
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return 0;
}

static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
#endif

static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}

static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}

void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
 
 
 
 
v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/dma-map-ops.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

#include "mm_32.h"

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

static const struct dma_map_ops iounit_dma_ops;

static void __init iounit_iommu_init(struct platform_device *op)
{
	struct iounit_struct *iounit;
	iopte_t __iomem *xpt;
	iopte_t __iomem *xptend;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}

	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	for (; xpt < xptend; xpt++)
		sbus_writel(0, xpt);

	op->dev.dma_ops = &iounit_dma_ops;
}

static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

	for_each_node_by_name(dp, "sbi") {
		struct platform_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);

/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredience :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, enum dma_data_direction dir,
		unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long ret, flags;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	struct scatterlist *sg;
	unsigned long flags;
	int i;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
		sg->dma_length = sg->length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
	return nents;
}

static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags, vaddr, len;
	struct scatterlist *sg;
	int i;

	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

#ifdef CONFIG_SBUS
static void *iounit_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	pgprot_t dvma_prot;
	iopte_t __iomem *iopte;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (!va)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;
	*dma_handle = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pmdp = pmd_off_k(addr);
			ptep = pte_offset_kernel(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = iounit->page_table + i;
			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	/* XXX Somebody please fill this in */
}
#endif

static const struct dma_map_ops iounit_dma_ops = {
#ifdef CONFIG_SBUS
	.alloc			= iounit_alloc,
	.free			= iounit_free,
#endif
	.map_page		= iounit_map_page,
	.unmap_page		= iounit_unmap_page,
	.map_sg			= iounit_map_sg,
	.unmap_sg		= iounit_unmap_sg,
};
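
Note the structural difference between the two versions: v3.1 registers these routines through the BTFIXUP mmu_* hooks in ld_mmu_iounit(), while v6.8 points op->dev.dma_ops at iounit_dma_ops in iounit_iommu_init(), so drivers reach them through the generic DMA API. The fragment below is a minimal, hypothetical sketch of that v6.8 path (example_dma_to_device and its arguments are illustrative and not part of the kernel source); it is not taken from io-unit.c.

#include <linux/dma-mapping.h>

/*
 * Illustrative only: with dev->dma_ops set to &iounit_dma_ops, the generic
 * DMA API below dispatches to iounit_map_page()/iounit_unmap_page().
 */
static int example_dma_to_device(struct device *dev, struct page *page,
				 size_t len)
{
	dma_addr_t handle;

	/* Allocates IOPTE slots and returns a bus address in IOUNIT DVMA space. */
	handle = dma_map_page(dev, page, 0, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... program the device with 'handle' and run the transfer ... */

	/* Clears the bitmap bits that iounit_get_area() set for this mapping. */
	dma_unmap_page(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}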