v3.1
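The SUN4D IO-UNIT IOMMU code (io-unit.c) as it appeared in kernel v3.1; the v5.4 version of the same file follows further below for comparison.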
 
/*
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

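/*
 * Each IO-UNIT page table entry ("iopte") carries a physical page address
 * shifted right by 4 bits in the IOUPTE_PAGE field plus the cacheable,
 * writable and valid bits collected in IOPERM; MKIOPTE() builds such an
 * entry from a physical address.  With the 4K pages used here, consecutive
 * pages therefore differ by 0x100 in the entry, which iounit_get_area()
 * below relies on when it steps through a multi-page mapping.
 */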
static void __init iounit_iommu_init(struct platform_device *op)
{
	struct iounit_struct *iounit;
	iopte_t *xpt, *xptend;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}

	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	     xpt < xptend;)
	     	iopte_val(*xpt++) = 0;
}

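/*
 * Each SUN4D "sbi" node in the device tree gets its own IO-UNIT instance:
 * iounit_iommu_init() above hangs the iounit_struct off the SBI's platform
 * device and remaps its 16-page External Page Table (XPT), and
 * of_propagate_archdata() then copies that archdata down to the SBus
 * devices behind the SBI so each of them can find its IOMMU.
 */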
static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

	for_each_node_by_name(dp, "sbi") {
		struct platform_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);

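/*
 * DVMA space is handed out from three bitmap pools (BMAP1, BMAP2 and
 * BMAPM, bounded by the limit[] values set up in iounit_iommu_init()).
 * The nibble-encoded constant chosen below fixes the order in which the
 * pools are tried for a given request size, rotor[] keeps a next-fit
 * pointer per pool, and the allocation only panics once every pool has
 * been exhausted.
 */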
/* One has to hold iounit->lock to call this */
static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
{
	int i, j, k, npages;
	unsigned long rotor, scan, limit;
	iopte_t iopte;

        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;

	/* A tiny bit of magic ingredience :) */
	switch (npages) {
	case 1: i = 0x0231; break;
	case 2: i = 0x0132; break;
	default: i = 0x0213; break;
	}

	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));

next:	j = (i & 15);
	rotor = iounit->rotor[j - 1];
	limit = iounit->limit[j];
	scan = rotor;
nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
	if (scan + npages > limit) {
		if (limit != rotor) {
			limit = rotor;
			scan = iounit->limit[j - 1];
			goto nexti;
		}
		i >>= 4;
		if (!(i & 15))
			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
		goto next;
	}
	for (k = 1, scan++; k < npages; k++)
		if (test_bit(scan++, iounit->bmap))
			goto nexti;
	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
	scan -= npages;
	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
		set_bit(scan, iounit->bmap);
		iounit->page_table[scan] = iopte;
	}
	IOD(("%08lx\n", vaddr));
	return vaddr;
}

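/*
 * The following get/release pairs back the sparc32 mmu_get_scsi_one/sgl
 * and mmu_release_scsi_one/sgl hooks (wired up in ld_mmu_iounit() at the
 * bottom of this file): they translate kernel buffers or scatterlists into
 * IO-UNIT DVMA addresses an SBus master can use, and give the bitmap slots
 * back once the transfer is done.
 */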
static __u32 iounit_get_scsi_one(struct device *dev, char *vaddr, unsigned long len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long ret, flags;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

static void iounit_get_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
		sg->dma_length = sg->length;
		sg = sg_next(sg);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

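/*
 * Releasing a mapping only clears the corresponding bits in the allocation
 * bitmap; the XPT entries themselves are left in place and are simply
 * overwritten the next time those slots are handed out.
 */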
static void iounit_release_scsi_one(struct device *dev, __u32 vaddr, unsigned long len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_release_scsi_sgl(struct device *dev, struct scatterlist *sg, int sz)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;
	unsigned long vaddr, len;

	spin_lock_irqsave(&iounit->lock, flags);
	while (sz != 0) {
		--sz;
		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
		sg = sg_next(sg);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

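/*
 * The CONFIG_SBUS block below provides the mmu_map_dma_area and
 * mmu_unmap_dma_area hooks for long-lived DVMA buffers (the coherent
 * allocation path): for each page it installs both a kernel PTE in init_mm
 * at the DVMA address and the matching XPT entry, so CPU and device reach
 * the buffer through the same DVMA range, then flushes the caches and TLB.
 * The unmap side is still an empty stub.
 */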
#ifdef CONFIG_SBUS
static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned long va, __u32 addr, int len)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long page, end;
	pgprot_t dvma_prot;
	iopte_t *iopte;

	*pba = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = (iopte_t *)(iounit->page_table + i);
			*iopte = MKIOPTE(__pa(page));
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return 0;
}

static void iounit_unmap_dma_area(struct device *dev, unsigned long addr, int len)
{
	/* XXX Somebody please fill this in */
}
#endif

static char *iounit_lockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
	return vaddr;
}

static void iounit_unlockarea(char *vaddr, unsigned long len)
{
/* FIXME: Write this */
}

void __init ld_mmu_iounit(void)
{
	BTFIXUPSET_CALL(mmu_lockarea, iounit_lockarea, BTFIXUPCALL_RETO0);
	BTFIXUPSET_CALL(mmu_unlockarea, iounit_unlockarea, BTFIXUPCALL_NOP);

	BTFIXUPSET_CALL(mmu_get_scsi_one, iounit_get_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_get_scsi_sgl, iounit_get_scsi_sgl, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_one, iounit_release_scsi_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_release_scsi_sgl, iounit_release_scsi_sgl, BTFIXUPCALL_NORM);

#ifdef CONFIG_SBUS
	BTFIXUPSET_CALL(mmu_map_dma_area, iounit_map_dma_area, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mmu_unmap_dma_area, iounit_unmap_dma_area, BTFIXUPCALL_NORM);
#endif
}
v5.4
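By v5.4 the BTFIXUP indirection is gone: the same allocator is exposed through a const struct dma_map_ops (iounit_dma_ops) that ld_mmu_iounit() assigns to the global dma_ops pointer, the SCSI-specific entry points have become the generic .map_page/.unmap_page/.map_sg/.unmap_sg callbacks, the DVMA buffer path has become .alloc/.free, and the External Page Table is now treated as __iomem and written with sbus_writel().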
// SPDX-License-Identifier: GPL-2.0
/*
 * io-unit.c:  IO-UNIT specific routines for memory management.
 *
 * Copyright (C) 1997,1998 Jakub Jelinek    (jj@sunsite.mff.cuni.cz)
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>	/* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/io-unit.h>
#include <asm/mxcc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/dma.h>
#include <asm/oplib.h>

#include "mm_32.h"

/* #define IOUNIT_DEBUG */
#ifdef IOUNIT_DEBUG
#define IOD(x) printk(x)
#else
#define IOD(x) do { } while (0)
#endif

#define IOPERM        (IOUPTE_CACHE | IOUPTE_WRITE | IOUPTE_VALID)
#define MKIOPTE(phys) __iopte((((phys)>>4) & IOUPTE_PAGE) | IOPERM)

static void __init iounit_iommu_init(struct platform_device *op)
{
	struct iounit_struct *iounit;
	iopte_t __iomem *xpt;
	iopte_t __iomem *xptend;

	iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
	if (!iounit) {
		prom_printf("SUN4D: Cannot alloc iounit, halting.\n");
		prom_halt();
	}

	iounit->limit[0] = IOUNIT_BMAP1_START;
	iounit->limit[1] = IOUNIT_BMAP2_START;
	iounit->limit[2] = IOUNIT_BMAPM_START;
	iounit->limit[3] = IOUNIT_BMAPM_END;
	iounit->rotor[1] = IOUNIT_BMAP2_START;
	iounit->rotor[2] = IOUNIT_BMAPM_START;

	xpt = of_ioremap(&op->resource[2], 0, PAGE_SIZE * 16, "XPT");
	if (!xpt) {
		prom_printf("SUN4D: Cannot map External Page Table.");
		prom_halt();
	}

	op->dev.archdata.iommu = iounit;
	iounit->page_table = xpt;
	spin_lock_init(&iounit->lock);

	xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
	for (; xpt < xptend; xpt++)
		sbus_writel(0, xpt);
}

static int __init iounit_init(void)
{
	extern void sun4d_init_sbi_irq(void);
	struct device_node *dp;

	for_each_node_by_name(dp, "sbi") {
		struct platform_device *op = of_find_device_by_node(dp);

		iounit_iommu_init(op);
		of_propagate_archdata(op);
	}

	sun4d_init_sbi_irq();

	return 0;
}

subsys_initcall(iounit_init);

 93
 94/* One has to hold iounit->lock to call this */
 95static unsigned long iounit_get_area(struct iounit_struct *iounit, unsigned long vaddr, int size)
 96{
 97	int i, j, k, npages;
 98	unsigned long rotor, scan, limit;
 99	iopte_t iopte;
100
101        npages = ((vaddr & ~PAGE_MASK) + size + (PAGE_SIZE-1)) >> PAGE_SHIFT;
102
103	/* A tiny bit of magic ingredience :) */
104	switch (npages) {
105	case 1: i = 0x0231; break;
106	case 2: i = 0x0132; break;
107	default: i = 0x0213; break;
108	}
109	
110	IOD(("iounit_get_area(%08lx,%d[%d])=", vaddr, size, npages));
111	
112next:	j = (i & 15);
113	rotor = iounit->rotor[j - 1];
114	limit = iounit->limit[j];
115	scan = rotor;
116nexti:	scan = find_next_zero_bit(iounit->bmap, limit, scan);
117	if (scan + npages > limit) {
118		if (limit != rotor) {
119			limit = rotor;
120			scan = iounit->limit[j - 1];
121			goto nexti;
122		}
123		i >>= 4;
124		if (!(i & 15))
125			panic("iounit_get_area: Couldn't find free iopte slots for (%08lx,%d)\n", vaddr, size);
126		goto next;
127	}
128	for (k = 1, scan++; k < npages; k++)
129		if (test_bit(scan++, iounit->bmap))
130			goto nexti;
131	iounit->rotor[j - 1] = (scan < limit) ? scan : iounit->limit[j - 1];
132	scan -= npages;
133	iopte = MKIOPTE(__pa(vaddr & PAGE_MASK));
134	vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
135	for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
136		set_bit(scan, iounit->bmap);
137		sbus_writel(iopte_val(iopte), &iounit->page_table[scan]);
138	}
139	IOD(("%08lx\n", vaddr));
140	return vaddr;
141}
142
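/*
 * iounit_map_page() is the dma_map_ops .map_page callback: it refuses
 * zero-length or larger-than-256K requests with DMA_MAPPING_ERROR and
 * otherwise grabs DVMA space for the buffer under the iounit lock.
 */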
static dma_addr_t iounit_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t len, enum dma_data_direction dir,
		unsigned long attrs)
{
	void *vaddr = page_address(page) + offset;
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long ret, flags;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return DMA_MAPPING_ERROR;

	spin_lock_irqsave(&iounit->lock, flags);
	ret = iounit_get_area(iounit, (unsigned long)vaddr, len);
	spin_unlock_irqrestore(&iounit->lock, flags);
	return ret;
}

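/*
 * Illustration (not part of the original file): with iounit_dma_ops
 * installed, an SBus driver reaches the callbacks in this file through the
 * generic DMA API.  A rough sketch, with 'op' standing for the driver's
 * platform_device:
 *
 *	void *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
 *	dma_addr_t dvma = dma_map_single(&op->dev, buf, PAGE_SIZE,
 *					 DMA_TO_DEVICE);
 *	if (dma_mapping_error(&op->dev, dvma))
 *		goto fail;
 *	... program the device with 'dvma', wait for completion ...
 *	dma_unmap_single(&op->dev, dvma, PAGE_SIZE, DMA_TO_DEVICE);
 */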
static int iounit_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	struct scatterlist *sg;
	unsigned long flags;
	int i;

	/* FIXME: Cache some resolved pages - often several sg entries are to the same page */
	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = iounit_get_area(iounit, (unsigned long) sg_virt(sg), sg->length);
		sg->dma_length = sg->length;
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
	return nents;
}

static void iounit_unmap_page(struct device *dev, dma_addr_t vaddr, size_t len,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags;

	spin_lock_irqsave(&iounit->lock, flags);
	len = ((vaddr & ~PAGE_MASK) + len + (PAGE_SIZE-1)) >> PAGE_SHIFT;
	vaddr = (vaddr - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
	IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
	for (len += vaddr; vaddr < len; vaddr++)
		clear_bit(vaddr, iounit->bmap);
	spin_unlock_irqrestore(&iounit->lock, flags);
}

static void iounit_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long flags, vaddr, len;
	struct scatterlist *sg;
	int i;

	spin_lock_irqsave(&iounit->lock, flags);
	for_each_sg(sgl, sg, nents, i) {
		len = ((sg->dma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
		vaddr = (sg->dma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
		IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
		for (len += vaddr; vaddr < len; vaddr++)
			clear_bit(vaddr, iounit->bmap);
	}
	spin_unlock_irqrestore(&iounit->lock, flags);
}

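/*
 * iounit_alloc() below backs the coherent allocation path for SBus
 * devices: it grabs zeroed pages, reserves a DVMA window with
 * sparc_dma_alloc_resource(), and maps each page twice (a kernel PTE at
 * the DVMA address plus the matching XPT entry), so the CPU pointer it
 * returns and the dma_handle are the same DVMA address.  iounit_free() is
 * still the old empty stub.
 */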
#ifdef CONFIG_SBUS
static void *iounit_alloc(struct device *dev, size_t len,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	struct iounit_struct *iounit = dev->archdata.iommu;
	unsigned long va, addr, page, end, ret;
	pgprot_t dvma_prot;
	iopte_t __iomem *iopte;

	/* XXX So what is maxphys for us and how do drivers know it? */
	if (!len || len > 256 * 1024)
		return NULL;

	len = PAGE_ALIGN(len);
	va = __get_free_pages(gfp | __GFP_ZERO, get_order(len));
	if (!va)
		return NULL;

	addr = ret = sparc_dma_alloc_resource(dev, len);
	if (!addr)
		goto out_free_pages;
	*dma_handle = addr;

	dvma_prot = __pgprot(SRMMU_CACHE | SRMMU_ET_PTE | SRMMU_PRIV);
	end = PAGE_ALIGN((addr + len));
	while(addr < end) {
		page = va;
		{
			pgd_t *pgdp;
			pmd_t *pmdp;
			pte_t *ptep;
			long i;

			pgdp = pgd_offset(&init_mm, addr);
			pmdp = pmd_offset(pgdp, addr);
			ptep = pte_offset_map(pmdp, addr);

			set_pte(ptep, mk_pte(virt_to_page(page), dvma_prot));

			i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);

			iopte = iounit->page_table + i;
			sbus_writel(iopte_val(MKIOPTE(__pa(page))), iopte);
		}
		addr += PAGE_SIZE;
		va += PAGE_SIZE;
	}
	flush_cache_all();
	flush_tlb_all();

	return (void *)ret;

out_free_pages:
	free_pages(va, get_order(len));
	return NULL;
}

static void iounit_free(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_addr, unsigned long attrs)
{
	/* XXX Somebody please fill this in */
}
#endif

static const struct dma_map_ops iounit_dma_ops = {
#ifdef CONFIG_SBUS
	.alloc			= iounit_alloc,
	.free			= iounit_free,
#endif
	.map_page		= iounit_map_page,
	.unmap_page		= iounit_unmap_page,
	.map_sg			= iounit_map_sg,
	.unmap_sg		= iounit_unmap_sg,
};

void __init ld_mmu_iounit(void)
{
	dma_ops = &iounit_dma_ops;
}
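/*
 * Illustration (not part of the original file): the .alloc/.free pair is
 * reached through the coherent DMA API, roughly:
 *
 *	dma_addr_t dvma;
 *	void *ring = dma_alloc_coherent(&op->dev, PAGE_SIZE, &dvma,
 *					GFP_KERNEL);
 *	... hand 'dvma' to the device, use 'ring' from the CPU ...
 *	dma_free_coherent(&op->dev, PAGE_SIZE, ring, dvma);
 */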