Linux v3.1: arch/openrisc/kernel/dma.c
  1/*
  2 * OpenRISC Linux
  3 *
  4 * Linux architectural port borrowing liberally from similar works of
  5 * others.  All original copyrights apply as per the original source
  6 * declaration.
  7 *
  8 * Modifications for the OpenRISC architecture:
  9 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 10 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 11 *
 12 *      This program is free software; you can redistribute it and/or
 13 *      modify it under the terms of the GNU General Public License
 14 *      as published by the Free Software Foundation; either version
 15 *      2 of the License, or (at your option) any later version.
 16 *
 17 * DMA mapping callbacks...
 18 * As alloc_coherent is the only DMA callback being used currently, that's
 19 * the only thing implemented properly.  The rest need looking into...
 20 */
 21
 22#include <linux/dma-mapping.h>
 23#include <linux/dma-debug.h>
 24
 25#include <asm/cpuinfo.h>
 26#include <asm/spr_defs.h>
 27#include <asm/tlbflush.h>
 28
 29static int page_set_nocache(pte_t *pte, unsigned long addr,
 30			    unsigned long next, struct mm_walk *walk)
 31{
 32	unsigned long cl;
 33
 34	pte_val(*pte) |= _PAGE_CI;
 35
 36	/*
 37	 * Flush the page out of the TLB so that the new page flags get
 38	 * picked up next time there's an access
 39	 */
 40	flush_tlb_page(NULL, addr);
 41
 42	/* Flush page out of dcache */
 43	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
 44		mtspr(SPR_DCBFR, cl);
 45
 46	return 0;
 47}
 48
 49static int page_clear_nocache(pte_t *pte, unsigned long addr,
 50			      unsigned long next, struct mm_walk *walk)
 51{
 52	pte_val(*pte) &= ~_PAGE_CI;
 53
 54	/*
 55	 * Flush the page out of the TLB so that the new page flags get
 56	 * picked up next time there's an access
 57	 */
 58	flush_tlb_page(NULL, addr);
 59
 60	return 0;
 61}
 62
 63/*
 64 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 65 *
 66 * This function effectively just calls __get_free_pages, sets the
 67 * cache-inhibit bit on those pages, and makes sure that the pages are
 68 * flushed out of the cache before they are used.
 69 *
 70 */
 71void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
 72			      dma_addr_t *dma_handle, gfp_t gfp)
 73{
 74	unsigned long va;
 75	void *page;
 76	struct mm_walk walk = {
 77		.pte_entry = page_set_nocache,
 78		.mm = &init_mm
 79	};
 80
 81	page = alloc_pages_exact(size, gfp);
 82	if (!page)
 83		return NULL;
 84
 85	/* This gives us the real physical address of the first page. */
 86	*dma_handle = __pa(page);
 87
 88	va = (unsigned long)page;
 89
 90	/*
 91	 * We need to iterate through the pages, clearing the dcache for
 92	 * them and setting the cache-inhibit bit.
 93	 */
 94	if (walk_page_range(va, va + size, &walk)) {
 95		free_pages_exact(page, size);
 96		return NULL;
 97	}
 98
 99	return (void *)va;
100}
101
102void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
103			    dma_addr_t dma_handle)
104{
105	unsigned long va = (unsigned long)vaddr;
106	struct mm_walk walk = {
107		.pte_entry = page_clear_nocache,
108		.mm = &init_mm
109	};
110
111	/* walk_page_range shouldn't be able to fail here */
112	WARN_ON(walk_page_range(va, va + size, &walk));
113
114	free_pages_exact(vaddr, size);
115}
116
117dma_addr_t or1k_map_page(struct device *dev, struct page *page,
118			 unsigned long offset, size_t size,
119			 enum dma_data_direction dir,
120			 struct dma_attrs *attrs)
121{
122	unsigned long cl;
123	dma_addr_t addr = page_to_phys(page) + offset;
124
125	switch (dir) {
126	case DMA_TO_DEVICE:
127		/* Flush the dcache for the requested range */
128		for (cl = addr; cl < addr + size;
129		     cl += cpuinfo.dcache_block_size)
130			mtspr(SPR_DCBFR, cl);
131		break;
132	case DMA_FROM_DEVICE:
133		/* Invalidate the dcache for the requested range */
134		for (cl = addr; cl < addr + size;
135		     cl += cpuinfo.dcache_block_size)
136			mtspr(SPR_DCBIR, cl);
137		break;
138	default:
139		/*
140		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
141		 * flush nor invalidate the cache here as the area will need
142		 * to be manually synced anyway.
143		 */
144		break;
145	}
146
147	return addr;
148}
149
150void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
151		     size_t size, enum dma_data_direction dir,
152		     struct dma_attrs *attrs)
153{
154	/* Nothing special to do here... */
155}
156
157int or1k_map_sg(struct device *dev, struct scatterlist *sg,
158		int nents, enum dma_data_direction dir,
159		struct dma_attrs *attrs)
160{
161	struct scatterlist *s;
162	int i;
163
164	for_each_sg(sg, s, nents, i) {
165		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
166					       s->length, dir, NULL);
167	}
168
169	return nents;
170}
171
172void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
173		   int nents, enum dma_data_direction dir,
174		   struct dma_attrs *attrs)
175{
176	struct scatterlist *s;
177	int i;
178
179	for_each_sg(sg, s, nents, i) {
180		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
181	}
182}
183
184void or1k_sync_single_for_cpu(struct device *dev,
185			      dma_addr_t dma_handle, size_t size,
186			      enum dma_data_direction dir)
187{
188	unsigned long cl;
189	dma_addr_t addr = dma_handle;
190
191	/* Invalidate the dcache for the requested range */
192	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
193		mtspr(SPR_DCBIR, cl);
194}
195
196void or1k_sync_single_for_device(struct device *dev,
197			         dma_addr_t dma_handle, size_t size,
198			         enum dma_data_direction dir)
199{
200	unsigned long cl;
201	dma_addr_t addr = dma_handle;
202
203	/* Flush the dcache for the requested range */
204	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
205		mtspr(SPR_DCBFR, cl);
206}
207
208/* Number of entries preallocated for DMA-API debugging */
209#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
210
211static int __init dma_init(void)
212{
213	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
214
215	return 0;
216}
217fs_initcall(dma_init);
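
In v3.1 these or1k_* routines are wired up through the architecture's asm/dma-mapping.h wrappers, so a driver never calls them by name; it goes through the generic DMA API, and dma_alloc_coherent() lands in or1k_dma_alloc_coherent() above. A minimal driver-side sketch, assuming a valid struct device pointer; the example_* names are illustrative and not part of the file:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Allocate an uncached ("coherent") buffer; *handle receives the physical
 * address to program into the device. */
static void *example_get_dma_buffer(struct device *dev, size_t size,
				    dma_addr_t *handle)
{
	return dma_alloc_coherent(dev, size, handle, GFP_KERNEL);
}

static void example_put_dma_buffer(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t handle)
{
	dma_free_coherent(dev, size, vaddr, handle);
}
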
Linux v4.10.11: arch/openrisc/kernel/dma.c
  1/*
  2 * OpenRISC Linux
  3 *
  4 * Linux architectural port borrowing liberally from similar works of
  5 * others.  All original copyrights apply as per the original source
  6 * declaration.
  7 *
  8 * Modifications for the OpenRISC architecture:
  9 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 10 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 11 *
 12 *      This program is free software; you can redistribute it and/or
 13 *      modify it under the terms of the GNU General Public License
 14 *      as published by the Free Software Foundation; either version
 15 *      2 of the License, or (at your option) any later version.
 16 *
 17 * DMA mapping callbacks...
 18 * As alloc_coherent is the only DMA callback being used currently, that's
 19 * the only thing implemented properly.  The rest need looking into...
 20 */
 21
 22#include <linux/dma-mapping.h>
 23#include <linux/dma-debug.h>
 24#include <linux/export.h>
 25
 26#include <asm/cpuinfo.h>
 27#include <asm/spr_defs.h>
 28#include <asm/tlbflush.h>
 29
 30static int
 31page_set_nocache(pte_t *pte, unsigned long addr,
 32		 unsigned long next, struct mm_walk *walk)
 33{
 34	unsigned long cl;
 35
 36	pte_val(*pte) |= _PAGE_CI;
 37
 38	/*
 39	 * Flush the page out of the TLB so that the new page flags get
 40	 * picked up next time there's an access
 41	 */
 42	flush_tlb_page(NULL, addr);
 43
 44	/* Flush page out of dcache */
 45	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
 46		mtspr(SPR_DCBFR, cl);
 47
 48	return 0;
 49}
 50
 51static int
 52page_clear_nocache(pte_t *pte, unsigned long addr,
 53		   unsigned long next, struct mm_walk *walk)
 54{
 55	pte_val(*pte) &= ~_PAGE_CI;
 56
 57	/*
 58	 * Flush the page out of the TLB so that the new page flags get
 59	 * picked up next time there's an access
 60	 */
 61	flush_tlb_page(NULL, addr);
 62
 63	return 0;
 64}
 65
 66/*
 67 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 68 *
 69 * This function effectively just calls __get_free_pages, sets the
 70 * cache-inhibit bit on those pages, and makes sure that the pages are
 71 * flushed out of the cache before they are used.
 72 *
 73 * If the NON_CONSISTENT attribute is set, then this function just
 74 * returns "normal", cachable memory.
 75 *
 76 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 77 * into consideration here, too.  All current known implementations of
 78 * the OR1K support only strongly ordered memory accesses, so that flag
 79 * is being ignored for now; uncached but write-combined memory is a
 80 * missing feature of the OR1K.
 81 */
 82static void *
 83or1k_dma_alloc(struct device *dev, size_t size,
 84	       dma_addr_t *dma_handle, gfp_t gfp,
 85	       unsigned long attrs)
 86{
 87	unsigned long va;
 88	void *page;
 89	struct mm_walk walk = {
 90		.pte_entry = page_set_nocache,
 91		.mm = &init_mm
 92	};
 93
 94	page = alloc_pages_exact(size, gfp);
 95	if (!page)
 96		return NULL;
 97
 98	/* This gives us the real physical address of the first page. */
 99	*dma_handle = __pa(page);
100
101	va = (unsigned long)page;
102
103	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
104		/*
105		 * We need to iterate through the pages, clearing the dcache for
106		 * them and setting the cache-inhibit bit.
107		 */
108		if (walk_page_range(va, va + size, &walk)) {
109			free_pages_exact(page, size);
110			return NULL;
111		}
112	}
113
114	return (void *)va;
115}
116
117static void
118or1k_dma_free(struct device *dev, size_t size, void *vaddr,
119	      dma_addr_t dma_handle, unsigned long attrs)
120{
121	unsigned long va = (unsigned long)vaddr;
122	struct mm_walk walk = {
123		.pte_entry = page_clear_nocache,
124		.mm = &init_mm
125	};
126
127	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
128		/* walk_page_range shouldn't be able to fail here */
129		WARN_ON(walk_page_range(va, va + size, &walk));
130	}
131
132	free_pages_exact(vaddr, size);
133}
134
135static dma_addr_t
136or1k_map_page(struct device *dev, struct page *page,
137	      unsigned long offset, size_t size,
138	      enum dma_data_direction dir,
139	      unsigned long attrs)
140{
141	unsigned long cl;
142	dma_addr_t addr = page_to_phys(page) + offset;
143
144	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
145		return addr;
146
147	switch (dir) {
148	case DMA_TO_DEVICE:
149		/* Flush the dcache for the requested range */
150		for (cl = addr; cl < addr + size;
151		     cl += cpuinfo.dcache_block_size)
152			mtspr(SPR_DCBFR, cl);
153		break;
154	case DMA_FROM_DEVICE:
155		/* Invalidate the dcache for the requested range */
156		for (cl = addr; cl < addr + size;
157		     cl += cpuinfo.dcache_block_size)
158			mtspr(SPR_DCBIR, cl);
159		break;
160	default:
161		/*
162		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
163		 * flush nor invalidate the cache here as the area will need
164		 * to be manually synced anyway.
165		 */
166		break;
167	}
168
169	return addr;
170}
171
172static void
173or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
174		size_t size, enum dma_data_direction dir,
175		unsigned long attrs)
176{
177	/* Nothing special to do here... */
178}
179
180static int
181or1k_map_sg(struct device *dev, struct scatterlist *sg,
182	    int nents, enum dma_data_direction dir,
183	    unsigned long attrs)
184{
185	struct scatterlist *s;
186	int i;
187
188	for_each_sg(sg, s, nents, i) {
189		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
190					       s->length, dir, 0);
191	}
192
193	return nents;
194}
195
196static void
197or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
198	      int nents, enum dma_data_direction dir,
199	      unsigned long attrs)
200{
201	struct scatterlist *s;
202	int i;
203
204	for_each_sg(sg, s, nents, i) {
205		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
206	}
207}
208
209static void
210or1k_sync_single_for_cpu(struct device *dev,
211			 dma_addr_t dma_handle, size_t size,
212			 enum dma_data_direction dir)
213{
214	unsigned long cl;
215	dma_addr_t addr = dma_handle;
216
217	/* Invalidate the dcache for the requested range */
218	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
219		mtspr(SPR_DCBIR, cl);
220}
221
222static void
223or1k_sync_single_for_device(struct device *dev,
224			    dma_addr_t dma_handle, size_t size,
225			    enum dma_data_direction dir)
226{
227	unsigned long cl;
228	dma_addr_t addr = dma_handle;
229
230	/* Flush the dcache for the requested range */
231	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
232		mtspr(SPR_DCBFR, cl);
233}
234
235struct dma_map_ops or1k_dma_map_ops = {
236	.alloc = or1k_dma_alloc,
237	.free = or1k_dma_free,
238	.map_page = or1k_map_page,
239	.unmap_page = or1k_unmap_page,
240	.map_sg = or1k_map_sg,
241	.unmap_sg = or1k_unmap_sg,
242	.sync_single_for_cpu = or1k_sync_single_for_cpu,
243	.sync_single_for_device = or1k_sync_single_for_device,
244};
245EXPORT_SYMBOL(or1k_dma_map_ops);
246
247/* Number of entries preallocated for DMA-API debugging */
248#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
249
250static int __init dma_init(void)
251{
252	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
253
254	return 0;
255}
256fs_initcall(dma_init);
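
In v4.10.11 the same entry points are instead published as or1k_dma_map_ops and dispatched through the generic struct dma_map_ops machinery, so dma_map_single() reaches or1k_map_page() and dma_sync_single_for_cpu() reaches or1k_sync_single_for_cpu(). A minimal sketch of that streaming path, assuming a device that writes into a driver-supplied buffer; example_rx() and the surrounding driver are hypothetical:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma;

	/* Map the buffer for device writes; on OpenRISC this invalidates
	 * the dcache range via or1k_map_page() (DMA_FROM_DEVICE case). */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... hand "dma" to the device and wait for the transfer ... */

	/* Make the device's writes visible to the CPU before touching buf. */
	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);

	return 0;
}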