v4.17
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All current known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}

static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return addr;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Nothing special to do here... */
}

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, 0);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
	}
}

static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}

const struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
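
Drivers never call or1k_dma_alloc() directly; they go through the generic DMA API, which dispatches to these ops via the device's dma_map_ops. Below is a minimal, hypothetical consumer sketch (the probe function, buffer size and device pointer are illustrative and not part of this file); the "coherent" buffer it obtains is memory that or1k_dma_alloc() has marked cache-inhibited and flushed from the dcache.

#include <linux/dma-mapping.h>

/*
 * Hypothetical driver fragment: allocate one page of coherent (on OpenRISC,
 * uncached) DMA memory.  dma_alloc_coherent() dispatches to or1k_dma_alloc()
 * above through the device's dma_map_ops.
 */
static int example_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* ... hand dma_handle to the device, access cpu_addr from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, cpu_addr, dma_handle);
	return 0;
}
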
v4.10.11
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All current known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}

static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return addr;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Nothing special to do here... */
}

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, 0);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
	}
}

static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}

struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
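
The streaming callbacks above (map_page, sync_single_for_cpu/device) back dma_map_single() and the dma_sync_*() helpers in the same way. A hypothetical receive-path sketch follows, assuming a device that writes into an existing kernel buffer (the function and parameter names are illustrative, not from this file).

#include <linux/dma-mapping.h>

/*
 * Hypothetical fragment: map a kernel buffer for device writes.
 * dma_map_single() reaches or1k_map_page(), which invalidates the dcache
 * lines covering the buffer; dma_sync_single_for_cpu() reaches
 * or1k_sync_single_for_cpu() before the CPU reads the DMA'd data.
 */
static int example_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	/* ... start the device transfer and wait for it to complete ... */

	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
	/* The CPU may now safely read buf. */

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	return 0;
}
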