arch/openrisc/kernel/dma.c (v4.10.11)
 
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All current known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}

static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return addr;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Nothing special to do here... */
}

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, 0);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
	}
}

static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}

struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
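
Drivers never call the or1k_* hooks above directly; they go through the generic
DMA API, which on this kernel resolves to the or1k_dma_map_ops exported above.
Below is a minimal sketch of that usage; the device pointer, buffer size and
helper name are hypothetical and only illustrate how dma_alloc_coherent() and
dma_map_single() end up exercising the callbacks in this file.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

#define EXAMPLE_BUF_SIZE 4096			/* hypothetical buffer size */

/* Hypothetical helper: one coherent buffer plus one streaming mapping. */
static int example_dma_setup(struct device *dev)
{
	dma_addr_t coherent_handle, stream_handle;
	void *coherent_buf, *stream_buf;

	/* Reaches or1k_dma_alloc(): uncached, cache-inhibited pages. */
	coherent_buf = dma_alloc_coherent(dev, EXAMPLE_BUF_SIZE,
					  &coherent_handle, GFP_KERNEL);
	if (!coherent_buf)
		return -ENOMEM;

	stream_buf = kmalloc(EXAMPLE_BUF_SIZE, GFP_KERNEL);
	if (!stream_buf) {
		dma_free_coherent(dev, EXAMPLE_BUF_SIZE, coherent_buf,
				  coherent_handle);
		return -ENOMEM;
	}

	/* Reaches or1k_map_page(): flushes the dcache for DMA_TO_DEVICE. */
	stream_handle = dma_map_single(dev, stream_buf, EXAMPLE_BUF_SIZE,
				       DMA_TO_DEVICE);

	/* ... program the device with coherent_handle / stream_handle ... */

	dma_unmap_single(dev, stream_handle, EXAMPLE_BUF_SIZE, DMA_TO_DEVICE);
	kfree(stream_buf);
	dma_free_coherent(dev, EXAMPLE_BUF_SIZE, coherent_buf, coherent_handle);
	return 0;
}
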
arch/openrisc/kernel/dma.c (v5.4)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-noncoherent.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry		= page_set_nocache,
};

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry		= page_clear_nocache,
};

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All current known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
void *
arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	unsigned long va;
	void *page;

	page = alloc_pages_exact(size, gfp | __GFP_ZERO);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	if (walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
			NULL)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}

void
arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL));

	free_pages_exact(vaddr, size);
}

void arch_sync_dma_for_device(struct device *dev, phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
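
Between these two versions the per-architecture struct dma_map_ops is gone:
via <linux/dma-noncoherent.h>, the generic DMA mapping code calls the
arch_dma_alloc(), arch_dma_free() and arch_sync_dma_for_device() hooks above
directly, so the or1k_* map_page, scatterlist and sync wrappers from v4.10.11
are no longer needed here.  Below is a minimal sketch of how a driver's
streaming sync lands in arch_sync_dma_for_device(); the device, buffer and
helper name are hypothetical.

#include <linux/dma-mapping.h>
#include <linux/string.h>

/*
 * Hypothetical helper: reuse one streaming buffer (already mapped with
 * dma_map_single() for DMA_TO_DEVICE) across repeated device-bound
 * transfers.  dma_sync_single_for_device() ends up in
 * arch_sync_dma_for_device() above, which flushes the dcache range so
 * the device sees the CPU's latest writes.
 */
static void example_retransmit(struct device *dev, void *buf,
			       dma_addr_t handle, size_t len)
{
	/* CPU rewrites the buffer between transfers. */
	memset(buf, 0xa5, len);

	/* Flush the dcache for the buffer before handing it to the device. */
	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);

	/* ... kick off the next device transfer using 'handle' ... */
}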