v4.17
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/export.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 *
 * If the NON_CONSISTENT attribute is set, then this function just
 * returns "normal", cachable memory.
 *
 * There are additional flags WEAK_ORDERING and WRITE_COMBINE to take
 * into consideration here, too.  All current known implementations of
 * the OR1K support only strongly ordered memory accesses, so that flag
 * is being ignored for now; uncached but write-combined memory is a
 * missing feature of the OR1K.
 */
static void *
or1k_dma_alloc(struct device *dev, size_t size,
	       dma_addr_t *dma_handle, gfp_t gfp,
	       unsigned long attrs)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/*
		 * We need to iterate through the pages, clearing the dcache for
		 * them and setting the cache-inhibit bit.
		 */
		if (walk_page_range(va, va + size, &walk)) {
			free_pages_exact(page, size);
			return NULL;
		}
	}

	return (void *)va;
}

static void
or1k_dma_free(struct device *dev, size_t size, void *vaddr,
	      dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	if ((attrs & DMA_ATTR_NON_CONSISTENT) == 0) {
		/* walk_page_range shouldn't be able to fail here */
		WARN_ON(walk_page_range(va, va + size, &walk));
	}

	free_pages_exact(vaddr, size);
}

static dma_addr_t
or1k_map_page(struct device *dev, struct page *page,
	      unsigned long offset, size_t size,
	      enum dma_data_direction dir,
	      unsigned long attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	if (attrs & DMA_ATTR_SKIP_CPU_SYNC)
		return addr;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

static void
or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	/* Nothing special to do here... */
}

static int
or1k_map_sg(struct device *dev, struct scatterlist *sg,
	    int nents, enum dma_data_direction dir,
	    unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, 0);
	}

	return nents;
}

static void
or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
	      int nents, enum dma_data_direction dir,
	      unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, 0);
	}
}

static void
or1k_sync_single_for_cpu(struct device *dev,
			 dma_addr_t dma_handle, size_t size,
			 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

static void
or1k_sync_single_for_device(struct device *dev,
			    dma_addr_t dma_handle, size_t size,
			    enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}

const struct dma_map_ops or1k_dma_map_ops = {
	.alloc = or1k_dma_alloc,
	.free = or1k_dma_free,
	.map_page = or1k_map_page,
	.unmap_page = or1k_unmap_page,
	.map_sg = or1k_map_sg,
	.unmap_sg = or1k_unmap_sg,
	.sync_single_for_cpu = or1k_sync_single_for_cpu,
	.sync_single_for_device = or1k_sync_single_for_device,
};
EXPORT_SYMBOL(or1k_dma_map_ops);

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
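
In this version the architecture exposes its own dma_map_ops, so a driver reaches or1k_dma_alloc()/or1k_dma_free() through the generic coherent-allocation API. The sketch below illustrates such a caller; the function name example_setup_ring, the device pointer, the buffer size and the error handling are illustrative assumptions, not part of the file above.

/*
 * Minimal sketch of a driver using the coherent path implemented by
 * or1k_dma_alloc()/or1k_dma_free().  Device, size and how the buffer
 * is used are illustrative assumptions only.
 */
#include <linux/dma-mapping.h>

static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Dispatches to or1k_dma_alloc(): uncached pages, flushed from the dcache */
	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... program ring_dma into the device, access ring from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}
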
v6.2
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry		= page_set_nocache,
};

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry		= page_clear_nocache,
};

void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	mmap_write_lock(&init_mm);
	error = walk_page_range_novma(&init_mm, va, va + size,
			&set_nocache_walk_ops, NULL, NULL);
	mmap_write_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}

void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_write_lock(&init_mm);
	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL, NULL));
	mmap_write_unlock(&init_mm);
}

void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
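
In this newer version the architecture no longer carries its own dma_map_ops; it hooks into the generic dma-direct code, which calls arch_dma_set_uncached()/arch_dma_clear_uncached() for coherent allocations and arch_sync_dma_for_device() around streaming mappings. The sketch below shows a streaming-DMA caller that exercises that sync hook; the function name example_send, the device, the buffer and the direction are illustrative assumptions, not part of the file above.

/*
 * Sketch of a streaming mapping that reaches arch_sync_dma_for_device()
 * through the generic dma-direct code.  Device, buffer and length are
 * illustrative assumptions only.
 */
#include <linux/dma-mapping.h>

static int example_send(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	/* For DMA_TO_DEVICE, the dcache range is flushed before the device sees it */
	addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... hand 'addr' to the device and wait for completion ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
	return 0;
}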