v3.1
 
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * DMA mapping callbacks...
 * As alloc_coherent is the only DMA callback being used currently, that's
 * the only thing implemented properly.  The rest need looking into...
 */

#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int page_set_nocache(pte_t *pte, unsigned long addr,
			    unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static int page_clear_nocache(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_page(NULL, addr);

	return 0;
}

/*
 * Alloc "coherent" memory, which for OpenRISC means simply uncached.
 *
 * This function effectively just calls __get_free_pages, sets the
 * cache-inhibit bit on those pages, and makes sure that the pages are
 * flushed out of the cache before they are used.
 */
void *or1k_dma_alloc_coherent(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp)
{
	unsigned long va;
	void *page;
	struct mm_walk walk = {
		.pte_entry = page_set_nocache,
		.mm = &init_mm
	};

	page = alloc_pages_exact(size, gfp);
	if (!page)
		return NULL;

	/* This gives us the real physical address of the first page. */
	*dma_handle = __pa(page);

	va = (unsigned long)page;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	if (walk_page_range(va, va + size, &walk)) {
		free_pages_exact(page, size);
		return NULL;
	}

	return (void *)va;
}

void or1k_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
			    dma_addr_t dma_handle)
{
	unsigned long va = (unsigned long)vaddr;
	struct mm_walk walk = {
		.pte_entry = page_clear_nocache,
		.mm = &init_mm
	};

	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(va, va + size, &walk));

	free_pages_exact(vaddr, size);
}

dma_addr_t or1k_map_page(struct device *dev, struct page *page,
			 unsigned long offset, size_t size,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs)
{
	unsigned long cl;
	dma_addr_t addr = page_to_phys(page) + offset;

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo.dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}

	return addr;
}

void or1k_unmap_page(struct device *dev, dma_addr_t dma_handle,
		     size_t size, enum dma_data_direction dir,
		     struct dma_attrs *attrs)
{
	/* Nothing special to do here... */
}

int or1k_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, NULL);
	}

	return nents;
}

void or1k_unmap_sg(struct device *dev, struct scatterlist *sg,
		   int nents, enum dma_data_direction dir,
		   struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		or1k_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, NULL);
	}
}

void or1k_sync_single_for_cpu(struct device *dev,
			      dma_addr_t dma_handle, size_t size,
			      enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Invalidate the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBIR, cl);
}

void or1k_sync_single_for_device(struct device *dev,
				 dma_addr_t dma_handle, size_t size,
				 enum dma_data_direction dir)
{
	unsigned long cl;
	dma_addr_t addr = dma_handle;

	/* Flush the dcache for the requested range */
	for (cl = addr; cl < addr + size; cl += cpuinfo.dcache_block_size)
		mtspr(SPR_DCBFR, cl);
}

/* Number of entries preallocated for DMA-API debugging */
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);
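
Drivers do not call or1k_dma_alloc_coherent() directly; they use the generic DMA API (dma_alloc_coherent() and friends), which the architecture's <asm/dma-mapping.h> routes to the or1k_* helpers above. The snippet below is a minimal, hypothetical sketch of that driver-side usage; the function name, device pointer, and buffer size are illustrative and not part of the file above.

/*
 * Hypothetical driver-side usage: allocate an uncached ("coherent")
 * buffer, hand its bus address to the device, then release it.
 */
#include <linux/dma-mapping.h>

static int example_setup_dma_buffer(struct device *dev)
{
	dma_addr_t dma_handle;
	void *vaddr;

	/* Ends up in or1k_dma_alloc_coherent(): the pages are allocated,
	 * marked cache-inhibited and flushed from the dcache. */
	vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &dma_handle, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... program dma_handle into the device, access vaddr from the CPU ... */

	dma_free_coherent(dev, PAGE_SIZE, vaddr, dma_handle);
	return 0;
}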
v6.2
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* Flush page out of dcache */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry		= page_set_nocache,
};

static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry		= page_clear_nocache,
};

void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	mmap_write_lock(&init_mm);
	error = walk_page_range_novma(&init_mm, va, va + size,
			&set_nocache_walk_ops, NULL, NULL);
	mmap_write_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}

void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_write_lock(&init_mm);
	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range_novma(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL, NULL));
	mmap_write_unlock(&init_mm);
}

void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}
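
In this version the architecture no longer supplies its own allocation and mapping entry points; the generic dma-direct code in kernel/dma/ calls arch_dma_set_uncached()/arch_dma_clear_uncached() when setting up or tearing down a coherent allocation, and arch_sync_dma_for_device() when a streaming mapping is created. The snippet below is a minimal, hypothetical sketch of a streaming mapping that would reach arch_sync_dma_for_device() on a non-coherent platform such as this one; the function name, buffer, and direction are illustrative only.

/*
 * Hypothetical driver-side usage: map an existing kernel buffer for a
 * device-read (DMA_TO_DEVICE) transfer and unmap it afterwards.
 */
#include <linux/dma-mapping.h>

static int example_stream_to_device(struct device *dev, void *buf, size_t len)
{
	dma_addr_t dma_handle;

	/* On OpenRISC this flushes the dcache lines covering buf via
	 * arch_sync_dma_for_device() before the device reads memory. */
	dma_handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_handle))
		return -EIO;

	/* ... start the device transfer using dma_handle ... */

	dma_unmap_single(dev, dma_handle, len, DMA_TO_DEVICE);
	return 0;
}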