v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-noncoherent.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - But still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) generic code suffices
 */

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
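For context, the v5.9 hooks above are not called by drivers directly: they are invoked by the generic dma-direct/dma-noncoherent layer underneath the streaming DMA API. The following is a minimal, hypothetical driver-side sketch (the function and variable names are illustrative, not part of this file) of a device-to-memory transfer that would end up in arch_sync_dma_for_device() on map and arch_sync_dma_for_cpu() on unmap when running on a non-IOC ARC system.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * Hypothetical example: map a kmalloc'ed buffer for a device-to-memory
 * transfer.  On dma_map_single() the generic code calls
 * arch_sync_dma_for_device() (cache invalidate for DMA_FROM_DEVICE);
 * on dma_unmap_single() it calls arch_sync_dma_for_cpu(), which
 * invalidates again to guard against speculative prefetches done while
 * the device owned the buffer.
 */
static int example_rx(struct device *dev, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);
	dma_addr_t dma;

	if (!buf)
		return -ENOMEM;

	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		kfree(buf);
		return -ENOMEM;
	}

	/* ... program the device with 'dma', wait for the transfer ... */

	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
	/* CPU may now safely read the received data through 'buf' */
	kfree(buf);
	return 0;
}
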
v4.6
 
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * DMA Coherent API Notes
 *
 * I/O is inherently non-coherent on ARC. So a coherent DMA buffer is
 * implemented by accessing it using a kernel virtual address, with
 * the Cache bit off in the TLB entry.
 *
 * The default DMA address == Phy address which is 0x8000_0000 based.
 */

#include <linux/dma-mapping.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>


static void *arc_dma_alloc(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	unsigned long order = get_order(size);
	struct page *page;
	phys_addr_t paddr;
	void *kvaddr;
	int need_coh = 1, need_kvaddr = 0;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * IOC relies on all data (even coherent DMA data) being in cache
	 * Thus allocate normal cached memory
	 *
	 * The gains with IOC are two pronged:
	 *   -For streaming data, elides need for cache maintenance, saving
	 *    cycles in flush code, and bus bandwidth as all the lines of a
	 *    buffer need to be flushed out to memory
	 *   -For coherent data, Read/Write to buffers terminate early in cache
	 *   (vs. always going to memory - thus are faster)
	 */
	if ((is_isa_arcv2() && ioc_exists) ||
	    dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		need_coh = 0;

	/*
	 * - A coherent buffer needs MMU mapping to enforce non-cacheability
	 * - A highmem page needs a virtual handle (hence MMU mapping)
	 *   independent of cacheability
	 */
	if (PageHighMem(page) || need_coh)
		need_kvaddr = 1;

	/* This is linear addr (0x8000_0000 based) */
	paddr = page_to_phys(page);

	*dma_handle = plat_phys_to_dma(dev, paddr);

	/* This is kernel Virtual address (0x7000_0000 based) */
	if (need_kvaddr) {
		kvaddr = ioremap_nocache(paddr, size);
		if (kvaddr == NULL) {
			__free_pages(page, order);
			return NULL;
		}
	} else {
		kvaddr = (void *)(u32)paddr;
	}

	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah this bit us - STAR 9000898266
	 *
	 * Although core does call flush_cache_vmap(), it gets kvaddr hence
	 * can't be used to efficiently flush L1 and/or L2 which need paddr
	 * Currently flush_cache_vmap nukes the L1 cache completely which
	 * will be optimized as a separate commit
	 */
	if (need_coh)
		dma_cache_wback_inv(paddr, size);

	return kvaddr;
}

static void arc_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	struct page *page = virt_to_page(dma_handle);
	int is_non_coh = 1;

	is_non_coh = dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs) ||
			(is_isa_arcv2() && ioc_exists);

	if (PageHighMem(page) || !is_non_coh)
		iounmap((void __force __iomem *)vaddr);

	__free_pages(page, get_order(size));
}

/*
 * streaming DMA Mapping API...
 * CPU accesses page via normal paddr, thus needs to be explicitly made
 * consistent before each use
 */
static void _dma_cache_sync(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
	}
}

static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	phys_addr_t paddr = page_to_phys(page) + offset;
	_dma_cache_sync(paddr, size, dir);
	return plat_phys_to_dma(dev, paddr);
}

static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					       s->length, dir);

	return nents;
}

static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_FROM_DEVICE);
}

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_to_phys(dev, dma_handle), size, DMA_TO_DEVICE);
}

static void arc_dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static void arc_dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sglist, int nelems,
		enum dma_data_direction dir)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nelems, i)
		_dma_cache_sync(sg_phys(sg), sg->length, dir);
}

static int arc_dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

struct dma_map_ops arc_dma_ops = {
	.alloc			= arc_dma_alloc,
	.free			= arc_dma_free,
	.map_page		= arc_dma_map_page,
	.map_sg			= arc_dma_map_sg,
	.sync_single_for_device	= arc_dma_sync_single_for_device,
	.sync_single_for_cpu	= arc_dma_sync_single_for_cpu,
	.sync_sg_for_cpu	= arc_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arc_dma_sync_sg_for_device,
	.dma_supported		= arc_dma_supported,
};
EXPORT_SYMBOL(arc_dma_ops);
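
In the v4.6 code above, a driver reaches arc_dma_alloc()/arc_dma_free() through the generic coherent-allocation entry points, since they are wired into arc_dma_ops.alloc/.free. The following is a minimal sketch of that usage, assuming a hypothetical driver with illustrative names (example_alloc_ring, ring_dma); it is not taken from this file.

#include <linux/dma-mapping.h>

/*
 * Hypothetical example: allocate a descriptor ring.  dma_alloc_coherent()
 * routes to arc_dma_alloc() above: pages are allocated, remapped uncached
 * via ioremap_nocache() (unless IOC hardware keeps caches coherent), and
 * stale cache lines for the backing page are evicted with
 * dma_cache_wback_inv().
 */
static void *example_alloc_ring(struct device *dev, size_t ring_size,
				dma_addr_t *ring_dma)
{
	/*
	 * Returns the CPU-visible (uncached) virtual address; *ring_dma is
	 * the bus address to program into the device.
	 */
	return dma_alloc_coherent(dev, ring_size, ring_dma, GFP_KERNEL);
}

static void example_free_ring(struct device *dev, size_t ring_size,
			      void *ring, dma_addr_t ring_dma)
{
	/* Undoes the allocation via arc_dma_free() */
	dma_free_coherent(dev, ring_size, ring, ring_dma);
}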