/*
 * DMA Mapping glue for ARC
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef ASM_ARC_DMA_MAPPING_H
#define ASM_ARC_DMA_MAPPING_H

#include <asm-generic/dma-coherent.h>
#include <asm/cacheflush.h>

#ifndef CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA
/*
 * The dma_map_* APIs take CPU addresses, i.e. kernel logical addresses in
 * the untranslated, 0x8000_0000-based address space. The DMA address (bus
 * address) ideally needs to be 0x0000_0000 based, hence these glue routines.
 * However, given that intermediate bus bridges can ignore the high bit, we
 * can get away with these routines being no-ops.
 * If a platform/device comes up which strictly requires 0-based bus
 * addresses (e.g. the AHB-PCI bridge on the Angel4 board), it can provide
 * its own versions.
 */
#define plat_dma_addr_to_kernel(dev, addr) ((unsigned long)(addr))
#define plat_kernel_addr_to_dma(dev, ptr) ((dma_addr_t)(ptr))

#else
#include <plat/dma_addr.h>
#endif
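
/*
 * A platform needing 0-based bus addresses would select
 * CONFIG_ARC_PLAT_NEEDS_CPU_TO_DMA and supply its own helpers in
 * <plat/dma_addr.h>. A minimal sketch, assuming (hypothetically) that the
 * bridge simply requires the 0x8000_0000 kernel-space offset dropped:
 *
 *	#define plat_dma_addr_to_kernel(dev, addr)	\
 *		((unsigned long)(addr) | 0x80000000UL)
 *	#define plat_kernel_addr_to_dma(dev, ptr)	\
 *		((dma_addr_t)((unsigned long)(ptr) & ~0x80000000UL))
 */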

void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			  dma_addr_t dma_handle);

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t gfp);

void dma_free_coherent(struct device *dev, size_t size, void *kvaddr,
		       dma_addr_t dma_handle);
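
/*
 * Typical driver usage of the coherent API (illustrative only; "pdev" and
 * the ring buffer are hypothetical): the returned kernel virtual address is
 * safe for CPU access without explicit cache maintenance, while *dma_handle
 * is what gets programmed into the device.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_SIZE,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_SIZE, ring, ring_dma);
 */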

/* drivers/base/dma-mapping.c */
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);
extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
				  void *cpu_addr, dma_addr_t dma_addr,
				  size_t size);

#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s)
#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s)

/*
 * Streaming DMA Mapping API...
 * The CPU accesses the page via its normal paddr, so it needs to be
 * explicitly made consistent before each use.
 */

static inline void __inline_dma_cache_sync(unsigned long paddr, size_t size,
					   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;
	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;
	default:
		pr_err("Invalid DMA dir [%d] for OP @ %lx\n", dir, paddr);
	}
}

void __arc_dma_cache_sync(unsigned long paddr, size_t size,
			  enum dma_data_direction dir);

#define _dma_cache_sync(addr, sz, dir)			\
do {							\
	if (__builtin_constant_p(dir))			\
		__inline_dma_cache_sync(addr, sz, dir);	\
	else						\
		__arc_dma_cache_sync(addr, sz, dir);	\
} while (0)
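
/*
 * The __builtin_constant_p() dispatch above means that for the common case
 * of a compile-time-constant direction, the switch in
 * __inline_dma_cache_sync() folds down to a single cache operation, e.g.
 * (illustrative):
 *
 *	_dma_cache_sync(paddr, len, DMA_TO_DEVICE);
 *
 * compiles down to just dma_cache_wback(paddr, len). A runtime-variable
 * direction instead calls the out-of-line __arc_dma_cache_sync(), avoiding
 * inlining the whole switch at every call site.
 */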

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	_dma_cache_sync((unsigned long)cpu_addr, size, dir);
	return plat_kernel_addr_to_dma(dev, cpu_addr);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		 size_t size, enum dma_data_direction dir)
{
	/* nothing to do: cache maintenance was done at map time */
}
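
/*
 * Streaming usage sketch (illustrative; dev/buf/len are hypothetical): for
 * a buffer the device will read, write back the CPU cache before handing
 * it over, then unmap once the transfer completes.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program 'handle' into the device, wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */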

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + offset;

	return dma_map_single(dev, (void *)paddr, size, dir);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
	       size_t size, enum dma_data_direction dir)
{
	/* nothing to do: cache maintenance was done at map time */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					      s->length, dir);

	return nents;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	     int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
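
/*
 * Scatter-gather usage sketch (illustrative; the buffers and lengths are
 * hypothetical): each entry is synced and translated individually.
 *
 *	struct scatterlist sgl[2];
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *	nents = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	... hand sg_dma_address()/sg_dma_len() of each entry to the device ...
 *	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */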

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
			DMA_FROM_DEVICE);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle), size,
			DMA_TO_DEVICE);
}
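
/*
 * To reuse a streaming mapping across multiple transfers, bracket each CPU
 * access with the sync calls instead of remapping (illustrative):
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU reads the data ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... device writes again ...
 */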

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
			size, DMA_FROM_DEVICE);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	_dma_cache_sync(plat_dma_addr_to_kernel(dev, dma_handle) + offset,
			size, DMA_TO_DEVICE);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nelems, i)
		_dma_cache_sync((unsigned long)sg_virt(s), s->length, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nelems, i)
		_dma_cache_sync((unsigned long)sg_virt(s), s->length, dir);
}

static inline int dma_supported(struct device *dev, u64 dma_mask)
{
	/* Support 32 bit DMA mask exclusively */
	return dma_mask == DMA_BIT_MASK(32);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	/* the address translation above is stateless and cannot fail */
	return 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
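
/*
 * A driver probe routine would typically validate its mask up front
 * (illustrative; "pdev" is hypothetical):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -ENODEV;
 */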

#endif /* ASM_ARC_DMA_MAPPING_H */