/*
 * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
 *
 * Provide default implementations of the DMA mapping callbacks for
 * directly mapped busses.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dma-debug.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>
#include <asm/machdep.h>

/*
 * Generic direct DMA implementation
 *
 * This implementation supports a per-device offset that can be applied if
 * the address at which memory is visible to devices is not 0. Platform code
 * can set archdata.dma_data to an unsigned long holding the offset. By
 * default the offset is PCI_DRAM_OFFSET.
 */

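/*
 * Illustrative sketch, not part of the original file: platform setup
 * code installs the per-device bus offset via the set_dma_offset()
 * helper. For a hypothetical device "dev" whose RAM is visible to the
 * bus at 0x80000000:
 *
 *	set_dma_offset(dev, 0x80000000ull);
 *
 * Every bus address produced by the routines below then has
 * get_dma_offset(dev) added to the CPU physical address.
 */
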
void *dma_direct_alloc_coherent(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
	ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
	if (ret == NULL)
		return NULL;
	*dma_handle += get_dma_offset(dev);
	return ret;
#else
	struct page *page;
	int node = dev_to_node(dev);

	/* ignore region specifiers */
	flag &= ~(__GFP_HIGHMEM);

	page = alloc_pages_node(node, flag, get_order(size));
	if (page == NULL)
		return NULL;
	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = virt_to_abs(ret) + get_dma_offset(dev);

	return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

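/*
 * Illustrative sketch, not part of the original file: drivers never
 * call the dma_direct_* hooks directly; they go through the generic
 * wrappers, which dispatch through dma_direct_ops. For example:
 *
 *	dma_addr_t bus;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
 *
 *	if (cpu)
 *		dma_free_coherent(dev, PAGE_SIZE, cpu, bus);
 */
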
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs)
{
	/* A direct mapping programs no IOMMU state, so there is
	 * nothing to tear down here. */
}

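/*
 * Illustrative sketch, not part of the original file: the scatterlist
 * hooks above are reached through the generic scatter/gather API, e.g.
 *
 *	int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *	if (n == 0)
 *		return -EIO;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
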
static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
	/* Could be improved so platforms can set the limit in case
	 * they have limited DMA windows
	 */
	return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1);
#else
	return 1;
#endif
}

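/*
 * Worked example, not part of the original file: with 512MB of RAM,
 * memblock_end_of_DRAM() returns 0x20000000, so with a device offset
 * of 0x80000000 the highest bus address handed out is 0x9fffffff. A
 * 32-bit mask (0xffffffff) therefore passes the check above, while a
 * 30-bit mask (0x3fffffff) fails and the device must fall back to an
 * IOMMU or bounce buffering.
 */
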
static inline dma_addr_t dma_direct_map_page(struct device *dev,
					     struct page *page,
					     unsigned long offset,
					     size_t size,
					     enum dma_data_direction dir,
					     struct dma_attrs *attrs)
{
	BUG_ON(dir == DMA_NONE);
	__dma_sync_page(page, offset, size, dir);
	return page_to_phys(page) + offset + get_dma_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	/* Nothing to undo for a direct mapping. */
}

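/*
 * Illustrative sketch, not part of the original file: the streaming
 * single-page path as a driver would use it:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, bus))
 *		return -EIO;
 *	...
 *	dma_unmap_page(dev, bus, PAGE_SIZE, DMA_TO_DEVICE);
 */
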
#ifdef CONFIG_NOT_COHERENT_CACHE
static inline void dma_direct_sync_sg(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_direct_sync_single(struct device *dev,
					  dma_addr_t dma_handle, size_t size,
					  enum dma_data_direction direction)
{
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
#endif

struct dma_map_ops dma_direct_ops = {
	.alloc_coherent		= dma_direct_alloc_coherent,
	.free_coherent		= dma_direct_free_coherent,
	.map_sg			= dma_direct_map_sg,
	.unmap_sg		= dma_direct_unmap_sg,
	.dma_supported		= dma_direct_dma_supported,
	.map_page		= dma_direct_map_page,
	.unmap_page		= dma_direct_unmap_page,
#ifdef CONFIG_NOT_COHERENT_CACHE
	.sync_single_for_cpu	= dma_direct_sync_single,
	.sync_single_for_device	= dma_direct_sync_single,
	.sync_sg_for_cpu	= dma_direct_sync_sg,
	.sync_sg_for_device	= dma_direct_sync_sg,
#endif
};
EXPORT_SYMBOL(dma_direct_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_map_ops *dma_ops = get_dma_ops(dev);

	if (ppc_md.dma_set_mask)
		return ppc_md.dma_set_mask(dev, dma_mask);
	if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

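/*
 * Illustrative sketch, not part of the original file: a typical caller
 * in a PCI driver's probe routine, restricting the device to 32-bit
 * addressing:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
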
static int __init dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(dma_init);

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size)
{
	unsigned long pfn;

#ifdef CONFIG_NOT_COHERENT_CACHE
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
#else
	pfn = page_to_pfn(virt_to_page(cpu_addr));
#endif
	return remap_pfn_range(vma, vma->vm_start,
			       pfn + vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_coherent);
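
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * driver mmap handler exposing a coherent buffer "buf" (bus address
 * "buf_handle") obtained earlier from dma_alloc_coherent():
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(foo_dev, vma, buf, buf_handle,
 *					 vma->vm_end - vma->vm_start);
 *	}
 */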