/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2006, 07  Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) 2007 Lemote, Inc. & Institute of Computing Technology
 * Author: Fuxin Zhang, zhangfx@lemote.com
 *
 */
#ifndef __ASM_MACH_LOONGSON_DMA_COHERENCE_H
#define __ASM_MACH_LOONGSON_DMA_COHERENCE_H

struct device;

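/*
 * Convert a kernel virtual address into the address a device uses for DMA.
 * The bus address is the physical address with bit 31 set, which presumably
 * places it in the platform's DMA window at 0x80000000 and above.
 */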
static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
					  size_t size)
{
	return virt_to_phys(addr) | 0x80000000;
}

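/*
 * Same conversion as plat_map_dma_mem(), starting from a struct page
 * instead of a kernel virtual address.
 */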
static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
					       struct page *page)
{
	return page_to_phys(page) | 0x80000000;
}

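/*
 * Translate a DMA address back into a CPU physical address.  On 64-bit
 * Loongson 2F, addresses up to 0x8fffffff have their top four bits cleared
 * (folding them into the low 256 MB), while higher addresses are returned
 * unchanged; on other configurations only bit 31 is cleared.
 */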
static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
	dma_addr_t dma_addr)
{
#if defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
	return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
#else
	return dma_addr & 0x7fffffff;
#endif
}

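/*
 * No platform-specific work is needed when a mapping is torn down; cache
 * maintenance for non-coherent DMA is presumably left to the generic MIPS
 * DMA code.
 */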
static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction)
{
}

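/*
 * Report whether a device's DMA mask can be satisfied.  Masks narrower than
 * 24 bits are rejected, 24 bits presumably being the limit of the GFP_DMA
 * zone mentioned below.
 */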
static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA.
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

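/*
 * No extra device synchronisation is needed on this platform.
 */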
static inline void plat_extra_sync_for_device(struct device *dev)
{
}

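/*
 * The address conversions above cannot fail, so no DMA address is ever
 * treated as a mapping error.
 */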
static inline int plat_dma_mapping_error(struct device *dev,
					 dma_addr_t dma_addr)
{
	return 0;
}

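/*
 * Devices are reported as non cache-coherent, so callers are expected to
 * perform cache flushes/invalidates around DMA transfers.
 */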
static inline int plat_device_is_coherent(struct device *dev)
{
	return 0;
}

#endif /* __ASM_MACH_LOONGSON_DMA_COHERENCE_H */