// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-map-ops.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH-specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - but still handle both coherent and non-coherent requests from caller
 *
 * For DMA coherent hardware (IOC) the generic code suffices
 */

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yeah, this bit us - STAR 9000898266.
	 *
	 * Although the core does call flush_cache_vmap(), it gets kvaddr,
	 * hence can't be used to efficiently flush L1 and/or L2, which need
	 * paddr. Currently flush_cache_vmap() nukes the L1 cache completely,
	 * which will be optimized in a separate commit.
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lore.kernel.org/lkml/20180518175004.GF17671@n2100.armlinux.org.uk
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 *     [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of the direction argument as that is
 * done in the upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic, keeping the caches consistent
	 * with memory - eliding the need for any explicit cache maintenance
	 * of DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
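
/*
 * Illustrative sketch: how a driver's streaming mapping exercises the sync
 * hooks above on a non-coherent system. example_rx() and its buffer are
 * hypothetical; dma_map_single(), dma_mapping_error() and dma_unmap_single()
 * are the standard kernel DMA APIs, which the generic dma-noncoherent code
 * routes to arch_sync_dma_for_device() and arch_sync_dma_for_cpu().
 *
 *	void example_rx(struct device *dev, void *buf, size_t len)
 *	{
 *		dma_addr_t handle;
 *
 *		// map == for_device: FROM_DEV row above -> dma_cache_inv()
 *		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *		if (dma_mapping_error(dev, handle))
 *			return;
 *
 *		// ... device DMAs into buf ...
 *
 *		// unmap == for_cpu: invalidate again to discard any lines
 *		// the CPU speculatively prefetched while DMA was in flight
 *		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *	}
 */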