/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache flush operations for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_CACHEFLUSH_H
#define _ASM_CACHEFLUSH_H

#include <linux/mm_types.h>

/* Cache flushing:
 *
 *  - flush_cache_all() flushes entire cache
 *  - flush_cache_mm(mm) flushes the specified mm context's cache lines
 *  - flush_cache_page(mm, vmaddr, pfn) flushes a single page
 *  - flush_cache_range(vma, start, end) flushes a range of pages
 *  - flush_icache_range(start, end) flushes a range of instructions
 *  - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
 *  - flush_icache_pages(vma, pg, nr) flushes (invalidates) nr pages for icache
 *
 *  Need to double-check which of these is really needed for ptrace to work.
 */
#define LINESIZE	32
#define LINEBITS	5
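
/*
 * Illustrative sketch (not part of the original header): LINESIZE is the
 * cache line size in bytes and LINEBITS its log2 (32 == 1 << 5), so a
 * flush over a range typically walks line by line, roughly:
 *
 *	for (addr = start & ~(LINESIZE - 1); addr < end; addr += LINESIZE)
 *		... flush one line at addr ...
 *
 * The real loops live in arch/hexagon/mm/cache.c; this is only a sketch.
 */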

/*
 * Flush Dcache range through current map.
 */
extern void flush_dcache_range(unsigned long start, unsigned long end);
#define flush_dcache_range flush_dcache_range
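
/*
 * Illustrative sketch (not part of the original header; buf/src/len are
 * placeholders): a typical caller writes data through the current
 * mapping and then pushes it out to memory before something that
 * bypasses the cache reads it:
 *
 *	memcpy(buf, src, len);
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 */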

/*
 * Flush Icache range through current map.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
#define flush_icache_range flush_icache_range
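
/*
 * Illustrative sketch (not part of the original header; addr/new_insn
 * are placeholders): code-patching paths (kprobes, module loading, etc.)
 * store new instructions through the dcache and then call this so the
 * icache refetches them:
 *
 *	*(u32 *)addr = new_insn;
 *	flush_icache_range(addr, addr + sizeof(u32));
 */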

/*
 * Memory-management related flushes are there to ensure in non-physically
 * indexed cache schemes that stale lines belonging to a given ASID aren't
 * in the cache to confuse things.  The prototype Hexagon Virtual Machine
 * only uses a single ASID for all user-mode maps, which should
 * mean that they aren't necessary.  A brute-force, flush-everything
 * implementation, with the name xxxxx_hexagon(), is present in
 * arch/hexagon/mm/cache.c, but let's not wire it up until we know
 * it is needed.
 */
extern void flush_cache_all_hexagon(void);
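
/*
 * Illustrative sketch (not part of the original header): if the
 * ASID-based flushes ever do turn out to be needed, one brute-force way
 * to wire them up would be to fall back on the whole-cache flush, e.g.
 *
 *	#define flush_cache_mm(mm)		flush_cache_all_hexagon()
 *	#define flush_cache_range(vma, s, e)	flush_cache_all_hexagon()
 *
 * Nothing here does that today; the no-op defaults picked up from
 * <asm-generic/cacheflush.h> below are used instead.
 */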

/*
 * This may or may not ever have to be non-null, depending on the
 * virtual machine MMU.  For a native kernel, it's definitely a no-op.
 *
 * This is also the place where deferred cache coherency stuff seems
 * to happen, classically... but instead we do it like ia64 and
 * clean the cache when the PTE is set.
 */
static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/*  generic_ptrace_pokedata doesn't wind up here, does it?  */
}

#define update_mmu_cache(vma, addr, ptep) \
	update_mmu_cache_range(NULL, vma, addr, ptep, 1)
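
/*
 * For context (a sketch, not something defined in this file): the
 * generic mm code calls this hook right after installing new PTEs,
 * roughly
 *
 *	set_ptes(mm, addr, ptep, pte, nr);
 *	update_mmu_cache_range(vmf, vma, addr, ptep, nr);
 *
 * so on Hexagon the cache work happens when the PTE is written (see the
 * comment above) and the hook itself can stay empty.
 */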

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, void *src, int len);
#define copy_to_user_page copy_to_user_page
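
/*
 * Illustrative sketch (not part of the original header; maddr/offset/buf
 * are placeholders): the ptrace / access_process_vm() path uses this to
 * poke another task's text or data through a kernel mapping of the
 * target page, roughly
 *
 *	maddr = kmap_local_page(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, len);
 *	kunmap_local(maddr);
 *
 * The Hexagon implementation (arch/hexagon/mm/cache.c) does the memcpy
 * and then flushes caches as needed so the target sees coherent text.
 */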

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)

extern void hexagon_inv_dcache_range(unsigned long start, unsigned long end);
extern void hexagon_clean_dcache_range(unsigned long start, unsigned long end);
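
/*
 * Illustrative note (not from the original header): "clean" writes dirty
 * lines back to memory without discarding them, while "inv" discards
 * lines without writing them back; a full flush (as in
 * flush_dcache_range() above) does both.  A non-coherent DMA setup would
 * typically clean before a device reads a buffer and invalidate before
 * the CPU reads data a device has written, e.g.
 *
 *	DMA_TO_DEVICE:   hexagon_clean_dcache_range(start, end);
 *	DMA_FROM_DEVICE: hexagon_inv_dcache_range(start, end);
 */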

#include <asm-generic/cacheflush.h>

#endif /* _ASM_CACHEFLUSH_H */