Source: arch/arm/include/asm/highmem.h, captured from a kernel source browser.
Two kernel versions of the header follow, in order:

Version 1 of 2 — Linux kernel v5.4:
 1/* SPDX-License-Identifier: GPL-2.0 */
 2#ifndef _ASM_HIGHMEM_H
 3#define _ASM_HIGHMEM_H
 4
 5#include <asm/kmap_types.h>
 
 6
 7#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
 8#define LAST_PKMAP		PTRS_PER_PTE
 9#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
10#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
11#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
12
13#define kmap_prot		PAGE_KERNEL
14
15#define flush_cache_kmaps() \
16	do { \
17		if (cache_is_vivt()) \
18			flush_cache_all(); \
19	} while (0)
20
21extern pte_t *pkmap_page_table;
22
23extern void *kmap_high(struct page *page);
24extern void kunmap_high(struct page *page);
25
26/*
27 * The reason for kmap_high_get() is to ensure that the currently kmap'd
28 * page usage count does not decrease to zero while we're using its
29 * existing virtual mapping in an atomic context.  With a VIVT cache this
30 * is essential to do, but with a VIPT cache this is only an optimization
31 * so not to pay the price of establishing a second mapping if an existing
32 * one can be used.  However, on platforms without hardware TLB maintenance
33 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
34 * the locking involved must also disable IRQs which is incompatible with
35 * the IPI mechanism used by global TLB operations.
36 */
37#define ARCH_NEEDS_KMAP_HIGH_GET
38#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
39#undef ARCH_NEEDS_KMAP_HIGH_GET
40#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
41#error "The sum of features in your kernel config cannot be supported together"
42#endif
43#endif
44
45/*
46 * Needed to be able to broadcast the TLB invalidation for kmap.
47 */
48#ifdef CONFIG_ARM_ERRATA_798181
49#undef ARCH_NEEDS_KMAP_HIGH_GET
50#endif
51
52#ifdef ARCH_NEEDS_KMAP_HIGH_GET
53extern void *kmap_high_get(struct page *page);
54#else
 
 
 
 
 
 
 
 
 
55static inline void *kmap_high_get(struct page *page)
56{
57	return NULL;
58}
59#endif
60
61/*
62 * The following functions are already defined by <linux/highmem.h>
63 * when CONFIG_HIGHMEM is not set.
64 */
65#ifdef CONFIG_HIGHMEM
66extern void *kmap(struct page *page);
67extern void kunmap(struct page *page);
68extern void *kmap_atomic(struct page *page);
69extern void __kunmap_atomic(void *kvaddr);
70extern void *kmap_atomic_pfn(unsigned long pfn);
71#endif
72
73#endif
Version 2 of 2 — Linux kernel v6.13.7:
 1/* SPDX-License-Identifier: GPL-2.0 */
 2#ifndef _ASM_HIGHMEM_H
 3#define _ASM_HIGHMEM_H
 4
 5#include <asm/cachetype.h>
 6#include <asm/fixmap.h>
 7
 8#define PKMAP_BASE		(PAGE_OFFSET - PMD_SIZE)
 9#define LAST_PKMAP		PTRS_PER_PTE
10#define LAST_PKMAP_MASK		(LAST_PKMAP - 1)
11#define PKMAP_NR(virt)		(((virt) - PKMAP_BASE) >> PAGE_SHIFT)
12#define PKMAP_ADDR(nr)		(PKMAP_BASE + ((nr) << PAGE_SHIFT))
13
 
 
14#define flush_cache_kmaps() \
15	do { \
16		if (cache_is_vivt()) \
17			flush_cache_all(); \
18	} while (0)
19
20extern pte_t *pkmap_page_table;
21
 
 
 
22/*
23 * The reason for kmap_high_get() is to ensure that the currently kmap'd
24 * page usage count does not decrease to zero while we're using its
25 * existing virtual mapping in an atomic context.  With a VIVT cache this
26 * is essential to do, but with a VIPT cache this is only an optimization
27 * so not to pay the price of establishing a second mapping if an existing
28 * one can be used.  However, on platforms without hardware TLB maintenance
29 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
30 * the locking involved must also disable IRQs which is incompatible with
31 * the IPI mechanism used by global TLB operations.
32 */
33#define ARCH_NEEDS_KMAP_HIGH_GET
34#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
35#undef ARCH_NEEDS_KMAP_HIGH_GET
36#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
37#error "The sum of features in your kernel config cannot be supported together"
38#endif
39#endif
40
41/*
42 * Needed to be able to broadcast the TLB invalidation for kmap.
43 */
44#ifdef CONFIG_ARM_ERRATA_798181
45#undef ARCH_NEEDS_KMAP_HIGH_GET
46#endif
47
48#ifdef ARCH_NEEDS_KMAP_HIGH_GET
49extern void *kmap_high_get(struct page *page);
50
51static inline void *arch_kmap_local_high_get(struct page *page)
52{
53	if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
54		return NULL;
55	return kmap_high_get(page);
56}
57#define arch_kmap_local_high_get arch_kmap_local_high_get
58
59#else /* ARCH_NEEDS_KMAP_HIGH_GET */
60static inline void *kmap_high_get(struct page *page)
61{
62	return NULL;
63}
64#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
65
66#define arch_kmap_local_post_map(vaddr, pteval)				\
67	local_flush_tlb_kernel_page(vaddr)
68
69#define arch_kmap_local_pre_unmap(vaddr)				\
70do {									\
71	if (cache_is_vivt())						\
72		__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);	\
73} while (0)
74
75#define arch_kmap_local_post_unmap(vaddr)				\
76	local_flush_tlb_kernel_page(vaddr)
77
78#endif