/* arch/arm/include/asm/highmem.h */
1#ifndef _ASM_HIGHMEM_H
2#define _ASM_HIGHMEM_H
3
4#include <asm/kmap_types.h>
5
6#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
7#define LAST_PKMAP PTRS_PER_PTE
8#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
9#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
10#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
11
12#define kmap_prot PAGE_KERNEL
13
14#define flush_cache_kmaps() \
15 do { \
16 if (cache_is_vivt()) \
17 flush_cache_all(); \
18 } while (0)
19
20extern pte_t *pkmap_page_table;
21
22extern void *kmap_high(struct page *page);
23extern void kunmap_high(struct page *page);
24
25/*
26 * The reason for kmap_high_get() is to ensure that the currently kmap'd
27 * page usage count does not decrease to zero while we're using its
28 * existing virtual mapping in an atomic context. With a VIVT cache this
29 * is essential to do, but with a VIPT cache this is only an optimization
30 * so not to pay the price of establishing a second mapping if an existing
31 * one can be used. However, on platforms without hardware TLB maintenance
32 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
33 * the locking involved must also disable IRQs which is incompatible with
34 * the IPI mechanism used by global TLB operations.
35 */
36#define ARCH_NEEDS_KMAP_HIGH_GET
37#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
38#undef ARCH_NEEDS_KMAP_HIGH_GET
39#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
40#error "The sum of features in your kernel config cannot be supported together"
41#endif
42#endif
43
44#ifdef ARCH_NEEDS_KMAP_HIGH_GET
45extern void *kmap_high_get(struct page *page);
46#else
47static inline void *kmap_high_get(struct page *page)
48{
49 return NULL;
50}
51#endif
52
53/*
54 * The following functions are already defined by <linux/highmem.h>
55 * when CONFIG_HIGHMEM is not set.
56 */
57#ifdef CONFIG_HIGHMEM
58extern void *kmap(struct page *page);
59extern void kunmap(struct page *page);
60extern void *kmap_atomic(struct page *page);
61extern void __kunmap_atomic(void *kvaddr);
62extern void *kmap_atomic_pfn(unsigned long pfn);
63extern struct page *kmap_atomic_to_page(const void *ptr);
64#endif
65
66#endif
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef _ASM_HIGHMEM_H
3#define _ASM_HIGHMEM_H
4
5#include <asm/kmap_types.h>
6
7#define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
8#define LAST_PKMAP PTRS_PER_PTE
9#define LAST_PKMAP_MASK (LAST_PKMAP - 1)
10#define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT)
11#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
12
13#define kmap_prot PAGE_KERNEL
14
15#define flush_cache_kmaps() \
16 do { \
17 if (cache_is_vivt()) \
18 flush_cache_all(); \
19 } while (0)
20
21extern pte_t *pkmap_page_table;
22
23extern void *kmap_high(struct page *page);
24extern void kunmap_high(struct page *page);
25
26/*
27 * The reason for kmap_high_get() is to ensure that the currently kmap'd
28 * page usage count does not decrease to zero while we're using its
29 * existing virtual mapping in an atomic context. With a VIVT cache this
30 * is essential to do, but with a VIPT cache this is only an optimization
31 * so not to pay the price of establishing a second mapping if an existing
32 * one can be used. However, on platforms without hardware TLB maintenance
33 * broadcast, we simply cannot use ARCH_NEEDS_KMAP_HIGH_GET at all since
34 * the locking involved must also disable IRQs which is incompatible with
35 * the IPI mechanism used by global TLB operations.
36 */
37#define ARCH_NEEDS_KMAP_HIGH_GET
38#if defined(CONFIG_SMP) && defined(CONFIG_CPU_TLB_V6)
39#undef ARCH_NEEDS_KMAP_HIGH_GET
40#if defined(CONFIG_HIGHMEM) && defined(CONFIG_CPU_CACHE_VIVT)
41#error "The sum of features in your kernel config cannot be supported together"
42#endif
43#endif
44
45/*
46 * Needed to be able to broadcast the TLB invalidation for kmap.
47 */
48#ifdef CONFIG_ARM_ERRATA_798181
49#undef ARCH_NEEDS_KMAP_HIGH_GET
50#endif
51
52#ifdef ARCH_NEEDS_KMAP_HIGH_GET
53extern void *kmap_high_get(struct page *page);
54#else
55static inline void *kmap_high_get(struct page *page)
56{
57 return NULL;
58}
59#endif
60
61/*
62 * The following functions are already defined by <linux/highmem.h>
63 * when CONFIG_HIGHMEM is not set.
64 */
65#ifdef CONFIG_HIGHMEM
66extern void *kmap(struct page *page);
67extern void kunmap(struct page *page);
68extern void *kmap_atomic(struct page *page);
69extern void __kunmap_atomic(void *kvaddr);
70extern void *kmap_atomic_pfn(unsigned long pfn);
71#endif
72
73#endif