// SPDX-License-Identifier: GPL-2.0-only
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
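
/*
 * Example usage (illustrative sketch, not part of this file): a sleepable
 * caller maps a possibly-highmem page with kmap(), uses the returned kernel
 * virtual address, and releases it with kunmap(). The helper name is
 * hypothetical and memset() from <linux/string.h> is assumed to be available.
 */
#if 0	/* example only, not built */
static void example_zero_highmem_page(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep; never call from atomic context */

	memset(vaddr, 0, PAGE_SIZE);	/* mapping stays valid until kunmap() */
	kunmap(page);			/* every kmap() must be paired with kunmap() */
}
#endif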

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
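
/*
 * Example usage (illustrative sketch, not part of this file): atomic kmaps
 * disable preemption and page faults, so the critical section must not sleep,
 * and nested mappings must be released in reverse (LIFO) order. The helper
 * name is hypothetical and memcpy() from <linux/string.h> is assumed; the
 * kernel's own copy_highpage() provides this operation for real.
 */
#if 0	/* example only, not built */
static void example_copy_highmem_page(struct page *dst, struct page *src)
{
	void *vsrc = kmap_atomic(src);
	void *vdst = kmap_atomic(dst);	/* nested map, released first (LIFO) */

	memcpy(vdst, vsrc, PAGE_SIZE);	/* no sleeping between map and unmap */
	kunmap_atomic(vdst);
	kunmap_atomic(vsrc);
}
#endif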

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
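
/*
 * Example usage (illustrative sketch, not part of this file): a frame that
 * has no struct page, e.g. firmware- or device-owned memory known only by
 * its physical address, can be mapped briefly in atomic context. The helper
 * name is hypothetical; PHYS_PFN() and offset_in_page() come from the core
 * mm headers.
 */
#if 0	/* example only, not built */
static u32 example_peek_word(phys_addr_t phys)
{
	void *vaddr = kmap_atomic_pfn(PHYS_PFN(phys));
	u32 val = *(u32 *)((char *)vaddr + offset_in_page(phys));

	kunmap_atomic(vaddr);	/* release the fixmap slot before returning */
	return val;
}
#endif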

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before memblock_free_all()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				zone_end_pfn);
	}
}