v4.6
 
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap,
 * because it needs no global lock and no global TLB invalidation (which
 * the kmap code must perform whenever its kmap pool wraps).
 *
 * However, when holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before free_all_bootmem()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				 zone_end_pfn);
	}
}
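
For context (not part of the file above): kmap() may sleep, so it is valid only in process context, while kmap_atomic() disables preemption and page faults, so nothing between it and kunmap_atomic() may sleep. A minimal usage sketch of this v4.6 API follows; the helper and parameter names are hypothetical:

#include <linux/highmem.h>
#include <linux/string.h>

/* Process context: the mapping may be held across sleeping calls. */
static void copy_into_page(struct page *page, const void *src, size_t len)
{
	void *vaddr = kmap(page);	/* may sleep on the kmap pool */

	memcpy(vaddr, src, len);
	kunmap(page);			/* takes the page, not the vaddr */
}

/* Any context: must not sleep between map and unmap. */
static void copy_into_page_atomic(struct page *page, const void *src,
				  size_t len)
{
	void *vaddr = kmap_atomic(page);

	memcpy(vaddr, src, len);
	kunmap_atomic(vaddr);		/* takes the vaddr, not the page */
}
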
v5.9
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>

void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_high_prot);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);

void kunmap_atomic_high(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif
}
EXPORT_SYMBOL(kunmap_atomic_high);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before memblock_free_all()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
				 zone_end_pfn);
	}
}
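
The v5.9 file is smaller because the arch-independent pieces were hoisted out: kmap()/kunmap(), the preempt/pagefault handling, and the !PageHighMem shortcut now live in the generic highmem code (include/linux/highmem.h and mm/highmem.c), leaving only the fixmap pte manipulation here. Roughly, the generic caller looks like the sketch below; this is reconstructed for illustration, not quoted from this page:

/*
 * Sketch of the generic v5.9 wrapper (reconstructed, approximate):
 * context handling and the lowmem shortcut happen here, then the
 * arch-specific fixmap code above is invoked for highmem pages.
 */
static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_atomic_high_prot(page, prot);
}

kunmap_atomic() is the mirror image: the generic macro calls kunmap_atomic_high() for highmem addresses and then re-enables page faults and preemption, which is why pagefault_enable()/preempt_enable() no longer appear in this file.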