v4.10.11
#include <linux/compiler.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global
 * TLB invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		preempt_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
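For context, here is a minimal usage sketch of the two mapping interfaces defined above. It is not part of the kernel file; the helper names and the memcpy-based copy are illustrative assumptions. kmap() may sleep (note the might_sleep() above), so it is only valid in process context, while kmap_atomic() runs with pagefaults and, in v4.10.11, preemption disabled, and must be unmapped before any sleeping operation.

#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical helper: copy a possibly-highmem page from process context. */
static void copy_page_sleeping(struct page *page, void *dst)
{
	void *src = kmap(page);		/* may sleep waiting on the kmap pool */

	memcpy(dst, src, PAGE_SIZE);
	kunmap(page);
}

/* Hypothetical helper: the same copy from a context that must not sleep. */
static void copy_page_atomic(struct page *page, void *dst)
{
	void *src = kmap_atomic(page);	/* pagefaults/preemption disabled */

	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(src);		/* unmap before sleeping again */
}

Note that kunmap() takes the struct page, while kunmap_atomic() takes the kernel virtual address returned by kmap_atomic().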
v3.15
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global
 * TLB invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}
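The v3.15 listing additionally carries kmap_atomic_to_page(), which reverses an atomic mapping back to its struct page and is absent from the v4.10.11 listing above. A hedged sketch of how these two pfn-oriented helpers could be exercised follows; the helper names, PFN, and offset are illustrative assumptions, not anything this file defines.

#include <linux/highmem.h>
#include <linux/types.h>

/* Hypothetical helper: read one 32-bit word from a frame with no struct page. */
static u32 peek_pfn_word(unsigned long pfn, unsigned int offset)
{
	void *vaddr = kmap_atomic_pfn(pfn);	/* grabs a per-CPU fixmap slot */
	u32 val = *(u32 *)(vaddr + offset);

	kunmap_atomic(vaddr);
	return val;
}

/* Hypothetical helper: recover the page behind an atomic mapping (v3.15-era API). */
static struct page *page_of_mapping(void *kvaddr)
{
	return kmap_atomic_to_page(kvaddr);
}

Each CPU owns KM_TYPE_NR fixmap slots, so idx = type + KM_TYPE_NR * smp_processor_id() in the code above selects a window private to the executing CPU. That is why these mappings need no global lock, and also why the caller must not migrate or sleep while one is held.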