#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

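/*
 * Map a (possibly highmem) page into the kernel's virtual address
 * space.  Lowmem pages are permanently mapped, so their linear address
 * is returned directly; highmem pages go through the shared kmap pool.
 * May sleep, so it must not be called from atomic context.
 */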
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

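/*
 * Release a mapping obtained with kmap().  Lowmem pages have no kmap
 * pool entry to drop, so only highmem pages reach kunmap_high().
 * Must not be called from interrupt context.
 */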
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
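
/*
 * Illustrative sketch (hypothetical helper, not part of this file): a
 * sleepable context can use kmap()/kunmap() to touch a page that may
 * live in highmem:
 *
 *	static void zero_any_page(struct page *page)
 *	{
 *		char *vaddr = kmap(page);
 *
 *		memset(vaddr, 0, PAGE_SIZE);
 *		kunmap(page);
 *	}
 */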

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed; kmap, by contrast, takes a global
 * lock and must perform a global TLB invalidation when its pool wraps.
 *
 * However, it is not legal to sleep while holding an atomic kmap, so
 * atomic kmaps are appropriate only for short, tight code paths.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	/* Claim a per-CPU fixmap slot and map the page into it. */
	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	set_pte(kmap_pte - idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
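
/*
 * Illustrative sketch (hypothetical helper, not part of this file): the
 * atomic variant is the right tool for short, non-sleeping sections,
 * e.g. copying one word out of a possibly-highmem page:
 *
 *	static void read_word(struct page *page, unsigned long *out)
 *	{
 *		unsigned long *vaddr = kmap_atomic(page);
 *
 *		*out = *vaddr;
 *		kunmap_atomic(vaddr);
 *	}
 *
 * No sleeping is allowed between kmap_atomic() and kunmap_atomic().
 */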

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
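
/*
 * Illustrative sketch (hypothetical use, not part of this file): code
 * that has only a physical frame number, e.g. for memory lacking a
 * struct page, can map it the same way:
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *
 *	... access one page at vaddr ...
 *	kunmap_atomic(vaddr);
 *
 * The returned address lies in the fixmap range, so kunmap_atomic()
 * tears it down just like a mapping from kmap_atomic().
 */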

/*
 * Tear down a mapping set up by kmap_atomic() or one of its variants.
 * Reached via the kunmap_atomic() macro; addresses of lowmem pages fall
 * outside the fixmap range and need no unmapping.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

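/*
 * Translate an address returned by kmap_atomic() back to its struct
 * page.  Lowmem addresses are handled with virt_to_page(); fixmap
 * addresses are resolved by reading the kmap pte for that slot.
 */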
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);

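/*
 * Walk every highmem zone at boot and hand its page range over to the
 * free lists, then fold the highmem page count into the total.  Called
 * once during early init.
 */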
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}