/*
 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/tlbflush.h>

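/*
 * Masks applied to every PTE in the range being changed: bits in set_mask
 * are set, bits in clear_mask are cleared.
 */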
struct page_change_data {
	pgprot_t set_mask;
	pgprot_t clear_mask;
};

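/*
 * apply_to_page_range() callback: rewrite the protection bits of a single
 * PTE. TLB invalidation for the whole range is left to the caller.
 */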
static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
			void *data)
{
	struct page_change_data *cdata = data;
	pte_t pte = READ_ONCE(*ptep);

	pte = clear_pte_bit(pte, cdata->clear_mask);
	pte = set_pte_bit(pte, cdata->set_mask);

	set_pte(ptep, pte);
	return 0;
}

/*
 * This function assumes that the range is mapped with PAGE_SIZE pages.
 */
static int __change_memory_common(unsigned long start, unsigned long size,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
					&data);

	flush_tlb_kernel_range(start, start + size);
	return ret;
}

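/*
 * Page-align and validate the request before handing it to
 * __change_memory_common(). Only ranges covered by a single vmalloc/vmap
 * area are accepted, as those are guaranteed to be mapped at page
 * granularity.
 */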
static int change_memory_common(unsigned long addr, int numpages,
				pgprot_t set_mask, pgprot_t clear_mask)
{
	unsigned long start = addr;
	unsigned long size = PAGE_SIZE * numpages;
	unsigned long end = start + size;
	struct vm_struct *area;

	if (!PAGE_ALIGNED(addr)) {
		start &= PAGE_MASK;
		end = start + size;
		WARN_ON_ONCE(1);
	}

	/*
	 * Kernel VA mappings are always live, and splitting live section
	 * mappings into page mappings may cause TLB conflicts. This means
	 * we have to ensure that changing the permission bits of the range
	 * we are operating on does not result in such splitting.
	 *
	 * Let's restrict ourselves to mappings created by vmalloc (or vmap).
	 * Those are guaranteed to consist entirely of page mappings, and
	 * splitting is never needed.
	 *
	 * So check whether the [addr, addr + size) interval is entirely
	 * covered by precisely one VM area that has the VM_ALLOC flag set.
	 */
	area = find_vm_area((void *)addr);
	if (!area ||
	    end > (unsigned long)area->addr + area->size ||
	    !(area->flags & VM_ALLOC))
		return -EINVAL;

	if (!numpages)
		return 0;

	return __change_memory_common(start, size, set_mask, clear_mask);
}

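/*
 * Make [addr, addr + numpages * PAGE_SIZE) read-only to the kernel:
 * set PTE_RDONLY and clear PTE_WRITE.
 */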
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_RDONLY),
					__pgprot(PTE_WRITE));
}

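/* The inverse of set_memory_ro(): set PTE_WRITE, clear PTE_RDONLY. */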
int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_WRITE),
					__pgprot(PTE_RDONLY));
}

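/* Forbid privileged (kernel) execution from the range by setting PTE_PXN. */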
int set_memory_nx(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(PTE_PXN),
					__pgprot(0));
}
EXPORT_SYMBOL_GPL(set_memory_nx);

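/* Re-allow privileged execution from the range by clearing PTE_PXN. */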
int set_memory_x(unsigned long addr, int numpages)
{
	return change_memory_common(addr, numpages,
					__pgprot(0),
					__pgprot(PTE_PXN));
}
EXPORT_SYMBOL_GPL(set_memory_x);

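/*
 * Set or clear PTE_VALID for a range. This bypasses change_memory_common()
 * and its vmalloc-area check because it is used on linear map pages, e.g.
 * by DEBUG_PAGEALLOC below.
 */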
int set_memory_valid(unsigned long addr, int numpages, int enable)
{
	if (enable)
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(PTE_VALID),
					__pgprot(0));
	else
		return __change_memory_common(addr, PAGE_SIZE * numpages,
					__pgprot(0),
					__pgprot(PTE_VALID));
}

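/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * caller (pageattr_example() is an invented name) might use the
 * set_memory_* helpers above on a vmalloc()'ed buffer. Guarded by #if 0
 * so it has no effect on the build.
 */
#if 0
static int pageattr_example(void)
{
	/*
	 * vmalloc() memory is backed by page mappings inside a VM_ALLOC
	 * area, so it passes the find_vm_area() check in
	 * change_memory_common().
	 */
	void *buf = vmalloc(PAGE_SIZE);

	if (!buf)
		return -ENOMEM;

	/* Revoke write permission on the single backing page. */
	set_memory_ro((unsigned long)buf, 1);

	/* ... buf may now only be read; writes would fault ... */

	/* Restore write permission before freeing. */
	set_memory_rw((unsigned long)buf, 1);
	vfree(buf);
	return 0;
}
#endif
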
#ifdef CONFIG_DEBUG_PAGEALLOC
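/*
 * DEBUG_PAGEALLOC hook: map (enable != 0) or unmap (enable == 0) the given
 * linear map pages by toggling PTE_VALID, so that stray accesses to freed
 * pages fault immediately.
 */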
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	set_memory_valid((unsigned long)page_address(page), numpages, enable);
}
#ifdef CONFIG_HIBERNATION
/*
 * When built with CONFIG_DEBUG_PAGEALLOC and CONFIG_HIBERNATION, this function
 * is used to determine if a linear map page has been marked as not-valid by
 * CONFIG_DEBUG_PAGEALLOC. Walk the page table and check the PTE_VALID bit.
 * This is based on kern_addr_valid(), which almost does what we need.
 *
 * Because this is only called on the kernel linear map, p?d_sect() implies
 * p?d_present(). When debug_pagealloc is enabled, section mappings are
 * disabled.
 */
bool kernel_page_present(struct page *page)
{
	pgd_t *pgdp;
	pud_t *pudp, pud;
	pmd_t *pmdp, pmd;
	pte_t *ptep;
	unsigned long addr = (unsigned long)page_address(page);

	pgdp = pgd_offset_k(addr);
	if (pgd_none(READ_ONCE(*pgdp)))
		return false;

	pudp = pud_offset(pgdp, addr);
	pud = READ_ONCE(*pudp);
	if (pud_none(pud))
		return false;
	if (pud_sect(pud))
		return true;

	pmdp = pmd_offset(pudp, addr);
	pmd = READ_ONCE(*pmdp);
	if (pmd_none(pmd))
		return false;
	if (pmd_sect(pmd))
		return true;

	ptep = pte_offset_kernel(pmdp, addr);
	return pte_valid(READ_ONCE(*ptep));
}
#endif /* CONFIG_HIBERNATION */
#endif /* CONFIG_DEBUG_PAGEALLOC */