// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/alpha/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 */

/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */

#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/memblock.h> /* max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/gfp.h>

#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
#include <asm/tlb.h>
#include <asm/setup.h>
#include <asm/sections.h>

extern void die_if_kernel(char *,struct pt_regs *,long);

static struct pcb_struct original_pcb;

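/*
 * Allocate a page directory for a new mm.  The kernel's own upper
 * entries are copied from init_mm so that kernel and vmalloc addresses
 * resolve identically in every address space, and the last slot is
 * pointed back at this PGD page to form the per-process VPTB self-map.
 */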
pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	init = pgd_offset(&init_mm, 0UL);
	if (ret) {
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

		/* The last PGD entry is the VPTB self-map. */
		pgd_val(ret[PTRS_PER_PGD-1])
		  = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
	}
	return ret;
}


/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * for a process dying in kernel mode, possibly leaving an inode
 * unused etc..
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pmd_t *
__bad_pagetable(void)
{
	memset(absolute_pointer(EMPTY_PGT), 0, PAGE_SIZE);
	return (pmd_t *) EMPTY_PGT;
}

pte_t
__bad_page(void)
{
	memset(absolute_pointer(EMPTY_PGE), 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}

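/*
 * Load a new PCB: record the current stack pointer (register $30 on
 * Alpha) as the kernel stack for the new context, then switch to that
 * PCB with __reload_thread().  The return value is the address of the
 * PCB that was active before the switch.
 */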
static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}

/* Set up initial PCB, VPTB, and other such niceties.  */
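/*
 * With 8KB pages and 1024-entry tables, each top-level (L1) slot covers
 * 8GB of virtual space, so slot 1023 corresponds to the sign-extended
 * address 0xfffffffe00000000 used for the VPTB below, and slot 1
 * corresponds to the SRM console's 0x200000000 (see srm_paging_stop).
 */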

static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table.  */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required.  */
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it.  */
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	tbia();

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here.  */

	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}

int callback_init_done;

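/*
 * Remap the SRM console's callback region (described by the CRB) into
 * the vmalloc area and build the initial vmalloc page tables, so the
 * console's DISPATCH and FIXUP entry points remain usable after we
 * switch away from the console's own mappings.  When not running under
 * SRM it still pre-allocates the page tables used for vmalloc (see the
 * comment below).
 */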
void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();	/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP. */
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   When not running under SRM, but without CONFIG_ALPHA_LARGE_VMALLOC,
	   we still need to allocate the PGD we use for vmalloc before we
	   start forking other tasks.  */

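	/* Round kernel_end up to a page boundary (~PAGE_MASK == PAGE_SIZE - 1)
	   and take two zeroed pages from the end of the kernel image: the
	   first becomes the PMD table for the vmalloc region, the second its
	   first page of PTEs. */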
	two_pages = (void *)
	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	pgd = pgd_offset_k(VMALLOC_START);
	p4d = p4d_offset(pgd, VMALLOC_START);
	pud = pud_offset(p4d, VMALLOC_START);
	pud_set(pud, (pmd_t *)two_pages);
	pmd = pmd_offset(pud, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long nr_pages = 0;
		unsigned long vaddr;
		unsigned long i, j;

		/* calculate needed size */
		for (i = 0; i < crb->map_entries; ++i)
			nr_pages += crb->map[i].count;

		/* register the vm area */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.size = nr_pages << PAGE_SHIFT;
		vm_area_register_early(&console_remap_vm, PAGE_SIZE);

		vaddr = (unsigned long)console_remap_vm.addr;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries.  */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer consoles (especially on larger
				   systems) may require more pages of
				   PTEs.  Grab additional pages as needed. */
				if (pmd != pmd_offset(pud, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pud, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}
	}

	callback_init_done = 1;
	return kernel_end;
}

/*
 * paging_init() sets up the memory map.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = {0, };
	unsigned long dma_pfn;

	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

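	/* Pages below MAX_DMA_ADDRESS land in ZONE_DMA, everything else in
	   ZONE_NORMAL.  Alpha has no highmem, so max_low_pfn already covers
	   all of RAM. */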
	max_zone_pfn[ZONE_DMA] = dma_pfn;
	max_zone_pfn[ZONE_NORMAL] = max_pfn;

	/* Initialize mem_map[].  */
	free_area_init(max_zone_pfn);

	/* Initialize the kernel's ZERO_PGE. */
	memset(absolute_pointer(ZERO_PGE), 0, PAGE_SIZE);
}

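/*
 * Hand the MMU back to the SRM console on halt/reboot: move the VPTB
 * to 0x200000000 (L1 slot 1), where SRM expects it, and reload the
 * PCB that the console was using when we booted.
 */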
#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it.  */
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000UL);
	hwrpb->vptb = 0x200000000UL;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use.  */
	load_PCB(&original_pcb);
	tbia();
}
#endif

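/*
 * Late boot: record the highest page frame number and the high_memory
 * boundary, then release all memblock-managed free memory to the page
 * allocator via memblock_free_all().
 */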
void __init
mem_init(void)
{
	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();
}

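/*
 * Map the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations onto Alpha
 * PTE protection bits.  Alpha expresses denied permissions with
 * fault-on-read/write/execute bits (_PAGE_FOR/_PAGE_FOW/_PAGE_FOE);
 * the _PAGE_P() entries are used for private (potentially copy-on-write)
 * mappings, the _PAGE_S() entries for shared ones.  DECLARE_VM_GET_PAGE_PROT
 * provides the vm_get_page_prot() helper that indexes this table.
 */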
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= _PAGE_P(_PAGE_FOE | _PAGE_FOW |
								  _PAGE_FOR),
	[VM_READ]					= _PAGE_P(_PAGE_FOE | _PAGE_FOW),
	[VM_WRITE]					= _PAGE_P(_PAGE_FOE),
	[VM_WRITE | VM_READ]				= _PAGE_P(_PAGE_FOE),
	[VM_EXEC]					= _PAGE_P(_PAGE_FOW | _PAGE_FOR),
	[VM_EXEC | VM_READ]				= _PAGE_P(_PAGE_FOW),
	[VM_EXEC | VM_WRITE]				= _PAGE_P(0),
	[VM_EXEC | VM_WRITE | VM_READ]			= _PAGE_P(0),
	[VM_SHARED]					= _PAGE_S(_PAGE_FOE | _PAGE_FOW |
								  _PAGE_FOR),
	[VM_SHARED | VM_READ]				= _PAGE_S(_PAGE_FOE | _PAGE_FOW),
	[VM_SHARED | VM_WRITE]				= _PAGE_S(_PAGE_FOE),
	[VM_SHARED | VM_WRITE | VM_READ]		= _PAGE_S(_PAGE_FOE),
	[VM_SHARED | VM_EXEC]				= _PAGE_S(_PAGE_FOW | _PAGE_FOR),
	[VM_SHARED | VM_EXEC | VM_READ]			= _PAGE_S(_PAGE_FOW),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= _PAGE_S(0),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= _PAGE_S(0)
};
DECLARE_VM_GET_PAGE_PROT