v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 * Page table support for the Hexagon architecture
  4 *
  5 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
  6 */
  7
  8#ifndef _ASM_PGTABLE_H
  9#define _ASM_PGTABLE_H
 10
 11/*
 12 * Page table definitions for Qualcomm Hexagon processor.
 13 */
 14#include <asm/page.h>
 15#include <asm-generic/pgtable-nopmd.h>
 16
 17/* A handy thing to have if one has the RAM. Declared in head.S */
 18extern unsigned long empty_zero_page;
 19
 20/*
 21 * The PTE model described here is that of the Hexagon Virtual Machine,
 22 * which autonomously walks 2-level page tables.  At a lower level, we
 23 * also describe the RISCish software-loaded TLB entry structure of
 24 * the underlying Hexagon processor. A kernel built to run on the
 25 * virtual machine has no need to know about the underlying hardware.
 26 */
 27#include <asm/vm_mmu.h>
 28
 29/*
 30 * To maximize the comfort level for the PTE manipulation macros,
 31 * define the "well known" architecture-specific bits.
 32 */
 33#define _PAGE_READ	__HVM_PTE_R
 34#define _PAGE_WRITE	__HVM_PTE_W
 35#define _PAGE_EXECUTE	__HVM_PTE_X
 36#define _PAGE_USER	__HVM_PTE_U
 37
 38/*
 39 * We have a total of 4 "soft" bits available in the abstract PTE.
 40 * The two mandatory software bits are Dirty and Accessed.
 41 * To make nonlinear swap work according to the more recent
 42 * model, we want a low order "Present" bit to indicate whether
 43 * the PTE describes MMU programming or swap space.
 44 */
 45#define _PAGE_PRESENT	(1<<0)
 46#define _PAGE_DIRTY	(1<<1)
 47#define _PAGE_ACCESSED	(1<<2)
 48
 49/*
 50 * For now, let's say that Valid and Present are the same thing.
 51 * Alternatively, we could say that it's the "or" of R, W, and X
 52 * permissions.
 53 */
 54#define _PAGE_VALID	_PAGE_PRESENT
 55
 56/*
 57 * We're not defining _PAGE_GLOBAL here, since there's no concept
 58 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 59 * and we want to use the same page table structures and macros in
 60 * the native kernel as we do in the virtual machine kernel.
 61 * So we'll put up with a bit of inefficiency for now...
 62 */
 63
 64/* We borrow bit 6 to store the exclusive marker in swap PTEs. */
 65#define _PAGE_SWP_EXCLUSIVE	(1<<6)
 66
 67/*
 68 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 69 * only the second from the bottom, pgd and pud both being collapsed.
 70 * Each entry represents 4MB of virtual address space, 4K of table
 71 * thus maps the full 4GB.
 72 */
 73#define PGDIR_SHIFT 22
 74#define PTRS_PER_PGD 1024
 75
 76#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
 77#define PGDIR_MASK (~(PGDIR_SIZE-1))
 78
 79#ifdef CONFIG_PAGE_SIZE_4KB
 80#define PTRS_PER_PTE 1024
 81#endif
 82
 83#ifdef CONFIG_PAGE_SIZE_16KB
 84#define PTRS_PER_PTE 256
 85#endif
 86
 87#ifdef CONFIG_PAGE_SIZE_64KB
 88#define PTRS_PER_PTE 64
 89#endif
 90
 91#ifdef CONFIG_PAGE_SIZE_256KB
 92#define PTRS_PER_PTE 16
 93#endif
 94
 95#ifdef CONFIG_PAGE_SIZE_1MB
 96#define PTRS_PER_PTE 4
 97#endif
 98
 99/*  Any bigger and the PTE disappears.  */
100#define pgd_ERROR(e) \
101	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
102		pgd_val(e))
103
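/*
 * Illustrative sketch, not part of the original header: how a 32-bit
 * virtual address decomposes under the layout above.  With 4KB pages,
 * PTRS_PER_PTE is 1024, so one second-level table spans
 * 1024 * 4KB = 4MB, exactly one PGD entry (1 << PGDIR_SHIFT); the same
 * product holds for every supported page size.  The helper name below
 * is made up purely for illustration.
 */
static inline void example_decompose_vaddr(unsigned long vaddr,
					   unsigned long *pgd_idx,
					   unsigned long *pte_idx,
					   unsigned long *page_off)
{
	*pgd_idx  = vaddr >> PGDIR_SHIFT;		/* which 4MB region (0..1023) */
	*pte_idx  = (vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);	/* PTE slot */
	*page_off = vaddr & ~PAGE_MASK;			/* offset within the page */
}
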
104/*
105 * Page Protection Constants. Includes (in this variant) cache attributes.
106 */
107extern unsigned long _dflt_cache_att;
108
109#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
110				_dflt_cache_att)
111#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
112				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
113#define PAGE_COPY	PAGE_READONLY
114#define PAGE_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
115				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
116#define PAGE_COPY_EXEC	PAGE_EXEC
117#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
118				_PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
119#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
120				_PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)
121
122
123/*
124 * Aliases for mapping mmap() protection bits to page protections.
125 * These get used for static initialization, so using the _dflt_cache_att
126 * variable for the default cache attribute isn't workable. If the
127 * default gets changed at boot time, the boot option code has to
128 * update data structures like the protection_map[] array.
129 */
130#define CACHEDEF	(CACHE_DEFAULT << 6)
131
132extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */
133
134/*  HUGETLB not working currently  */
135#ifdef CONFIG_HUGETLB_PAGE
136#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
137#endif
138
139/*
140 * For now, assume that higher-level code will do TLB/MMU invalidations
141 * and don't insert that overhead into this low-level function.
142 */
143extern void sync_icache_dcache(pte_t pte);
144
145#define pte_present_exec_user(pte) \
146	((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
147	(_PAGE_EXECUTE | _PAGE_USER))
148
149static inline void set_pte(pte_t *ptep, pte_t pteval)
150{
151	/*  should really be using pte_exec, if it weren't declared later. */
152	if (pte_present_exec_user(pteval))
153		sync_icache_dcache(pteval);
154
155	*ptep = pteval;
156}
157
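/*
 * Hypothetical usage sketch (not part of the original header): install a
 * user-executable mapping.  Because PAGE_EXEC carries both _PAGE_EXECUTE
 * and _PAGE_USER, set_pte() above calls sync_icache_dcache() before the
 * entry is written.  The helper name is made up for illustration only.
 */
static inline void example_install_user_exec(pte_t *ptep, unsigned long pfn)
{
	pte_t pteval = __pte((pfn << PAGE_SHIFT) | pgprot_val(PAGE_EXEC));

	set_pte(ptep, pteval);
}
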
158/*
159 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
160 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
161 * (Linux PTE), the key is to have bits 11..9 all zero.  We'd use 0x7
162 * as a universal null entry, but some of those least significant bits
163 * are interpreted by software.
164 */
165#define _NULL_PMD	0x7
166#define _NULL_PTE	0x0
167
168static inline void pmd_clear(pmd_t *pmd_entry_ptr)
169{
170	 pmd_val(*pmd_entry_ptr) = _NULL_PMD;
171}
172
173/*
174 * Conveniently, a null PTE value is invalid.
175 */
176static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
177				pte_t *ptep)
178{
179	pte_val(*ptep) = _NULL_PTE;
180}
181
182/**
183 * pmd_none - check if pmd_entry is mapped
184 * @pmd_entry:  pmd entry
185 *
186 * MIPS checks it against that "invalid pte table" thing.
187 */
188static inline int pmd_none(pmd_t pmd)
189{
190	return pmd_val(pmd) == _NULL_PMD;
191}
192
193/**
194 * pmd_present - is there a page table behind this?
195 * Essentially the inverse of pmd_none.  We maybe
196 * save an inline instruction by defining it this
197 * way, instead of simply "!pmd_none".
198 */
199static inline int pmd_present(pmd_t pmd)
200{
201	return pmd_val(pmd) != (unsigned long)_NULL_PMD;
202}
203
204/**
205 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
206 * As we have no known cause of badness, it's null, as it is for many
207 * architectures.
208 */
209static inline int pmd_bad(pmd_t pmd)
210{
211	return 0;
212}
213
214/*
215 * pmd_pfn - converts a PMD entry to a page frame number
216 */
217#define pmd_pfn(pmd)  (pmd_val(pmd) >> PAGE_SHIFT)
218
219/*
220 * pmd_page - converts a PMD entry to a page pointer
221 */
222#define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
223
224/**
225 * pte_none - check if pte is mapped
226 * @pte: pte_t entry
227 */
228static inline int pte_none(pte_t pte)
229{
230	return pte_val(pte) == _NULL_PTE;
231}
232
233/*
234 * pte_present - check if page is present
235 */
236static inline int pte_present(pte_t pte)
237{
238	return pte_val(pte) & _PAGE_PRESENT;
239}
240
241/* mk_pte - make a PTE out of a page pointer and protection bits */
242#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
243
244/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
245#define pte_page(x) pfn_to_page(pte_pfn(x))
246
247/* pte_mkold - mark PTE as not recently accessed */
248static inline pte_t pte_mkold(pte_t pte)
249{
250	pte_val(pte) &= ~_PAGE_ACCESSED;
251	return pte;
252}
253
254/* pte_mkyoung - mark PTE as recently accessed */
255static inline pte_t pte_mkyoung(pte_t pte)
256{
257	pte_val(pte) |= _PAGE_ACCESSED;
258	return pte;
259}
260
261/* pte_mkclean - mark page as in sync with backing store */
262static inline pte_t pte_mkclean(pte_t pte)
263{
264	pte_val(pte) &= ~_PAGE_DIRTY;
265	return pte;
266}
267
268/* pte_mkdirty - mark page as modified */
269static inline pte_t pte_mkdirty(pte_t pte)
270{
271	pte_val(pte) |= _PAGE_DIRTY;
272	return pte;
273}
274
275/* pte_young - "is PTE marked as accessed"? */
276static inline int pte_young(pte_t pte)
277{
278	return pte_val(pte) & _PAGE_ACCESSED;
279}
280
281/* pte_dirty - "is PTE dirty?" */
282static inline int pte_dirty(pte_t pte)
283{
284	return pte_val(pte) & _PAGE_DIRTY;
285}
286
287/* pte_modify - set protection bits on PTE */
288static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
289{
290	pte_val(pte) &= PAGE_MASK;
291	pte_val(pte) |= pgprot_val(prot);
292	return pte;
293}
294
295/* pte_wrprotect - mark page as not writable */
296static inline pte_t pte_wrprotect(pte_t pte)
297{
298	pte_val(pte) &= ~_PAGE_WRITE;
299	return pte;
300}
301
302/* pte_mkwrite - mark page as writable */
303static inline pte_t pte_mkwrite_novma(pte_t pte)
304{
305	pte_val(pte) |= _PAGE_WRITE;
306	return pte;
307}
308
309/* pte_mkexec - mark PTE as executable */
310static inline pte_t pte_mkexec(pte_t pte)
311{
312	pte_val(pte) |= _PAGE_EXECUTE;
313	return pte;
314}
315
316/* pte_read - "is PTE marked as readable?" */
317static inline int pte_read(pte_t pte)
318{
319	return pte_val(pte) & _PAGE_READ;
320}
321
322/* pte_write - "is PTE marked as writable?" */
323static inline int pte_write(pte_t pte)
324{
325	return pte_val(pte) & _PAGE_WRITE;
326}
327
328
329/* pte_exec - "is PTE marked as executable?" */
330static inline int pte_exec(pte_t pte)
331{
332	return pte_val(pte) & _PAGE_EXECUTE;
333}
334
335/* __pte_to_swp_entry - extract swap entry from PTE */
336#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
337
338/* __swp_entry_to_pte - extract PTE from swap entry */
339#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
340
341#define PFN_PTE_SHIFT	PAGE_SHIFT
342/* pfn_pte - convert page number and protection value to page table entry */
343#define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
344
345/* pte_pfn - convert pte to page frame number */
346#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
347#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
348
349static inline unsigned long pmd_page_vaddr(pmd_t pmd)
350{
351	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
352}
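
/*
 * Illustration only (not part of the original header): the generic
 * pte_offset_kernel() builds on pmd_page_vaddr() above, roughly
 *
 *	pte_t *ptep = (pte_t *)pmd_page_vaddr(*pmd) + pte_index(addr);
 *
 * i.e. the table base recorded in the PMD entry, converted back to a
 * kernel-virtual address, plus the PTE index for addr.
 */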
353
354/* ZERO_PAGE - returns the globally shared zero page */
355#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
356
357/*
358 * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
359 * are !pte_none() && !pte_present().
360 *
361 * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
362 * interpreted as swap information.  The remaining free bits are interpreted as
363 * listed below.  Rather than have the TLB fill handler test
364 * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
365 * all zeros for swap entries, which speeds up the miss handler at the cost of
366 * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
367 * processor architecture and target applications suggest a lot of TLB misses
368 * and not much swap space.
369 *
370 * Format of swap PTE:
371 *	bit	0:	Present (zero)
372 *	bits	1-5:	swap type (arch independent layer uses 5 bits max)
373 *	bit	6:	exclusive marker
374 *	bits	7-9:	bits 2:0 of offset
375 *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
376 *	bits	13-31:  bits 21:3 of swap offset
377 *
378 * The split offset makes some of the following macros a little gnarly,
379 * but there's plenty of precedent for this sort of thing.
380 */
381
382/* Used for swap PTEs */
383#define __swp_type(swp_pte)		(((swp_pte).val >> 1) & 0x1f)
384
385#define __swp_offset(swp_pte) \
386	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x3ffff8))
387
388#define __swp_entry(type, offset) \
389	((swp_entry_t)	{ \
390		(((type & 0x1f) << 1) | \
391		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
392
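/*
 * Worked example (illustration only, not part of the original source):
 * packing swap type 3 with offset 0x1234 and unpacking it again.
 *
 *	swp_entry_t ent = __swp_entry(3, 0x1234);
 *
 * gives ent.val == 0x48c206: the type lands in bits 1-5, offset bits 2:0
 * in bits 7-9 and offset bits 21:3 in bits 13-31, while the permission
 * bits 10-12 stay zero as the TLB miss handler expects.  Decoding with
 * __swp_type(ent) and __swp_offset(ent) returns 3 and 0x1234 again.
 */
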
393static inline int pte_swp_exclusive(pte_t pte)
394{
395	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
396}
397
398static inline pte_t pte_swp_mkexclusive(pte_t pte)
399{
400	pte_val(pte) |= _PAGE_SWP_EXCLUSIVE;
401	return pte;
402}
403
404static inline pte_t pte_swp_clear_exclusive(pte_t pte)
405{
406	pte_val(pte) &= ~_PAGE_SWP_EXCLUSIVE;
407	return pte;
408}
409
410#endif
v3.15
 
  1/*
  2 * Page table support for the Hexagon architecture
  3 *
  4 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 and
  8 * only version 2 as published by the Free Software Foundation.
  9 *
 10 * This program is distributed in the hope that it will be useful,
 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13 * GNU General Public License for more details.
 14 *
 15 * You should have received a copy of the GNU General Public License
 16 * along with this program; if not, write to the Free Software
 17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 18 * 02110-1301, USA.
 19 */
 20
 21#ifndef _ASM_PGTABLE_H
 22#define _ASM_PGTABLE_H
 23
 24/*
 25 * Page table definitions for Qualcomm Hexagon processor.
 26 */
 27#include <linux/swap.h>
 28#include <asm/page.h>
 29#include <asm-generic/pgtable-nopmd.h>
 30
 31/* A handy thing to have if one has the RAM. Declared in head.S */
 32extern unsigned long empty_zero_page;
 33extern unsigned long zero_page_mask;
 34
 35/*
 36 * The PTE model described here is that of the Hexagon Virtual Machine,
 37 * which autonomously walks 2-level page tables.  At a lower level, we
 38 * also describe the RISCish software-loaded TLB entry structure of
 39 * the underlying Hexagon processor. A kernel built to run on the
 40 * virtual machine has no need to know about the underlying hardware.
 41 */
 42#include <asm/vm_mmu.h>
 43
 44/*
 45 * To maximize the comfort level for the PTE manipulation macros,
 46 * define the "well known" architecture-specific bits.
 47 */
 48#define _PAGE_READ	__HVM_PTE_R
 49#define _PAGE_WRITE	__HVM_PTE_W
 50#define _PAGE_EXECUTE	__HVM_PTE_X
 51#define _PAGE_USER	__HVM_PTE_U
 52
 53/*
 54 * We have a total of 4 "soft" bits available in the abstract PTE.
 55 * The two mandatory software bits are Dirty and Accessed.
 56 * To make nonlinear swap work according to the more recent
 57 * model, we want a low order "Present" bit to indicate whether
 58 * the PTE describes MMU programming or swap space.
 59 */
 60#define _PAGE_PRESENT	(1<<0)
 61#define _PAGE_DIRTY	(1<<1)
 62#define _PAGE_ACCESSED	(1<<2)
 63
 64/*
 65 * _PAGE_FILE is only meaningful if _PAGE_PRESENT is false, while
 66 * _PAGE_DIRTY is only meaningful if _PAGE_PRESENT is true.
 67 * So we can overload the bit...
 68 */
 69#define _PAGE_FILE	_PAGE_DIRTY /* set:  pagecache, unset = swap */
 70
 71/*
 72 * For now, let's say that Valid and Present are the same thing.
 73 * Alternatively, we could say that it's the "or" of R, W, and X
 74 * permissions.
 75 */
 76#define _PAGE_VALID	_PAGE_PRESENT
 77
 78/*
 79 * We're not defining _PAGE_GLOBAL here, since there's no concept
 80 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 81 * and we want to use the same page table structures and macros in
 82 * the native kernel as we do in the virtual machine kernel.
 83 * So we'll put up with a bit of inefficiency for now...
 84 */
 85
 86/*
 87 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 88 * only the second from the bottom, pgd and pud both being collapsed.
 89 * Each entry represents 4MB of virtual address space, 4K of table
 90 * thus maps the full 4GB.
 91 */
 92#define PGDIR_SHIFT 22
 93#define PTRS_PER_PGD 1024
 94
 95#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
 96#define PGDIR_MASK (~(PGDIR_SIZE-1))
 97
 98#ifdef CONFIG_PAGE_SIZE_4KB
 99#define PTRS_PER_PTE 1024
100#endif
101
102#ifdef CONFIG_PAGE_SIZE_16KB
103#define PTRS_PER_PTE 256
104#endif
105
106#ifdef CONFIG_PAGE_SIZE_64KB
107#define PTRS_PER_PTE 64
108#endif
109
110#ifdef CONFIG_PAGE_SIZE_256KB
111#define PTRS_PER_PTE 16
112#endif
113
114#ifdef CONFIG_PAGE_SIZE_1MB
115#define PTRS_PER_PTE 4
116#endif
117
118/*  Any bigger and the PTE disappears.  */
119#define pgd_ERROR(e) \
120	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
121		pgd_val(e))
122
123/*
124 * Page Protection Constants. Includes (in this variant) cache attributes.
125 */
126extern unsigned long _dflt_cache_att;
127
128#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
129				_dflt_cache_att)
130#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
131				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
132#define PAGE_COPY	PAGE_READONLY
133#define PAGE_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
134				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
135#define PAGE_COPY_EXEC	PAGE_EXEC
136#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
137				_PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
138#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
139				_PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)
140
141
142/*
143 * Aliases for mapping mmap() protection bits to page protections.
144 * These get used for static initialization, so using the _dflt_cache_att
145 * variable for the default cache attribute isn't workable. If the
146 * default gets changed at boot time, the boot option code has to
147 * update data structures like the protection_map[] array.
148 */
149#define CACHEDEF	(CACHE_DEFAULT << 6)
150
151/* Private (copy-on-write) page protections. */
152#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
153#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
154#define __P010 __P000	/* Write-only copy-on-write */
155#define __P011 __P001	/* Read/Write copy-on-write */
156#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
157			_PAGE_EXECUTE | CACHEDEF)
158#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
159			_PAGE_READ | CACHEDEF)
160#define __P110 __P100	/* Write/execute copy-on-write */
161#define __P111 __P101	/* Read/Write/Execute, copy-on-write */
162
163/* Shared page protections. */
164#define __S000 __P000
165#define __S001 __P001
166#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
167			_PAGE_WRITE | CACHEDEF)
168#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
169			_PAGE_WRITE | CACHEDEF)
170#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
171			_PAGE_EXECUTE | CACHEDEF)
172#define __S101 __P101
173#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
174			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
175#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
176			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
177
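/*
 * Illustration (not part of the original header): a private mmap() with
 * PROT_READ | PROT_WRITE indexes __P011, which aliases __P001, i.e. the
 * write permission is deliberately left out so that the first store
 * faults and the page gets copied (copy-on-write).  The shared-mapping
 * case indexes __S011, which does carry _PAGE_WRITE.
 */
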
178extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */
179
180/* Seems to be zero even in architectures where the zero page is firewalled? */
181#define FIRST_USER_ADDRESS 0
182#define pte_special(pte)	0
183#define pte_mkspecial(pte)	(pte)
184
185/*  HUGETLB not working currently  */
186#ifdef CONFIG_HUGETLB_PAGE
187#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
188#endif
189
190/*
191 * For now, assume that higher-level code will do TLB/MMU invalidations
192 * and don't insert that overhead into this low-level function.
193 */
194extern void sync_icache_dcache(pte_t pte);
195
196#define pte_present_exec_user(pte) \
197	((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
198	(_PAGE_EXECUTE | _PAGE_USER))
199
200static inline void set_pte(pte_t *ptep, pte_t pteval)
201{
202	/*  should really be using pte_exec, if it weren't declared later. */
203	if (pte_present_exec_user(pteval))
204		sync_icache_dcache(pteval);
205
206	*ptep = pteval;
207}
208
209/*
210 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
211 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
212 * (Linux PTE), the key is to have bits 11..9 all zero.  We'd use 0x7
213 * as a universal null entry, but some of those least significant bits
214 * are interpreted by software.
215 */
216#define _NULL_PMD	0x7
217#define _NULL_PTE	0x0
218
219static inline void pmd_clear(pmd_t *pmd_entry_ptr)
220{
221	 pmd_val(*pmd_entry_ptr) = _NULL_PMD;
222}
223
224/*
225 * Conveniently, a null PTE value is invalid.
226 */
227static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
228				pte_t *ptep)
229{
230	pte_val(*ptep) = _NULL_PTE;
231}
232
233#ifdef NEED_PMD_INDEX_DESPITE_BEING_2_LEVEL
234/**
235 * pmd_index - returns the index of the entry in the PMD page
236 * which would control the given virtual address
237 */
238#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
239
240#endif
241
242/**
243 * pgd_index - returns the index of the entry in the PGD page
244 * which would control the given virtual address
245 *
246 * This returns the *index* for the address in the pgd_t
247 */
248#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
249
250/*
251 * pgd_offset - find an offset in a page-table-directory
252 */
253#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
254
255/*
256 * pgd_offset_k - get kernel (init_mm) pgd entry pointer for addr
257 */
258#define pgd_offset_k(address) pgd_offset(&init_mm, address)
259
260/**
261 * pmd_none - check if pmd_entry is mapped
262 * @pmd_entry:  pmd entry
263 *
264 * MIPS checks it against that "invalid pte table" thing.
265 */
266static inline int pmd_none(pmd_t pmd)
267{
268	return pmd_val(pmd) == _NULL_PMD;
269}
270
271/**
272 * pmd_present - is there a page table behind this?
273 * Essentially the inverse of pmd_none.  We maybe
274 * save an inline instruction by defining it this
275 * way, instead of simply "!pmd_none".
276 */
277static inline int pmd_present(pmd_t pmd)
278{
279	return pmd_val(pmd) != (unsigned long)_NULL_PMD;
280}
281
282/**
283 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
284 * As we have no known cause of badness, it's null, as it is for many
285 * architectures.
286 */
287static inline int pmd_bad(pmd_t pmd)
288{
289	return 0;
290}
291
292/*
293 * pmd_page - converts a PMD entry to a page pointer
294 */
295#define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
296#define pmd_pgtable(pmd) pmd_page(pmd)
297
298/**
299 * pte_none - check if pte is mapped
300 * @pte: pte_t entry
301 */
302static inline int pte_none(pte_t pte)
303{
304	return pte_val(pte) == _NULL_PTE;
305}
306
307/*
308 * pte_present - check if page is present
309 */
310static inline int pte_present(pte_t pte)
311{
312	return pte_val(pte) & _PAGE_PRESENT;
313}
314
315/* mk_pte - make a PTE out of a page pointer and protection bits */
316#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
317
318/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
319#define pte_page(x) pfn_to_page(pte_pfn(x))
320
321/* pte_mkold - mark PTE as not recently accessed */
322static inline pte_t pte_mkold(pte_t pte)
323{
324	pte_val(pte) &= ~_PAGE_ACCESSED;
325	return pte;
326}
327
328/* pte_mkyoung - mark PTE as recently accessed */
329static inline pte_t pte_mkyoung(pte_t pte)
330{
331	pte_val(pte) |= _PAGE_ACCESSED;
332	return pte;
333}
334
335/* pte_mkclean - mark page as in sync with backing store */
336static inline pte_t pte_mkclean(pte_t pte)
337{
338	pte_val(pte) &= ~_PAGE_DIRTY;
339	return pte;
340}
341
342/* pte_mkdirty - mark page as modified */
343static inline pte_t pte_mkdirty(pte_t pte)
344{
345	pte_val(pte) |= _PAGE_DIRTY;
346	return pte;
347}
348
349/* pte_young - "is PTE marked as accessed"? */
350static inline int pte_young(pte_t pte)
351{
352	return pte_val(pte) & _PAGE_ACCESSED;
353}
354
355/* pte_dirty - "is PTE dirty?" */
356static inline int pte_dirty(pte_t pte)
357{
358	return pte_val(pte) & _PAGE_DIRTY;
359}
360
361/* pte_modify - set protection bits on PTE */
362static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
363{
364	pte_val(pte) &= PAGE_MASK;
365	pte_val(pte) |= pgprot_val(prot);
366	return pte;
367}
368
369/* pte_wrprotect - mark page as not writable */
370static inline pte_t pte_wrprotect(pte_t pte)
371{
372	pte_val(pte) &= ~_PAGE_WRITE;
373	return pte;
374}
375
376/* pte_mkwrite - mark page as writable */
377static inline pte_t pte_mkwrite(pte_t pte)
378{
379	pte_val(pte) |= _PAGE_WRITE;
380	return pte;
381}
382
383/* pte_mkexec - mark PTE as executable */
384static inline pte_t pte_mkexec(pte_t pte)
385{
386	pte_val(pte) |= _PAGE_EXECUTE;
387	return pte;
388}
389
390/* pte_read - "is PTE marked as readable?" */
391static inline int pte_read(pte_t pte)
392{
393	return pte_val(pte) & _PAGE_READ;
394}
395
396/* pte_write - "is PTE marked as writable?" */
397static inline int pte_write(pte_t pte)
398{
399	return pte_val(pte) & _PAGE_WRITE;
400}
401
402
403/* pte_exec - "is PTE marked as executable?" */
404static inline int pte_exec(pte_t pte)
405{
406	return pte_val(pte) & _PAGE_EXECUTE;
407}
408
409/* __pte_to_swp_entry - extract swap entry from PTE */
410#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
411
412/* __swp_entry_to_pte - extract PTE from swap entry */
413#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
414
415/* pfn_pte - convert page number and protection value to page table entry */
416#define pfn_pte(pfn, pgprot) __pte((pfn << PAGE_SHIFT) | pgprot_val(pgprot))
417
418/* pte_pfn - convert pte to page frame number */
419#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
420#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
421
422/*
423 * set_pte_at - update page table and do whatever magic may be
424 * necessary to make the underlying hardware/firmware take note.
425 *
426 * VM may require a virtual instruction to alert the MMU.
427 */
428#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
429
430/*
431 * May need to invoke the virtual machine as well...
432 */
433#define pte_unmap(pte)		do { } while (0)
434#define pte_unmap_nested(pte)	do { } while (0)
435
436/*
437 * pte_offset_map - returns the linear address of the page table entry
438 * corresponding to an address
439 */
440#define pte_offset_map(dir, address)                                    \
441	((pte_t *)page_address(pmd_page(*(dir))) + __pte_offset(address))
442
443#define pte_offset_map_nested(pmd, addr) pte_offset_map(pmd, addr)
444
445/* pte_offset_kernel - kernel version of pte_offset */
446#define pte_offset_kernel(dir, address) \
447	((pte_t *) (unsigned long) __va(pmd_val(*dir) & PAGE_MASK) \
448				+  __pte_offset(address))
449
450/* ZERO_PAGE - returns the globally shared zero page */
451#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
452
453#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
454
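/*
 * Hypothetical walk (illustration only, not part of the original header):
 * the helpers above combine into a two-level lookup roughly like
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);	(folded by pgtable-nopud)
 *	pmd_t *pmd = pmd_offset(pud, addr);	(folded by pgtable-nopmd)
 *	pte_t *ptep = pte_offset_kernel(pmd, addr);
 *
 * Since pud and pmd are folded into the pgd on this 2-level layout, the
 * middle two steps are no-ops, and ptep indexes the table whose physical
 * base is stored in the pgd entry.
 */
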
455/*  I think this is in case we have page table caches; needed by init/main.c  */
456#define pgtable_cache_init()    do { } while (0)
457
458/*
459 * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the
460 * PTE is interpreted as swap information.  Depending on the _PAGE_FILE
461 * bit, the remaining free bits are either interpreted as a file offset
462 * or a swap type/offset tuple.  Rather than have the TLB fill handler
463 * test _PAGE_PRESENT, we're going to reserve the permissions bits
464 * and set them to all zeros for swap entries, which speeds up the
465 * miss handler at the cost of 3 bits of offset.  That trade-off can
466 * be revisited if necessary, but Hexagon processor architecture and
467 * target applications suggest a lot of TLB misses and not much swap space.
468 *
469 * Format of swap PTE:
470 *	bit	0:	Present (zero)
471 *	bit	1:	_PAGE_FILE (zero)
472 *	bits	2-6:	swap type (arch independent layer uses 5 bits max)
473 *	bits	7-9:	bits 2:0 of offset
474 *	bits 10-12:	effectively _PAGE_PROTNONE (all zero)
475 *	bits 13-31:  bits 21:3 of swap offset
476 *
477 * Format of file PTE:
478 *	bit	0:	Present (zero)
479 *	bit	1:	_PAGE_FILE (set)
480 *	bits	2-9:	bits 7:0 of offset
481 *	bits 10-12:	effectively _PAGE_PROTNONE (all zero)
482 *	bits 13-31:  bits 26:8 of swap offset
483 *
484 * The split offset makes some of the following macros a little gnarly,
485 * but there's plenty of precedent for this sort of thing.
486 */
487#define PTE_FILE_MAX_BITS     27
488
489/* Used for swap PTEs */
490#define __swp_type(swp_pte)		(((swp_pte).val >> 2) & 0x1f)
491
492#define __swp_offset(swp_pte) \
493	((((swp_pte).val >> 7) & 0x7) | (((swp_pte).val >> 10) & 0x003ffff8))
494
495#define __swp_entry(type, offset) \
496	((swp_entry_t)	{ \
497		((type << 2) | \
498		 ((offset & 0x3ffff8) << 10) | ((offset & 0x7) << 7)) })
499
500/* Used for file PTEs */
501#define pte_file(pte) \
502	((pte_val(pte) & (_PAGE_FILE | _PAGE_PRESENT)) == _PAGE_FILE)
503
504#define pte_to_pgoff(pte) \
505	(((pte_val(pte) >> 2) & 0xff) | ((pte_val(pte) >> 5) & 0x07ffff00))
506
507#define pgoff_to_pte(off) \
508	((pte_t) { ((((off) & 0x7ffff00) << 5) | (((off) & 0xff) << 2)\
509	| _PAGE_FILE) })
510
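/*
 * Worked example (illustration only, not part of the original source):
 * encoding file offset 0x123 in a nonlinear file PTE.
 *
 *	pte_t pte = pgoff_to_pte(0x123);
 *
 * gives pte_val(pte) == 0x208e: offset bits 7:0 land in PTE bits 2-9,
 * the higher offset bits in bits 13-31, and _PAGE_FILE (bit 1) is set
 * while Present stays clear, so pte_file(pte) is true and
 * pte_to_pgoff(pte) recovers 0x123.
 */
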
511/*  Oh boy.  There are a lot of possible arch overrides found in this file.  */
512#include <asm-generic/pgtable.h>
513
514#endif