arch/mips/include/asm/pgtable-bits.h (v5.4):

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2002 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2002  Maciej W. Rozycki
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H


/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left. That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software.  We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page.  By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; this keeps a mod bit via the hardware
 * dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the CPU can do
 * unpredictable things.  The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
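
/*
 * Illustrative sketch (hypothetical, for exposition only): the software
 * ref/mod scheme described above is driven by helpers along the lines of
 * pte_mkyoung()/pte_mkdirty() in asm/pgtable.h.  The functions below only
 * restate the idea using the bit macros defined further down in this
 * file; the real helpers differ in detail.
 */
#if 0	/* example only, never compiled */
static inline unsigned long example_mark_young(unsigned long pte_val)
{
	pte_val |= _PAGE_ACCESSED;		/* software "referenced" bit */
	if (!(pte_val & _PAGE_NO_READ))
		pte_val |= _PAGE_SILENT_READ;	/* hardware valid (V) bit */
	return pte_val;
}

static inline unsigned long example_mark_dirty(unsigned long pte_val)
{
	pte_val |= _PAGE_MODIFIED;		/* software "dirty" bit */
	if (pte_val & _PAGE_WRITE)
		pte_val |= _PAGE_SILENT_WRITE;	/* hardware dirty (D) bit */
	return pte_val;
}
#endif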
#if defined(CONFIG_XPA)

/*
 * Page table bit offsets used for 64 bit physical addressing on
 * MIPS32r5 with XPA.
 */
enum pgtable_bits {
	/* Used by TLB hardware (placed in EntryLo*) */
	_PAGE_NO_EXEC_SHIFT,
	_PAGE_NO_READ_SHIFT,
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,

	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT = 24,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
	_PAGE_SPECIAL_SHIFT,
#endif
};

/*
 * Bits for extended EntryLo0/EntryLo1 registers
 */
#define _PFNX_MASK		0xffffff

#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * Page table bit offsets used for 36 bit physical addressing on MIPS32,
 * for example with Alchemy or Netlogic XLP/XLR.
 */
enum pgtable_bits {
	/* Used by TLB hardware (placed in EntryLo*) */
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,

	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
	_PAGE_NO_READ_SHIFT,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
	_PAGE_SPECIAL_SHIFT,
#endif
};

#elif defined(CONFIG_CPU_R3K_TLB)

/* Page table bits used for r3k systems */
enum pgtable_bits {
	/* Used only by software (writes to EntryLo ignored) */
	_PAGE_PRESENT_SHIFT,
	_PAGE_NO_READ_SHIFT,
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
	_PAGE_SPECIAL_SHIFT,
#endif

	/* Used by TLB hardware (placed in EntryLo) */
	_PAGE_GLOBAL_SHIFT = 8,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_UNCACHED_SHIFT,
};

#else

/* Page table bits used for r4k systems */
enum pgtable_bits {
	/* Used only by software (masked out before writing EntryLo*) */
	_PAGE_PRESENT_SHIFT,
#if !defined(CONFIG_CPU_HAS_RIXI)
	_PAGE_NO_READ_SHIFT,
#endif
	_PAGE_WRITE_SHIFT,
	_PAGE_ACCESSED_SHIFT,
	_PAGE_MODIFIED_SHIFT,
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
	_PAGE_HUGE_SHIFT,
#endif
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
	_PAGE_SPECIAL_SHIFT,
#endif

	/* Used by TLB hardware (placed in EntryLo*) */
#if defined(CONFIG_CPU_HAS_RIXI)
	_PAGE_NO_EXEC_SHIFT,
	_PAGE_NO_READ_SHIFT,
#endif
	_PAGE_GLOBAL_SHIFT,
	_PAGE_VALID_SHIFT,
	_PAGE_DIRTY_SHIFT,
	_CACHE_SHIFT,
};

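/*
 * Worked example (hypothetical configuration, for exposition only):
 * with CONFIG_CPU_HAS_RIXI, huge pages and PTE_SPECIAL all disabled,
 * the enum above yields
 *
 *	_PAGE_PRESENT_SHIFT  = 0	_PAGE_GLOBAL_SHIFT = 5
 *	_PAGE_NO_READ_SHIFT  = 1	_PAGE_VALID_SHIFT  = 6
 *	_PAGE_WRITE_SHIFT    = 2	_PAGE_DIRTY_SHIFT  = 7
 *	_PAGE_ACCESSED_SHIFT = 3	_CACHE_SHIFT       = 8
 *	_PAGE_MODIFIED_SHIFT = 4
 *
 * so with 4 KB pages the _PFN_SHIFT defined below works out to
 * 12 - 12 + 8 + 3 = 11, i.e. the PFN occupies the PTE bits from
 * bit 11 upwards.
 */
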
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

/* Used only by software */
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)
#if defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
# define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
#endif
#if defined(CONFIG_ARCH_HAS_PTE_SPECIAL)
# define _PAGE_SPECIAL		(1 << _PAGE_SPECIAL_SHIFT)
#else
# define _PAGE_SPECIAL		0
#endif

/* Used by TLB hardware (placed in EntryLo*) */
#if defined(CONFIG_XPA)
# define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
#elif defined(CONFIG_CPU_HAS_RIXI)
# define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
#endif
#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#if defined(CONFIG_CPU_R3K_TLB)
# define _CACHE_UNCACHED	(1 << _CACHE_UNCACHED_SHIFT)
# define _CACHE_MASK		_CACHE_UNCACHED
# define _PFN_SHIFT		PAGE_SHIFT
#else
# define _CACHE_MASK		(7 << _CACHE_SHIFT)
# define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
#endif

#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC		0
#endif

#define _PAGE_SILENT_READ	_PAGE_VALID
#define _PAGE_SILENT_WRITE	_PAGE_DIRTY

#define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))
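
/*
 * Illustrative sketch (hypothetical, for exposition only): _PFN_SHIFT
 * and _PFN_MASK describe where the page frame number sits inside a
 * software PTE, so converting between a pfn and a pte value is a plain
 * shift and mask.  The real conversions are done by pte_pfn()/pfn_pte()
 * in asm/pgtable.h; the example below merely restates the layout.
 */
#if 0	/* example only, never compiled */
static inline unsigned long example_pte_to_pfn(unsigned long pte_val)
{
	return (pte_val & _PFN_MASK) >> _PFN_SHIFT;
}

static inline unsigned long example_pfn_to_pte(unsigned long pfn,
					       unsigned long prot)
{
	return (pfn << _PFN_SHIFT) | prot;	/* prot = _PAGE_* | _CACHE_* bits */
}
#endif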

/*
 * The final layouts of the PTE bits are:
 *
 *   64-bit, R1 or earlier:     CCC D V G [S H] M A W R P
 *   32-bit, R1 or earlier:     CCC D V G M A W R P
 *   64-bit, R2 or later:       CCC D V G RI/R XI [S H] M A W P
 *   32-bit, R2 or later:       CCC D V G RI/R XI M A W P
 */


/*
 * pte_to_entrylo converts a page table entry (PTE) into a Mips
 * entrylo0/1 value.
 */
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
#ifdef CONFIG_CPU_HAS_RIXI
	if (cpu_has_rixi) {
		int sa;
#ifdef CONFIG_32BIT
		sa = 31 - _PAGE_NO_READ_SHIFT;
#else
		sa = 63 - _PAGE_NO_READ_SHIFT;
#endif
		/*
		 * C has no way to express that this is a DSRL
		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily
		 * in the fast path this is done in assembly
		 */
		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
			((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
	}
#endif

	return pte_val >> _PAGE_GLOBAL_SHIFT;
}
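
/*
 * Illustrative usage sketch (hypothetical, for exposition only): the TLB
 * handling code converts the even/odd PTEs of a double page with
 * pte_to_entrylo() before loading them into EntryLo0/EntryLo1, roughly as
 * below (see arch/mips/mm/tlb-r4k.c; the fast-path refill handlers
 * generated by arch/mips/mm/tlbex.c do the equivalent in assembly).
 */
#if 0	/* example only, never compiled */
static inline void example_load_tlb_pair(pte_t *ptep)
{
	write_c0_entrylo0(pte_to_entrylo(pte_val(ptep[0])));
	write_c0_entrylo1(pte_to_entrylo(pte_val(ptep[1])));
	/* followed by a tlbwi/tlbwr to write the TLB entry pair */
}
#endif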

/*
 * Cache attributes
 */
#if defined(CONFIG_CPU_R3K_TLB)

#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too.  Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#elif defined(CONFIG_MACH_INGENIC)

/* Ingenic uses the WA bit to achieve write-combine memory writes */
#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)

#endif

#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA		(0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA		(1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED			(2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT	(3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE		(4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW		(5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW		(6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED	(7<<_CACHE_SHIFT)
#endif

#define __READABLE	(_PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED |	\
			 _PFN_MASK | _CACHE_MASK)
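
/*
 * Illustrative sketch (hypothetical, for exposition only): _PAGE_CHG_MASK
 * names the bits that must survive a protection change - the software
 * accessed/modified state, the page frame number and the cache attribute.
 * pte_modify() in asm/pgtable.h keeps these and replaces everything else
 * with the new protection bits, roughly:
 */
#if 0	/* example only, never compiled */
static inline unsigned long example_modify_prot(unsigned long pte_val,
						unsigned long newprot)
{
	return (pte_val & _PAGE_CHG_MASK) | (newprot & ~_PAGE_CHG_MASK);
}
#endif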

#endif /* _ASM_PGTABLE_BITS_H */

arch/mips/include/asm/pgtable-bits.h (v4.6):

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2002 by Ralf Baechle
 * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2002  Maciej W. Rozycki
 */
#ifndef _ASM_PGTABLE_BITS_H
#define _ASM_PGTABLE_BITS_H


/*
 * Note that we shift the lower 32 bits of each EntryLo[01] entry
 * 6 bits to the left. That way we can convert the PFN into the
 * physical address by a single 'and' operation and gain 6 additional
 * bits for storing information which isn't present in a normal
 * MIPS page table.
 *
 * Similar to the Alpha port, we need to keep track of the ref
 * and mod bits in software.  We have a software "yeah you can read
 * from this page" bit, and a hardware one which actually lets the
 * process read from the page.  By the same token we have a software
 * writable bit and the real hardware one which actually lets the
 * process write to the page; this keeps a mod bit via the hardware
 * dirty bit.
 *
 * Certain revisions of the R4000 and R5000 have a bug where if a
 * certain sequence occurs in the last 3 instructions of an executable
 * page, and the following page is not mapped, the CPU can do
 * unpredictable things.  The code (when it is written) to deal with
 * this problem will be in the update_mmu_cache() code for the r4k.
 */
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

/*
 * The following bits are implemented by the TLB hardware
 */
#define _PAGE_NO_EXEC_SHIFT	0
#define _PAGE_NO_EXEC		(1 << _PAGE_NO_EXEC_SHIFT)
#define _PAGE_NO_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_NO_READ		(1 << _PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK		(7 << _CACHE_SHIFT)

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	(24)
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

/*
 * Bits for extended EntryLo0/EntryLo1 registers
 */
#define _PFNX_MASK		0xffffff

#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	(0)
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

/*
 * The following bits are implemented by the TLB hardware
 */
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 4)
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_UNCACHED_SHIFT	(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_UNCACHED		(1 << _CACHE_UNCACHED_SHIFT)
#define _CACHE_MASK		_CACHE_UNCACHED

#define _PFN_SHIFT		PAGE_SHIFT

#else
/*
 * Below are the "Normal" R4K cases
 */

/*
 * The following bits are implemented in software
 */
#define _PAGE_PRESENT_SHIFT	0
#define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
#define _PAGE_WRITE_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#else
#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
#define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
#endif
#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
/* Huge TLB page */
#define _PAGE_HUGE_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#define _PAGE_HUGE		(1 << _PAGE_HUGE_SHIFT)
#endif	/* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */

#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
/* XI - page cannot be executed */
#ifdef _PAGE_HUGE_SHIFT
#define _PAGE_NO_EXEC_SHIFT	(_PAGE_HUGE_SHIFT + 1)
#else
#define _PAGE_NO_EXEC_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#endif
#define _PAGE_NO_EXEC		(cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)

/* RI - page cannot be read */
#define _PAGE_READ_SHIFT	(_PAGE_NO_EXEC_SHIFT + 1)
#define _PAGE_READ		(cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
#define _PAGE_NO_READ_SHIFT	_PAGE_READ_SHIFT
#define _PAGE_NO_READ		(cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
#endif	/* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */

#if defined(_PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_NO_READ_SHIFT + 1)
#elif defined(_PAGE_HUGE_SHIFT)
#define _PAGE_GLOBAL_SHIFT	(_PAGE_HUGE_SHIFT + 1)
#else
#define _PAGE_GLOBAL_SHIFT	(_PAGE_MODIFIED_SHIFT + 1)
#endif
#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)

#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
#define _CACHE_MASK		(7 << _CACHE_SHIFT)

#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC		0
#endif
#ifndef _PAGE_NO_READ
#define _PAGE_NO_READ		0
#endif

#define _PAGE_SILENT_READ	_PAGE_VALID
#define _PAGE_SILENT_WRITE	_PAGE_DIRTY

#define _PFN_MASK		(~((1 << (_PFN_SHIFT)) - 1))

/*
 * The final layouts of the PTE bits are:
 *
 *   64-bit, R1 or earlier:     CCC D V G [S H] M A W R P
 *   32-bit, R1 or earlier:     CCC D V G M A W R P
 *   64-bit, R2 or later:       CCC D V G RI/R XI [S H] M A W P
 *   32-bit, R2 or later:       CCC D V G RI/R XI M A W P
 */


#ifndef __ASSEMBLY__
/*
 * pte_to_entrylo converts a page table entry (PTE) into a Mips
 * entrylo0/1 value.
 */
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
	if (cpu_has_rixi) {
		int sa;
#ifdef CONFIG_32BIT
		sa = 31 - _PAGE_NO_READ_SHIFT;
#else
		sa = 63 - _PAGE_NO_READ_SHIFT;
#endif
		/*
		 * C has no way to express that this is a DSRL
		 * _PAGE_NO_EXEC_SHIFT followed by a ROTR 2.  Luckily
		 * in the fast path this is done in assembly
		 */
		return (pte_val >> _PAGE_GLOBAL_SHIFT) |
			((pte_val & (_PAGE_NO_EXEC | _PAGE_NO_READ)) << sa);
	}
#endif

	return pte_val >> _PAGE_GLOBAL_SHIFT;
}
#endif

/*
 * Cache attributes
 */
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

#define _CACHE_CACHABLE_NONCOHERENT 0
#define _CACHE_UNCACHED_ACCELERATED _CACHE_UNCACHED

#elif defined(CONFIG_CPU_SB1)

/* No penalty for being coherent on the SB1, so just
   use it for "noncoherent" spaces, too.  Shouldn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (5<<_CACHE_SHIFT)

#elif defined(CONFIG_CPU_LOONGSON3)

/* Using COHERENT flag for NONCOHERENT doesn't hurt. */

#define _CACHE_CACHABLE_NONCOHERENT (3<<_CACHE_SHIFT)  /* LOONGSON       */
#define _CACHE_CACHABLE_COHERENT    (3<<_CACHE_SHIFT)  /* LOONGSON-3     */

#elif defined(CONFIG_MACH_INGENIC)

/* Ingenic uses the WA bit to achieve write-combine memory writes */
#define _CACHE_UNCACHED_ACCELERATED (1<<_CACHE_SHIFT)

#endif

#ifndef _CACHE_CACHABLE_NO_WA
#define _CACHE_CACHABLE_NO_WA		(0<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_WA
#define _CACHE_CACHABLE_WA		(1<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED
#define _CACHE_UNCACHED			(2<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_NONCOHERENT
#define _CACHE_CACHABLE_NONCOHERENT	(3<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CE
#define _CACHE_CACHABLE_CE		(4<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_COW
#define _CACHE_CACHABLE_COW		(5<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_CACHABLE_CUW
#define _CACHE_CACHABLE_CUW		(6<<_CACHE_SHIFT)
#endif
#ifndef _CACHE_UNCACHED_ACCELERATED
#define _CACHE_UNCACHED_ACCELERATED	(7<<_CACHE_SHIFT)
#endif

#define __READABLE	(_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
#define __WRITEABLE	(_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)

#define _PAGE_CHG_MASK	(_PAGE_ACCESSED | _PAGE_MODIFIED |	\
			 _PFN_MASK | _CACHE_MASK)

#endif /* _ASM_PGTABLE_BITS_H */