#ifndef _SPARC64_MM_INIT_H
#define _SPARC64_MM_INIT_H

/* Most of the symbols in this file are defined in init.c and
 * marked non-static so that assembler code can get at them.
 */

#define MAX_PHYS_ADDRESS		(1UL << 41UL)
#define KPTE_BITMAP_CHUNK_SZ		(256UL * 1024UL * 1024UL)
#define KPTE_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / KPTE_BITMAP_CHUNK_SZ) / 8)
#define VALID_ADDR_BITMAP_CHUNK_SZ	(4UL * 1024UL * 1024UL)
#define VALID_ADDR_BITMAP_BYTES	\
	((MAX_PHYS_ADDRESS / VALID_ADDR_BITMAP_CHUNK_SZ) / 8)
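
/*
 * Worked example of the sizes the two macros above evaluate to; this
 * follows directly from the definitions and is shown only for illustration:
 *
 *   KPTE_BITMAP_BYTES       = (2^41 / 2^28) / 8 = 1024 bytes
 *   VALID_ADDR_BITMAP_BYTES = (2^41 / 2^22) / 8 = 65536 bytes
 */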

extern unsigned long kern_linear_pte_xor[2];
extern unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
extern unsigned int sparc64_highest_unlocked_tlb_ent;
extern unsigned long sparc64_kern_pri_context;
extern unsigned long sparc64_kern_pri_nuc_bits;
extern unsigned long sparc64_kern_sec_context;

struct seq_file;
extern void mmu_info(struct seq_file *m);

struct linux_prom_translation {
	unsigned long virt;
	unsigned long size;
	unsigned long data;
};

/* Exported for kernel TLB miss handling in ktlb.S */
extern struct linux_prom_translation prom_trans[512];
extern unsigned int prom_trans_ents;

/* Exported for SMP bootup purposes. */
extern unsigned long kern_locked_tte_data;

extern void prom_world(int enter);

#ifdef CONFIG_SPARSEMEM_VMEMMAP
#define VMEMMAP_CHUNK_SHIFT	22
#define VMEMMAP_CHUNK		(1UL << VMEMMAP_CHUNK_SHIFT)
#define VMEMMAP_CHUNK_MASK	~(VMEMMAP_CHUNK - 1UL)
#define VMEMMAP_ALIGN(x)	(((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
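
/*
 * Illustration only: VMEMMAP_CHUNK is 4MB, so VMEMMAP_ALIGN() rounds an
 * address up to the next 4MB boundary, e.g.
 * VMEMMAP_ALIGN(0x401000UL) == 0x800000UL.
 */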

#define VMEMMAP_SIZE	((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
			  sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
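
/*
 * Rough sketch of the resulting table size, assuming illustrative values
 * (PAGE_SHIFT == 13, MAX_PHYSADDR_BITS == 42, sizeof(struct page) == 64;
 * the real values depend on the configuration):
 *
 *   VMEMMAP_SIZE = ((2^42 >> 13) * 64) >> 22 = 2^13 = 8192 entries
 */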
extern unsigned long vmemmap_table[VMEMMAP_SIZE];
#endif

#endif /* _SPARC64_MM_INIT_H */