// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

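/* Set once the memory subsystem is fully up; early code (e.g. the
 * OpenRISC ioremap path) checks this to decide whether normal page
 * allocations are available yet.
 */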
int mem_init_done;

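/*
 * Tell free_area_init() the highest page frame number of each zone;
 * it sizes and initializes all memory zones from this one array.
 */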
static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/*
	 * We use only ZONE_NORMAL
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfn);
}

/* These mark the extents of the read-only kernel pages...
 * ...from vmlinux.lds.S
 */
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	phys_addr_t start, end;
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	u64 i;

	v = PAGE_OFFSET;

	for_each_mem_range(i, &start, &end) {
		p = (u32) start & PAGE_MASK;
		e = (u32) end;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;

			p4e = p4d_offset(pge, v);
			pue = pud_offset(p4e, v);
			pme = pmd_offset(pue, v);

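			/* With a two-level configuration the p4d, pud and
			 * pmd levels are all folded, so each *_offset()
			 * above must land back on the pgd entry itself;
			 * anything else means this kernel was built with
			 * more page-table levels than we can handle.
			 */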
			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for two-level page tables",
				      __func__);
			}

			/* Alloc one page for holding the PTEs... */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: %pa-%pa\n", __func__,
		       &start, &end);
	}
}

void __init paging_init(void)
{
	extern void tlb_init(void);

	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 * switch_mm)
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

	/* Self-modifying code ;)
	 *
	 * Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

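		/* 0x900 and 0xa00 are the OpenRISC DTLB- and ITLB-miss
		 * exception vectors.  The ORBIS32 l.j opcode is zero, so
		 * the PC-relative word offset ((target - vector) >> 2)
		 * stored below is by itself a complete l.j instruction
		 * that jumps to the new handler.
		 */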
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate instruction caches after code modification */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

	/* The new TLB miss handlers and kernel page tables are now in
	 * place.  Make sure that page flags get updated for all pages
	 * in the TLB by flushing the TLB and forcing all TLB entries to
	 * be recreated from their page table flags.
	 */
	flush_tlb_all();
}

/* References to section boundaries */

void __init mem_init(void)
{
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	printk("mem_init_done ...........................................\n");
	mem_init_done = 1;
}

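/* Translate the low vm_flags bits (VM_READ, VM_WRITE, VM_EXEC and
 * VM_SHARED) into page protections.  Private writable mappings get
 * PAGE_COPY* so the first write faults and can be handled as
 * copy-on-write; only shared writable mappings are writable outright.
 * DECLARE_VM_GET_PAGE_PROT below emits the vm_get_page_prot() helper
 * that indexes this table.
 */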
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY_X,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY_X,
	[VM_EXEC]					= PAGE_READONLY,
	[VM_EXEC | VM_READ]				= PAGE_READONLY_X,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_X,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY_X,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED_X,
	[VM_SHARED | VM_EXEC]				= PAGE_READONLY,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_READONLY_X,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_SHARED_X
};
DECLARE_VM_GET_PAGE_PROT