// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h>	/* for initrd_* */
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

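/*
 * Set once mem_init() has finished; early code (for instance the
 * OpenRISC ioremap() implementation) checks this to tell whether the
 * normal memory allocators are usable yet.
 */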
int mem_init_done;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

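/*
 * With this kernel generation's free_area_init(), the zone sizes are
 * passed as an array of page-frame counts; OpenRISC has no highmem,
 * so everything up to max_low_pfn lands in ZONE_NORMAL.
 */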
static void __init zone_sizes_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];

	/* Clear the zone sizes */
	memset(zones_size, 0, sizeof(zones_size));

	/* We use only ZONE_NORMAL */
	zones_size[ZONE_NORMAL] = max_low_pfn;

	free_area_init(zones_size);
}

/* These mark the extents of read-only kernel pages...
 * ...from vmlinux.lds.S
 */
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	struct memblock_region *region;

	v = PAGE_OFFSET;

	for_each_memblock(memory, region) {
		p = (u32) region->base & PAGE_MASK;
		e = p + (u32) region->size;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;

			pue = pud_offset(pge, v);
			pme = pmd_offset(pue, v);

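			/*
			 * With only two page-table levels, the pud and pmd
			 * are folded away: pud_offset() and pmd_offset()
			 * just hand back the entry they were given, so all
			 * three pointers must alias the same pgd entry.
			 * Anything else means the levels are no longer
			 * folded and this code needs rework.
			 */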
			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for "
				      "two-level page tables",
				      __func__);
			}

			/* Allocate one page to hold the PTEs; note that
			 * memblock_alloc_raw() does not zero it, so the
			 * entries that map pages are filled in explicitly
			 * by the loop below.
			 */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
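			/*
			 * Hook the new PTE page into the pmd: __pa() yields
			 * the physical address of the page, and
			 * _KERNPG_TABLE adds the attribute bits for a
			 * kernel page-table entry.
			 */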
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
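				/*
				 * Kernel text and rodata, delimited by
				 * _s_kernel_ro/_e_kernel_ro, are mapped
				 * read-only; all other pages get full
				 * kernel read-write permissions.
				 */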
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       region->base, region->base + region->size);
	}
}

void __init paging_init(void)
{
	extern void tlb_init(void);

	unsigned long end;
	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 * switch_mm); the low-level TLB miss handlers walk this per-CPU
	 * entry to find the page tables for the running CPU
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);

	map_ram();

	zone_sizes_init();

	/* self modifying code ;) */
	/* Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

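		/*
		 * The word stored at each vector is itself an instruction:
		 * the OR1K l.j opcode is all zero bits, so a word whose
		 * low 26 bits hold a signed PC-relative offset, counted
		 * in instructions (hence the ">> 2"), decodes as a jump
		 * to the miss handler.
		 */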
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate the instruction cache blocks that hold the patched
	 * vectors, so the CPU refetches the new jump instructions
	 */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

	/* The new TLB miss handlers and kernel page tables are now in
	 * place.  Make sure that page flags get updated for all pages
	 * in the TLB by flushing the TLB and forcing all TLB entries to
	 * be recreated from their page table flags.
	 */
	flush_tlb_all();
}
void __init mem_init(void)
{
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	memblock_free_all();

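	/* log the standard "Memory: ...K/...K available" summary line */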
	mem_init_print_info(NULL);

	printk(KERN_INFO "mem_init_done ...........................................\n");
	mem_init_done = 1;
}