Linux v5.4: arch/openrisc/mm/init.c
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * OpenRISC init.c
  4 *
  5 * Linux architectural port borrowing liberally from similar works of
  6 * others.  All original copyrights apply as per the original source
  7 * declaration.
  8 *
  9 * Modifications for the OpenRISC architecture:
 10 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 11 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 12 */
 13
 14#include <linux/signal.h>
 15#include <linux/sched.h>
 16#include <linux/kernel.h>
 17#include <linux/errno.h>
 18#include <linux/string.h>
 19#include <linux/types.h>
 20#include <linux/ptrace.h>
 21#include <linux/mman.h>
 22#include <linux/mm.h>
 23#include <linux/swap.h>
 24#include <linux/smp.h>
 25#include <linux/memblock.h>
 26#include <linux/init.h>
 27#include <linux/delay.h>
 28#include <linux/blkdev.h>	/* for initrd_* */
 29#include <linux/pagemap.h>
 30
 31#include <asm/pgalloc.h>
 32#include <asm/pgtable.h>
 33#include <asm/dma.h>
 34#include <asm/io.h>
 35#include <asm/tlb.h>
 36#include <asm/mmu_context.h>
 37#include <asm/kmap_types.h>
 38#include <asm/fixmap.h>
 39#include <asm/tlbflush.h>
 40#include <asm/sections.h>
 41
 42int mem_init_done;
 43
 44DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 45
 46static void __init zone_sizes_init(void)
 47{
 48	unsigned long zones_size[MAX_NR_ZONES];
 49
 50	/* Clear the zone sizes */
 51	memset(zones_size, 0, sizeof(zones_size));
 52
 53	/*
 54	 * We use only ZONE_NORMAL
 55	 */
 56	zones_size[ZONE_NORMAL] = max_low_pfn;
 57
 58	free_area_init(zones_size);
 59}
 60
 61extern const char _s_kernel_ro[], _e_kernel_ro[];
 62
 63/*
 64 * Map all physical memory into kernel's address space.
 65 *
 66 * This is explicitly coded for two-level page tables, so if you need
 67 * something else then this needs to change.
 68 */
 69static void __init map_ram(void)
 70{
 71	unsigned long v, p, e;
 72	pgprot_t prot;
 73	pgd_t *pge;
 74	pud_t *pue;
 75	pmd_t *pme;
 76	pte_t *pte;
 77	/* These mark extents of read-only kernel pages...
 78	 * ...from vmlinux.lds.S
 79	 */
 80	struct memblock_region *region;
 81
 82	v = PAGE_OFFSET;
 83
 84	for_each_memblock(memory, region) {
 85		p = (u32) region->base & PAGE_MASK;
 86		e = p + (u32) region->size;
 87
 88		v = (u32) __va(p);
 89		pge = pgd_offset_k(v);
 90
 91		while (p < e) {
 92			int j;
 93			pue = pud_offset(pge, v);
 94			pme = pmd_offset(pue, v);
 95
 96			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
 97				panic("%s: OR1K kernel hardcoded for "
 98				      "two-level page tables",
 99				     __func__);
100			}
101
102			/* Alloc one page for holding PTE's... */
103			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
104			if (!pte)
105				panic("%s: Failed to allocate page for PTEs\n",
106				      __func__);
107			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
108
109			/* Fill the newly allocated page with PTE'S */
110			for (j = 0; p < e && j < PTRS_PER_PTE;
111			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
112				if (v >= (u32) _e_kernel_ro ||
113				    v < (u32) _s_kernel_ro)
114					prot = PAGE_KERNEL;
115				else
116					prot = PAGE_KERNEL_RO;
117
118				set_pte(pte, mk_pte_phys(p, prot));
119			}
120
121			pge++;
122		}
123
124		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
125		       region->base, region->base + region->size);
126	}
127}
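/*
 * Editor's illustration (standalone sketch, not part of this kernel file):
 * how a two-level walk like the one in map_ram() above splits a 32-bit
 * kernel virtual address into a PGD index, a PTE index and a page offset.
 * The 13-bit page shift (8 KiB pages) and 4-byte PTEs are assumptions
 * chosen only for this sketch; the real values come from the OpenRISC
 * page-table headers.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_SHIFT   13                          /* assumed: 8 KiB pages */
#define EX_PTRS_PER_PTE (1u << (EX_PAGE_SHIFT - 2)) /* 4-byte PTEs per page */
#define EX_PGDIR_SHIFT  (2 * EX_PAGE_SHIFT - 2)     /* bits mapped by one PGD entry */

int main(void)
{
	uint32_t v = 0xc0012345;                    /* arbitrary kernel virtual address */
	unsigned pgd_index = v >> EX_PGDIR_SHIFT;
	unsigned pte_index = (v >> EX_PAGE_SHIFT) & (EX_PTRS_PER_PTE - 1);
	unsigned offset    = v & ((1u << EX_PAGE_SHIFT) - 1);

	/* e.g. 0xc0012345 -> pgd 192, pte 9, offset 0x345 with these sizes */
	printf("pgd index %u, pte index %u, page offset 0x%x\n",
	       pgd_index, pte_index, offset);
	return 0;
}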
128
129void __init paging_init(void)
130{
131	extern void tlb_init(void);
132
133	unsigned long end;
134	int i;
135
136	printk(KERN_INFO "Setting up paging and PTEs.\n");
137
138	/* clear out the init_mm.pgd that will contain the kernel's mappings */
139
140	for (i = 0; i < PTRS_PER_PGD; i++)
141		swapper_pg_dir[i] = __pgd(0);
142
143	/* make sure the current pgd table points to something sane
144	 * (even if it is most probably not used until the next
145	 *  switch_mm)
146	 */
147	current_pgd[smp_processor_id()] = init_mm.pgd;
148
149	end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
150
151	map_ram();
152
153	zone_sizes_init();
154
155	/* self modifying code ;) */
156	/* Since the old TLB miss handler has been running up until now,
157	 * the kernel pages are still all RW, so we can still modify the
158	 * text directly... after this change and a TLB flush, the kernel
159	 * pages will become RO.
160	 */
161	{
162		extern unsigned long dtlb_miss_handler;
163		extern unsigned long itlb_miss_handler;
164
165		unsigned long *dtlb_vector = __va(0x900);
166		unsigned long *itlb_vector = __va(0xa00);
167
168		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
169		*itlb_vector = ((unsigned long)&itlb_miss_handler -
170				(unsigned long)itlb_vector) >> 2;
171
172		/* Soft ordering constraint to ensure that dtlb_vector is
173		 * the last thing updated
174		 */
175		barrier();
176
177		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
178		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
179				(unsigned long)dtlb_vector) >> 2;
180				(unsigned long)dtlb_vector) >> 2;
181	}
182
183	/* Soft ordering constraint to ensure that cache invalidation and
184	 * TLB flush really happen _after_ code has been modified.
185	 */
186	barrier();
187
188	/* Invalidate instruction caches after code modification */
189	mtspr(SPR_ICBIR, 0x900);
190	mtspr(SPR_ICBIR, 0xa00);
191
 192	/* New TLB miss handlers and kernel page tables are now in place.
193	 * Make sure that page flags get updated for all pages in TLB by
194	 * flushing the TLB and forcing all TLB entries to be recreated
195	 * from their page table flags.
196	 */
197	flush_tlb_all();
198}
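/*
 * Editor's illustration (standalone sketch, not part of this kernel file):
 * the value paging_init() above writes at each exception vector is the
 * distance from the vector to the miss handler, counted in 32-bit words.
 * Because the OR1K l.j opcode is the all-zero bit pattern, a small positive
 * word offset on its own decodes as "l.j handler"; both that reading of the
 * encoding and the addresses below are assumptions used only for this sketch.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t vector  = 0xc0000a00;  /* hypothetical ITLB-miss vector (virtual) */
	uint32_t handler = 0xc0002400;  /* hypothetical itlb_miss_handler address */
	uint32_t insn    = (handler - vector) >> 2;

	/* (0xc0002400 - 0xc0000a00) >> 2 = 0x680: jump 1664 words forward */
	printf("word written at vector: 0x%08x\n", (unsigned)insn);
	return 0;
}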
199
200/* References to section boundaries */
201
202void __init mem_init(void)
203{
204	BUG_ON(!mem_map);
205
206	max_mapnr = max_low_pfn;
207	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
208
209	/* clear the zero-page */
210	memset((void *)empty_zero_page, 0, PAGE_SIZE);
211
212	/* this will put all low memory onto the freelists */
213	memblock_free_all();
214
215	mem_init_print_info(NULL);
216
217	printk("mem_init_done ...........................................\n");
218	mem_init_done = 1;
219	return;
220}
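For reference, mem_init() above derives high_memory from max_low_pfn via __va(max_low_pfn * PAGE_SIZE). A minimal standalone sketch of that arithmetic follows; the PAGE_OFFSET value, the 8 KiB page size and the frame count are assumptions chosen for illustration, not values taken from the OpenRISC headers.

#include <stdio.h>
#include <stdint.h>

#define EX_PAGE_OFFSET 0xc0000000u    /* assumed base of the kernel's linear mapping */
#define EX_PAGE_SIZE   8192u          /* assumed 8 KiB pages */

int main(void)
{
	uint32_t max_low_pfn = 0x4000;                    /* hypothetical: 16384 frames = 128 MiB */
	uint32_t phys_end    = max_low_pfn * EX_PAGE_SIZE;
	uint32_t high_mem    = EX_PAGE_OFFSET + phys_end; /* __va(p) is p + PAGE_OFFSET here */

	printf("physical end 0x%08x -> high_memory 0x%08x\n",
	       (unsigned)phys_end, (unsigned)high_mem);
	return 0;
}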
Linux v3.5.6: arch/openrisc/mm/init.c
  1/*
  2 * OpenRISC init.c
  3 *
  4 * Linux architectural port borrowing liberally from similar works of
  5 * others.  All original copyrights apply as per the original source
  6 * declaration.
  7 *
  8 * Modifications for the OpenRISC architecture:
  9 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 10 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 11 *
 12 *      This program is free software; you can redistribute it and/or
 13 *      modify it under the terms of the GNU General Public License
 14 *      as published by the Free Software Foundation; either version
 15 *      2 of the License, or (at your option) any later version.
 16 */
 17
 18#include <linux/signal.h>
 19#include <linux/sched.h>
 20#include <linux/kernel.h>
 21#include <linux/errno.h>
 22#include <linux/string.h>
 23#include <linux/types.h>
 24#include <linux/ptrace.h>
 25#include <linux/mman.h>
 26#include <linux/mm.h>
 27#include <linux/swap.h>
 28#include <linux/smp.h>
 29#include <linux/bootmem.h>
 30#include <linux/init.h>
 31#include <linux/delay.h>
 32#include <linux/blkdev.h>	/* for initrd_* */
 33#include <linux/pagemap.h>
 34#include <linux/memblock.h>
 35
 36#include <asm/segment.h>
 37#include <asm/pgalloc.h>
 38#include <asm/pgtable.h>
 39#include <asm/dma.h>
 40#include <asm/io.h>
 41#include <asm/tlb.h>
 42#include <asm/mmu_context.h>
 43#include <asm/kmap_types.h>
 44#include <asm/fixmap.h>
 45#include <asm/tlbflush.h>
 46
 47int mem_init_done;
 48
 49DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 50
 51static void __init zone_sizes_init(void)
 52{
 53	unsigned long zones_size[MAX_NR_ZONES];
 54
 55	/* Clear the zone sizes */
 56	memset(zones_size, 0, sizeof(zones_size));
 57
 58	/*
 59	 * We use only ZONE_NORMAL
 60	 */
 61	zones_size[ZONE_NORMAL] = max_low_pfn;
 62
 63	free_area_init(zones_size);
 64}
 65
 66extern const char _s_kernel_ro[], _e_kernel_ro[];
 67
 68/*
 69 * Map all physical memory into kernel's address space.
 70 *
 71 * This is explicitly coded for two-level page tables, so if you need
 72 * something else then this needs to change.
 73 */
 74static void __init map_ram(void)
 75{
 76	unsigned long v, p, e;
 77	pgprot_t prot;
 78	pgd_t *pge;
 79	pud_t *pue;
 80	pmd_t *pme;
 81	pte_t *pte;
 82	/* These mark extents of read-only kernel pages...
 83	 * ...from vmlinux.lds.S
 84	 */
 85	struct memblock_region *region;
 86
 87	v = PAGE_OFFSET;
 88
 89	for_each_memblock(memory, region) {
 90		p = (u32) region->base & PAGE_MASK;
 91		e = p + (u32) region->size;
 92
 93		v = (u32) __va(p);
 94		pge = pgd_offset_k(v);
 95
 96		while (p < e) {
 97			int j;
 98			pue = pud_offset(pge, v);
 99			pme = pmd_offset(pue, v);
100
101			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
102				panic("%s: OR1K kernel hardcoded for "
103				      "two-level page tables",
104				     __func__);
105			}
106
107			/* Alloc one page for holding PTE's... */
108	pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
109			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));
110
111			/* Fill the newly allocated page with PTE'S */
112			for (j = 0; p < e && j < PTRS_PER_PGD;
113			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
114				if (v >= (u32) _e_kernel_ro ||
115				    v < (u32) _s_kernel_ro)
116					prot = PAGE_KERNEL;
117				else
118					prot = PAGE_KERNEL_RO;
119
120				set_pte(pte, mk_pte_phys(p, prot));
121			}
122
123			pge++;
124		}
125
126		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
127		       region->base, region->base + region->size);
128	}
129}
130
131void __init paging_init(void)
132{
133	extern void tlb_init(void);
134
135	unsigned long end;
136	int i;
137
138	printk(KERN_INFO "Setting up paging and PTEs.\n");
139
140	/* clear out the init_mm.pgd that will contain the kernel's mappings */
141
142	for (i = 0; i < PTRS_PER_PGD; i++)
143		swapper_pg_dir[i] = __pgd(0);
144
145	/* make sure the current pgd table points to something sane
146	 * (even if it is most probably not used until the next
147	 *  switch_mm)
148	 */
149	current_pgd = init_mm.pgd;
150
151	end = (unsigned long)__va(max_low_pfn * PAGE_SIZE);
152
153	map_ram();
154
155	zone_sizes_init();
156
157	/* self modifying code ;) */
158	/* Since the old TLB miss handler has been running up until now,
159	 * the kernel pages are still all RW, so we can still modify the
160	 * text directly... after this change and a TLB flush, the kernel
161	 * pages will become RO.
162	 */
163	{
164		extern unsigned long dtlb_miss_handler;
165		extern unsigned long itlb_miss_handler;
166
167		unsigned long *dtlb_vector = __va(0x900);
168		unsigned long *itlb_vector = __va(0xa00);
169
170		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
171		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
172				(unsigned long)dtlb_vector) >> 2;
173
174		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
175		*itlb_vector = ((unsigned long)&itlb_miss_handler -
176				(unsigned long)itlb_vector) >> 2;
177	}
178
179	/* Invalidate instruction caches after code modification */
180	mtspr(SPR_ICBIR, 0x900);
181	mtspr(SPR_ICBIR, 0xa00);
182
 183	/* New TLB miss handlers and kernel page tables are now in place.
184	 * Make sure that page flags get updated for all pages in TLB by
185	 * flushing the TLB and forcing all TLB entries to be recreated
186	 * from their page table flags.
187	 */
188	flush_tlb_all();
189}
190
191/* References to section boundaries */
192
193extern char _stext, _etext, _edata, __bss_start, _end;
194extern char __init_begin, __init_end;
195
196static int __init free_pages_init(void)
197{
198	int reservedpages, pfn;
199
200	/* this will put all low memory onto the freelists */
201	totalram_pages = free_all_bootmem();
202
203	reservedpages = 0;
204	for (pfn = 0; pfn < max_low_pfn; pfn++) {
205		/*
206		 * Only count reserved RAM pages
207		 */
208		if (PageReserved(mem_map + pfn))
209			reservedpages++;
210	}
211
212	return reservedpages;
213}
214
215static void __init set_max_mapnr_init(void)
216{
217	max_mapnr = num_physpages = max_low_pfn;
218}
219
220void __init mem_init(void)
221{
222	int codesize, reservedpages, datasize, initsize;
223
224	BUG_ON(!mem_map);
225
226	set_max_mapnr_init();
227
228	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
229
230	/* clear the zero-page */
231	memset((void *)empty_zero_page, 0, PAGE_SIZE);
232
233	reservedpages = free_pages_init();
234
235	codesize = (unsigned long)&_etext - (unsigned long)&_stext;
236	datasize = (unsigned long)&_edata - (unsigned long)&_etext;
237	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
238
239	printk(KERN_INFO
240	       "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
241	       (unsigned long)nr_free_pages() << (PAGE_SHIFT - 10),
242	       max_mapnr << (PAGE_SHIFT - 10), codesize >> 10,
243	       reservedpages << (PAGE_SHIFT - 10), datasize >> 10,
244	       initsize >> 10, (unsigned long)(0 << (PAGE_SHIFT - 10))
245	    );
246
247	printk("mem_init_done ...........................................\n");
248	mem_init_done = 1;
249	return;
250}
251
252#ifdef CONFIG_BLK_DEV_INITRD
253void free_initrd_mem(unsigned long start, unsigned long end)
254{
255	printk(KERN_INFO "Freeing initrd memory: %ldk freed\n",
256	       (end - start) >> 10);
257
258	for (; start < end; start += PAGE_SIZE) {
259		ClearPageReserved(virt_to_page(start));
260		init_page_count(virt_to_page(start));
261		free_page(start);
262		totalram_pages++;
263	}
264}
265#endif
266
267void free_initmem(void)
268{
269	unsigned long addr;
270
271	addr = (unsigned long)(&__init_begin);
272	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
273		ClearPageReserved(virt_to_page(addr));
274		init_page_count(virt_to_page(addr));
275		free_page(addr);
276		totalram_pages++;
277	}
278	printk(KERN_INFO "Freeing unused kernel memory: %luk freed\n",
279	       ((unsigned long)&__init_end -
280		(unsigned long)&__init_begin) >> 10);
281}
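The memory report in the v3.5.6 mem_init() converts page counts to kibibytes by shifting left by (PAGE_SHIFT - 10), since a page of 2^PAGE_SHIFT bytes is 2^(PAGE_SHIFT-10) KiB. A small standalone worked sketch of that conversion, with the page size and the free-page count assumed purely for illustration:

#include <stdio.h>

#define EX_PAGE_SHIFT 13   /* assumed 8 KiB pages: each page is 2^(13-10) = 8 KiB */

int main(void)
{
	unsigned long free_pages = 15000;   /* hypothetical nr_free_pages() result */
	unsigned long free_kib   = free_pages << (EX_PAGE_SHIFT - 10);

	/* 15000 pages * 8 KiB per page = 120000 KiB */
	printf("%lu pages -> %luk available\n", free_pages, free_kib);
	return 0;
}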