v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
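/*
 * Walk the kernel page tables for 'addr', allocating any missing
 * intermediate levels on the way down, and return a pointer to the
 * PTE. Returns NULL if the top-level entry is absent or an
 * intermediate table cannot be allocated.
 */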
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d)) {
		p4d_ERROR(*p4d);
		return NULL;
	}

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

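/*
 * Install a kernel mapping from 'addr' to physical address 'phys'
 * with protection 'prot', flushing the stale TLB entry. Mappings
 * marked _PAGE_WIRED are additionally pinned into the TLB.
 */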
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

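/*
 * Fixmap plumbing: translate a compile-time fixmap index to its
 * virtual address and install or tear down the mapping. An index
 * past __end_of_fixed_addresses is a BUG.
 */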
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

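/*
 * Boot-time allocators for intermediate page-table pages: populate
 * an empty PUD with a fresh PMD page (and an empty PMD with a fresh
 * PTE page) from memblock, panicking if early memory is exhausted.
 */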
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

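/*
 * Pre-build the page-table pages covering [start, end) under
 * pgd_base so that fixmap entries can later be installed without any
 * allocation; the PTEs themselves are filled in by __set_fixmap().
 */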
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

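/*
 * Allocate and initialize the pglist_data for 'nid'. On NUMA the
 * structure is carved out of memblock with a preference for memory
 * on that node; in the non-NUMA case the static node 0 data just has
 * its PFN span filled in.
 */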
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NUMA
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

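/*
 * Register every memblock memory range as an active region, bring
 * node 0 online, let the platform adjust its memory setup, and
 * initialize the sparse memory model.
 */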
static void __init do_init_bootmem(void)
{
	unsigned long start_pfn, end_pfn;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
		__add_active_range(0, start_pfn, end_pfn);

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	sparse_init();
}

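/*
 * Carve out the kernel image, the pages below the zero page offset,
 * the initrd and the crashkernel region before the first real
 * allocations happen.
 */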
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

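/*
 * Set up the kernel's view of memory: finalize the memblock layout,
 * clear swapper_pg_dir and point the MMU's TTB at it, pre-build the
 * fixmap page tables, and hand the zone PFN limits to
 * free_area_init().
 */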
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

unsigned int mem_init_done = 0;

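/*
 * Hand boot memory over to the page allocator, bring up the CPU
 * caches and the shared zero page, and report the virtual memory
 * layout.
 */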
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
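/*
 * Memory hotplug: only PAGE_KERNEL protections are supported, and
 * everything lands in ZONE_NORMAL, the sole zone on sh.
 */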
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
v4.6
 
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

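/*
 * Allocate and initialize the pglist_data for 'nid'. With multiple
 * nodes the structure is carved out of memblock below the node's end
 * address, falling back to anywhere in DRAM, and wired up to its
 * bootmem descriptor.
 */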
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

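/*
 * Bring up the classic bootmem allocator for one node: allocate its
 * bootmap, release the node's active ranges into it, replay the
 * memblock reservations (system node only for now), and mark the
 * node's sections present for sparsemem.
 */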
static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = pgdat_end_pfn(p);

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	pg_data_t *pgdat;

	iommu_init();

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	free_all_bootmem();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

void free_initmem(void)
{
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones +
			zone_for_memory(nid, start, size, ZONE_NORMAL,
			for_device),
			start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

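/*
 * Hot-remove path: look up the zone the range was onlined into and
 * pull its pages back out.
 */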
#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (unlikely(ret))
		pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
			ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */