arch/sh/mm/init.c (v5.4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <linux/sizes.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
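/*
 * The helpers below walk the kernel page tables (swapper_pg_dir) down
 * to the PTE for a fixmap address, allocating intermediate pud/pmd
 * pages on demand. The NULL mm passed to pud_alloc()/pmd_alloc() is
 * deliberate: these are kernel-only mappings, created before any user
 * mm exists.
 */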
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

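/*
 * Arch side of the generic fixmap API: compile-time-fixed virtual
 * slots that can be (re)pointed at arbitrary physical pages early in
 * boot, used on sh for things like kmap_coherent() and ioremap_fixed().
 */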
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

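/*
 * Early page-table constructors: carve one page out of memblock for a
 * new pmd/pte table and hook it into the level above. memblock_alloc()
 * returns zeroed memory, so fresh tables start with no valid entries.
 */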
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

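/*
 * Pre-populate the pgd/pud/pmd levels covering [start, end) so later
 * __set_fixmap() calls only ever need to write a PTE. i/j/k track the
 * index within each level; with folded page-table levels the pud (and
 * possibly pmd) loop collapses onto the pgd, hence the pud_t cast.
 */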
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

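/*
 * Set up the pglist_data for one node. Only NUMA kernels allocate it
 * here; memblock_alloc_try_nid() prefers node-local memory and falls
 * back to any accessible range, and the caller panics if that also
 * fails.
 */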
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

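/*
 * Register each memblock region as an active PFN range, bring node 0
 * online for the non-NUMA case, let the platform hook in, and build
 * the sparsemem section map.
 */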
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

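/*
 * Reserve everything that must survive early allocation: the kernel
 * image from the zero-page offset up to _end (rounded up to a page),
 * any pages below CONFIG_ZERO_PAGE_OFFSET, plus the initrd and any
 * configured crash kernel region.
 */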
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

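/*
 * Top-level early memory setup: register RAM, apply reservations and
 * the optional memory limit, compute the PFN bounds, and pre-build the
 * fixmap page tables. The kernel itself runs from the P1 segment, so
 * swapper_pg_dir can start out completely empty.
 */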
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

unsigned int mem_init_done = 0;

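/*
 * Hand all remaining free memory to the page allocator, initialize the
 * CPU caches and the shared zero page, and print the virtual memory
 * layout. mem_init_done lets later code tell whether the allocator is
 * available yet.
 */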
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
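/*
 * Memory hotplug support. sh only has ZONE_NORMAL, so added memory is
 * handed straight to __add_pages(), and removal tears the range back
 * out of whatever zone the first page landed in.
 */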
int arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	__remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
arch/sh/mm/init.c (v4.17)
 
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

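/*
 * Unlike the memblock-based v5.4 code above, v4.17 still takes early
 * page-table pages from the bootmem allocator. alloc_bootmem_pages()
 * panics internally on failure, which is why there is no error check
 * after the allocations below.
 */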
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

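/*
 * The v4.17 NUMA path allocates the pglist_data by physical address:
 * first below the node's own end address, then retrying against all of
 * DRAM, and zeroes it by hand. Compare the single zeroing, node-aware
 * memblock_alloc_try_nid() call in the v5.4 listing.
 */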
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

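/*
 * Relative to the v5.4 version, mem_init() here additionally runs the
 * no-op iommu_init() and releases boot memory via free_all_bootmem()
 * rather than memblock_free_all(); the rest is identical.
 */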
void __init mem_init(void)
{
	pg_data_t *pgdat;

	iommu_init();

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	free_all_bootmem();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

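/*
 * Return __init text/data and the initrd image to the page allocator
 * once boot completes. These arch overrides are absent from the v5.4
 * listing above, which relies on the generic implementations instead.
 */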
void free_initmem(void)
{
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
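/*
 * Older hotplug interface: __add_pages() takes the altmap and a
 * want_memblock flag directly, and removal is an int-returning
 * arch_remove_memory() guarded by CONFIG_MEMORY_HOTREMOVE, unlike the
 * unconditional void version in the v5.4 listing.
 */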
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	if (unlikely(ret))
		pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
			ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */