v5.4
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <linux/sizes.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, restrictions);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	__remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
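
Aside (editor's sketch, not part of the file): callers normally reach the __set_fixmap()/__clear_fixmap() pair above through the generic wrappers in <asm-generic/fixmap.h>, which arch/sh pulls in via <asm/fixmap.h>. A minimal usage sketch, assuming a hypothetical fixmap slot FIX_EXAMPLE in enum fixed_addresses:

/*
 * Editor's sketch only.  set_fixmap() expands to
 * __set_fixmap(idx, phys, FIXMAP_PAGE_NORMAL) in <asm-generic/fixmap.h>;
 * FIX_EXAMPLE is a hypothetical slot, not one defined by arch/sh.
 */
#include <asm/fixmap.h>

static void __init example_fixmap_use(unsigned long phys)
{
	void *vaddr;

	set_fixmap(FIX_EXAMPLE, phys);			/* map one page */
	vaddr = (void *)fix_to_virt(FIX_EXAMPLE);	/* fixed VA for the slot */

	/* ... access the page through vaddr ... */

	__clear_fixmap(FIX_EXAMPLE, __pgprot(0));	/* tear it down again */
}
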
v6.2
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d)) {
		p4d_ERROR(*p4d);
		return NULL;
	}

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NUMA
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init do_init_bootmem(void)
{
	unsigned long start_pfn, end_pfn;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
		__add_active_range(0, start_pfn, end_pfn);

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
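
Aside (editor's note): the visible difference between the two versions of __get_pte_phys() is the p4d level that the five-level page-table rework inserted between pgd and pud, plus pgd_index()/pud_index()/pmd_index() replacing the old __pgd_offset() family in page_table_range_init(). A read-only walk in the v6.2 style, using only the generic offset helpers (lookup_kernel_pte is a hypothetical name, not something from this file):

/*
 * Editor's sketch, not part of init.c: a read-only kernel page-table
 * walk in the five-level style used by the v6.2 __get_pte_phys().
 * On sh the p4d and pud levels are folded, so the extra steps
 * compile away.
 */
#include <linux/pgtable.h>

static pte_t *lookup_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);	/* the level added vs. the v5.4 walk */
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}

With folded upper levels, p4d_offset() simply returns its pgd argument recast (see include/asm-generic/pgtable-nop4d.h), so both versions of the walk resolve to the same pte in practice.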