/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2011 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 * Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/sizes.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
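/*
 * Walk the kernel page tables in init_mm for @addr, allocating any
 * missing intermediate (pud/pmd) levels on the way down, and return a
 * pointer to the PTE slot, or NULL if the top-level entry is absent or
 * an allocation fails.  The callers below use this to install and tear
 * down single kernel mappings such as fixmap entries.
 */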
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	pud = pud_alloc(NULL, pgd, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

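/*
 * Fixmaps are compile-time-fixed virtual addresses near the top of the
 * kernel address space; __fix_to_virt() turns a fixmap index into its
 * virtual address.  A usage sketch (FIX_EXAMPLE is a hypothetical
 * index, not one defined by this port):
 *
 *	__set_fixmap(FIX_EXAMPLE, phys, PAGE_KERNEL);
 *	...
 *	__clear_fixmap(FIX_EXAMPLE, PAGE_KERNEL);
 *
 * A prot value with _PAGE_WIRED set additionally pins the mapping into
 * a wired TLB entry via tlb_wire_entry().
 */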
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

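/*
 * The two helpers below populate missing pmd/pte levels at early boot.
 * alloc_bootmem_pages() hands back page-aligned, zeroed memory from
 * the boot-time allocator, which is all that is available this early;
 * the slab allocator is not yet up.
 */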
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = alloc_bootmem_pages(PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = alloc_bootmem_pages(PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

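/*
 * Pre-populate the page-table levels (but not the PTEs themselves) for
 * the kernel virtual range [start, end), so that later __set_fixmap()
 * calls can simply fill in PTE slots without having to allocate
 * intermediate tables.
 */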
void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				     SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

	NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init bootmem_init_one_node(unsigned int nid)
{
	unsigned long total_pages, paddr;
	unsigned long end_pfn;
	struct pglist_data *p;

	p = NODE_DATA(nid);

	/* Nothing to do.. */
	if (!p->node_spanned_pages)
		return;

	end_pfn = p->node_start_pfn + p->node_spanned_pages;

	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);

	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
	if (!paddr)
		panic("Can't allocate bootmap for nid[%d]\n", nid);

	init_bootmem_node(p, paddr >> PAGE_SHIFT, p->node_start_pfn, end_pfn);

	free_bootmem_with_active_regions(nid, end_pfn);

	/*
	 * XXX Handle initial reservations for the system memory node
	 * only for the moment, we'll refactor this later for handling
	 * reservations in other nodes.
	 */
	if (nid == 0) {
		struct memblock_region *reg;

		/* Reserve the sections we're already using. */
		for_each_memblock(reserved, reg) {
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		}
	}

	sparse_memory_present_with_active_regions(nid);
}

static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	for_each_online_node(i)
		bootmem_init_one_node(i);

	sparse_init();
}

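/*
 * Reserve, before the page allocator ever sees them, the pages covered
 * by the kernel image (from the zero-page offset up to _end, rounded
 * up to a page boundary) and, when CONFIG_ZERO_PAGE_OFFSET is
 * non-zero, the pages the platform keeps below that offset.
 */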
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

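/*
 * paging_init() runs once at boot: the machine vector adds memory to
 * memblock, early reservations are made, per-node bootmem is set up,
 * swapper_pg_dir is cleared and loaded into the MMU's TTB register,
 * and the fixmap range gets its page tables before the zone sizes are
 * handed to free_area_init_nodes().
 */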
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;
	int nid;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long low, start_pfn;

		start_pfn = pgdat->bdata->node_min_pfn;
		low = pgdat->bdata->node_low_pfn;

		if (max_zone_pfns[ZONE_NORMAL] < low)
			max_zone_pfns[ZONE_NORMAL] = low;

		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
		       nid, start_pfn, low);
	}

	free_area_init_nodes(max_zone_pfns);
}

/*
 * Early initialization for any I/O MMUs we might have.
 */
static void __init iommu_init(void)
{
	no_iommu_init();
}

unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	int codesize, datasize, initsize;
	int nid;

	iommu_init();

	num_physpages = 0;
	high_memory = NULL;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		unsigned long node_pages = 0;
		void *node_high_memory;

		num_physpages += pgdat->node_present_pages;

		if (pgdat->node_spanned_pages)
			node_pages = free_all_bootmem_node(pgdat);

		totalram_pages += node_pages;

		node_high_memory = (void *)__va((pgdat->node_start_pfn +
						 pgdat->node_spanned_pages) <<
						 PAGE_SHIFT);
		if (node_high_memory > high_memory)
			high_memory = node_high_memory;
	}

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	codesize = (unsigned long) &_etext - (unsigned long) &_text;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
	       "%dk data, %dk init)\n",
	       nr_free_pages() << (PAGE_SHIFT-10),
	       num_physpages << (PAGE_SHIFT-10),
	       codesize >> 10,
	       datasize >> 10,
	       initsize >> 10);

	printk(KERN_INFO "virtual kernel memory layout:\n"
	       "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
	       "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
	       "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
	       "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
	       "            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
	       "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
	       "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
	       FIXADDR_START, FIXADDR_TOP,
	       (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
	       PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
	       (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

	       (unsigned long)VMALLOC_START, VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (unsigned long)memory_start, (unsigned long)high_memory,
	       ((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
	       uncached_start, uncached_end, uncached_size >> 20,
#endif

	       (unsigned long)&__init_begin, (unsigned long)&__init_end,
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10,

	       (unsigned long)&_etext, (unsigned long)&_edata,
	       ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

	       (unsigned long)&_text, (unsigned long)&_etext,
	       ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

void free_initmem(void)
{
	unsigned long addr;

	addr = (unsigned long)(&__init_begin);
	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	printk("Freeing unused kernel memory: %ldk freed\n",
	       ((unsigned long)&__init_end -
		(unsigned long)&__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	unsigned long p;

	for (p = start; p < end; p += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(p));
		init_page_count(virt_to_page(p));
		free_page(p);
		totalram_pages++;
	}
	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size)
{
	pg_data_t *pgdat;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	pgdat = NODE_DATA(nid);

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, pgdat->node_zones + ZONE_NORMAL,
			  start_pfn, nr_pages);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#endif /* CONFIG_MEMORY_HOTPLUG */
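
/*
 * What follows is a later revision of the same file, after the kernel
 * replaced the bootmem allocator with memblock and added the p4d
 * page-table level.
 */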
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2011 Paul Mundt
 *
 * Based on linux/arch/i386/mm/init.c:
 * Copyright (C) 1995 Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d)) {
		p4d_ERROR(*p4d);
		return NULL;
	}

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

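/*
 * Early page-table levels now come from memblock_alloc(), which
 * returns zeroed memory or NULL on failure; unlike the old bootmem
 * interface the caller must check for failure itself, hence the
 * explicit panics below.
 */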
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

void __init page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

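/*
 * memblock_alloc_try_nid() returns zeroed memory, attempting a
 * node-local allocation for the pglist_data first and falling back to
 * any node if that fails, so the explicit retry of the old bootmem
 * version is no longer needed.
 */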
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	sparse_init();
}

static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

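/*
 * Zone setup is now a single free_area_init() call, which reads the
 * memory map straight from memblock; the per-node bootmem bitmaps and
 * the free_area_init_nodes() loop of the earlier revision are gone.
 */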
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/*
	 * We don't need to map the kernel through the TLB, as it is
	 * permanently mapped using P1. So clear the entire pgd.
	 */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/*
	 * Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value.
	 */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * PTEs will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

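/*
 * memblock_free_all() releases every free memblock region to the buddy
 * (page) allocator and accounts the pages in totalram_pages, replacing
 * the old per-node free_all_bootmem_node() loop; mem_init_print_info()
 * takes over the hand-rolled "Memory: ..." banner.
 */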
unsigned int mem_init_done = 0;

void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE + LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

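/*
 * Memory hotplug now takes a struct mhp_params; since this port only
 * creates ZONE_NORMAL mappings, anything other than PAGE_KERNEL
 * protections is refused before calling the generic __add_pages().
 */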
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */