// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>

#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/sparsemem.h>
#include <asm/svm.h>

#include <mm/mmu_decl.h>

unsigned long long memory_limit;
bool init_mem_is_free;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

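/*
 * phys_mem_access_prot() chooses the page protection used when physical
 * memory is mapped into user space (e.g. via a /dev/mem mmap).  A sketch
 * of the effect, assuming no machine-specific hook is installed:
 *
 *	prot = phys_mem_access_prot(file, pfn, size, vma->vm_page_prot);
 *	// RAM pfn:     prot comes back unchanged (cacheable)
 *	// non-RAM pfn: prot is made non-cacheable via pgprot_noncached()
 */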
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

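/*
 * These __weak stubs make section mapping fail with -ENODEV on platforms
 * whose MMU code does not (or cannot) resize the linear mapping at
 * runtime; MMU-specific code is expected to override them with real
 * implementations.  A hypothetical override would look like:
 *
 *	int create_section_mapping(unsigned long start, unsigned long end,
 *				   int nid, pgprot_t prot)
 *	{
 *		return map_range_bolted(start, end, nid, prot); // assumed helper
 *	}
 */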
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}

void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}

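/*
 * Hotplug entry points.  The generic add_memory() path in
 * mm/memory_hotplug.c ends up here; a rough sketch of the sequence:
 *
 *	struct mhp_params params = { .pgprot = PAGE_KERNEL };
 *	rc = arch_add_memory(nid, start, size, &params);
 *	// 1. map the new range into the kernel linear mapping
 *	// 2. create struct pages for it via __add_pages()
 *	// 3. on failure, tear the linear mapping back down
 */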
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = __add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			      struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif

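/*
 * Without NUMA, all memory is treated as one node 0.  For example, a
 * flat 4GB machine (MEMORY_START == 0, 4K pages) ends up with
 * min_low_pfn = 0 and max_pfn = 4GB >> PAGE_SHIFT = 0x100000.
 */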
#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	sparse_init();
}

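/*
 * Hibernation must not try to save page frames that have no backing RAM.
 * The loop below registers every hole between memblock regions as a
 * "nosave" range.  Worked example: with RAM at pfns [0, 0x100) and
 * [0x200, 0x300), the gap [0x100, 0x200) is registered as nosave.
 */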
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code. 32-bit-only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

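/*
 * Worked example of the zone sizing done below, assuming 4K pages
 * (PAGE_SHIFT = 12): with zone_dma_bits = 31, ZONE_DMA is capped at
 * pfn 1UL << (31 - 12) = 0x80000, i.e. the first 2GB of RAM; with
 * zone_dma_bits = 30 (the PPC32 case) it is the first 1GB.
 */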
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0));	/* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}

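/*
 * mem_init() runs once at boot, after memblock is final.  Note the
 * bottom-up trick used for SWIOTLB: on platforms whose DMA window sits
 * low (e.g. 85xx, below 4G), allocating the bounce buffer bottom-up
 * keeps it inside the DMA-able range without an explicit address limit.
 */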
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	if (is_secure_guest())
		svm_swiotlb_init();
	else
		swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info("  * 0x%08lx..0x%08lx  : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}

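/*
 * Called once boot is complete: __init text/data is poisoned (see
 * POISON_FREE_INITMEM in linux/poison.h) and handed back to the page
 * allocator, so any late reference into initmem shows up quickly as a
 * poisoned-memory bug rather than silent corruption.
 */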
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

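/*
 * The resources registered below appear in /proc/iomem roughly as
 * follows (addresses are machine-specific, shown only for illustration):
 *
 *	00000000-7fffffff : System RAM
 *	80000000-bfffffff : System RAM
 */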
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

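/*
 * With CONFIG_STRICT_DEVMEM, /dev/mem access is filtered page by page.
 * A sketch of the policy implemented by devmem_is_allowed() below:
 *
 *	RTAS user buffer            -> allowed (user space talks to RTAS)
 *	exclusive iomem region      -> denied
 *	non-RAM (MMIO, firmware)    -> allowed
 *	ordinary kernel RAM         -> denied
 */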
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);