arch/powerpc/mm/mem.c (Linux v5.4)

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include <mm/mmu_decl.h>

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;
bool init_mem_is_free;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

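/*
 * phys_mem_access_prot() below decides the protection used when user
 * space mmap()s physical memory (e.g. through /dev/mem): the platform
 * gets first say via ppc_md, and anything that is not RAM is mapped
 * uncached.  The #if 0 block is an illustrative sketch only, not part
 * of this file; example_mmap() is a hypothetical driver mmap method.
 */
#if 0
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = vma->vm_pgoff;
	unsigned long size = vma->vm_end - vma->vm_start;

	/* Let the platform pick cacheability for this physical range */
	vma->vm_page_prot = phys_mem_access_prot(file, pfn, size,
						 vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}
#endif
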
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

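/*
 * Note: the __weak create_section_mapping()/remove_section_mapping()
 * stubs above just fail with -ENODEV; MMU-specific code (hash or radix
 * on book3s64, for example) overrides them.  arch_add_memory() below
 * wires a hot-added range into the linear mapping before handing its
 * pages to the core VM via __add_pages().
 */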
int __ref arch_add_memory(int nid, u64 start, u64 size,
			struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, restrictions);
}

void __ref arch_remove_memory(int nid, u64 start, u64 size,
			     struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
	int ret;

	__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);
	WARN_ON_ONCE(ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	if (resize_hpt_for_hotplug(memblock_phys_mem_size()) == -ENOSPC)
		pr_warn("Hash collision while resizing HPT\n");
}
#endif

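/*
 * Flat-memory (single node) setup.  With CONFIG_NEED_MULTIPLE_NODES
 * set, the NUMA code provides mem_topology_setup() and initmem_init()
 * instead.
 */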
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif
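
/*
 * Example for mark_nonram_nosave() above: with memblock regions
 * [0, 1G) and [2G, 3G), the pfn range covering the 1G..2G hole is
 * registered as nosave, so hibernation skips pages that don't exist.
 */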

/*
 * Zones usage:
 *
 * We set up ZONE_DMA to be 31 bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31 bits unconditionally, we can exploit ARCH_ZONE_DMA_BITS to
 * inform the generic DMA mapping code.  32-bit only devices (if not handled
 * by an IOMMU anyway) will take a first dip into ZONE_NORMAL and get
 * otherwise served by ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];

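/*
 * With ARCH_ZONE_DMA_BITS == 31 and 4K pages, the ZONE_DMA ceiling
 * computed in paging_init() below is 1UL << (31 - 12) = 0x80000 pfns,
 * i.e. the first 2GB of RAM, clamped to max_low_pfn on smaller systems.
 */
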
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, __pgprot(0));	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA]	= min(max_low_pfn,
				      1UL << (ARCH_ZONE_DMA_BITS - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	memblock_free_all();
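
	/*
	 * Lowmem was released to the buddy allocator by
	 * memblock_free_all() above; highmem pages are not covered by
	 * it, so the block below frees every non-reserved highmem pfn
	 * by hand.
	 */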

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	init_mem_is_free = true;
	free_initmem_default(POISON_FREE_INITMEM);
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */

/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);

arch/powerpc/mm/mem.c (Linux v3.5.6)

/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

int init_bootmem_done;
int mem_init_done;
phys_addr_t memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
pgprot_t kmap_prot;

EXPORT_SYMBOL(kmap_prot);
EXPORT_SYMBOL(kmap_pte);

static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#endif

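/*
 * Note: the 32-bit side of page_is_ram() below assumes physical RAM is
 * contiguous from pfn 0, so a simple max_pfn check is enough; 64-bit
 * kernels must consult the memblock regions instead.
 */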
int page_is_ram(unsigned long pfn)
{
#ifndef CONFIG_PPC64	/* XXX for now */
	return pfn < max_pfn;
#else
	unsigned long paddr = (pfn << PAGE_SHIFT);
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		if (paddr >= reg->base && paddr < (reg->base + reg->size))
			return 1;
	return 0;
#endif
}

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

int arch_add_memory(int nid, u64 start, u64 size)
{
	struct pglist_data *pgdata;
	struct zone *zone;
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	pgdata = NODE_DATA(nid);

	start = (unsigned long)__va(start);
	if (create_section_mapping(start, start + size))
		return -EINVAL;

	/* this should work for most non-highmem platforms */
	zone = pgdata->node_zones;

	return __add_pages(nid, zone, start_pfn, nr_pages);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback for contiguous regions.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
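
/*
 * Illustrative sketch only, not part of this file: a hypothetical
 * caller using walk_system_ram_range() to count how many pages of a
 * pfn range are actually backed by RAM.  count_pages() matches the
 * int (*func)(unsigned long, unsigned long, void *) callback type.
 */
#if 0
static int count_pages(unsigned long start_pfn, unsigned long nr_pages,
		       void *arg)
{
	/* Accumulate the size of each contiguous RAM chunk */
	*(unsigned long *)arg += nr_pages;
	return 0;
}

static unsigned long count_ram_pages(unsigned long start_pfn,
				     unsigned long nr_pages)
{
	unsigned long total = 0;

	walk_system_ram_range(start_pfn, nr_pages, &total, count_pages);
	return total;
}
#endif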

/*
 * Initialize the bootmem system and give it all the memory we
 * have available.  If we are using highmem, we only put the
 * lowmem into the bootmem system.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init do_init_bootmem(void)
{
	unsigned long start, bootmap_pages;
	unsigned long total_pages;
	struct memblock_region *reg;
	int boot_mapsize;

	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	total_pages = total_lowmem >> PAGE_SHIFT;
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/*
	 * Find an area to use for the bootmem bitmap.  Calculate the size of
	 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
	 * Add 1 additional page in case the address isn't page-aligned.
	 */
	bootmap_pages = bootmem_bootmap_pages(total_pages);

	start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);

	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
	boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);

	/* Add active regions with valid PFNs */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
	}

	/* Add all physical memory to the bootmem map, mark each area
	 * present.
	 */
#ifdef CONFIG_HIGHMEM
	free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg) {
		unsigned long top = reg->base + reg->size - 1;
		if (top < lowmem_end_addr)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
		else if (reg->base < lowmem_end_addr) {
			unsigned long trunc_size = lowmem_end_addr - reg->base;
			reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
		}
	}
#else
	free_bootmem_with_active_regions(0, max_pfn);

	/* reserve the sections we're already using */
	for_each_memblock(reserved, reg)
		reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
#endif
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);

	init_bootmem_done = 1;
}

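/*
 * Example for the bitmap sizing in do_init_bootmem() above: 512MB of
 * RAM with 4K pages is 131072 pages; at one bit per page the bitmap
 * needs 16KB, so bootmem_bootmap_pages() returns 4 pages (plus the
 * extra page noted for unaligned addresses).
 */
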
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
	max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
#else
	max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
#endif
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}
#endif /* ! CONFIG_NEED_MULTIPLE_NODES */

void __init mem_init(void)
{
#ifdef CONFIG_NEED_MULTIPLE_NODES
	int nid;
#endif
	pg_data_t *pgdat;
	unsigned long i;
	struct page *page;
	unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;

#ifdef CONFIG_SWIOTLB
	if (ppc_swiotlb_enable)
		swiotlb_init(1);
#endif

	num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages != 0) {
			printk("freeing bootmem node %d\n", nid);
			totalram_pages +=
				free_all_bootmem_node(NODE_DATA(nid));
		}
	}
#else
	max_mapnr = max_pfn;
	totalram_pages += free_all_bootmem();
#endif
	for_each_online_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			if (!pfn_valid(pgdat->node_start_pfn + i))
				continue;
			page = pgdat_page_nr(pgdat, i);
			if (PageReserved(page))
				reservedpages++;
		}
	}

	codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
	datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
	initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
	bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (memblock_is_reserved(paddr))
				continue;
			ClearPageReserved(page);
			init_page_count(page);
			__free_page(page);
			totalhigh_pages++;
			reservedpages--;
		}
		totalram_pages += totalhigh_pages;
		printk(KERN_DEBUG "High memory: %luk\n",
		       totalhigh_pages << (PAGE_SHIFT-10));
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
	       "%luk reserved, %luk data, %luk bss, %luk init)\n",
		nr_free_pages() << (PAGE_SHIFT-10),
		num_physpages << (PAGE_SHIFT-10),
		codesize >> 10,
		reservedpages << (PAGE_SHIFT-10),
		datasize >> 10,
		bsssize >> 10,
		initsize >> 10);

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */

	mem_init_done = 1;
}

void free_initmem(void)
{
	unsigned long addr;

	ppc_md.progress = ppc_printk_progress;

	addr = (unsigned long)__init_begin;
	for (; addr < (unsigned long)__init_end; addr += PAGE_SIZE) {
		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		free_page(addr);
		totalram_pages++;
	}
	pr_info("Freeing unused kernel memory: %luk freed\n",
		((unsigned long)__init_end -
		(unsigned long)__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start >= end)
		return;

	start = _ALIGN_DOWN(start, PAGE_SIZE);
	end = _ALIGN_UP(end, PAGE_SIZE);
	pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page(start);
		totalram_pages++;
	}
}
#endif

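/*
 * Note: the ClearPageReserved + init_page_count + free_page sequence
 * in the two functions above is the old hand-rolled way of giving
 * boot-time pages back to the buddy allocator; later kernels wrap the
 * same steps in free_reserved_page() and free_initmem_default(), as
 * the v5.4 version of this file shows.
 */
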
/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#ifdef CONFIG_BOOKE
	{
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	}
#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
	__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
#endif
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	unsigned long access = 0, trap;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */
	if (current->thread.regs == NULL)
		return;
	trap = TRAP(current->thread.regs);
	if (trap == 0x400)
		access |= _PAGE_EXEC;
	else if (trap != 0x300)
		return;
	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

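/*
 * Note: in update_mmu_cache() above, trap 0x400 is the PowerPC
 * instruction storage interrupt and 0x300 the data storage interrupt,
 * so an HPTE is only preloaded when the PTE update came from a real
 * instruction or data access fault.
 */
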
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (e.g. kdump).
 */
static int add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_MEM;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	if (page_is_rtas_user_buf(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */