/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/suspend.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/btext.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/sparsemem.h>
#include <asm/vdso.h>
#include <asm/fixmap.h>
#include <asm/swiotlb.h>
#include <asm/rtas.h>

#include "mmu_decl.h"

#ifndef CPU_FTR_COHERENT_ICACHE
#define CPU_FTR_COHERENT_ICACHE	0	/* XXX for now */
#define CPU_FTR_NOEXECUTE	0
#endif

unsigned long long memory_limit;

#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
EXPORT_SYMBOL(kmap_prot);
#define TOP_ZONE ZONE_HIGHMEM

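/*
 * Walk the kernel page tables to get the PTE that maps a kernel
 * virtual address; used below for the PKMAP and kmap fixmap PTEs.
 */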
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
	return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
			vaddr), vaddr), vaddr);
}
#else
#define TOP_ZONE ZONE_NORMAL
#endif

int page_is_ram(unsigned long pfn)
{
	return memblock_is_memory(__pfn_to_phys(pfn));
}

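/*
 * Decide the protection for a userspace mapping of physical memory
 * (e.g. an mmap() of /dev/mem): non-RAM is mapped non-cached unless
 * the platform supplies its own hook.
 */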
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
#endif

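/*
 * Weak default stubs: MMU-specific code (e.g. the 64-bit hash MMU)
 * provides strong definitions that override these.
 */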
int __weak create_section_mapping(unsigned long start, unsigned long end, int nid)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}

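/*
 * Memory hot add: grow the hash page table if needed, create a linear
 * mapping for the new range, then hand the pages to the generic mm code.
 */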
int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	start = (unsigned long)__va(start);
	rc = create_section_mapping(start, start + size, nid);
	if (rc) {
		pr_warn("Unable to create mapping for hot added memory 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	flush_inval_dcache_range(start, start + size);

	return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
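/* Memory hot remove: undo arch_add_memory(), tearing down in reverse order. */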
int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page;
	int ret;

	/*
	 * If we have an altmap then we need to skip over any reserved PFNs
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);
	flush_inval_dcache_range(start, start + size);
	ret = remove_section_mapping(start, start + size);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();

	resize_hpt_for_hotplug(memblock_phys_mem_size());

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

/*
 * walk_system_ram_range() needs to make sure there are no holes in a given
 * memory range.  PPC64 does not maintain the memory layout in /proc/iomem.
 * Instead it maintains it in memblock.memory structures.  Walk through the
 * memory regions, find holes and invoke the callback for each contiguous region.
 */
int
walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
		void *arg, int (*func)(unsigned long, unsigned long, void *))
{
	struct memblock_region *reg;
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long tstart, tend;
	int ret = -1;

	for_each_memblock(memory, reg) {
		tstart = max(start_pfn, memblock_region_memory_base_pfn(reg));
		tend = min(end_pfn, memblock_region_memory_end_pfn(reg));
		if (tstart >= tend)
			continue;
		ret = (*func)(tstart, tend - tstart, arg);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(walk_system_ram_range);
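
/*
 * A minimal, hypothetical usage sketch (count_ram_pages() is not part of
 * this file): the callback gets a start pfn and a page count for each
 * contiguous chunk of RAM, and returning non-zero stops the walk.
 *
 *	static int count_ram_pages(unsigned long start_pfn,
 *				   unsigned long nr_pages, void *arg)
 *	{
 *		*(unsigned long *)arg += nr_pages;
 *		return 0;
 *	}
 *
 *	unsigned long total = 0;
 *	walk_system_ram_range(0, max_pfn, &total, count_ram_pages);
 */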

#ifndef CONFIG_NEED_MULTIPLE_NODES
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}

void __init initmem_init(void)
{
	/* XXX need to clip this if using highmem? */
	sparse_memory_present_with_active_regions(0);
	sparse_init();
}

/* Mark the holes between memory regions as nosave so hibernation skips them */
static int __init mark_nonram_nosave(void)
{
	struct memblock_region *reg, *prev = NULL;

	for_each_memblock(memory, reg) {
		if (prev &&
		    memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg))
			register_nosave_region(memblock_region_memory_end_pfn(prev),
					       memblock_region_memory_base_pfn(reg));
		prev = reg;
	}
	return 0;
}
#else /* CONFIG_NEED_MULTIPLE_NODES */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif

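/* Set once paging_init() has frozen the zone limits below. */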
static bool zone_limits_final;

/*
 * The memory zones past TOP_ZONE are managed by generic mm code.
 * These should be set to zero since that's what every other
 * architecture does.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES] = {
	[0            ... TOP_ZONE        ] = ~0UL,
	[TOP_ZONE + 1 ... MAX_NR_ZONES - 1] = 0
};

/*
 * Restrict the specified zone and all more restrictive zones
 * to be below the specified pfn.  May not be called after
 * paging_init().
 */
void __init limit_zone_pfn(enum zone_type zone, unsigned long pfn_limit)
{
	int i;

	if (WARN_ON(zone_limits_final))
		return;

	for (i = zone; i >= 0; i--) {
		if (max_zone_pfns[i] > pfn_limit)
			max_zone_pfns[i] = pfn_limit;
	}
}
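
/*
 * Worked example, assuming 4 KiB pages: limit_zone_pfn(ZONE_NORMAL, 0x80000)
 * caps ZONE_NORMAL and every more restrictive zone below it at pfn 0x80000,
 * i.e. at 0x80000 << 12 = 2 GiB of physical address space.
 */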

/*
 * Find the least restrictive zone that is entirely below the
 * specified pfn limit.  Returns < 0 if no suitable zone is found.
 *
 * pfn_limit must be u64 because it can exceed 32 bits even on 32-bit
 * systems -- the DMA limit can be higher than any possible real pfn.
 */
int dma_pfn_limit_to_zone(u64 pfn_limit)
{
	int i;

	for (i = TOP_ZONE; i >= 0; i--) {
		if (max_zone_pfns[i] <= pfn_limit)
			return i;
	}

	return -EPERM;
}
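
/*
 * Illustrative only: a device limited to 32-bit DMA could pass
 * pfn_limit = (1ULL << 32) >> PAGE_SHIFT and be handed the least
 * restrictive zone whose pages it can all address, or -EPERM if
 * even the most restrictive zone reaches above that limit.
 */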

/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_PPC32
	unsigned long v = __fix_to_virt(__end_of_fixed_addresses - 1);
	unsigned long end = __fix_to_virt(FIX_HOLE);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, 0); /* XXX gross */
#endif

#ifdef CONFIG_HIGHMEM
	map_kernel_page(PKMAP_BASE, 0, 0);	/* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);

	kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	kmap_prot = PAGE_KERNEL;
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

#ifdef CONFIG_HIGHMEM
	limit_zone_pfn(ZONE_NORMAL, lowmem_end_addr >> PAGE_SHIFT);
#endif
	limit_zone_pfn(TOP_ZONE, top_of_ram >> PAGE_SHIFT);
	zone_limits_final = true;
	free_area_init_nodes(max_zone_pfns);

	mark_nonram_nosave();
}

void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	swiotlb_init(0);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
	set_max_mapnr(max_pfn);
	free_all_bootmem();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);
			if (!memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_FSL_BOOK3E) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

	mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
#ifdef CONFIG_NOT_COHERENT_CACHE
	pr_info("  * 0x%08lx..0x%08lx  : consistent mem\n",
		IOREMAP_TOP, IOREMAP_TOP + CONFIG_CONSISTENT_SIZE);
#endif /* CONFIG_NOT_COHERENT_CACHE */
	pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
		ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
}

void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean.  We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_dcache_icache_page(struct page *page)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (PageCompound(page)) {
		flush_dcache_icache_hugepage(page);
		return;
	}
#endif
#if defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC64)
	/* On 8xx there is no need to kmap since highmem is not supported */
	__flush_dcache_icache(page_address(page));
#else
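	/*
	 * Book E cannot flush by physical address, and when physical
	 * addresses are wider than pointers the page may live in highmem,
	 * so in both cases map the page and flush through the kernel
	 * virtual address instead of using the physical-address variant.
	 */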
	if (IS_ENABLED(CONFIG_BOOKE) || sizeof(phys_addr_t) > sizeof(void *)) {
		void *start = kmap_atomic(page);
		__flush_dcache_icache(start);
		kunmap_atomic(start);
	} else {
		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
	}
#endif
}
EXPORT_SYMBOL(flush_dcache_icache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */
	flush_dcache_page(pg);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	flush_dcache_page(pg);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long) kmap(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
	kunmap(page);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the pte lock held.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
#ifdef CONFIG_PPC_STD_MMU
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */
	unsigned long access, trap;

	if (radix_enabled())
		return;

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We try to figure out if we are coming from an instruction
	 * access fault and pass that down to __hash_page so we avoid
	 * double-faulting on execution of fresh text. We have to test
	 * for regs NULL since init will get here first thing at boot
	 *
	 * We also avoid filling the hash if not coming from a fault
	 */

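	/* Exception vectors: 0x300 is a data access, 0x400 an instruction access */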
	trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
	switch (trap) {
	case 0x300:
		access = 0UL;
		break;
	case 0x400:
		access = _PAGE_EXEC;
		break;
	default:
		return;
	}

	hash_preload(vma->vm_mm, address, access, trap);
#endif /* CONFIG_PPC_STD_MMU */
#if (defined(CONFIG_PPC_BOOK3E_64) || defined(CONFIG_PPC_FSL_BOOK3E)) \
	&& defined(CONFIG_HUGETLB_PAGE)
	if (is_vm_hugetlb_page(vma))
		book3e_hugetlb_preload(vma, address, *ptep);
#endif
}

/*
 * System memory should not need to be in /proc/iomem, but various tools
 * expect it to be there (e.g. kdump).
 */
static int __init add_system_ram_resources(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		struct resource *res;
		unsigned long base = reg->base;
		unsigned long size = reg->size;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = base;
			res->end = base + size - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);

#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well; these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */