/*
 * This file contains the routines for TLB flushing.
 * On machines where the MMU does not use a hash table to store virtual to
 * physical translations (i.e., SW loaded TLBs or Book3E compliant processors;
 * this does -not- include the 603, however, which shares the implementation
 * with hash based processors)
 *
 *  -- BenH
 *
 * Copyright 2008,2009 Ben Herrenschmidt <benh@kernel.crashing.org>
 *                     IBM Corp.
 *
 *  Derived from arch/ppc/mm/init.c:
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/code-patching.h>
#include <asm/cputhreads.h>
#include <asm/hugetlb.h>
#include <asm/paca.h>

#include "mmu_decl.h"

/*
 * This struct lists the sw-supported page sizes.  The hardware MMU may support
 * other sizes not listed here.  The .ind field is only used on MMUs that have
 * indirect page table entries.
 */
#ifdef CONFIG_PPC_BOOK3E_MMU
#ifdef CONFIG_PPC_FSL_BOOK3E
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_2M] = {
		.shift	= 21,
		.enc	= BOOK3E_PAGESZ_2M,
	},
	[MMU_PAGE_4M] = {
		.shift	= 22,
		.enc	= BOOK3E_PAGESZ_4M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_64M] = {
		.shift	= 26,
		.enc	= BOOK3E_PAGESZ_64M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#else
struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT] = {
	[MMU_PAGE_4K] = {
		.shift	= 12,
		.ind	= 20,
		.enc	= BOOK3E_PAGESZ_4K,
	},
	[MMU_PAGE_16K] = {
		.shift	= 14,
		.enc	= BOOK3E_PAGESZ_16K,
	},
	[MMU_PAGE_64K] = {
		.shift	= 16,
		.ind	= 28,
		.enc	= BOOK3E_PAGESZ_64K,
	},
	[MMU_PAGE_1M] = {
		.shift	= 20,
		.enc	= BOOK3E_PAGESZ_1M,
	},
	[MMU_PAGE_16M] = {
		.shift	= 24,
		.ind	= 36,
		.enc	= BOOK3E_PAGESZ_16M,
	},
	[MMU_PAGE_256M] = {
		.shift	= 28,
		.enc	= BOOK3E_PAGESZ_256M,
	},
	[MMU_PAGE_1G] = {
		.shift	= 30,
		.enc	= BOOK3E_PAGESZ_1GB,
	},
};
#endif /* CONFIG_PPC_FSL_BOOK3E */

static inline int mmu_get_tsize(int psize)
{
	return mmu_psize_defs[psize].enc;
}
#else
static inline int mmu_get_tsize(int psize)
{
	/* This isn't used on !Book3E for now */
	return 0;
}
#endif /* CONFIG_PPC_BOOK3E_MMU */

/* The variables below are currently only used on 64-bit Book3E
 * though this will probably be made common with other nohash
 * implementations at some point
 */
#ifdef CONFIG_PPC64

int mmu_linear_psize;		/* Page size used for the linear mapping */
int mmu_pte_psize;		/* Page size used for PTE pages */
int mmu_vmemmap_psize;		/* Page size used for the virtual mem map */
int book3e_htw_mode;		/* HW tablewalk?  Value is PPC_HTW_* */
unsigned long linear_map_top;	/* Top of linear mapping */

/*
 * Number of bytes to add to SPRN_SPRG_TLB_EXFRAME on crit/mcheck/debug
 * exceptions.  This is used for bolted and e6500 TLB miss handlers which
 * do not modify this SPRG in the TLB miss code; for other TLB miss handlers,
 * this is set to zero.
 */
int extlb_level_exc;

#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_FSL_BOOK3E
/* next_tlbcam_idx is used to round-robin tlbcam entry assignment */
DEFINE_PER_CPU(int, next_tlbcam_idx);
EXPORT_PER_CPU_SYMBOL(next_tlbcam_idx);
#endif

/*
 * Base TLB flushing operations:
 *
 *  - flush_tlb_mm(mm) flushes the specified mm context TLB's
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes kernel pages
 *
 *  - local_* variants of page and mm only apply to the current
 *    processor
 */

/*
 * These are the base non-SMP variants of page and mm flushing
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_pid(pid);
	preempt_enable();
}
EXPORT_SYMBOL(local_flush_tlb_mm);

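/*
 * Flush a single (vmaddr, tsize, ind) translation for the given mm on the
 * local CPU only.  A NULL mm is treated as PID 0, i.e. the kernel.
 */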
void __local_flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
			    int tsize, int ind)
{
	unsigned int pid;

	preempt_disable();
	pid = mm ? mm->context.id : 0;
	if (pid != MMU_NO_CONTEXT)
		_tlbil_va(vmaddr, pid, tsize, ind);
	preempt_enable();
}

void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	__local_flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			       mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(local_flush_tlb_page);

/*
 * And here are the SMP non-local implementations
 */
#ifdef CONFIG_SMP

static DEFINE_RAW_SPINLOCK(tlbivax_lock);

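/*
 * True when the mm is only active on threads of the current core, in which
 * case a purely local invalidation suffices and no broadcast/IPI is needed.
 */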
static int mm_is_core_local(struct mm_struct *mm)
{
	return cpumask_subset(mm_cpumask(mm),
			      topology_sibling_cpumask(smp_processor_id()));
}

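/* Arguments passed to the TLB flush IPI handlers below. */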
struct tlb_flush_param {
	unsigned long addr;
	unsigned int pid;
	unsigned int tsize;
	unsigned int ind;
};

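/* IPI handler: invalidate all TLB entries for p->pid (PID 0 if p is NULL). */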
static void do_flush_tlb_mm_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_pid(p ? p->pid : 0);
}

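/* IPI handler: invalidate a single (addr, pid, tsize, ind) translation. */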
static void do_flush_tlb_page_ipi(void *param)
{
	struct tlb_flush_param *p = param;

	_tlbil_va(p->addr, p->pid, p->tsize, p->ind);
}

/* Note on invalidations and PID:
 *
 * We snapshot the PID with preempt disabled. At this point, it can still
 * change either because:
 * - our context is being stolen (PID -> NO_CONTEXT) on another CPU
 * - we are invalidating some target that isn't currently running here
 *   and is concurrently acquiring a new PID on another CPU
 * - some other CPU is re-acquiring a lost PID for this mm
 * etc...
 *
 * However, this shouldn't be a problem as we only guarantee
 * invalidation of TLB entries present prior to this call, so we
 * don't care about the PID changing, and invalidating a stale PID
 * is generally harmless.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int pid;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto no_context;
	if (!mm_is_core_local(mm)) {
		struct tlb_flush_param p = { .pid = pid };
		/* Ignores smp_processor_id() even if set. */
		smp_call_function_many(mm_cpumask(mm),
				       do_flush_tlb_mm_ipi, &p, 1);
	}
	_tlbil_pid(pid);
 no_context:
	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_mm);

void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr,
		      int tsize, int ind)
{
	struct cpumask *cpu_mask;
	unsigned int pid;

	/*
	 * This function as well as __local_flush_tlb_page() must only be called
	 * for user contexts.
	 */
	if (unlikely(WARN_ON(!mm)))
		return;

	preempt_disable();
	pid = mm->context.id;
	if (unlikely(pid == MMU_NO_CONTEXT))
		goto bail;
	cpu_mask = mm_cpumask(mm);
	if (!mm_is_core_local(mm)) {
		/* If broadcast tlbivax is supported, use it */
		if (mmu_has_feature(MMU_FTR_USE_TLBIVAX_BCAST)) {
			int lock = mmu_has_feature(MMU_FTR_LOCK_BCAST_INVAL);
			if (lock)
				raw_spin_lock(&tlbivax_lock);
			_tlbivax_bcast(vmaddr, pid, tsize, ind);
			if (lock)
				raw_spin_unlock(&tlbivax_lock);
			goto bail;
		} else {
			struct tlb_flush_param p = {
				.pid = pid,
				.addr = vmaddr,
				.tsize = tsize,
				.ind = ind,
			};
			/* Ignores smp_processor_id() even if set in cpu_mask */
			smp_call_function_many(cpu_mask,
					       do_flush_tlb_page_ipi, &p, 1);
		}
	}
	_tlbil_va(vmaddr, pid, tsize, ind);
 bail:
	preempt_enable();
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
#ifdef CONFIG_HUGETLB_PAGE
	if (vma && is_vm_hugetlb_page(vma))
		flush_hugetlb_page(vma, vmaddr);
#endif

	__flush_tlb_page(vma ? vma->vm_mm : NULL, vmaddr,
			 mmu_get_tsize(mmu_virtual_psize), 0);
}
EXPORT_SYMBOL(flush_tlb_page);

#endif /* CONFIG_SMP */

#ifdef CONFIG_PPC_47x
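/*
 * When the device tree marks the partition as cooperative (presumably
 * running under a hypervisor), avoid broadcast tlbivax and rely on IPI
 * based flushes instead.
 */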
void __init early_init_mmu_47x(void)
{
#ifdef CONFIG_SMP
	unsigned long root = of_get_flat_dt_root();
	if (of_get_flat_dt_prop(root, "cooperative-partition", NULL))
		mmu_clear_feature(MMU_FTR_USE_TLBIVAX_BCAST);
#endif /* CONFIG_SMP */
}
#endif /* CONFIG_PPC_47x */

/*
 * Flush kernel TLB entries in the given range
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
	preempt_disable();
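	/*
	 * A NULL parameter makes do_flush_tlb_mm_ipi() fall back to PID 0,
	 * i.e. kernel entries.
	 */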
	smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
	_tlbil_pid(0);
	preempt_enable();
#else
	_tlbil_pid(0);
#endif
}
EXPORT_SYMBOL(flush_tlb_kernel_range);

/*
 * Currently, for range flushing, we just do a full mm flush. This should
 * be optimized based on a threshold on the size of the range, since
 * some implementations can stack multiple tlbivax before a tlbsync but
 * for now, we keep it that way
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_tlb_mm(vma->vm_mm);
}
EXPORT_SYMBOL(flush_tlb_range);

void tlb_flush(struct mmu_gather *tlb)
{
	flush_tlb_mm(tlb->mm);
}

/*
 * Below are functions specific to the 64-bit variant of Book3E though that
 * may change in the future
 */

#ifdef CONFIG_PPC64

/*
 * Handling of virtual linear page tables or indirect TLB entries
 * flushing when PTE pages are freed
 */
void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address)
{
	int tsize = mmu_psize_defs[mmu_pte_psize].enc;

	if (book3e_htw_mode != PPC_HTW_NONE) {
		unsigned long start = address & PMD_MASK;
		unsigned long end = address + PMD_SIZE;
		unsigned long size = 1UL << mmu_psize_defs[mmu_pte_psize].shift;

		/* This isn't the most optimal, ideally we would factor out the
		 * whole preempt & CPU mask mucking around, or even the IPI but
		 * it will do for now
		 */
		while (start < end) {
			__flush_tlb_page(tlb->mm, start, tsize, 1);
			start += size;
		}
	} else {
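		/*
		 * No HW tablewalk: the TLB miss handlers use a virtual
		 * linear page table, so instead of walking the PMD range we
		 * invalidate the single virtual PTE entry covering this
		 * address: shift the address down to a PTE offset and place
		 * it in the VPTE region (top nibble of the original address
		 * with bit 60 set).
		 */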
		unsigned long rmask = 0xf000000000000000ul;
		unsigned long rid = (address & rmask) | 0x1000000000000000ul;
		unsigned long vpte = address & ~rmask;

#ifdef CONFIG_PPC_64K_PAGES
		vpte = (vpte >> (PAGE_SHIFT - 4)) & ~0xfffful;
#else
		vpte = (vpte >> (PAGE_SHIFT - 3)) & ~0xffful;
#endif
		vpte |= rid;
		__flush_tlb_page(tlb->mm, vpte, tsize, 0);
	}
}

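/*
 * Probe the TLB configuration registers to determine which of the page sizes
 * in mmu_psize_defs[] the hardware supports as direct and/or indirect
 * entries, and pick the matching hardware tablewalk mode.
 */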
static void setup_page_sizes(void)
{
	unsigned int tlb0cfg;
	unsigned int tlb0ps;
	unsigned int eptcfg;
	int i, psize;

#ifdef CONFIG_PPC_FSL_BOOK3E
	unsigned int mmucfg = mfspr(SPRN_MMUCFG);
	int fsl_mmu = mmu_has_feature(MMU_FTR_TYPE_FSL_E);

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
		unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG);
		unsigned int min_pg, max_pg;

		min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT;
		max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def;
			unsigned int shift;

			def = &mmu_psize_defs[psize];
			shift = def->shift;

			if (shift == 0 || shift & 1)
				continue;

			/* adjust to be in terms of 4^shift Kb */
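			/* e.g. 4K pages: shift 12 -> (12 - 10) >> 1 = 1, i.e. 4^1 KB */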
			shift = (shift - 10) >> 1;

			if ((shift >= min_pg) && (shift <= max_pg))
				def->flags |= MMU_PAGE_SIZE_DIRECT;
		}

		goto out;
	}

	if (fsl_mmu && (mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
		u32 tlb1cfg, tlb1ps;

		tlb0cfg = mfspr(SPRN_TLB0CFG);
		tlb1cfg = mfspr(SPRN_TLB1CFG);
		tlb1ps = mfspr(SPRN_TLB1PS);
		eptcfg = mfspr(SPRN_EPTCFG);

		if ((tlb1cfg & TLBnCFG_IND) && (tlb0cfg & TLBnCFG_PT))
			book3e_htw_mode = PPC_HTW_E6500;

		/*
		 * We expect 4K subpage size and unrestricted indirect size.
		 * The lack of a restriction on indirect size is a Freescale
		 * extension, indicated by PSn = 0 but SPSn != 0.
		 */
		if (eptcfg != 2)
			book3e_htw_mode = PPC_HTW_NONE;

		for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (tlb1ps & (1U << (def->shift - 10))) {
				def->flags |= MMU_PAGE_SIZE_DIRECT;

				if (book3e_htw_mode && psize == MMU_PAGE_2M)
					def->flags |= MMU_PAGE_SIZE_INDIRECT;
			}
		}

		goto out;
	}
#endif

	tlb0cfg = mfspr(SPRN_TLB0CFG);
	tlb0ps = mfspr(SPRN_TLB0PS);
	eptcfg = mfspr(SPRN_EPTCFG);

	/* Look for supported direct sizes */
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];

		if (tlb0ps & (1U << (def->shift - 10)))
			def->flags |= MMU_PAGE_SIZE_DIRECT;
	}

	/* Indirect page sizes supported ? */
	if ((tlb0cfg & TLBnCFG_IND) == 0 ||
	    (tlb0cfg & TLBnCFG_PT) == 0)
		goto out;

	book3e_htw_mode = PPC_HTW_IBM;

	/* Now, we only deal with one IND page size for each
	 * direct size. Hopefully all implementations today are
	 * unambiguous, but we might want to be careful in the
	 * future.
	 */
	for (i = 0; i < 3; i++) {
		unsigned int ps, sps;

		sps = eptcfg & 0x1f;
		eptcfg >>= 5;
		ps = eptcfg & 0x1f;
		eptcfg >>= 5;
		if (!ps || !sps)
			continue;
		for (psize = 0; psize < MMU_PAGE_COUNT; psize++) {
			struct mmu_psize_def *def = &mmu_psize_defs[psize];

			if (ps == (def->shift - 10))
				def->flags |= MMU_PAGE_SIZE_INDIRECT;
			if (sps == (def->shift - 10))
				def->ind = ps + 10;
		}
	}

out:
	/* Clean up the array and print a summary */
	pr_info("MMU: Supported page sizes\n");
	for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) {
		struct mmu_psize_def *def = &mmu_psize_defs[psize];
		const char *__page_type_names[] = {
			"unsupported",
			"direct",
			"indirect",
			"direct & indirect"
		};
		if (def->flags == 0) {
			def->shift = 0;
			continue;
		}
		pr_info("  %8ld KB as %s\n", 1ul << (def->shift - 10),
			__page_type_names[def->flags & 0x3]);
	}
}

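/*
 * Patch the TLB miss handlers according to the hardware tablewalk mode
 * selected by setup_page_sizes().
 */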
static void setup_mmu_htw(void)
{
	/*
	 * If we want to use HW tablewalk, enable it by patching the TLB miss
	 * handlers to branch to the one dedicated to it.
	 */

	switch (book3e_htw_mode) {
	case PPC_HTW_IBM:
		patch_exception(0x1c0, exc_data_tlb_miss_htw_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_htw_book3e);
		break;
#ifdef CONFIG_PPC_FSL_BOOK3E
	case PPC_HTW_E6500:
		extlb_level_exc = EX_TLB_SIZE;
		patch_exception(0x1c0, exc_data_tlb_miss_e6500_book3e);
		patch_exception(0x1e0, exc_instruction_tlb_miss_e6500_book3e);
		break;
#endif
	}
	pr_info("MMU: Book3E HW tablewalk %s\n",
		book3e_htw_mode != PPC_HTW_NONE ? "enabled" : "not supported");
}

/*
 * Early initialization of the MMU TLB code
 */
static void early_init_this_mmu(void)
{
	unsigned int mas4;

	/* Set MAS4 based on page table setting */

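	/*
	 * 0x4 in the WIMGED field should correspond to the M (memory
	 * coherence) attribute for entries loaded on a TLB miss.
	 */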
	mas4 = 0x4 << MAS4_WIMGED_SHIFT;
	switch (book3e_htw_mode) {
	case PPC_HTW_E6500:
		mas4 |= MAS4_INDD;
		mas4 |= BOOK3E_PAGESZ_2M << MAS4_TSIZED_SHIFT;
		mas4 |= MAS4_TLBSELD(1);
		mmu_pte_psize = MMU_PAGE_2M;
		break;

	case PPC_HTW_IBM:
		mas4 |= MAS4_INDD;
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_256M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_256M;
#else
		mas4 |= BOOK3E_PAGESZ_1M << MAS4_TSIZED_SHIFT;
		mmu_pte_psize = MMU_PAGE_1M;
#endif
		break;

	case PPC_HTW_NONE:
#ifdef CONFIG_PPC_64K_PAGES
		mas4 |= BOOK3E_PAGESZ_64K << MAS4_TSIZED_SHIFT;
#else
		mas4 |= BOOK3E_PAGESZ_4K << MAS4_TSIZED_SHIFT;
#endif
		mmu_pte_psize = mmu_virtual_psize;
		break;
	}
	mtspr(SPRN_MAS4, mas4);

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned int num_cams;
		int __maybe_unused cpu = smp_processor_id();
		bool map = true;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		/*
		 * Only do the mapping once per core, or else the
		 * transient mapping would cause problems.
		 */
#ifdef CONFIG_SMP
		if (hweight32(get_tensr()) > 1)
			map = false;
#endif

		if (map)
			linear_map_top = map_mem_in_cams(linear_map_top,
							 num_cams, false);
	}
#endif

	/* A sync won't hurt us after mucking around with
	 * the MMU configuration
	 */
	mb();
}

static void __init early_init_mmu_global(void)
{
	/* XXX This will have to be decided at runtime, but right
	 * now our boot and TLB miss code hard wires it. Ideally
	 * we should find out a suitable page size and patch the
	 * TLB miss code (either that or use the PACA to store
	 * the value we want)
	 */
	mmu_linear_psize = MMU_PAGE_1G;

	/* XXX This should be decided at runtime based on supported
	 * page sizes in the TLB, but for now let's assume 16M is
	 * always there and a good fit (which it probably is)
	 *
	 * Freescale booke only supports 4K pages in TLB0, so use that.
	 */
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E))
		mmu_vmemmap_psize = MMU_PAGE_4K;
	else
		mmu_vmemmap_psize = MMU_PAGE_16M;

	/* XXX This code only checks for TLB 0 capabilities and doesn't
	 *     check what page size combos are supported by the HW. It
	 *     also doesn't handle the case where a separate array holds
	 *     the IND entries from the array loaded by the PT.
	 */
	/* Look for supported page sizes */
	setup_page_sizes();

	/* Look for HW tablewalk support */
	setup_mmu_htw();

#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		if (book3e_htw_mode == PPC_HTW_NONE) {
			extlb_level_exc = EX_TLB_SIZE;
			patch_exception(0x1c0, exc_data_tlb_miss_bolted_book3e);
			patch_exception(0x1e0,
				exc_instruction_tlb_miss_bolted_book3e);
		}
	}
#endif

	/* Set the global containing the top of the linear mapping
	 * for use by the TLB miss code
	 */
	linear_map_top = memblock_end_of_DRAM();
}

static void __init early_mmu_set_memory_limit(void)
{
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		/*
		 * Limit memory so we don't have linear faults.
		 * Unlike memblock_set_current_limit, which limits
		 * memory available during early boot, this permanently
		 * reduces the memory available to Linux.  We need to
		 * do this because highmem is not supported on 64-bit.
		 */
		memblock_enforce_memory_limit(linear_map_top);
	}
#endif

	memblock_set_current_limit(linear_map_top);
}

/* boot cpu only */
void __init early_init_mmu(void)
{
	early_init_mmu_global();
	early_init_this_mmu();
	early_mmu_set_memory_limit();
}

void early_init_mmu_secondary(void)
{
	early_init_this_mmu();
}

void setup_initial_memory_limit(phys_addr_t first_memblock_base,
				phys_addr_t first_memblock_size)
{
	/* On non-FSL Embedded 64-bit, we adjust the RMA size to match
	 * the bolted TLB entry. We know for now that only 1G
	 * entries are supported though that may eventually
	 * change.
	 *
	 * On FSL Embedded 64-bit, usually all RAM is bolted, but with
	 * unusual memory sizes it's possible for some RAM to not be mapped
	 * (such RAM is not used at all by Linux, since we don't support
	 * highmem on 64-bit).  We limit ppc64_rma_size to what would be
	 * mappable if this memblock is the only one.  Additional memblocks
	 * can only increase, not decrease, the amount that ends up getting
	 * mapped.  We still limit max to 1G even if we'll eventually map
	 * more.  This is due to what the early init code is set up to do.
	 *
	 * We crop it to the size of the first MEMBLOCK to
	 * avoid going over total available memory just in case...
	 */
#ifdef CONFIG_PPC_FSL_BOOK3E
	if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) {
		unsigned long linear_sz;
		unsigned int num_cams;

		/* use a quarter of the TLBCAM for bolted linear map */
		num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4;

		linear_sz = map_mem_in_cams(first_memblock_size, num_cams,
					    true);

		ppc64_rma_size = min_t(u64, linear_sz, 0x40000000);
	} else
#endif
		ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000);

	/* Finally limit subsequent allocations */
	memblock_set_current_limit(first_memblock_base + ppc64_rma_size);
}
#else /* ! CONFIG_PPC64 */
void __init early_init_mmu(void)
{
#ifdef CONFIG_PPC_47x
	early_init_mmu_47x();
#endif
}
#endif /* CONFIG_PPC64 */