v6.8
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  This file contains pgtable related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
unsigned long __kernel_io_end;
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
#endif

#ifndef __PAGETABLE_PUD_FOLDED
/* 4 level page table */
struct page *p4d_page(p4d_t p4d)
{
	if (p4d_is_leaf(p4d)) {
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!p4d_huge(p4d));
		return pte_page(p4d_pte(p4d));
	}
	return virt_to_page(p4d_pgtable(p4d));
}
#endif

struct page *pud_page(pud_t pud)
{
	if (pud_is_leaf(pud)) {
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!pud_huge(pud));
		return pte_page(pud_pte(pud));
	}
	return virt_to_page(pud_pgtable(pud));
}
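
/*
 * Note (editor's annotation): in these helpers a "leaf" entry maps a huge
 * page directly, so the backing struct page is recovered from the PTE view
 * of the entry; a non-leaf entry points at a lower-level table, and the
 * struct page of that table is returned via virt_to_page() instead.
 */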

/*
 * For a hugepage we have the pfn in the pmd and use the low PTE_RPN_SHIFT
 * bits for flags; for a PTE page we have a PTE_FRAG_SIZE (4K) aligned
 * virtual address.
 */
struct page *pmd_page(pmd_t pmd)
{
	if (pmd_is_leaf(pmd)) {
		/*
		 * vmalloc_to_page may be called on any vmap address (not
		 * only vmalloc), and it uses pmd_page() etc. when huge vmap
		 * is enabled, so these checks can't be used here.
		 */
		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
		return pte_page(pmd_pte(pmd));
	}
	return virt_to_page(pmd_page_vaddr(pmd));
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();

	// mark_initmem_nx() should have already run by now
	ptdump_check_wx();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif
v4.17
 
/*
 *  This file contains ioremap and related functions for 64-bit machines.
 *
 *  Derived from arch/ppc64/mm/init.c
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@samba.org)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/trace.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/dma.h>
#include <asm/powernv.h>

#include "mmu_decl.h"

#ifdef CONFIG_PPC_BOOK3S_64
/*
 * partition table and process table for ISA 3.0
 */
struct prtb_entry *process_tb;
struct patb_entry *partition_tb;
/*
 * page table size
 */
unsigned long __pte_index_size;
EXPORT_SYMBOL(__pte_index_size);
unsigned long __pmd_index_size;
EXPORT_SYMBOL(__pmd_index_size);
unsigned long __pud_index_size;
EXPORT_SYMBOL(__pud_index_size);
unsigned long __pgd_index_size;
EXPORT_SYMBOL(__pgd_index_size);
unsigned long __pmd_cache_index;
EXPORT_SYMBOL(__pmd_cache_index);
unsigned long __pud_cache_index;
EXPORT_SYMBOL(__pud_cache_index);
unsigned long __pte_table_size;
EXPORT_SYMBOL(__pte_table_size);
unsigned long __pmd_table_size;
EXPORT_SYMBOL(__pmd_table_size);
unsigned long __pud_table_size;
EXPORT_SYMBOL(__pud_table_size);
unsigned long __pgd_table_size;
EXPORT_SYMBOL(__pgd_table_size);
unsigned long __pmd_val_bits;
EXPORT_SYMBOL(__pmd_val_bits);
unsigned long __pud_val_bits;
EXPORT_SYMBOL(__pud_val_bits);
unsigned long __pgd_val_bits;
EXPORT_SYMBOL(__pgd_val_bits);
unsigned long __kernel_virt_start;
EXPORT_SYMBOL(__kernel_virt_start);
unsigned long __kernel_virt_size;
EXPORT_SYMBOL(__kernel_virt_size);
unsigned long __vmalloc_start;
EXPORT_SYMBOL(__vmalloc_start);
unsigned long __vmalloc_end;
EXPORT_SYMBOL(__vmalloc_end);
unsigned long __kernel_io_start;
EXPORT_SYMBOL(__kernel_io_start);
struct page *vmemmap;
EXPORT_SYMBOL(vmemmap);
unsigned long __pte_frag_nr;
EXPORT_SYMBOL(__pte_frag_nr);
unsigned long __pte_frag_size_shift;
EXPORT_SYMBOL(__pte_frag_size_shift);
unsigned long ioremap_bot;
#else /* !CONFIG_PPC_BOOK3S_64 */
unsigned long ioremap_bot = IOREMAP_BASE;
#endif
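
/*
 * Note (editor's annotation): ioremap_bot is the bottom of the IO mapping
 * region. Before the vmalloc allocator is up, __ioremap_caller() below
 * hands out addresses by bumping it upward; afterwards addresses come
 * from __get_vm_area_caller() between ioremap_bot and IOREMAP_END.
 */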

/**
 * __ioremap_at - Low level function to establish the page tables
 *                for an IO mapping
 */
void __iomem * __ioremap_at(phys_addr_t pa, void *ea, unsigned long size,
			    unsigned long flags)
{
	unsigned long i;

	/* Make sure we have the base flags */
	if ((flags & _PAGE_PRESENT) == 0)
		flags |= pgprot_val(PAGE_KERNEL);

	/* We don't support the 4K PFN hack with ioremap */
	if (flags & H_PAGE_4K_PFN)
		return NULL;

	WARN_ON(pa & ~PAGE_MASK);
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	for (i = 0; i < size; i += PAGE_SIZE)
		if (map_kernel_page((unsigned long)ea+i, pa+i, flags))
			return NULL;

	return (void __iomem *)ea;
}
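
/*
 * Example (editor's sketch, not part of the original file): bolting a
 * fixed MMIO window at a known effective address. MY_PHYS_BASE and
 * MY_VIRT_BASE are hypothetical, page-aligned constants.
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap_at(MY_PHYS_BASE, (void *)MY_VIRT_BASE, 0x10000,
 *			    pgprot_val(pgprot_noncached(__pgprot(0))));
 *	if (!regs)
 *		pr_err("failed to establish the mapping\n");
 */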

/**
 * __iounmap_at - Low level function to tear down the page tables
 *                for an IO mapping. This is used for mappings that
 *                are manipulated manually, like partial unmapping of
 *                PCI IOs or ISA space.
 */
void __iounmap_at(void *ea, unsigned long size)
{
	WARN_ON(((unsigned long)ea) & ~PAGE_MASK);
	WARN_ON(size & ~PAGE_MASK);

	unmap_kernel_range((unsigned long)ea, size);
}

void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
				unsigned long flags, void *caller)
{
	phys_addr_t paligned;
	void __iomem *ret;

	/*
	 * Choose an address to map it to. Once the vmalloc system is
	 * running, we use it. Before that, we map using addresses going
	 * up from ioremap_bot; the vmalloc path below then allocates
	 * addresses from ioremap_bot through IOREMAP_END.
	 */
	paligned = addr & PAGE_MASK;
	size = PAGE_ALIGN(addr + size) - paligned;
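
	/*
	 * Worked example (editor's annotation), with 4K pages: for
	 * addr = 0x1003 and size = 0x2000, paligned = 0x1000 and size
	 * becomes PAGE_ALIGN(0x3003) - 0x1000 = 0x3000, covering every
	 * page the request touches; the sub-page offset 0x003 is added
	 * back to the returned address at the end of this function.
	 */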

	if ((size == 0) || (paligned == 0))
		return NULL;

	if (slab_is_available()) {
		struct vm_struct *area;

		area = __get_vm_area_caller(size, VM_IOREMAP,
					    ioremap_bot, IOREMAP_END,
					    caller);
		if (area == NULL)
			return NULL;

		area->phys_addr = paligned;
		ret = __ioremap_at(paligned, area->addr, size, flags);
		if (!ret)
			vunmap(area->addr);
	} else {
		ret = __ioremap_at(paligned, (void *)ioremap_bot, size, flags);
		if (ret)
			ioremap_bot += size;
	}

	if (ret)
		ret += addr & ~PAGE_MASK;
	return ret;
}

void __iomem * __ioremap(phys_addr_t addr, unsigned long size,
			 unsigned long flags)
{
	return __ioremap_caller(addr, size, flags, __builtin_return_address(0));
}

void __iomem * ioremap(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size)
{
	unsigned long flags = pgprot_val(pgprot_noncached_wc(__pgprot(0)));
	void *caller = __builtin_return_address(0);

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}

void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size,
			     unsigned long flags)
{
	void *caller = __builtin_return_address(0);

	/* writeable implies dirty for kernel addresses */
	if (flags & _PAGE_WRITE)
		flags |= _PAGE_DIRTY;

	/* we don't want to let _PAGE_EXEC leak out */
	flags &= ~_PAGE_EXEC;
	/*
	 * Force kernel mapping.
	 */
	flags &= ~_PAGE_USER;
	flags |= _PAGE_PRIVILEGED;

	if (ppc_md.ioremap)
		return ppc_md.ioremap(addr, size, flags, caller);
	return __ioremap_caller(addr, size, flags, caller);
}


/*
 * Unmap an IO region and remove it from the vmalloc'd list.
 * Access to IO memory should be serialized by the driver.
 */
void __iounmap(volatile void __iomem *token)
{
	void *addr;

	if (!slab_is_available())
		return;

	addr = (void *) ((unsigned long __force)
			 PCI_FIX_ADDR(token) & PAGE_MASK);
	if ((unsigned long)addr < ioremap_bot) {
		printk(KERN_WARNING "Attempt to iounmap early bolted mapping"
		       " at 0x%p\n", addr);
		return;
	}
	vunmap(addr);
}

void iounmap(volatile void __iomem *token)
{
	if (ppc_md.iounmap)
		ppc_md.iounmap(token);
	else
		__iounmap(token);
}

EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(ioremap_wc);
EXPORT_SYMBOL(ioremap_prot);
EXPORT_SYMBOL(__ioremap);
EXPORT_SYMBOL(__ioremap_at);
EXPORT_SYMBOL(iounmap);
EXPORT_SYMBOL(__iounmap);
EXPORT_SYMBOL(__iounmap_at);
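
/*
 * Example (editor's sketch, not part of the original file): typical use of
 * the exported API from a driver — map a device's register window, poke
 * it, and tear the mapping down. `res` and CTRL_REG are hypothetical.
 *
 *	void __iomem *base = ioremap(res->start, resource_size(res));
 *
 *	if (!base)
 *		return -ENOMEM;
 *	writel(0x1, base + CTRL_REG);
 *	iounmap(base);
 */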
290
291#ifndef __PAGETABLE_PUD_FOLDED
292/* 4 level page table */
293struct page *pgd_page(pgd_t pgd)
294{
295	if (pgd_huge(pgd))
296		return pte_page(pgd_pte(pgd));
297	return virt_to_page(pgd_page_vaddr(pgd));
 
 
 
298}
299#endif
300
301struct page *pud_page(pud_t pud)
302{
303	if (pud_huge(pud))
 
 
304		return pte_page(pud_pte(pud));
305	return virt_to_page(pud_page_vaddr(pud));
 
306}
307
308/*
309 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
310 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
311 */
312struct page *pmd_page(pmd_t pmd)
313{
314	if (pmd_trans_huge(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
315		return pte_page(pmd_pte(pmd));
316	return virt_to_page(pmd_page_vaddr(pmd));
317}
318
319#ifdef CONFIG_PPC_64K_PAGES
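/*
 * Note (editor's annotation): with 64K pages a full page is much larger
 * than one 4K PTE table, so each page is carved into PTE_FRAG_NR
 * fragments of PTE_FRAG_SIZE bytes and handed out one fragment at a time;
 * the struct page refcount tracks the outstanding fragments (see
 * __alloc_for_cache() and pte_fragment_free() below).
 */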
static pte_t *get_from_cache(struct mm_struct *mm)
{
	void *pte_frag, *ret;

	spin_lock(&mm->page_table_lock);
	ret = mm->context.pte_frag;
	if (ret) {
		pte_frag = ret + PTE_FRAG_SIZE;
		/*
		 * If we have used up all the fragments, mark the
		 * PTE page NULL.
		 */
		if (((unsigned long)pte_frag & ~PAGE_MASK) == 0)
			pte_frag = NULL;
		mm->context.pte_frag = pte_frag;
	}
	spin_unlock(&mm->page_table_lock);
	return (pte_t *)ret;
}

static pte_t *__alloc_for_cache(struct mm_struct *mm, int kernel)
{
	void *ret = NULL;
	struct page *page;

	if (!kernel) {
		page = alloc_page(PGALLOC_GFP | __GFP_ACCOUNT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
	} else {
		page = alloc_page(PGALLOC_GFP);
		if (!page)
			return NULL;
	}

	ret = page_address(page);
	spin_lock(&mm->page_table_lock);
	/*
	 * If another thread published a fragment list while we were
	 * allocating, return the new page as-is with a single-fragment
	 * reference count; otherwise publish the rest of this page as
	 * the fragment list, taking one page reference per fragment.
	 */
	if (likely(!mm->context.pte_frag)) {
		set_page_count(page, PTE_FRAG_NR);
		mm->context.pte_frag = ret + PTE_FRAG_SIZE;
	}
	spin_unlock(&mm->page_table_lock);

	return (pte_t *)ret;
}

pte_t *pte_fragment_alloc(struct mm_struct *mm, unsigned long vmaddr, int kernel)
{
	pte_t *pte;

	pte = get_from_cache(mm);
	if (pte)
		return pte;

	return __alloc_for_cache(mm, kernel);
}
#endif /* CONFIG_PPC_64K_PAGES */

void pte_fragment_free(unsigned long *table, int kernel)
{
	struct page *page = virt_to_page(table);

	if (put_page_testzero(page)) {
		if (!kernel)
			pgtable_page_dtor(page);
		free_unref_page(page);
	}
}
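
/*
 * Note (editor's annotation): each fragment handed out above holds one
 * reference on the backing page (set_page_count(page, PTE_FRAG_NR) in
 * __alloc_for_cache()), so the page is only freed once the last fragment
 * comes back through pte_fragment_free().
 */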

#ifdef CONFIG_SMP
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
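	/*
	 * Editor's annotation: page-table pages are aligned well beyond
	 * MAX_PGTABLE_INDEX_SIZE, so the low bits of the pointer are
	 * free to carry the index size; __tlb_remove_table() below
	 * unpacks it with the same mask.
	 */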
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	if (!shift)
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#else
void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
	if (!shift) {
		/* PTE page needs special handling */
		pte_fragment_free(table, 0);
	} else {
		BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(shift), table);
	}
}
#endif

#ifdef CONFIG_PPC_BOOK3S_64
void __init mmu_partition_table_init(void)
{
	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
	unsigned long ptcr;

	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
						MEMBLOCK_ALLOC_ANYWHERE));

	/* Initialize the Partition Table with no entries */
	memset((void *)partition_tb, 0, patb_size);

	/*
	 * update partition table control register,
	 * 64 K size.
	 */
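	/*
	 * Editor's annotation: the low bits of PTCR encode the table
	 * size as log2(size in bytes) - 12, so the PATB_SIZE_SHIFT - 12
	 * term selects the 64K table allocated above.
	 */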
	ptcr = __pa(partition_tb) | (PATB_SIZE_SHIFT - 12);
	mtspr(SPRN_PTCR, ptcr);
	powernv_set_nmmu_ptcr(ptcr);
}

void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
				   unsigned long dw1)
{
	unsigned long old = be64_to_cpu(partition_tb[lpid].patb0);

	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	partition_tb[lpid].patb1 = cpu_to_be64(dw1);

	/*
	 * Global flush of TLBs and partition table caches for this lpid.
	 * The type of flush (hash or radix) depends on what the previous
	 * use of this partition ID was, not the new use.
	 */
	asm volatile("ptesync" : : : "memory");
	if (old & PATB_HR) {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 1);
	} else {
		asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
			     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
		trace_tlbie(lpid, 0, TLBIEL_INVAL_SET_LPID, lpid, 2, 0, 0);
	}
	/* do we need fixup here? */
	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
}
EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	if (!mmu_has_feature(MMU_FTR_KERNEL_RO)) {
		pr_warn("Warning: Unable to mark rodata read only on this CPU.\n");
		return;
	}

	if (radix_enabled())
		radix__mark_rodata_ro();
	else
		hash__mark_rodata_ro();
}

void mark_initmem_nx(void)
{
	if (radix_enabled())
		radix__mark_initmem_nx();
	else
		hash__mark_initmem_nx();
}
#endif