v4.17
/*
 * arch/sh/mm/tlb-pteaex.c
 *
 * TLB operations for SH-X3 CPUs featuring PTE ASID Extensions.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

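/*
 * Called from the generic update_mmu_cache() path once a PTE has been set
 * up, so the newly established translation can be preloaded into the TLB
 * and the faulting access restarted without taking another TLB miss.
 */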
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags, pteval, vpn;

	/*
	 * Handle the debugger faulting in on behalf of the debuggee.
	 */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = address & MMU_VPN_MASK;
	__raw_writel(vpn, MMU_PTEH);

	/* Set PTEAEX */
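	/*
	 * With PTE ASID extensions the ASID is programmed through the
	 * dedicated PTEAEX register rather than packed into the low byte
	 * of PTEH, which above carries only the VPN.
	 */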
	__raw_writel(get_asid(), MMU_PTEAEX);

	pteval = pte.pte_low;

	/* Set PTEA register */
#ifdef CONFIG_X2TLB
	/*
	 * For the extended mode TLB this is trivial, only the ESZ and
	 * EPR bits need to be written out to PTEA, with the remainder of
	 * the protection bits (with the exception of the compat-mode SZ
	 * and PR bits, which are cleared) being written out in PTEL.
	 */
	__raw_writel(pte.pte_high, MMU_PTEA);
#endif

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
#ifdef CONFIG_CACHE_WRITETHROUGH
	pteval |= _PAGE_WT;
#endif
	/* conveniently, we want all the software flags to be 0 anyway */
	__raw_writel(pteval, MMU_PTEL);

	/* Load the TLB */
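	/* ldtlb fills the UTLB entry selected by MMUCR.URC from the PTE* registers */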
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}

/*
 * While SH-X2 extended TLB mode splits out the memory-mapped I/UTLB
 * data arrays, SH-X3 cores with PTEAEX split out the memory-mapped
 * address arrays. In compat mode the second array is inaccessible, while
 * in extended mode, the legacy 8-bit ASID field in address array 1 has
 * undefined behaviour.
 */
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	jump_to_uncached();
	__raw_writel(page, MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
	__raw_writel(page, MMU_ITLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT);
	__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
	back_to_cached();
}

void local_flush_tlb_all(void)
{
	unsigned long flags, status;
	int i;

	/*
	 * Flush the entire TLB.
	 */
	local_irq_save(flags);
	jump_to_uncached();

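	/*
	 * MMUCR.URB is the UTLB replace boundary: entries below it take
	 * part in normal replacement, while the slots at and above it are
	 * reserved for wired entries. Only the replaceable range needs to
	 * be cleared here; a URB of zero means no boundary is set, so all
	 * entries are cleared.
	 */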
	status = __raw_readl(MMUCR);
	status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);

	if (status == 0)
		status = MMUCR_URB_NENTRIES;

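	/* The entry index is encoded from bit 8 of the address-array offset. */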
	for (i = 0; i < status; i++)
		__raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));

	for (i = 0; i < 4; i++)
		__raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));

	back_to_cached();
	ctrl_barrier();
	local_irq_restore(flags);
}
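
For context, a rough sketch of how a per-page flush can drive local_flush_tlb_one(), modelled on the shape of local_flush_tlb_page() in the generic 32-bit SH TLB code. This is an illustrative example, not the verbatim kernel caller; cpu_context(), cpu_asid(), get_asid(), set_asid(), NO_CONTEXT and MMU_NO_ASID are assumed to be the usual helpers from <asm/mmu_context.h>.

/* Hypothetical caller sketch; flush one user page on the local CPU. */
static void example_flush_user_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags, asid, saved_asid = MMU_NO_ASID;

	/* Nothing to do if the mm has never been given an ASID on this CPU. */
	if (!vma->vm_mm || cpu_context(cpu, vma->vm_mm) == NO_CONTEXT)
		return;

	asid = cpu_asid(cpu, vma->vm_mm);
	page &= PAGE_MASK;

	local_irq_save(flags);
	if (vma->vm_mm != current->mm) {
		/* Temporarily switch to the target mm's ASID. */
		saved_asid = get_asid();
		set_asid(asid);
	}
	local_flush_tlb_one(asid, page);
	if (saved_asid != MMU_NO_ASID)
		set_asid(saved_asid);
	local_irq_restore(flags);
}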