v6.2
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFNs
 */
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)
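/* TLB_ENTRY_SIZE_MASK aligns an address down to the 2-page granule covered by one TLB entry. */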

void flush_tlb_all(void)
{
	tlb_invalid_all();
}

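/*
 * Flush every TLB entry belonging to this mm: by its ASID when the CPU
 * has TLBI instructions, otherwise by invalidating the whole TLB.
 */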
void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.asids %0	\n"
		"sync.i		\n"
		:
		: "r" (cpu_asid(mm))
		: "memory");
#else
	tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB, so we also have to change the ASID field to invalidate the
 * I-uTLB & D-uTLB.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif

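/*
 * Flush the TLB entries covering the user range [start, end) of vma's
 * mm, identified by its ASID.
 */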
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	while (start < end) {
		asm volatile(
			"tlbi.vas %0	\n"
			:
			: "r" (start | newpid)
			: "memory");

		start += 2*PAGE_SIZE;
	}
	asm volatile("sync.i\n");
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | newpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

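/*
 * Flush the TLB entries covering the kernel virtual range [start, end),
 * e.g. after vmalloc/vmap mappings have changed.
 */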
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	while (start < end) {
		asm volatile(
			"tlbi.vaas %0	\n"
			:
			: "r" (start)
			: "memory");

		start += 2*PAGE_SIZE;
	}
	asm volatile("sync.i\n");
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}

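/*
 * Flush the single TLB entry that maps addr within vma's mm.
 */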
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vas %0	\n"
		"sync.i		\n"
		:
		: "r" (addr | newpid)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

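/*
 * Flush the single TLB entry that maps addr; exported for use by modules.
 */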
void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	sync_is();
	asm volatile(
		"tlbi.vaas %0	\n"
		"sync.i		\n"
		:
		: "r" (addr)
		: "memory");
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);

v5.9
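The same functions as of v5.9. In this version the CONFIG_CPU_HAS_TLBI paths issue the tlbi instruction(s) first and at most a single trailing sync_is() (flush_tlb_mm issues no barrier at all), and the inline asm carries no "memory" clobber, whereas v6.2 brackets each invalidate with sync_is()/sync.i and marks the asm as clobbering memory.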
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>

#include <asm/mmu_context.h>
#include <asm/setup.h>

/*
 * One C-SKY MMU TLB entry contains two PFN/page entries, i.e.:
 * 1 VPN -> 2 PFNs
 */
#define TLB_ENTRY_SIZE		(PAGE_SIZE * 2)
#define TLB_ENTRY_SIZE_MASK	(PAGE_MASK << 1)

void flush_tlb_all(void)
{
	tlb_invalid_all();
}

void flush_tlb_mm(struct mm_struct *mm)
{
#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.asids %0"::"r"(cpu_asid(mm)));
#else
	tlb_invalid_all();
#endif
}

/*
 * The MMU operation registers can only invalidate TLB entries in the
 * jTLB, so we also have to change the ASID field to invalidate the
 * I-uTLB & D-uTLB.
 */
#ifndef CONFIG_CPU_HAS_TLBI
#define restore_asid_inv_utlb(oldpid, newpid) \
do { \
	if (oldpid == newpid) \
		write_mmu_entryhi(oldpid + 1); \
	write_mmu_entryhi(oldpid); \
} while (0)
#endif

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			unsigned long end)
{
	unsigned long newpid = cpu_asid(vma->vm_mm);

	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vas %0"::"r"(start | newpid));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | newpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= TLB_ENTRY_SIZE_MASK;
	end   += TLB_ENTRY_SIZE - 1;
	end   &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	while (start < end) {
		asm volatile("tlbi.vaas %0"::"r"(start));
		start += 2*PAGE_SIZE;
	}
	sync_is();
#else
	{
	unsigned long flags, oldpid;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	while (start < end) {
		int idx;

		write_mmu_entryhi(start | oldpid);
		start += 2*PAGE_SIZE;
		tlb_probe();
		idx = read_mmu_index();
		if (idx >= 0)
			tlb_invalid_indexed();
	}
	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	int newpid = cpu_asid(vma->vm_mm);

	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vas %0"::"r"(addr | newpid));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | newpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, newpid);
	local_irq_restore(flags);
	}
#endif
}

void flush_tlb_one(unsigned long addr)
{
	addr &= TLB_ENTRY_SIZE_MASK;

#ifdef CONFIG_CPU_HAS_TLBI
	asm volatile("tlbi.vaas %0"::"r"(addr));
	sync_is();
#else
	{
	int oldpid, idx;
	unsigned long flags;

	local_irq_save(flags);
	oldpid = read_mmu_entryhi() & ASID_MASK;
	write_mmu_entryhi(addr | oldpid);
	tlb_probe();
	idx = read_mmu_index();
	if (idx >= 0)
		tlb_invalid_indexed();

	restore_asid_inv_utlb(oldpid, oldpid);
	local_irq_restore(flags);
	}
#endif
}
EXPORT_SYMBOL(flush_tlb_one);