arch/xtensa/mm/tlb.c (Linux v6.9.4)

/*
 * arch/xtensa/mm/tlb.c
 *
 * Logic that manipulates the Xtensa MMU.  Derived from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2003 Tensilica Inc.
 *
 * Joe Taylor
 * Chris Zankel	<chris@zankel.net>
 * Marc Gauthier
 */

#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

static inline void __flush_itlb_all(void)
{
	int w, i;

	for (w = 0; w < ITLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_itlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}

static inline void __flush_dtlb_all(void)
{
	int w, i;

	for (w = 0; w < DTLB_ARF_WAYS; w++) {
		for (i = 0; i < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); i++) {
			int e = w + (i << PAGE_SHIFT);

			invalidate_dtlb_entry_no_isync(e);
		}
	}
	asm volatile ("isync\n");
}
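
/*
 * Note on the index encoding: the argument to
 * invalidate_{i,d}tlb_entry_no_isync() packs the way number into the low
 * bits and the entry-within-way into the bits at and above PAGE_SHIFT.
 * A minimal worked example, assuming PAGE_SHIFT == 12 and a hypothetical
 * 4-way ARF TLB with four entries per way (XCHAL_*_ARF_ENTRIES_LOG2 == 2):
 *
 *	way 2, entry 0  ->  2 + (0 << 12) == 0x0002
 *	way 1, entry 3  ->  1 + (3 << 12) == 0x3001
 *
 * The nested loops above therefore visit each way/entry pair exactly once,
 * followed by a single closing isync.
 */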

void local_flush_tlb_all(void)
{
	__flush_itlb_all();
	__flush_dtlb_all();
}

/* If mm is current, we simply assign the current task a new ASID, thus
 * invalidating all previous TLB entries. If mm is someone else's user
 * mapping, we invalidate its context; a new context will be assigned to
 * the mapping the next time it is swapped in.
 */

void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu = smp_processor_id();

	if (mm == current->active_mm) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context.asid[cpu] = NO_CONTEXT;
		activate_context(mm, cpu);
		local_irq_restore(flags);
	} else {
		mm->context.asid[cpu] = NO_CONTEXT;
		mm->context.cpu = -1;
	}
}
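
/*
 * A sketch of how this "flush" takes effect, assuming the behaviour of
 * activate_context()/get_new_mmu_context() in asm/mmu_context.h:
 *
 *	mm->context.asid[cpu] = NO_CONTEXT;	// retire the old ASID
 *	activate_context(mm, cpu);		// allocate a fresh ASID and
 *						// load it into RASID
 *
 * Stale TLB entries remain in hardware but are tagged with the retired
 * ASID, so they can never match again; the flush is achieved by retiring
 * the ASID rather than by touching individual entries.
 */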

#define _ITLB_ENTRIES (ITLB_ARF_WAYS << XCHAL_ITLB_ARF_ENTRIES_LOG2)
#define _DTLB_ENTRIES (DTLB_ARF_WAYS << XCHAL_DTLB_ARF_ENTRIES_LOG2)
#if _ITLB_ENTRIES > _DTLB_ENTRIES
# define _TLB_ENTRIES _ITLB_ENTRIES
#else
# define _TLB_ENTRIES _DTLB_ENTRIES
#endif
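
/*
 * Worked example (hypothetical configuration): with ITLB_ARF_WAYS == 4 and
 * XCHAL_ITLB_ARF_ENTRIES_LOG2 == 2, _ITLB_ENTRIES == 4 << 2 == 16; if the
 * DTLB has the same geometry, _TLB_ENTRIES == 16. With PAGE_SHIFT == 12
 * this makes _TLB_ENTRIES << PAGE_SHIFT == 64 KiB, the largest range that
 * local_flush_tlb_range() below will flush page by page.
 */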

void local_flush_tlb_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int cpu = smp_processor_id();
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags;

	if (mm->context.asid[cpu] == NO_CONTEXT)
		return;

	pr_debug("[tlbrange<%02lx,%08lx,%08lx>]\n",
		 (unsigned long)mm->context.asid[cpu], start, end);
	local_irq_save(flags);

	if (end - start + (PAGE_SIZE - 1) <= _TLB_ENTRIES << PAGE_SHIFT) {
		int oldpid = get_rasid_register();

		set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
		start &= PAGE_MASK;
		if (vma->vm_flags & VM_EXEC)
			while (start < end) {
				invalidate_itlb_mapping(start);
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}
		else
			while (start < end) {
				invalidate_dtlb_mapping(start);
				start += PAGE_SIZE;
			}

		set_rasid_register(oldpid);
	} else {
		local_flush_tlb_mm(mm);
	}
	local_irq_restore(flags);
}
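
/*
 * The size check above is a cost heuristic: the ARF ways can hold at most
 * _TLB_ENTRIES translations per TLB, so once a range spans more pages than
 * that, per-page invalidation can only do redundant work, and dropping the
 * whole context via local_flush_tlb_mm() is cheaper. For example, with the
 * hypothetical 16-entry, 4 KiB-page configuration sketched above, a 32 KiB
 * range is flushed page by page while a 1 MiB range takes the full-mm path.
 */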
126
127void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
128{
129	int cpu = smp_processor_id();
130	struct mm_struct* mm = vma->vm_mm;
131	unsigned long flags;
132	int oldpid;
133
134	if (mm->context.asid[cpu] == NO_CONTEXT)
135		return;
136
137	local_irq_save(flags);
138
139	oldpid = get_rasid_register();
140	set_rasid_register(ASID_INSERT(mm->context.asid[cpu]));
141
142	if (vma->vm_flags & VM_EXEC)
143		invalidate_itlb_mapping(page);
144	invalidate_dtlb_mapping(page);
145
146	set_rasid_register(oldpid);
147
148	local_irq_restore(flags);
149}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
		start &= PAGE_MASK;
		while (start < end) {
			invalidate_itlb_mapping(start);
			invalidate_dtlb_mapping(start);
			start += PAGE_SIZE;
		}
	} else {
		local_flush_tlb_all();
	}
}

void update_mmu_tlb(struct vm_area_struct *vma,
		    unsigned long address, pte_t *ptep)
{
	local_flush_tlb_page(vma, address);
}

#ifdef CONFIG_DEBUG_TLB_SANITY

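/*
 * Look up the raw PTE value for vaddr in the current task's page tables,
 * returning 0 if any level is absent. Xtensa uses two-level page tables,
 * so the p4d/pud/pmd steps below are folded levels that simply pass the
 * pgd entry through (via the generic nopmd folding); the walk is written
 * out in full to match the generic page-table API.
 */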
static unsigned get_pte_for_vaddr(unsigned vaddr)
{
	struct task_struct *task = get_current();
	struct mm_struct *mm = task->mm;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int pteval;

	if (!mm)
		mm = task->active_mm;
	pgd = pgd_offset(mm, vaddr);
	if (pgd_none_or_clear_bad(pgd))
		return 0;
	p4d = p4d_offset(pgd, vaddr);
	if (p4d_none_or_clear_bad(p4d))
		return 0;
	pud = pud_offset(p4d, vaddr);
	if (pud_none_or_clear_bad(pud))
		return 0;
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none_or_clear_bad(pmd))
		return 0;
	pte = pte_offset_map(pmd, vaddr);
	if (!pte)
		return 0;
	pteval = pte_val(*pte);
	pte_unmap(pte);
	return pteval;
}

enum {
	TLB_SUSPICIOUS	= 1,
	TLB_INSANE	= 2,
};

static void tlb_insane(void)
{
	BUG_ON(1);
}

static void tlb_suspicious(void)
{
	WARN_ON(1);
}

/*
 * Check that TLB entries with kernel ASID (1) have kernel VMA (>= TASK_SIZE),
 * and TLB entries with user ASID (>= 4) have VMA < TASK_SIZE.
 *
 * Check that valid TLB entries either have the same PA as the PTE, or that
 * the PTE is marked non-present. A non-present PTE together with a page that
 * has a non-zero refcount and a zero mapcount is normal for a batched TLB
 * flush operation. A zero refcount means the page was freed prematurely.
 * A non-zero mapcount is unusual, but does not necessarily mean an error,
 * so it is only marked as suspicious.
 */
static int check_tlb_entry(unsigned w, unsigned e, bool dtlb)
{
	unsigned tlbidx = w | (e << PAGE_SHIFT);
	unsigned r0 = dtlb ?
		read_dtlb_virtual(tlbidx) : read_itlb_virtual(tlbidx);
	unsigned r1 = dtlb ?
		read_dtlb_translation(tlbidx) : read_itlb_translation(tlbidx);
	unsigned vpn = (r0 & PAGE_MASK) | (e << PAGE_SHIFT);
	unsigned pte = get_pte_for_vaddr(vpn);
	unsigned mm_asid = (get_rasid_register() >> 8) & ASID_MASK;
	unsigned tlb_asid = r0 & ASID_MASK;
	bool kernel = tlb_asid == 1;
	int rc = 0;

	if (tlb_asid > 0 && ((vpn < TASK_SIZE) == kernel)) {
		pr_err("%cTLB: way: %u, entry: %u, VPN %08x in %s PTE\n",
				dtlb ? 'D' : 'I', w, e, vpn,
				kernel ? "kernel" : "user");
		rc |= TLB_INSANE;
	}

	if (tlb_asid == mm_asid) {
		if ((pte ^ r1) & PAGE_MASK) {
			pr_err("%cTLB: way: %u, entry: %u, mapping: %08x->%08x, PTE: %08x\n",
					dtlb ? 'D' : 'I', w, e, r0, r1, pte);
			if (pte == 0 || !pte_present(__pte(pte))) {
				struct page *p = pfn_to_page(r1 >> PAGE_SHIFT);

				pr_err("page refcount: %d, mapcount: %d\n",
						page_count(p),
						page_mapcount(p));
				if (!page_count(p))
					rc |= TLB_INSANE;
				else if (page_mapcount(p))
					rc |= TLB_SUSPICIOUS;
			} else {
				rc |= TLB_INSANE;
			}
		}
	}
	return rc;
}
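
/*
 * Note on the ASID extraction above: this assumes the layout produced by
 * ASID_INSERT() in asm/mmu_context.h, where RASID holds one 8-bit ASID per
 * ring and the current mm's user ASID sits in bits 8..15 (hence the ">> 8").
 * Ring 0 carries the fixed kernel ASID 1, which is why tlb_asid == 1 is
 * treated as a kernel mapping.
 */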

void check_tlb_sanity(void)
{
	unsigned long flags;
	unsigned w, e;
	int bug = 0;

	local_irq_save(flags);
	for (w = 0; w < DTLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_DTLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, true);
	for (w = 0; w < ITLB_ARF_WAYS; ++w)
		for (e = 0; e < (1 << XCHAL_ITLB_ARF_ENTRIES_LOG2); ++e)
			bug |= check_tlb_entry(w, e, false);
	if (bug & TLB_INSANE)
		tlb_insane();
	if (bug & TLB_SUSPICIOUS)
		tlb_suspicious();
	local_irq_restore(flags);
}

#endif /* CONFIG_DEBUG_TLB_SANITY */