v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Dump R4x00 TLB for debugging purposes.
  4 *
  5 * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
  6 * Copyright (C) 1999 by Silicon Graphics, Inc.
  7 */
  8#include <linux/kernel.h>
  9#include <linux/mm.h>
 10
 11#include <asm/hazards.h>
 12#include <asm/mipsregs.h>
 13#include <asm/page.h>
 14#include <asm/pgtable.h>
 15#include <asm/tlbdebug.h>
 16
 17void dump_tlb_regs(void)
 18{
 19	const int field = 2 * sizeof(unsigned long);
 20
 21	pr_info("Index    : %0x\n", read_c0_index());
 22	pr_info("PageMask : %0x\n", read_c0_pagemask());
 23	if (cpu_has_guestid)
 24		pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
 25	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
 26	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
 27	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
 28	pr_info("Wired    : %0x\n", read_c0_wired());
 29	switch (current_cpu_type()) {
 30	case CPU_R10000:
 31	case CPU_R12000:
 32	case CPU_R14000:
 33	case CPU_R16000:
 34		pr_info("FrameMask: %0x\n", read_c0_framemask());
 35		break;
 36	}
 37	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
 38		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
 39	if (cpu_has_htw) {
 40		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
 41		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
 42		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
 43	}
 44}
 45
 46static inline const char *msk2str(unsigned int mask)
 47{
 48	switch (mask) {
 49	case PM_4K:	return "4kb";
 50	case PM_16K:	return "16kb";
 51	case PM_64K:	return "64kb";
 52	case PM_256K:	return "256kb";
 53#ifdef CONFIG_CPU_CAVIUM_OCTEON
 54	case PM_8K:	return "8kb";
 55	case PM_32K:	return "32kb";
 56	case PM_128K:	return "128kb";
 57	case PM_512K:	return "512kb";
 58	case PM_2M:	return "2Mb";
 59	case PM_8M:	return "8Mb";
 60	case PM_32M:	return "32Mb";
 61#endif
 62#ifndef CONFIG_CPU_VR41XX
 63	case PM_1M:	return "1Mb";
 64	case PM_4M:	return "4Mb";
 65	case PM_16M:	return "16Mb";
 66	case PM_64M:	return "64Mb";
 67	case PM_256M:	return "256Mb";
 68	case PM_1G:	return "1Gb";
 69#endif
 70	}
 71	return "";
 72}
 73
 74static void dump_tlb(int first, int last)
 75{
 76	unsigned long s_entryhi, entryhi, asid;
 77	unsigned long long entrylo0, entrylo1, pa;
 78	unsigned int s_index, s_pagemask, s_guestctl1 = 0;
 79	unsigned int pagemask, guestctl1 = 0, c0, c1, i;
 80	unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
 81	int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
 82#ifdef CONFIG_32BIT
 83	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
 84	int pwidth = xpa ? 11 : 8;
 85	int vwidth = 8;
 86#else
 87	bool xpa = false;
 88	int pwidth = 11;
 89	int vwidth = 11;
 90#endif
 91
 92	s_pagemask = read_c0_pagemask();
 93	s_entryhi = read_c0_entryhi();
 94	s_index = read_c0_index();
 95	asid = s_entryhi & asidmask;
 96	if (cpu_has_guestid)
 97		s_guestctl1 = read_c0_guestctl1();
 98
 99	for (i = first; i <= last; i++) {
100		write_c0_index(i);
101		mtc0_tlbr_hazard();
102		tlb_read();
103		tlb_read_hazard();
104		pagemask = read_c0_pagemask();
105		entryhi	 = read_c0_entryhi();
106		entrylo0 = read_c0_entrylo0();
107		entrylo1 = read_c0_entrylo1();
108		if (cpu_has_guestid)
109			guestctl1 = read_c0_guestctl1();
110
111		/* EHINV bit marks entire entry as invalid */
112		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
113			continue;
114		/*
115		 * Prior to tlbinv, unused entries have a virtual address of
116		 * CKSEG0.
117		 */
118		if ((entryhi & ~0x1ffffUL) == CKSEG0)
119			continue;
120		/*
121		 * ASID takes effect in absence of G (global) bit.
122		 * We check both G bits, even though architecturally they should
123		 * match one another, because some revisions of the SB1 core may
124		 * leave only a single G bit set after a machine check exception
125		 * due to duplicate TLB entry.
126		 */
127		if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
128		    (entryhi & asidmask) != asid)
129			continue;
130
131		/*
132		 * Only print entries in use
133		 */
134		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
135
136		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
137		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
138
139		pr_cont("va=%0*lx asid=%0*lx",
140			vwidth, (entryhi & ~0x1fffUL),
141			asidwidth, entryhi & asidmask);
142		if (cpu_has_guestid)
143			pr_cont(" gid=%02lx",
144				(guestctl1 & MIPS_GCTL1_RID)
145					>> MIPS_GCTL1_RID_SHIFT);
146		/* RI/XI are in awkward places, so mask them off separately */
147		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
148		if (xpa)
149			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
150		pa = (pa << 6) & PAGE_MASK;
151		pr_cont("\n\t[");
152		if (cpu_has_rixi)
153			pr_cont("ri=%d xi=%d ",
154				(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
155				(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
156		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
157			pwidth, pa, c0,
158			(entrylo0 & ENTRYLO_D) ? 1 : 0,
159			(entrylo0 & ENTRYLO_V) ? 1 : 0,
160			(entrylo0 & ENTRYLO_G) ? 1 : 0);
161		/* RI/XI are in awkward places, so mask them off separately */
162		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
163		if (xpa)
164			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
165		pa = (pa << 6) & PAGE_MASK;
166		if (cpu_has_rixi)
167			pr_cont("ri=%d xi=%d ",
168				(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
169				(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
170		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
171			pwidth, pa, c1,
172			(entrylo1 & ENTRYLO_D) ? 1 : 0,
173			(entrylo1 & ENTRYLO_V) ? 1 : 0,
174			(entrylo1 & ENTRYLO_G) ? 1 : 0);
175	}
176	printk("\n");
177
178	write_c0_entryhi(s_entryhi);
179	write_c0_index(s_index);
180	write_c0_pagemask(s_pagemask);
181	if (cpu_has_guestid)
182		write_c0_guestctl1(s_guestctl1);
183}
184
185void dump_tlb_all(void)
186{
187	dump_tlb(0, current_cpu_data.tlbsize - 1);
188}
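The physical address printed by dump_tlb() is reconstructed from the EntryLo registers: once the RI/XI bits are masked off, the PFN field starts at bit 6 and encodes the physical address shifted right by 12, so shifting left by 6 and page-aligning recovers the frame address. A standalone, userspace sketch of that arithmetic follows; the EntryLo value and the 4 KiB page mask are made-up illustrations, not taken from the file above.

/*
 * Standalone sketch of the EntryLo -> physical address arithmetic used
 * by dump_tlb(). The EntryLo value is invented; EX_PAGE_MASK stands in
 * for PAGE_MASK and assumes 4 KiB pages.
 */
#include <stdio.h>

#define EX_PAGE_MASK	(~0xfffULL)	/* 4 KiB pages */

int main(void)
{
	/*
	 * EntryLo with RI/XI already cleared: the low bits are the
	 * C/D/V/G flags, the PFN field occupies bit 6 and up.
	 */
	unsigned long long entrylo = 0x402fULL;		/* made-up value */
	unsigned long long pa = (entrylo << 6) & EX_PAGE_MASK;

	/* PFN = 0x402f >> 6 = 0x100, so pa = 0x100 << 12 = 0x100000 */
	printf("pa=%#llx\n", pa);
	return 0;
}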
v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Dump R4x00 TLB for debugging purposes.
  4 *
  5 * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
  6 * Copyright (C) 1999 by Silicon Graphics, Inc.
  7 */
  8#include <linux/kernel.h>
  9#include <linux/mm.h>
 10
 11#include <asm/hazards.h>
 12#include <asm/mipsregs.h>
 13#include <asm/mmu_context.h>
 14#include <asm/page.h>
 15#include <asm/pgtable.h>
 16#include <asm/tlbdebug.h>
 17
 18void dump_tlb_regs(void)
 19{
 20	const int field = 2 * sizeof(unsigned long);
 21
 22	pr_info("Index    : %0x\n", read_c0_index());
 23	pr_info("PageMask : %0x\n", read_c0_pagemask());
 24	if (cpu_has_guestid)
 25		pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
 26	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
 27	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
 28	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
 29	pr_info("Wired    : %0x\n", read_c0_wired());
 30	switch (current_cpu_type()) {
 31	case CPU_R10000:
 32	case CPU_R12000:
 33	case CPU_R14000:
 34	case CPU_R16000:
 35		pr_info("FrameMask: %0x\n", read_c0_framemask());
 36		break;
 37	}
 38	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
 39		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
 40	if (cpu_has_htw) {
 41		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
 42		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
 43		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
 44	}
 45}
 46
 47static inline const char *msk2str(unsigned int mask)
 48{
 49	switch (mask) {
 50	case PM_4K:	return "4kb";
 51	case PM_16K:	return "16kb";
 52	case PM_64K:	return "64kb";
 53	case PM_256K:	return "256kb";
 54#ifdef CONFIG_CPU_CAVIUM_OCTEON
 55	case PM_8K:	return "8kb";
 56	case PM_32K:	return "32kb";
 57	case PM_128K:	return "128kb";
 58	case PM_512K:	return "512kb";
 59	case PM_2M:	return "2Mb";
 60	case PM_8M:	return "8Mb";
 61	case PM_32M:	return "32Mb";
 62#endif
 63#ifndef CONFIG_CPU_VR41XX
 64	case PM_1M:	return "1Mb";
 65	case PM_4M:	return "4Mb";
 66	case PM_16M:	return "16Mb";
 67	case PM_64M:	return "64Mb";
 68	case PM_256M:	return "256Mb";
 69	case PM_1G:	return "1Gb";
 70#endif
 71	}
 72	return "";
 73}
 74
 75static void dump_tlb(int first, int last)
 76{
 77	unsigned long s_entryhi, entryhi, asid, mmid;
 78	unsigned long long entrylo0, entrylo1, pa;
 79	unsigned int s_index, s_pagemask, s_guestctl1 = 0;
 80	unsigned int pagemask, guestctl1 = 0, c0, c1, i;
 81	unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
 82	int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
 83	unsigned long uninitialized_var(s_mmid);
 84#ifdef CONFIG_32BIT
 85	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
 86	int pwidth = xpa ? 11 : 8;
 87	int vwidth = 8;
 88#else
 89	bool xpa = false;
 90	int pwidth = 11;
 91	int vwidth = 11;
 92#endif
 93
 94	s_pagemask = read_c0_pagemask();
 95	s_entryhi = read_c0_entryhi();
 96	s_index = read_c0_index();
 97
 98	if (cpu_has_mmid)
 99		asid = s_mmid = read_c0_memorymapid();
100	else
101		asid = s_entryhi & asidmask;
102
103	if (cpu_has_guestid)
104		s_guestctl1 = read_c0_guestctl1();
105
106	for (i = first; i <= last; i++) {
107		write_c0_index(i);
108		mtc0_tlbr_hazard();
109		tlb_read();
110		tlb_read_hazard();
111		pagemask = read_c0_pagemask();
112		entryhi	 = read_c0_entryhi();
113		entrylo0 = read_c0_entrylo0();
114		entrylo1 = read_c0_entrylo1();
115
116		if (cpu_has_mmid)
117			mmid = read_c0_memorymapid();
118		else
119			mmid = entryhi & asidmask;
120
121		if (cpu_has_guestid)
122			guestctl1 = read_c0_guestctl1();
123
124		/* EHINV bit marks entire entry as invalid */
125		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
126			continue;
127		/*
128		 * Prior to tlbinv, unused entries have a virtual address of
129		 * CKSEG0.
130		 */
131		if ((entryhi & ~0x1ffffUL) == CKSEG0)
132			continue;
133		/*
134		 * ASID takes effect in absence of G (global) bit.
135		 * We check both G bits, even though architecturally they should
136		 * match one another, because some revisions of the SB1 core may
137		 * leave only a single G bit set after a machine check exception
138		 * due to duplicate TLB entry.
139		 */
140		if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
141			continue;
142
143		/*
144		 * Only print entries in use
145		 */
146		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));
147
148		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
149		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
150
151		pr_cont("va=%0*lx asid=%0*lx",
152			vwidth, (entryhi & ~0x1fffUL),
153			asidwidth, mmid);
154		if (cpu_has_guestid)
155			pr_cont(" gid=%02lx",
156				(guestctl1 & MIPS_GCTL1_RID)
157					>> MIPS_GCTL1_RID_SHIFT);
158		/* RI/XI are in awkward places, so mask them off separately */
159		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
160		if (xpa)
161			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
162		pa = (pa << 6) & PAGE_MASK;
163		pr_cont("\n\t[");
164		if (cpu_has_rixi)
165			pr_cont("ri=%d xi=%d ",
166				(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
167				(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
168		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
169			pwidth, pa, c0,
170			(entrylo0 & ENTRYLO_D) ? 1 : 0,
171			(entrylo0 & ENTRYLO_V) ? 1 : 0,
172			(entrylo0 & ENTRYLO_G) ? 1 : 0);
173		/* RI/XI are in awkward places, so mask them off separately */
174		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
175		if (xpa)
176			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
177		pa = (pa << 6) & PAGE_MASK;
178		if (cpu_has_rixi)
179			pr_cont("ri=%d xi=%d ",
180				(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
181				(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
182		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
183			pwidth, pa, c1,
184			(entrylo1 & ENTRYLO_D) ? 1 : 0,
185			(entrylo1 & ENTRYLO_V) ? 1 : 0,
186			(entrylo1 & ENTRYLO_G) ? 1 : 0);
187	}
188	printk("\n");
189
190	write_c0_entryhi(s_entryhi);
191	write_c0_index(s_index);
192	write_c0_pagemask(s_pagemask);
193	if (cpu_has_guestid)
194		write_c0_guestctl1(s_guestctl1);
195}
196
197void dump_tlb_all(void)
198{
199	dump_tlb(0, current_cpu_data.tlbsize - 1);
200}
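Both dump_tlb_regs() and dump_tlb_all() are declared in <asm/tlbdebug.h>, and since the file above contains no EXPORT_SYMBOL they would normally be called from built-in debugging code rather than a loadable module. A minimal sketch of such a hook is shown below; the initcall wrapper and its name are hypothetical, only the two dump calls come from the listings above.

/*
 * Hypothetical built-in debug hook: dump the CP0 TLB-related registers
 * and every TLB entry once during late boot. Only dump_tlb_regs() and
 * dump_tlb_all() come from the file above; the wrapper is illustrative.
 */
#include <linux/init.h>
#include <linux/printk.h>

#include <asm/tlbdebug.h>

static int __init tlbdump_demo_init(void)
{
	pr_info("TLB state at boot:\n");
	dump_tlb_regs();	/* Index, PageMask, EntryHi, EntryLo0/1, ... */
	dump_tlb_all();		/* walks entries 0 .. tlbsize - 1 */
	return 0;
}
late_initcall(tlbdump_demo_init);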