v6.2 (arch/mips/lib/dump_tlb.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * Dump R4x00 TLB for debugging purposes.
 *
 * Copyright (C) 1994, 1995 by Waldorf Electronics, written by Ralf Baechle.
 * Copyright (C) 1999 by Silicon Graphics, Inc.
 */
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/hazards.h>
#include <asm/mipsregs.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/tlbdebug.h>

void dump_tlb_regs(void)
{
	const int field = 2 * sizeof(unsigned long);

	pr_info("Index    : %0x\n", read_c0_index());
	pr_info("PageMask : %0x\n", read_c0_pagemask());
	if (cpu_has_guestid)
		pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
	pr_info("EntryHi  : %0*lx\n", field, read_c0_entryhi());
	pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
	pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
	pr_info("Wired    : %0x\n", read_c0_wired());
	switch (current_cpu_type()) {
	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		pr_info("FrameMask: %0x\n", read_c0_framemask());
		break;
	}
	if (cpu_has_small_pages || cpu_has_rixi || cpu_has_xpa)
		pr_info("PageGrain: %0x\n", read_c0_pagegrain());
	if (cpu_has_htw) {
		pr_info("PWField  : %0*lx\n", field, read_c0_pwfield());
		pr_info("PWSize   : %0*lx\n", field, read_c0_pwsize());
		pr_info("PWCtl    : %0x\n", read_c0_pwctl());
	}
}

static inline const char *msk2str(unsigned int mask)
{
	switch (mask) {
	case PM_4K:	return "4kb";
	case PM_16K:	return "16kb";
	case PM_64K:	return "64kb";
	case PM_256K:	return "256kb";
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	case PM_8K:	return "8kb";
	case PM_32K:	return "32kb";
	case PM_128K:	return "128kb";
	case PM_512K:	return "512kb";
	case PM_2M:	return "2Mb";
	case PM_8M:	return "8Mb";
	case PM_32M:	return "32Mb";
#endif
	}
	return "";
}

static void dump_tlb(int first, int last)
{
	unsigned long s_entryhi, entryhi, asid, mmid;
	unsigned long long entrylo0, entrylo1, pa;
	unsigned int s_index, s_pagemask, s_guestctl1 = 0;
	unsigned int pagemask, guestctl1 = 0, c0, c1, i;
	unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
	int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
	unsigned long s_mmid;
#ifdef CONFIG_32BIT
	bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
	int pwidth = xpa ? 11 : 8;
	int vwidth = 8;
#else
	bool xpa = false;
	int pwidth = 11;
	int vwidth = 11;
#endif

	s_pagemask = read_c0_pagemask();
	s_entryhi = read_c0_entryhi();
	s_index = read_c0_index();

	if (cpu_has_mmid)
		asid = s_mmid = read_c0_memorymapid();
	else
		asid = s_entryhi & asidmask;

	if (cpu_has_guestid)
		s_guestctl1 = read_c0_guestctl1();

	for (i = first; i <= last; i++) {
		write_c0_index(i);
		mtc0_tlbr_hazard();
		tlb_read();
		tlb_read_hazard();
		pagemask = read_c0_pagemask();
		entryhi	 = read_c0_entryhi();
		entrylo0 = read_c0_entrylo0();
		entrylo1 = read_c0_entrylo1();

		if (cpu_has_mmid)
			mmid = read_c0_memorymapid();
		else
			mmid = entryhi & asidmask;

		if (cpu_has_guestid)
			guestctl1 = read_c0_guestctl1();

		/* EHINV bit marks entire entry as invalid */
		if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
			continue;
		/*
		 * Prior to tlbinv, unused entries have a virtual address of
		 * CKSEG0.
		 */
		if ((entryhi & ~0x1ffffUL) == CKSEG0)
			continue;
		/*
		 * ASID takes effect in absence of G (global) bit.
		 * We check both G bits, even though architecturally they should
		 * match one another, because some revisions of the SB1 core may
		 * leave only a single G bit set after a machine check exception
		 * due to duplicate TLB entry.
		 */
		if (!((entrylo0 | entrylo1) & ENTRYLO_G) && (mmid != asid))
			continue;

		/*
		 * Only print entries in use
		 */
		printk("Index: %2d pgmask=%s ", i, msk2str(pagemask));

		c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
		c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;

		pr_cont("va=%0*lx asid=%0*lx",
			vwidth, (entryhi & ~0x1fffUL),
			asidwidth, mmid);
		if (cpu_has_guestid)
			pr_cont(" gid=%02lx",
				(guestctl1 & MIPS_GCTL1_RID)
					>> MIPS_GCTL1_RID_SHIFT);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo0() << 30;
		pa = (pa << 6) & PAGE_MASK;
		pr_cont("\n\t[");
		if (cpu_has_rixi)
			pr_cont("ri=%d xi=%d ",
				(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
				(entrylo0 & MIPS_ENTRYLO_XI) ? 1 : 0);
		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d] [",
			pwidth, pa, c0,
			(entrylo0 & ENTRYLO_D) ? 1 : 0,
			(entrylo0 & ENTRYLO_V) ? 1 : 0,
			(entrylo0 & ENTRYLO_G) ? 1 : 0);
		/* RI/XI are in awkward places, so mask them off separately */
		pa = entrylo1 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
		if (xpa)
			pa |= (unsigned long long)readx_c0_entrylo1() << 30;
		pa = (pa << 6) & PAGE_MASK;
		if (cpu_has_rixi)
			pr_cont("ri=%d xi=%d ",
				(entrylo1 & MIPS_ENTRYLO_RI) ? 1 : 0,
				(entrylo1 & MIPS_ENTRYLO_XI) ? 1 : 0);
		pr_cont("pa=%0*llx c=%d d=%d v=%d g=%d]\n",
			pwidth, pa, c1,
			(entrylo1 & ENTRYLO_D) ? 1 : 0,
			(entrylo1 & ENTRYLO_V) ? 1 : 0,
			(entrylo1 & ENTRYLO_G) ? 1 : 0);
	}
	printk("\n");

	write_c0_entryhi(s_entryhi);
	write_c0_index(s_index);
	write_c0_pagemask(s_pagemask);
	if (cpu_has_guestid)
		write_c0_guestctl1(s_guestctl1);
}

void dump_tlb_all(void)
{
	dump_tlb(0, current_cpu_data.tlbsize - 1);
}
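The pa=/c=/d=/v=/g= fields printed by dump_tlb() are all decoded from the two EntryLo words: RI/XI are masked off, the remaining PFN is shifted left by 6 to recover the physical address (the PFN field starts at bit 6 and holds PA >> 12), and the low flag bits supply the cacheability attribute, dirty, valid and global. The sketch below mirrors that decoding for a single EntryLo value in user space; it is illustrative only, assumes the standard MIPS EntryLo layout with no RI/XI or XPA bits in play, and hard-codes a 4 KiB base page instead of using the kernel's PAGE_MASK. decode_entrylo() is a hypothetical helper, not something taken from the kernel.

#include <stdio.h>

#define ELO_G	(1ULL << 0)	/* global */
#define ELO_V	(1ULL << 1)	/* valid */
#define ELO_D	(1ULL << 2)	/* dirty (writable) */
#define ELO_C	(7ULL << 3)	/* cache coherency attribute */

/* Hypothetical helper: decode one EntryLo word the way dump_tlb() prints it. */
static void decode_entrylo(unsigned long long elo)
{
	/* PFN = PA >> 12 and starts at bit 6, so PA = EntryLo << 6 with the
	 * flag bits cleared (dump_tlb() uses PAGE_MASK for the same purpose). */
	unsigned long long pa = (elo << 6) & ~0xfffULL;

	printf("pa=%011llx c=%llu d=%d v=%d g=%d\n",
	       pa, (elo & ELO_C) >> 3,
	       !!(elo & ELO_D), !!(elo & ELO_V), !!(elo & ELO_G));
}

int main(void)
{
	/* PFN 0x81 -> pa 0x81000; C=0, dirty, valid, global */
	decode_entrylo(0x2047ULL);
	return 0;
}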
v5.9 (arch/mips/lib/dump_tlb.c)
The v5.9 version of this file is identical to the v6.2 listing above except for msk2str(): when CONFIG_CPU_VR41XX is not set, it additionally recognises the larger page sizes below. The cases sit between the Octeon #endif and the closing brace of the switch; they are no longer present in the v6.2 listing.

#ifndef CONFIG_CPU_VR41XX
	case PM_1M:	return "1Mb";
	case PM_4M:	return "4Mb";
	case PM_16M:	return "16Mb";
	case PM_64M:	return "64Mb";
	case PM_256M:	return "256Mb";
	case PM_1G:	return "1Gb";
#endif
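In both versions msk2str() only labels PageMask values it knows by name and returns an empty string otherwise. On MIPS the mask field of PageMask starts at bit 13 and the base page is 4 KiB, so the per-page size an entry maps can also be computed directly, which is sometimes handier than extending the switch. The user-space sketch below does that; it is illustrative only, assumes that bit layout, and pagemask_to_bytes() is a hypothetical helper that does not exist in the kernel.

#include <stdio.h>

/* Hypothetical helper: bytes mapped per page (i.e. per EntryLo half) for a
 * given PageMask value, assuming the mask field starts at bit 13 and the
 * base page size is 4 KiB. */
static unsigned long pagemask_to_bytes(unsigned long pagemask)
{
	return ((pagemask >> 13) + 1) << 12;
}

int main(void)
{
	printf("%lu\n", pagemask_to_bytes(0x00000000));	/* 4096    -> "4kb"  */
	printf("%lu\n", pagemask_to_bytes(0x00006000));	/* 16384   -> "16kb" */
	printf("%lu\n", pagemask_to_bytes(0x001fe000));	/* 1048576 -> "1Mb"  */
	return 0;
}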