Linux Audio

Check our new training course

Linux kernel drivers training

Mar 31-Apr 9, 2025, special US time zones
Register
Loading...
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/kernel.h>
  3#include <linux/mmzone.h>
  4#include <linux/nodemask.h>
  5#include <linux/spinlock.h>
  6#include <linux/smp.h>
  7#include <linux/atomic.h>
  8#include <asm/sn/types.h>
  9#include <asm/sn/addrs.h>
 10#include <asm/sn/nmi.h>
 11#include <asm/sn/arch.h>
 12#include <asm/sn/agent.h>
 13
 14#include "ip27-common.h"
 15
 16#if 0
 17#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
 18#else
 19#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
 20#endif
 21
 22#define SEND_NMI(_nasid, _slice)	\
 23	REMOTE_HUB_S((_nasid),  (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
 24
 25typedef unsigned long machreg_t;
 26
 27static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 28static void nmi_dump(void);
 
 
 
 
 
 
 
 
 
 29
 30void install_cpu_nmi_handler(int slice)
 31{
 32	nmi_t *nmi_addr;
 33
 34	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
 35	if (nmi_addr->call_addr)
 36		return;
 37	nmi_addr->magic = NMI_MAGIC;
 38	nmi_addr->call_addr = (void *)nmi_dump;
 39	nmi_addr->call_addr_c =
 40		(void *)(~((unsigned long)(nmi_addr->call_addr)));
 41	nmi_addr->call_parm = 0;
 42}
 43
/*
 * Print the register state that the IP27 PROM saved at NMI time for one
 * cpu slice on node @nasid.  The PROM stores one reg_struct per slice
 * at a fixed per-node offset (IP27_NMI_KREGS_OFFSET), spaced by
 * IP27_NMI_KREGS_CPU_SIZE.
 */

static void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int		i;

	/* Get the pointer to the current cpu's register set. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers, printed four per line.
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			pr_emerg("$%2d   :", i);
		pr_cont(" %016lx", nr->gpr[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	/* hi/lo are not part of the PROM-saved state. */
	pr_emerg("Hi    : (value lost)\n");
	pr_emerg("Lo    : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	pr_emerg("epc   : %016lx %pS\n", nr->epc, (void *)nr->epc);
	pr_emerg("%s\n", print_tainted());
	pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
	pr_emerg("ra    : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
	pr_emerg("Status: %08lx	      ", nr->sr);

	/* Decode Status: 64-bit address-space enables (KX/SX/UX)... */
	if (nr->sr & ST0_KX)
		pr_cont("KX ");
	if (nr->sr & ST0_SX)
		pr_cont("SX ");
	if (nr->sr & ST0_UX)
		pr_cont("UX ");

	/* ...the privilege mode the cpu was running in... */
	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		pr_cont("USER ");
		break;
	case KSU_SUPERVISOR:
		pr_cont("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		pr_cont("KERNEL ");
		break;
	default:
		pr_cont("BAD_MODE ");
		break;
	}

	/* ...and the exception-level / interrupt-enable bits. */
	if (nr->sr & ST0_ERL)
		pr_cont("ERL ");
	if (nr->sr & ST0_EXL)
		pr_cont("EXL ");
	if (nr->sr & ST0_IE)
		pr_cont("IE ");
	pr_cont("\n");

	pr_emerg("Cause : %08lx\n", nr->cause);
	pr_emerg("PrId  : %08x\n", read_c0_prid());
	pr_emerg("BadVA : %016lx\n", nr->badva);
	pr_emerg("CErr  : %016lx\n", nr->cache_err);
	pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);

	pr_emerg("\n");
}
124
125static void nmi_dump_hub_irq(nasid_t nasid, int slice)
126{
127	u64 mask0, mask1, pend0, pend1;
128
129	if (slice == 0) {				/* Slice A */
130		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
131		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
132	} else {					/* Slice B */
133		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
134		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
135	}
136
137	pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
138	pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
139
140	pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
141	pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
142	pr_emerg("\n\n");
143}
144
145/*
146 * Copy the cpu registers which have been saved in the IP27prom format
147 * into the eframe format for the node under consideration.
148 */
149static void nmi_node_eframe_save(nasid_t nasid)
150{
151	int slice;
152
153	if (nasid == INVALID_NASID)
154		return;
155
156	/* Save the registers into eframe for each cpu */
157	for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
158		nmi_cpu_eframe_save(nasid, slice);
159		nmi_dump_hub_irq(nasid, slice);
160	}
161}
162
163/*
164 * Save the nmi cpu registers for all cpus in the system.
165 */
166static void nmi_eframes_save(void)
 
167{
168	nasid_t nasid;
169
170	for_each_online_node(nasid)
171		nmi_node_eframe_save(nasid);
172}
173
/*
 * NMI handler entry point, installed per slice by
 * install_cpu_nmi_handler().  Every cpu that takes the NMI lands here;
 * nmi_lock serialises them so only the first cpu through performs the
 * dump and the final reset.
 */
static void nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	/* Counts the cpus that have entered the handler so far. */
	static atomic_t nmied_cpus = ATOMIC_INIT(0);

	atomic_inc(&nmied_cpus);
#endif
	/*
	 * Only allow 1 cpu to proceed
	 */
	arch_spin_lock(&nmi_lock);

#ifdef REAL_NMI_SIGNAL
	/*
	 * NOTE(review): this branch has bit-rotted — it uses i, node,
	 * cpu and n without declaring them, so it would not compile if
	 * REAL_NMI_SIGNAL were ever defined.
	 *
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
	 * This is for 2 reasons:
	 *	- sometimes a MMSC fail to NMI all cpus.
	 *	- on 512p SN0 system, the MMSC will only send NMIs to
	 *	  half the cpus. Unfortunately, we don't know which cpus may be
	 *	  NMIed - it depends on how the site chooses to configure.
	 *
	 * Note: it has been measure that it takes the MMSC up to 2.3 secs to
	 * send NMIs to all cpus on a 256p system.
	 */
	for (i=0; i < 1500; i++) {
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		if (i == 1000) {
			for_each_online_node(node)
				if (NODEPDA(node)->dump_count == 0) {
					cpu = cpumask_first(cpumask_of_node(node));
					for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
						CPUMASK_SETB(nmied_cpus, cpu);
						/*
						 * cputonasid, cputoslice
						 * needs kernel cpuid
						 */
						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
					}
				}

		}
		udelay(10000);
	}
#else
	/* Busy-wait until every online cpu has checked in above. */
	while (atomic_read(&nmied_cpus) != num_online_cpus());
#endif

	/*
	 * Save the nmi cpu registers for all cpu in the eframe format.
	 */
	nmi_eframes_save();
	/*
	 * Write the port/local reset bits to the hub NI port-reset
	 * register — presumably this resets the machine; confirm
	 * against the HUB NI register documentation.
	 */
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0
  2#include <linux/kernel.h>
  3#include <linux/mmzone.h>
  4#include <linux/nodemask.h>
  5#include <linux/spinlock.h>
  6#include <linux/smp.h>
  7#include <linux/atomic.h>
  8#include <asm/sn/types.h>
  9#include <asm/sn/addrs.h>
 10#include <asm/sn/nmi.h>
 11#include <asm/sn/arch.h>
 12#include <asm/sn/agent.h>
 13
 
 
 14#if 0
 15#define NODE_NUM_CPUS(n)	CNODE_NUM_CPUS(n)
 16#else
 17#define NODE_NUM_CPUS(n)	CPUS_PER_NODE
 18#endif
 19
 20#define SEND_NMI(_nasid, _slice)	\
 21	REMOTE_HUB_S((_nasid),  (PI_NMI_A + ((_slice) * PI_NMI_OFFSET)), 1)
 22
 23typedef unsigned long machreg_t;
 24
 25static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 26
/*
 * PROM-installed NMI callback (see install_cpu_nmi_handler()); it does
 * nothing itself and hands straight off to cont_nmi_dump().
 * Original author's note: "Let's see what else we need to do here.
 * Set up sp, gp?"
 */
void nmi_dump(void)
{
	/* Block-scope forward declaration; defined later in this file. */
	void cont_nmi_dump(void);

	cont_nmi_dump();
}
 36
 37void install_cpu_nmi_handler(int slice)
 38{
 39	nmi_t *nmi_addr;
 40
 41	nmi_addr = (nmi_t *)NMI_ADDR(get_nasid(), slice);
 42	if (nmi_addr->call_addr)
 43		return;
 44	nmi_addr->magic = NMI_MAGIC;
 45	nmi_addr->call_addr = (void *)nmi_dump;
 46	nmi_addr->call_addr_c =
 47		(void *)(~((unsigned long)(nmi_addr->call_addr)));
 48	nmi_addr->call_parm = 0;
 49}
 50
/*
 * Print the register state that the IP27 PROM saved at NMI time for one
 * cpu slice on node @nasid.  The PROM stores one reg_struct per slice
 * at a fixed per-node offset (IP27_NMI_KREGS_OFFSET), spaced by
 * IP27_NMI_KREGS_CPU_SIZE.
 */

void nmi_cpu_eframe_save(nasid_t nasid, int slice)
{
	struct reg_struct *nr;
	int		i;

	/* Get the pointer to the current cpu's register set. */
	nr = (struct reg_struct *)
		(TO_UNCAC(TO_NODE(nasid, IP27_NMI_KREGS_OFFSET)) +
		slice * IP27_NMI_KREGS_CPU_SIZE);

	pr_emerg("NMI nasid %d: slice %d\n", nasid, slice);

	/*
	 * Saved main processor registers, printed four per line.
	 */
	for (i = 0; i < 32; ) {
		if ((i % 4) == 0)
			pr_emerg("$%2d   :", i);
		pr_cont(" %016lx", nr->gpr[i]);

		i++;
		if ((i % 4) == 0)
			pr_cont("\n");
	}

	/* hi/lo are not part of the PROM-saved state. */
	pr_emerg("Hi    : (value lost)\n");
	pr_emerg("Lo    : (value lost)\n");

	/*
	 * Saved cp0 registers
	 */
	pr_emerg("epc   : %016lx %pS\n", nr->epc, (void *)nr->epc);
	pr_emerg("%s\n", print_tainted());
	pr_emerg("ErrEPC: %016lx %pS\n", nr->error_epc, (void *)nr->error_epc);
	pr_emerg("ra    : %016lx %pS\n", nr->gpr[31], (void *)nr->gpr[31]);
	pr_emerg("Status: %08lx	      ", nr->sr);

	/* Decode Status: 64-bit address-space enables (KX/SX/UX)... */
	if (nr->sr & ST0_KX)
		pr_cont("KX ");
	if (nr->sr & ST0_SX)
		pr_cont("SX ");
	if (nr->sr & ST0_UX)
		pr_cont("UX ");

	/* ...the privilege mode the cpu was running in... */
	switch (nr->sr & ST0_KSU) {
	case KSU_USER:
		pr_cont("USER ");
		break;
	case KSU_SUPERVISOR:
		pr_cont("SUPERVISOR ");
		break;
	case KSU_KERNEL:
		pr_cont("KERNEL ");
		break;
	default:
		pr_cont("BAD_MODE ");
		break;
	}

	/* ...and the exception-level / interrupt-enable bits. */
	if (nr->sr & ST0_ERL)
		pr_cont("ERL ");
	if (nr->sr & ST0_EXL)
		pr_cont("EXL ");
	if (nr->sr & ST0_IE)
		pr_cont("IE ");
	pr_cont("\n");

	pr_emerg("Cause : %08lx\n", nr->cause);
	pr_emerg("PrId  : %08x\n", read_c0_prid());
	pr_emerg("BadVA : %016lx\n", nr->badva);
	pr_emerg("CErr  : %016lx\n", nr->cache_err);
	pr_emerg("NMI_SR: %016lx\n", nr->nmi_sr);

	pr_emerg("\n");
}
131
132void nmi_dump_hub_irq(nasid_t nasid, int slice)
133{
134	u64 mask0, mask1, pend0, pend1;
135
136	if (slice == 0) {				/* Slice A */
137		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_A);
138		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_A);
139	} else {					/* Slice B */
140		mask0 = REMOTE_HUB_L(nasid, PI_INT_MASK0_B);
141		mask1 = REMOTE_HUB_L(nasid, PI_INT_MASK1_B);
142	}
143
144	pend0 = REMOTE_HUB_L(nasid, PI_INT_PEND0);
145	pend1 = REMOTE_HUB_L(nasid, PI_INT_PEND1);
146
147	pr_emerg("PI_INT_MASK0: %16llx PI_INT_MASK1: %16llx\n", mask0, mask1);
148	pr_emerg("PI_INT_PEND0: %16llx PI_INT_PEND1: %16llx\n", pend0, pend1);
149	pr_emerg("\n\n");
150}
151
152/*
153 * Copy the cpu registers which have been saved in the IP27prom format
154 * into the eframe format for the node under consideration.
155 */
156void nmi_node_eframe_save(nasid_t nasid)
157{
158	int slice;
159
160	if (nasid == INVALID_NASID)
161		return;
162
163	/* Save the registers into eframe for each cpu */
164	for (slice = 0; slice < NODE_NUM_CPUS(slice); slice++) {
165		nmi_cpu_eframe_save(nasid, slice);
166		nmi_dump_hub_irq(nasid, slice);
167	}
168}
169
170/*
171 * Save the nmi cpu registers for all cpus in the system.
172 */
173void
174nmi_eframes_save(void)
175{
176	nasid_t nasid;
177
178	for_each_online_node(nasid)
179		nmi_node_eframe_save(nasid);
180}
181
/*
 * Second stage of the NMI handler, entered from nmi_dump().  Every cpu
 * that takes the NMI lands here; nmi_lock serialises them so only the
 * first cpu through performs the dump and the final reset.
 */
void
cont_nmi_dump(void)
{
#ifndef REAL_NMI_SIGNAL
	/* Counts the cpus that have entered the handler so far. */
	static atomic_t nmied_cpus = ATOMIC_INIT(0);

	atomic_inc(&nmied_cpus);
#endif
	/*
	 * Only allow 1 cpu to proceed
	 */
	arch_spin_lock(&nmi_lock);

#ifdef REAL_NMI_SIGNAL
	/*
	 * NOTE(review): this branch has bit-rotted — it uses i, node,
	 * cpu and n without declaring them, so it would not compile if
	 * REAL_NMI_SIGNAL were ever defined.
	 *
	 * Wait up to 15 seconds for the other cpus to respond to the NMI.
	 * If a cpu has not responded after 10 sec, send it 1 additional NMI.
	 * This is for 2 reasons:
	 *	- sometimes a MMSC fail to NMI all cpus.
	 *	- on 512p SN0 system, the MMSC will only send NMIs to
	 *	  half the cpus. Unfortunately, we don't know which cpus may be
	 *	  NMIed - it depends on how the site chooses to configure.
	 *
	 * Note: it has been measure that it takes the MMSC up to 2.3 secs to
	 * send NMIs to all cpus on a 256p system.
	 */
	for (i=0; i < 1500; i++) {
		for_each_online_node(node)
			if (NODEPDA(node)->dump_count == 0)
				break;
		if (node == MAX_NUMNODES)
			break;
		if (i == 1000) {
			for_each_online_node(node)
				if (NODEPDA(node)->dump_count == 0) {
					cpu = cpumask_first(cpumask_of_node(node));
					for (n=0; n < CNODE_NUM_CPUS(node); cpu++, n++) {
						CPUMASK_SETB(nmied_cpus, cpu);
						/*
						 * cputonasid, cputoslice
						 * needs kernel cpuid
						 */
						SEND_NMI((cputonasid(cpu)), (cputoslice(cpu)));
					}
				}

		}
		udelay(10000);
	}
#else
	/* Busy-wait until every online cpu has checked in above. */
	while (atomic_read(&nmied_cpus) != num_online_cpus());
#endif

	/*
	 * Save the nmi cpu registers for all cpu in the eframe format.
	 */
	nmi_eframes_save();
	/*
	 * Write the port/local reset bits to the hub NI port-reset
	 * register — presumably this resets the machine; confirm
	 * against the HUB NI register documentation.
	 */
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
}