v3.1
 
  1/*
  2 * arch/ia64/kernel/crash.c
  3 *
  4 * Architecture specific (ia64) functions for kexec based crash dumps.
  5 *
  6 * Created by: Khalid Aziz <khalid.aziz@hp.com>
  7 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  8 * Copyright (C) 2005 Intel Corp	Zou Nan hai <nanhai.zou@intel.com>
  9 *
 10 */
 11#include <linux/smp.h>
 12#include <linux/delay.h>
 13#include <linux/crash_dump.h>
 14#include <linux/bootmem.h>
 15#include <linux/kexec.h>
 16#include <linux/elfcore.h>
 17#include <linux/sysctl.h>
 18#include <linux/init.h>
 19#include <linux/kdebug.h>
 20
 21#include <asm/mca.h>
 22
 23int kdump_status[NR_CPUS];
 24static atomic_t kdump_cpu_frozen;
 25atomic_t kdump_in_progress;
 26static int kdump_freeze_monarch;
 27static int kdump_on_init = 1;
 28static int kdump_on_fatal_mca = 1;
 29
 30static inline Elf64_Word
 31*append_elf_note(Elf64_Word *buf, char *name, unsigned type, void *data,
 32		size_t data_len)
 33{
 34	struct elf_note *note = (struct elf_note *)buf;
 35	note->n_namesz = strlen(name) + 1;
 36	note->n_descsz = data_len;
 37	note->n_type   = type;
 38	buf += (sizeof(*note) + 3)/4;
 39	memcpy(buf, name, note->n_namesz);
 40	buf += (note->n_namesz + 3)/4;
 41	memcpy(buf, data, data_len);
 42	buf += (data_len + 3)/4;
 43	return buf;
 44}
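/*
 * Layout note: each append_elf_note() call emits one ELF note record
 * into the buffer: a 12-byte header (n_namesz, n_descsz, n_type), the
 * NUL-terminated name, then the payload, with the name and payload each
 * padded up to a 4-byte boundary.  The (x + 3)/4 arithmetic does the
 * rounding because buf points at 32-bit words (Elf64_Word is 32 bits
 * wide).  final_note() below terminates the list with an all-zero
 * header.
 */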
 45
 46static void
 47final_note(void *buf)
 48{
 49	memset(buf, 0, sizeof(struct elf_note));
 50}
 51
 52extern void ia64_dump_cpu_regs(void *);
 53
 54static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
 55
 56void
 57crash_save_this_cpu(void)
 58{
 59	void *buf;
 60	unsigned long cfm, sof, sol;
 61
 62	int cpu = smp_processor_id();
 63	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
 64
 65	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
 66	memset(prstatus, 0, sizeof(*prstatus));
 67	prstatus->pr_pid = current->pid;
 68
 69	ia64_dump_cpu_regs(dst);
 70	cfm = dst[43];
 71	sol = (cfm >> 7) & 0x7f;
 72	sof = cfm & 0x7f;
 73	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
 74			sof - sol);
 75
 76	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
 77	if (!buf)
 78		return;
 79	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus,
 80			sizeof(*prstatus));
 81	final_note(buf);
 82}
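/*
 * Note on the fix-up above: CFM bits 0-6 hold sof (size of frame) and
 * bits 7-13 hold sol (size of locals), so sof - sol is the number of
 * output registers in the interrupted frame.  ia64_rse_skip_regs()
 * advances an RSE backing-store pointer by that many register slots
 * (accounting for intervening NaT-collection words), which is how the
 * pointer saved in dst[46] is adjusted before the register set is
 * written out as the NT_PRSTATUS note.
 */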
 83
 84#ifdef CONFIG_SMP
 85static int
 86kdump_wait_cpu_freeze(void)
 87{
 88	int cpu_num = num_online_cpus() - 1;
 89	int timeout = 1000;
 90	while (timeout-- > 0) {
 91		if (atomic_read(&kdump_cpu_frozen) == cpu_num)
 92			return 0;
 93		udelay(1000);
 94	}
 95	return 1;
 96}
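/*
 * Timing note: with timeout = 1000 iterations of udelay(1000), the loop
 * above polls for roughly one second before giving up; the caller then
 * falls back to sending INIT to any cpus that did not reach
 * kdump_cpu_freeze().
 */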
 97#endif
 98
 99void
100machine_crash_shutdown(struct pt_regs *pt)
101{
102	/* This function is only called after the system
103	 * has panicked or is otherwise in a critical state.
104	 * The minimum amount of code to allow a kexec'd kernel
105	 * to run successfully needs to happen here.
106	 *
107	 * In practice this means shooting down the other cpus in
108	 * an SMP system.
109	 */
110	kexec_disable_iosapic();
111#ifdef CONFIG_SMP
112	/*
113	 * If kdump_on_init is set and an INIT is asserted here, kdump will
114	 * be started again via the INIT monarch.
115	 */
116	local_irq_disable();
117	ia64_set_psr_mc();	/* mask MCA/INIT */
118	if (atomic_inc_return(&kdump_in_progress) != 1)
119		unw_init_running(kdump_cpu_freeze, NULL);
120
121	/*
122	 * Now this cpu is ready for kdump.
123	 * Stop all others by IPI or INIT.  They could receive an INIT from
124	 * outside and might become the INIT monarch, but the only thing they
125	 * have to do is fall into kdump_cpu_freeze().
126	 *
127	 * If an INIT is asserted here:
128	 * - All receivers might be slaves, since some of the cpus could
129	 *   already be frozen and INIT might be masked on the monarch.  In
130	 *   this case, all slaves will be frozen soon, since kdump_in_progress
131	 *   will send them into DIE_INIT_SLAVE_LEAVE.
132	 * - One might be the monarch, but the INIT rendezvous will fail, since
133	 *   at least this cpu already has INIT masked and so never joins
134	 *   the rendezvous.  In this case, all slaves and the monarch will
135	 *   be frozen soon with no wait, since the INIT rendezvous is skipped
136	 *   by kdump_in_progress.
137	 */
138	kdump_smp_send_stop();
139	/* if some cpus do not respond to the IPI, send INIT to freeze them */
140	if (kdump_wait_cpu_freeze()) {
141		kdump_smp_send_init();
142		/* wait again; avoid going ahead before they freeze, if possible */
143		kdump_wait_cpu_freeze();
144	}
145#endif
146}
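/*
 * Caller context: machine_crash_shutdown() is invoked from the generic
 * crash_kexec() path after a panic, immediately before machine_kexec()
 * transfers control to the pre-loaded capture kernel, so it has to get
 * by without the scheduler or normal interrupt handling.
 */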
147
148static void
149machine_kdump_on_init(void)
150{
151	crash_save_vmcoreinfo();
152	local_irq_disable();
153	kexec_disable_iosapic();
154	machine_kexec(ia64_kimage);
155}
156
157void
158kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
159{
160	int cpuid;
161
162	local_irq_disable();
163	cpuid = smp_processor_id();
164	crash_save_this_cpu();
165	current->thread.ksp = (__u64)info->sw - 16;
166
167	ia64_set_psr_mc();	/* mask MCA/INIT and stop reentrance */
168
169	atomic_inc(&kdump_cpu_frozen);
170	kdump_status[cpuid] = 1;
171	mb();
172	for (;;)
173		cpu_relax();
174}
175
176static int
177kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
178{
179	struct ia64_mca_notify_die *nd;
180	struct die_args *args = data;
181
182	if (atomic_read(&kdump_in_progress)) {
183		switch (val) {
184		case DIE_INIT_MONARCH_LEAVE:
185			if (!kdump_freeze_monarch)
186				break;
187			/* fall through */
188		case DIE_INIT_SLAVE_LEAVE:
189		case DIE_INIT_MONARCH_ENTER:
190		case DIE_MCA_RENDZVOUS_LEAVE:
191			unw_init_running(kdump_cpu_freeze, NULL);
192			break;
193		}
194	}
195
196	if (!kdump_on_init && !kdump_on_fatal_mca)
197		return NOTIFY_DONE;
198
199	if (!ia64_kimage) {
200		if (val == DIE_INIT_MONARCH_LEAVE)
201			ia64_mca_printk(KERN_NOTICE
202					"%s: kdump not configured\n",
203					__func__);
204		return NOTIFY_DONE;
205	}
206
207	if (val != DIE_INIT_MONARCH_LEAVE &&
208	    val != DIE_INIT_MONARCH_PROCESS &&
209	    val != DIE_MCA_MONARCH_LEAVE)
210		return NOTIFY_DONE;
211
212	nd = (struct ia64_mca_notify_die *)args->err;
213
214	switch (val) {
215	case DIE_INIT_MONARCH_PROCESS:
216		/* Reason code 1 means machine check rendezvous */
217		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
218			if (atomic_inc_return(&kdump_in_progress) != 1)
219				kdump_freeze_monarch = 1;
220		}
221		break;
222	case DIE_INIT_MONARCH_LEAVE:
223		/* Reason code 1 means machine check rendezvous */
224		if (kdump_on_init && (nd->sos->rv_rc != 1))
225			machine_kdump_on_init();
226		break;
227	case DIE_MCA_MONARCH_LEAVE:
228		/* *(nd->data) indicates whether the MCA is recoverable */
229		if (kdump_on_fatal_mca && !(*(nd->data))) {
230			if (atomic_inc_return(&kdump_in_progress) == 1)
231				machine_kdump_on_init();
232			/* We got fatal MCA while kdump!? No way!! */
233		}
234		break;
235	}
236	return NOTIFY_DONE;
237}
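/*
 * Summary of the notifier above: a dump is actually started only on the
 * monarch cpu, either at DIE_INIT_MONARCH_LEAVE (kdump_on_init set and
 * the INIT was not a machine-check rendezvous, i.e. rv_rc != 1) or at
 * DIE_MCA_MONARCH_LEAVE (kdump_on_fatal_mca set and *(nd->data) marks
 * the MCA as unrecoverable).  Every other case merely parks cpus in
 * kdump_cpu_freeze() or records, via kdump_freeze_monarch, that the
 * monarch itself must be frozen later.
 */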
238
239#ifdef CONFIG_SYSCTL
240static ctl_table kdump_ctl_table[] = {
241	{
242		.procname = "kdump_on_init",
243		.data = &kdump_on_init,
244		.maxlen = sizeof(int),
245		.mode = 0644,
246		.proc_handler = proc_dointvec,
247	},
248	{
249		.procname = "kdump_on_fatal_mca",
250		.data = &kdump_on_fatal_mca,
251		.maxlen = sizeof(int),
252		.mode = 0644,
253		.proc_handler = proc_dointvec,
254	},
255	{ }
256};
257
258static ctl_table sys_table[] = {
259	{
260	  .procname = "kernel",
261	  .mode = 0555,
262	  .child = kdump_ctl_table,
263	},
264	{ }
265};
266#endif
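/*
 * With CONFIG_SYSCTL these tables show up as
 * /proc/sys/kernel/kdump_on_init and /proc/sys/kernel/kdump_on_fatal_mca.
 * Both default to 1; clearing one (for example with
 * "sysctl -w kernel.kdump_on_init=0") keeps the corresponding event from
 * starting a crash dump.
 */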
267
268static int
269machine_crash_setup(void)
270{
271	/* be notified before default_monarch_init_process */
272	static struct notifier_block kdump_init_notifier_nb = {
273		.notifier_call = kdump_init_notifier,
274		.priority = 1,
275	};
276	int ret;
277	if ((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
278		return ret;
279#ifdef CONFIG_SYSCTL
280	register_sysctl_table(sys_table);
281#endif
282	return 0;
283}
284
285__initcall(machine_crash_setup);
286
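The same file as of v5.9 follows. The visible differences against v3.1 are small: an SPDX-License-Identifier line is added at the top, <linux/bootmem.h> is replaced by <linux/memblock.h>, the local append_elf_note() and final_note() helpers are no longer defined here (later kernels provide equivalent helpers in common crash/kexec code, which crash_save_this_cpu() continues to call), the /* fall through */ comment in kdump_init_notifier() becomes the fallthrough pseudo-keyword, and the sysctl arrays are declared as struct ctl_table rather than the old ctl_table typedef.
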
v5.9
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * arch/ia64/kernel/crash.c
  4 *
  5 * Architecture specific (ia64) functions for kexec based crash dumps.
  6 *
  7 * Created by: Khalid Aziz <khalid.aziz@hp.com>
  8 * Copyright (C) 2005 Hewlett-Packard Development Company, L.P.
  9 * Copyright (C) 2005 Intel Corp	Zou Nan hai <nanhai.zou@intel.com>
 10 *
 11 */
 12#include <linux/smp.h>
 13#include <linux/delay.h>
 14#include <linux/crash_dump.h>
 15#include <linux/memblock.h>
 16#include <linux/kexec.h>
 17#include <linux/elfcore.h>
 18#include <linux/sysctl.h>
 19#include <linux/init.h>
 20#include <linux/kdebug.h>
 21
 22#include <asm/mca.h>
 23
 24int kdump_status[NR_CPUS];
 25static atomic_t kdump_cpu_frozen;
 26atomic_t kdump_in_progress;
 27static int kdump_freeze_monarch;
 28static int kdump_on_init = 1;
 29static int kdump_on_fatal_mca = 1;
 30
 31extern void ia64_dump_cpu_regs(void *);
 32
 33static DEFINE_PER_CPU(struct elf_prstatus, elf_prstatus);
 34
 35void
 36crash_save_this_cpu(void)
 37{
 38	void *buf;
 39	unsigned long cfm, sof, sol;
 40
 41	int cpu = smp_processor_id();
 42	struct elf_prstatus *prstatus = &per_cpu(elf_prstatus, cpu);
 43
 44	elf_greg_t *dst = (elf_greg_t *)&(prstatus->pr_reg);
 45	memset(prstatus, 0, sizeof(*prstatus));
 46	prstatus->pr_pid = current->pid;
 47
 48	ia64_dump_cpu_regs(dst);
 49	cfm = dst[43];
 50	sol = (cfm >> 7) & 0x7f;
 51	sof = cfm & 0x7f;
 52	dst[46] = (unsigned long)ia64_rse_skip_regs((unsigned long *)dst[46],
 53			sof - sol);
 54
 55	buf = (u64 *) per_cpu_ptr(crash_notes, cpu);
 56	if (!buf)
 57		return;
 58	buf = append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS, prstatus,
 59			sizeof(*prstatus));
 60	final_note(buf);
 61}
 62
 63#ifdef CONFIG_SMP
 64static int
 65kdump_wait_cpu_freeze(void)
 66{
 67	int cpu_num = num_online_cpus() - 1;
 68	int timeout = 1000;
 69	while (timeout-- > 0) {
 70		if (atomic_read(&kdump_cpu_frozen) == cpu_num)
 71			return 0;
 72		udelay(1000);
 73	}
 74	return 1;
 75}
 76#endif
 77
 78void
 79machine_crash_shutdown(struct pt_regs *pt)
 80{
 81	/* This function is only called after the system
 82	 * has panicked or is otherwise in a critical state.
 83	 * The minimum amount of code to allow a kexec'd kernel
 84	 * to run successfully needs to happen here.
 85	 *
 86	 * In practice this means shooting down the other cpus in
 87	 * an SMP system.
 88	 */
 89	kexec_disable_iosapic();
 90#ifdef CONFIG_SMP
 91	/*
 92	 * If kdump_on_init is set and an INIT is asserted here, kdump will
 93	 * be started again via the INIT monarch.
 94	 */
 95	local_irq_disable();
 96	ia64_set_psr_mc();	/* mask MCA/INIT */
 97	if (atomic_inc_return(&kdump_in_progress) != 1)
 98		unw_init_running(kdump_cpu_freeze, NULL);
 99
100	/*
101	 * Now this cpu is ready for kdump.
102	 * Stop all others by IPI or INIT.  They could receive an INIT from
103	 * outside and might become the INIT monarch, but the only thing they
104	 * have to do is fall into kdump_cpu_freeze().
105	 *
106	 * If an INIT is asserted here:
107	 * - All receivers might be slaves, since some of the cpus could
108	 *   already be frozen and INIT might be masked on the monarch.  In
109	 *   this case, all slaves will be frozen soon, since kdump_in_progress
110	 *   will send them into DIE_INIT_SLAVE_LEAVE.
111	 * - One might be the monarch, but the INIT rendezvous will fail, since
112	 *   at least this cpu already has INIT masked and so never joins
113	 *   the rendezvous.  In this case, all slaves and the monarch will
114	 *   be frozen soon with no wait, since the INIT rendezvous is skipped
115	 *   by kdump_in_progress.
116	 */
117	kdump_smp_send_stop();
118	/* if some cpus do not respond to the IPI, send INIT to freeze them */
119	if (kdump_wait_cpu_freeze()) {
120		kdump_smp_send_init();
121		/* wait again; avoid going ahead before they freeze, if possible */
122		kdump_wait_cpu_freeze();
123	}
124#endif
125}
126
127static void
128machine_kdump_on_init(void)
129{
130	crash_save_vmcoreinfo();
131	local_irq_disable();
132	kexec_disable_iosapic();
133	machine_kexec(ia64_kimage);
134}
135
136void
137kdump_cpu_freeze(struct unw_frame_info *info, void *arg)
138{
139	int cpuid;
140
141	local_irq_disable();
142	cpuid = smp_processor_id();
143	crash_save_this_cpu();
144	current->thread.ksp = (__u64)info->sw - 16;
145
146	ia64_set_psr_mc();	/* mask MCA/INIT and stop reentrance */
147
148	atomic_inc(&kdump_cpu_frozen);
149	kdump_status[cpuid] = 1;
150	mb();
151	for (;;)
152		cpu_relax();
153}
154
155static int
156kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
157{
158	struct ia64_mca_notify_die *nd;
159	struct die_args *args = data;
160
161	if (atomic_read(&kdump_in_progress)) {
162		switch (val) {
163		case DIE_INIT_MONARCH_LEAVE:
164			if (!kdump_freeze_monarch)
165				break;
166			fallthrough;
167		case DIE_INIT_SLAVE_LEAVE:
168		case DIE_INIT_MONARCH_ENTER:
169		case DIE_MCA_RENDZVOUS_LEAVE:
170			unw_init_running(kdump_cpu_freeze, NULL);
171			break;
172		}
173	}
174
175	if (!kdump_on_init && !kdump_on_fatal_mca)
176		return NOTIFY_DONE;
177
178	if (!ia64_kimage) {
179		if (val == DIE_INIT_MONARCH_LEAVE)
180			ia64_mca_printk(KERN_NOTICE
181					"%s: kdump not configured\n",
182					__func__);
183		return NOTIFY_DONE;
184	}
185
186	if (val != DIE_INIT_MONARCH_LEAVE &&
187	    val != DIE_INIT_MONARCH_PROCESS &&
188	    val != DIE_MCA_MONARCH_LEAVE)
189		return NOTIFY_DONE;
190
191	nd = (struct ia64_mca_notify_die *)args->err;
192
193	switch (val) {
194	case DIE_INIT_MONARCH_PROCESS:
195		/* Reason code 1 means machine check rendezvous */
196		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
197			if (atomic_inc_return(&kdump_in_progress) != 1)
198				kdump_freeze_monarch = 1;
199		}
200		break;
201	case DIE_INIT_MONARCH_LEAVE:
202		/* Reason code 1 means machine check rendezvous */
203		if (kdump_on_init && (nd->sos->rv_rc != 1))
204			machine_kdump_on_init();
205		break;
206	case DIE_MCA_MONARCH_LEAVE:
207		/* *(nd->data) indicates whether the MCA is recoverable */
208		if (kdump_on_fatal_mca && !(*(nd->data))) {
209			if (atomic_inc_return(&kdump_in_progress) == 1)
210				machine_kdump_on_init();
211			/* We got fatal MCA while kdump!? No way!! */
212		}
213		break;
214	}
215	return NOTIFY_DONE;
216}
217
218#ifdef CONFIG_SYSCTL
219static struct ctl_table kdump_ctl_table[] = {
220	{
221		.procname = "kdump_on_init",
222		.data = &kdump_on_init,
223		.maxlen = sizeof(int),
224		.mode = 0644,
225		.proc_handler = proc_dointvec,
226	},
227	{
228		.procname = "kdump_on_fatal_mca",
229		.data = &kdump_on_fatal_mca,
230		.maxlen = sizeof(int),
231		.mode = 0644,
232		.proc_handler = proc_dointvec,
233	},
234	{ }
235};
236
237static struct ctl_table sys_table[] = {
238	{
239	  .procname = "kernel",
240	  .mode = 0555,
241	  .child = kdump_ctl_table,
242	},
243	{ }
244};
245#endif
246
247static int
248machine_crash_setup(void)
249{
250	/* be notified before default_monarch_init_process */
251	static struct notifier_block kdump_init_notifier_nb = {
252		.notifier_call = kdump_init_notifier,
253		.priority = 1,
254	};
255	int ret;
256	if ((ret = register_die_notifier(&kdump_init_notifier_nb)) != 0)
257		return ret;
258#ifdef CONFIG_SYSCTL
259	register_sysctl_table(sys_table);
260#endif
261	return 0;
262}
263
264__initcall(machine_crash_setup);
265
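
For readers who want to experiment with the note format outside the kernel, here is a minimal user-space sketch of the packing scheme used by append_elf_note() and final_note() above. The header type and buffer are stand-ins (plain uint32_t words rather than the kernel's Elf64_Word and struct elf_note), so treat it as an illustration of the layout, not as kernel code.

#include <elf.h>		/* NT_PRSTATUS */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's struct elf_note: three 32-bit words. */
struct note_hdr {
	uint32_t n_namesz;	/* strlen(name) + 1 */
	uint32_t n_descsz;	/* payload length in bytes */
	uint32_t n_type;	/* e.g. NT_PRSTATUS */
};

/*
 * Same arithmetic as append_elf_note(): advance a 32-bit word pointer
 * past the header, the NUL-terminated name and the payload, rounding
 * each field up to a 4-byte boundary.
 */
static uint32_t *append_note(uint32_t *buf, const char *name,
			     unsigned int type, const void *data, size_t len)
{
	struct note_hdr *note = (struct note_hdr *)buf;

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = len;
	note->n_type   = type;
	buf += (sizeof(*note) + 3) / 4;

	memcpy(buf, name, note->n_namesz);
	buf += (note->n_namesz + 3) / 4;

	memcpy(buf, data, len);
	buf += (len + 3) / 4;

	return buf;
}

int main(void)
{
	uint32_t buf[64] = { 0 };
	const char payload[] = "example payload";
	uint32_t *end;

	/* "CORE" matches KEXEC_CORE_NOTE_NAME used in the listing above. */
	end = append_note(buf, "CORE", NT_PRSTATUS, payload, sizeof(payload));

	/* An all-zero header terminates the list, like final_note(). */
	memset(end, 0, sizeof(struct note_hdr));

	printf("note record occupies %zu bytes\n",
	       (size_t)((char *)end - (char *)buf));
	return 0;
}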