/*
 * File:	mca.c
 * Purpose:	Generic MCA handling layer
 *
 * Copyright (C) 2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Copyright (C) 2002 Dell Inc.
 * Copyright (C) Matt Domsch <Matt_Domsch@dell.com>
 *
 * Copyright (C) 2002 Intel
 * Copyright (C) Jenna Hall <jenna.s.hall@intel.com>
 *
 * Copyright (C) 2001 Intel
 * Copyright (C) Fred Lewis <frederick.v.lewis@intel.com>
 *
 * Copyright (C) 2000 Intel
 * Copyright (C) Chuck Fleckenstein <cfleck@co.intel.com>
 *
 * Copyright (C) 1999, 2004-2008 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 *
 * Copyright (C) 2006 FUJITSU LIMITED
 * Copyright (C) Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *
 * 2000-03-29 Chuck Fleckenstein <cfleck@co.intel.com>
 *	      Fixed PAL/SAL update issues, began MCA bug fixes, logging issues,
 *	      added min save state dump, added INIT handler.
 *
 * 2001-01-03 Fred Lewis <frederick.v.lewis@intel.com>
 *	      Added setup of CMCI and CPEI IRQs, logging of corrected platform
 *	      errors, completed code for logging of corrected & uncorrected
 *	      machine check errors, and updated for conformance with Nov. 2000
 *	      revision of the SAL 3.0 spec.
 *
 * 2002-01-04 Jenna Hall <jenna.s.hall@intel.com>
 *	      Aligned MCA stack to 16 bytes, added platform vs. CPU error flag,
 *	      set SAL default return values, changed error record structure to
 *	      linked list, added init call to sal_get_state_info_size().
 *
 * 2002-03-25 Matt Domsch <Matt_Domsch@dell.com>
 *	      GUID cleanups.
 *
 * 2003-04-15 David Mosberger-Tang <davidm@hpl.hp.com>
 *	      Added INIT backtrace support.
 *
 * 2003-12-08 Keith Owens <kaos@sgi.com>
 *	      smp_call_function() must not be called from interrupt context
 *	      (can deadlock on tasklist_lock).
 *	      Use keventd to call smp_call_function().
 *
 * 2004-02-01 Keith Owens <kaos@sgi.com>
 *	      Avoid deadlock when using printk() for MCA and INIT records.
 *	      Delete all record printing code, moved to salinfo_decode in user
 *	      space.  Mark variables and functions static where possible.
 *	      Delete dead variables and functions.  Reorder to remove the need
 *	      for forward declarations and to consolidate related code.
 *
 * 2005-08-12 Keith Owens <kaos@sgi.com>
 *	      Convert MCA/INIT handlers to use per event stacks and SAL/OS
 *	      state.
 *
 * 2005-10-07 Keith Owens <kaos@sgi.com>
 *	      Add notify_die() hooks.
 *
 * 2006-09-15 Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
 *	      Add printing support for MCA/INIT.
 *
 * 2007-04-27 Russ Anderson <rja@sgi.com>
 *	      Support multiple cpus going through OS_MCA in the same event.
 */
#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/sal.h>
#include <asm/mca.h>
#include <asm/kexec.h>

#include <asm/irq.h>
#include <asm/hw_irq.h>
#include <asm/tlb.h>

#include "mca_drv.h"
#include "entry.h"

#if defined(IA64_MCA_DEBUG_INFO)
# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
#else
# define IA64_MCA_DEBUG(fmt...)
#endif

#define NOTIFY_INIT(event, regs, arg, spin)				\
do {									\
	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)		\
			== NOTIFY_STOP) && ((spin) == 1))		\
		ia64_mca_spin(__func__);				\
} while (0)

#define NOTIFY_MCA(event, regs, arg, spin)				\
do {									\
	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)		\
			== NOTIFY_STOP) && ((spin) == 1))		\
		ia64_mca_spin(__func__);				\
} while (0)
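
/*
 * Usage sketch, taken from the handlers later in this file:
 *
 *	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 *
 * If a registered notifier returns NOTIFY_STOP and the spin argument is 1,
 * the cpu parks in ia64_mca_spin() instead of returning to SAL.
 */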

/* Used by mca_asm.S */
DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */

unsigned long __per_cpu_mca[NR_CPUS];

/* In mca_asm.S */
extern void			ia64_os_init_dispatch_monarch (void);
extern void			ia64_os_init_dispatch_slave (void);

static int monarch_cpu = -1;

static ia64_mc_info_t		ia64_mc_info;

#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
#define CPE_HISTORY_LENGTH    5
#define CMC_HISTORY_LENGTH    5

#ifdef CONFIG_ACPI
static struct timer_list cpe_poll_timer;
#endif
static struct timer_list cmc_poll_timer;
/*
 * This variable tells whether we are currently in polling mode.
 * Start with this in the wrong state so we won't play w/ timers
 * before the system is ready.
 */
static int cmc_polling_enabled = 1;

/*
 * Clearing this variable prevents CPE polling from getting activated
 * in mca_late_init.  Use it if your system doesn't provide a CPEI,
 * but encounters problems retrieving CPE logs.  This should only be
 * necessary for debugging.
 */
static int cpe_poll_enabled = 1;

extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);

static int mca_init __initdata;

/*
 * limited & delayed printing support for MCA/INIT handler
 */

#define mprintk(fmt...) ia64_mca_printk(fmt)

#define MLOGBUF_SIZE (512+256*NR_CPUS)
#define MLOGBUF_MSGMAX 256
static char mlogbuf[MLOGBUF_SIZE];
static DEFINE_SPINLOCK(mlogbuf_wlock);	/* mca context only */
static DEFINE_SPINLOCK(mlogbuf_rlock);	/* normal context only */
static unsigned long mlogbuf_start;
static unsigned long mlogbuf_end;
static unsigned int mlogbuf_finished = 0;
static unsigned long mlogbuf_timestamp = 0;

static int loglevel_save = -1;
#define BREAK_LOGLEVEL(__console_loglevel)		\
	oops_in_progress = 1;				\
	if (loglevel_save < 0)				\
		loglevel_save = __console_loglevel;	\
	__console_loglevel = 15;

#define RESTORE_LOGLEVEL(__console_loglevel)		\
	if (loglevel_save >= 0) {			\
		__console_loglevel = loglevel_save;	\
		loglevel_save = -1;			\
	}						\
	mlogbuf_finished = 0;				\
	oops_in_progress = 0;

/*
 * Push messages into buffer, print them later if not urgent.
 */
void ia64_mca_printk(const char *fmt, ...)
{
	va_list args;
	int printed_len;
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;

	va_start(args, fmt);
	printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
	va_end(args);

	/* Copy the output into mlogbuf */
	if (oops_in_progress) {
		/* mlogbuf was abandoned, use printk directly instead.
		 * Print via "%s" so a message containing '%' cannot be
		 * misread as a format string.
		 */
		printk("%s", temp_buf);
	} else {
		spin_lock(&mlogbuf_wlock);
		for (p = temp_buf; *p; p++) {
			unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
			if (next != mlogbuf_start) {
				mlogbuf[mlogbuf_end] = *p;
				mlogbuf_end = next;
			} else {
				/* buffer full */
				break;
			}
		}
		mlogbuf[mlogbuf_end] = '\0';
		spin_unlock(&mlogbuf_wlock);
	}
}
EXPORT_SYMBOL(ia64_mca_printk);
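
/*
 * Worked example (illustrative only): mlogbuf is a one-slot-open ring
 * buffer.  With MLOGBUF_SIZE == 8 and mlogbuf_start == mlogbuf_end == 0,
 * at most 7 characters can be stored; the buffer is treated as full when
 * (mlogbuf_end + 1) % MLOGBUF_SIZE == mlogbuf_start, so one slot always
 * stays free to hold the running '\0' terminator and to distinguish a
 * full buffer from an empty one.
 */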

/*
 * Print buffered messages.
 *  NOTE: call this after returning to normal context (e.g. from salinfod).
 */
void ia64_mlogbuf_dump(void)
{
	char temp_buf[MLOGBUF_MSGMAX];
	char *p;
	unsigned long index;
	unsigned long flags;
	unsigned int printed_len;

	/* Get output from mlogbuf */
	while (mlogbuf_start != mlogbuf_end) {
		temp_buf[0] = '\0';
		p = temp_buf;
		printed_len = 0;

		spin_lock_irqsave(&mlogbuf_rlock, flags);

		index = mlogbuf_start;
		while (index != mlogbuf_end) {
			*p = mlogbuf[index];
			index = (index + 1) % MLOGBUF_SIZE;
			if (!*p)
				break;
			p++;
			if (++printed_len >= MLOGBUF_MSGMAX - 1)
				break;
		}
		*p = '\0';
		if (temp_buf[0])
			printk("%s", temp_buf);
		mlogbuf_start = index;

		mlogbuf_timestamp = 0;
		spin_unlock_irqrestore(&mlogbuf_rlock, flags);
	}
}
EXPORT_SYMBOL(ia64_mlogbuf_dump);

/*
 * Call this if the system is going down or if messages must be flushed to
 * the console immediately (e.g. recovery failed, a crash dump is about to
 * be invoked, long-wait rendezvous, etc.).
 *  NOTE: this should be called from the monarch.
 */
static void ia64_mlogbuf_finish(int wait)
{
	BREAK_LOGLEVEL(console_loglevel);

	spin_lock_init(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
		"MCA/INIT might be dodgy or fail.\n");

	if (!wait)
		return;

	/* wait for console */
	printk("Delaying for 5 seconds...\n");
	udelay(5*1000000);

	mlogbuf_finished = 1;
}

/*
 * Print buffered messages from INIT context.
 */
static void ia64_mlogbuf_dump_from_init(void)
{
	if (mlogbuf_finished)
		return;

	if (mlogbuf_timestamp &&
			time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
			"and the system seems to be messed up.\n");
		ia64_mlogbuf_finish(0);
		return;
	}

	if (!spin_trylock(&mlogbuf_rlock)) {
		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
			"Generated messages other than stack dump will be "
			"buffered to mlogbuf and will be printed later.\n");
		printk(KERN_ERR "INIT: If messages are not printed after "
			"this INIT, wait 30 seconds and assert INIT again.\n");
		if (!mlogbuf_timestamp)
			mlogbuf_timestamp = jiffies;
		return;
	}
	spin_unlock(&mlogbuf_rlock);
	ia64_mlogbuf_dump();
}

static inline void
ia64_mca_spin(const char *func)
{
	if (monarch_cpu == smp_processor_id())
		ia64_mlogbuf_finish(0);
	mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
	while (1)
		cpu_relax();
}
/*
 * IA64_MCA log support
 */
#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
#define IA64_MAX_LOG_TYPES	4	/* MCA, INIT, CMC, CPE */

typedef struct ia64_state_log_s
{
	spinlock_t	isl_lock;
	int		isl_index;
	unsigned long	isl_count;
	ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
} ia64_state_log_t;

static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];

#define IA64_LOG_ALLOCATE(it, size) \
	{ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size); \
	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] = \
		(ia64_err_rec_t *)alloc_bootmem(size);}
#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock, s)
#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
#define IA64_LOG_INDEX_INC(it) \
    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
    ia64_state_log[it].isl_count++;}
#define IA64_LOG_INDEX_DEC(it) \
    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
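
/*
 * Illustrative only: a fresh record is always read into the NEXT buffer;
 * IA64_LOG_INDEX_INC() then toggles isl_index so that buffer becomes CURR.
 * With isl_index == 0, NEXT selects isl_log[0] and CURR selects isl_log[1];
 * after one increment the roles swap, so the record most recently handed
 * out survives one level of nested MCA.
 */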
/*
 * ia64_log_init
 *	Reset the OS ia64 log buffer
 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 * Outputs  :   None
 */
static void __init
ia64_log_init(int sal_info_type)
{
	u64	max_size = 0;

	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
	IA64_LOG_LOCK_INIT(sal_info_type);

	/* SAL will tell us the maximum size of any error record of this type */
	max_size = ia64_sal_get_state_info_size(sal_info_type);
	if (!max_size)
		/* alloc_bootmem() doesn't like zero-sized allocations! */
		return;

	/* set up OS data structures to hold error info */
	IA64_LOG_ALLOCATE(sal_info_type, max_size);
	memset(IA64_LOG_CURR_BUFFER(sal_info_type), 0, max_size);
	memset(IA64_LOG_NEXT_BUFFER(sal_info_type), 0, max_size);
}

/*
 * ia64_log_get
 *
 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 *
 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 *              irq_safe    whether you can use printk at this point
 *  Outputs :   size        (total record length)
 *              *buffer     (ptr to error record)
 *
 */
static u64
ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
{
	sal_log_record_header_t     *log_buffer;
	u64                         total_len = 0;
	unsigned long               s;

	IA64_LOG_LOCK(sal_info_type);

	/* Get the process state information */
	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);

	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);

	if (total_len) {
		IA64_LOG_INDEX_INC(sal_info_type);
		IA64_LOG_UNLOCK(sal_info_type);
		if (irq_safe) {
			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
				       __func__, sal_info_type, total_len);
		}
		*buffer = (u8 *) log_buffer;
		return total_len;
	} else {
		IA64_LOG_UNLOCK(sal_info_type);
		return 0;
	}
}

/*
 *  ia64_mca_log_sal_error_record
 *
 *  This function retrieves a specified error record type from SAL
 *  and wakes up any processes waiting for error records.
 *
 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
 *              FIXME: remove MCA and irq_safe.
 */
static void
ia64_mca_log_sal_error_record(int sal_info_type)
{
	u8 *buffer;
	sal_log_record_header_t *rh;
	u64 size;
	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
#ifdef IA64_MCA_DEBUG_INFO
	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
#endif

	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
	if (!size)
		return;

	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);

	if (irq_safe)
		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
			smp_processor_id(),
			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");

	/* Clear logs from corrected errors in case there's no user-level logger */
	rh = (sal_log_record_header_t *)buffer;
	if (rh->severity == sal_log_severity_corrected)
		ia64_sal_clear_state_info(sal_info_type);
}

/*
 * search_mca_table
 *  See if the MCA surfaced in an instruction range
 *  that has been tagged as recoverable.
 *
 *  Inputs
 *	first	First address range to check
 *	last	Last address range to check
 *	ip	Instruction pointer, the address we are looking for
 *
 * Return value:
 *      1 on Success (in the table) / 0 on Failure (not in the table)
 */
int
search_mca_table (const struct mca_table_entry *first,
		const struct mca_table_entry *last,
		unsigned long ip)
{
	const struct mca_table_entry *curr;
	u64 curr_start, curr_end;

	curr = first;
	while (curr <= last) {
		curr_start = (u64) &curr->start_addr + curr->start_addr;
		curr_end = (u64) &curr->end_addr + curr->end_addr;

		if ((ip >= curr_start) && (ip <= curr_end)) {
			return 1;
		}
		curr++;
	}
	return 0;
}
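
/*
 * Illustrative only: mca_table entries hold self-relative offsets rather
 * than absolute addresses, which keeps the table position-independent.
 * Each bound is reconstructed as the address of the field plus the offset
 * stored in it:
 *
 *	curr_start = (u64) &curr->start_addr + curr->start_addr;
 */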

/* Given an address, look for it in the mca tables. */
int mca_recover_range(unsigned long addr)
{
	extern struct mca_table_entry __start___mca_table[];
	extern struct mca_table_entry __stop___mca_table[];

	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
}
EXPORT_SYMBOL_GPL(mca_recover_range);

#ifdef CONFIG_ACPI

int cpe_vector = -1;
int ia64_cpe_irq = -1;

static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
{
	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cpe_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cpe_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cpe_history_lock);
	if (!cpe_poll_enabled && cpe_vector >= 0) {

		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
			if (now - cpe_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
		if (count >= CPE_HISTORY_LENGTH) {

			cpe_poll_enabled = 1;
			spin_unlock(&cpe_history_lock);
			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");

			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cpe_history[index++] = now;
			if (index == CPE_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cpe_history_lock);
out:
	/* Get the CPE error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);

	local_irq_disable();

	return IRQ_HANDLED;
}

#endif /* CONFIG_ACPI */

#ifdef CONFIG_ACPI
/*
 * ia64_mca_register_cpev
 *
 *  Register the corrected platform error vector with SAL.
 *
 *  Inputs
 *      cpev        Corrected Platform Error Vector number
 *
 *  Outputs
 *      None
 */
void
ia64_mca_register_cpev (int cpev)
{
	/* Register the CPE interrupt vector with SAL */
	struct ia64_sal_retval isrv;

	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
	if (isrv.status) {
		printk(KERN_ERR "Failed to register Corrected Platform "
		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
		return;
	}

	IA64_MCA_DEBUG("%s: corrected platform error "
		       "vector %#x registered\n", __func__, cpev);
}
#endif /* CONFIG_ACPI */

/*
 * ia64_mca_cmc_vector_setup
 *
 *  Set up the corrected machine check vector register in the processor.
 *  (The interrupt is masked on boot; ia64_mca_late_init() unmasks it.)
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      None
 *
 * Outputs
 *	None
 */
void __cpuinit
ia64_mca_cmc_vector_setup (void)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval	= 0;
	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
		       __func__, smp_processor_id(), IA64_CMC_VECTOR);

	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
}

/*
 * ia64_mca_cmc_vector_disable
 *
 *  Mask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_disable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_enable
 *
 *  Unmask the corrected machine check vector register in the processor.
 *  This function is invoked on a per-processor basis.
 *
 * Inputs
 *      dummy(unused)
 *
 * Outputs
 *	None
 */
static void
ia64_mca_cmc_vector_enable (void *dummy)
{
	cmcv_reg_t	cmcv;

	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);

	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);

	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
		       __func__, smp_processor_id(), cmcv.cmcv_vector);
}

/*
 * ia64_mca_cmc_vector_disable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * disable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
}

/*
 * ia64_mca_cmc_vector_enable_keventd
 *
 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 * enable the cmc interrupt vector.
 */
static void
ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
{
	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
}

/*
 * ia64_mca_wakeup
 *
 *	Send an inter-cpu interrupt to wake up a particular cpu.
 *
 *  Inputs  :   cpuid
 *  Outputs :   None
 */
static void
ia64_mca_wakeup(int cpu)
{
	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * ia64_mca_wakeup_all
 *
 *	Wake up all the slave cpus which have rendezvoused previously.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static void
ia64_mca_wakeup_all(void)
{
	int cpu;

	/* Clear the Rendez checkin flag for all cpus */
	for_each_online_cpu(cpu) {
		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
			ia64_mca_wakeup(cpu);
	}

}

/*
 * ia64_mca_rendez_int_handler
 *
 *	This is the handler used to put slave processors into a spin loop
 *	while the monarch processor does the MCA handling; the monarch
 *	later wakes each slave up once it is done.  The state
 *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendezvoused
 *	in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
 *	the cpu has come out of OS rendezvous.
 *
 *  Inputs  :   None
 *  Outputs :   None
 */
static irqreturn_t
ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
{
	unsigned long flags;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = NULL, .monarch_cpu = &monarch_cpu };

	/* Mask all interrupts */
	local_irq_save(flags);

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
	/* Register with the SAL monarch that the slave has
	 * reached SAL
	 */
	ia64_sal_mc_rendez();

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);

	/* Wait for the monarch cpu to exit. */
	while (monarch_cpu != -1)
	       cpu_relax();	/* spin until monarch leaves */

	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	/* Enable all interrupts */
	local_irq_restore(flags);
	return IRQ_HANDLED;
}

/*
 * ia64_mca_wakeup_int_handler
 *
 *	The interrupt handler for processing the inter-cpu interrupt to the
 *	slave cpu which was spinning in the rendez loop.
 *	Since this spinning is done by turning off the interrupts and
 *	polling on the wakeup-interrupt bit in the IRR, there is
 *	nothing useful to be done in the handler.
 *
 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 *	arg		(Interrupt handler specific argument)
 *  Outputs :   None
 *
 */
static irqreturn_t
ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
{
	return IRQ_HANDLED;
}

/* Function pointer for extra MCA recovery */
int (*ia64_mca_ucmc_extension)
	(void*,struct ia64_sal_os_state*)
	= NULL;

int
ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
{
	if (ia64_mca_ucmc_extension)
		return 1;

	ia64_mca_ucmc_extension = fn;
	return 0;
}

void
ia64_unreg_MCA_extension(void)
{
	if (ia64_mca_ucmc_extension)
		ia64_mca_ucmc_extension = NULL;
}

EXPORT_SYMBOL(ia64_reg_MCA_extension);
EXPORT_SYMBOL(ia64_unreg_MCA_extension);


static inline void
copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
{
	u64 fslot, tslot, nat;
	*tr = *fr;
	fslot = ((unsigned long)fr >> 3) & 63;
	tslot = ((unsigned long)tr >> 3) & 63;
	*tnat &= ~(1UL << tslot);
	nat = (fnat >> fslot) & 1;
	*tnat |= (nat << tslot);
}
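
/*
 * Worked example (illustrative only): each saved register slot owns one
 * NaT bit, indexed by bits 3..8 of the slot's address.  If the source
 * register lives at an address with ((addr >> 3) & 63) == 5, its NaT is
 * bit 5 of fnat; copy_reg() clears the target's bit at the slot computed
 * from the target address and copies that single bit across.
 */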

/* Change the comm field on the MCA/INIT task to include the pid that
 * was interrupted, it makes for easier debugging.  If that pid was 0
 * (swapper or nested MCA/INIT) then use the start of the previous comm
 * field suffixed with its cpu.
 */

static void
ia64_mca_modify_comm(const struct task_struct *previous_current)
{
	char *p, comm[sizeof(current->comm)];
	if (previous_current->pid)
		snprintf(comm, sizeof(comm), "%s %d",
			current->comm, previous_current->pid);
	else {
		int l;
		if ((p = strchr(previous_current->comm, ' ')))
			l = p - previous_current->comm;
		else
			l = strlen(previous_current->comm);
		snprintf(comm, sizeof(comm), "%s %*s %d",
			current->comm, l, previous_current->comm,
			task_thread_info(previous_current)->cpu);
	}
	memcpy(current->comm, comm, sizeof(current->comm));
}
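
/*
 * Illustrative only: after this call the handler task's comm reads e.g.
 * "MCA 1234" when pid 1234 was interrupted, or "MCA swapper 2" when the
 * idle task on cpu 2 was interrupted.
 */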

static void
finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
		unsigned long *nat)
{
	const pal_min_state_area_t *ms = sos->pal_min_state;
	const u64 *bank;

	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
	 * pmsa_{xip,xpsr,xfs}
	 */
	if (ia64_psr(regs)->ic) {
		regs->cr_iip = ms->pmsa_iip;
		regs->cr_ipsr = ms->pmsa_ipsr;
		regs->cr_ifs = ms->pmsa_ifs;
	} else {
		regs->cr_iip = ms->pmsa_xip;
		regs->cr_ipsr = ms->pmsa_xpsr;
		regs->cr_ifs = ms->pmsa_xfs;

		sos->iip = ms->pmsa_iip;
		sos->ipsr = ms->pmsa_ipsr;
		sos->ifs = ms->pmsa_ifs;
	}
	regs->pr = ms->pmsa_pr;
	regs->b0 = ms->pmsa_br0;
	regs->ar_rsc = ms->pmsa_rsc;
	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
	if (ia64_psr(regs)->bn)
		bank = ms->pmsa_bank1_gr;
	else
		bank = ms->pmsa_bank0_gr;
	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
}

/* On entry to this routine, we are running on the per cpu stack, see
 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 * the original stack's registers will be in the RBS on this stack.  This stack
 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 * PAL minstate.
 *
 * The first thing to do is modify the original stack to look like a blocked
 * task so we can run backtrace on the original task.  Also mark the per cpu
 * stack as current to ensure that we use the correct task state, it also means
 * that we can do backtrace on the MCA/INIT handler code itself.
 */

static struct task_struct *
ia64_mca_modify_original_stack(struct pt_regs *regs,
		const struct switch_stack *sw,
		struct ia64_sal_os_state *sos,
		const char *type)
{
	char *p;
	ia64_va va;
	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
	const pal_min_state_area_t *ms = sos->pal_min_state;
	struct task_struct *previous_current;
	struct pt_regs *old_regs;
	struct switch_stack *old_sw;
	unsigned size = sizeof(struct pt_regs) +
			sizeof(struct switch_stack) + 16;
	unsigned long *old_bspstore, *old_bsp;
	unsigned long *new_bspstore, *new_bsp;
	unsigned long old_unat, old_rnat, new_rnat, nat;
	u64 slots, loadrs = regs->loadrs;
	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
	u64 ar_bspstore = regs->ar_bspstore;
	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
	const char *msg;
	int cpu = smp_processor_id();

	previous_current = curr_task(cpu);
	set_curr_task(cpu, current);
	if ((p = strchr(current->comm, ' ')))
		*p = '\0';

	/* Best effort attempt to cope with MCA/INIT delivered while in
	 * physical mode.
	 */
	regs->cr_ipsr = ms->pmsa_ipsr;
	if (ia64_psr(regs)->dt == 0) {
		va.l = r12;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r12 = va.l;
		}
		va.l = r13;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			r13 = va.l;
		}
	}
	if (ia64_psr(regs)->rt == 0) {
		va.l = ar_bspstore;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bspstore = va.l;
		}
		va.l = ar_bsp;
		if (va.f.reg == 0) {
			va.f.reg = 7;
			ar_bsp = va.l;
		}
	}

	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
	 * have been copied to the old stack, the old stack may fail the
	 * validation tests below.  So ia64_old_stack() must restore the dirty
	 * registers from the new stack.  The old and new bspstore probably
	 * have different alignments, so loadrs calculated on the old bsp
	 * cannot be used to restore from the new bsp.  Calculate a suitable
	 * loadrs for the new stack and save it in the new pt_regs, where
	 * ia64_old_stack() can get it.
	 */
	old_bspstore = (unsigned long *)ar_bspstore;
	old_bsp = (unsigned long *)ar_bsp;
	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
	new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;

	/* Verify the previous stack state before we change it */
	if (user_mode(regs)) {
		msg = "occurred in user space";
		/* previous_current is guaranteed to be valid when the task was
		 * in user space, so ...
		 */
		ia64_mca_modify_comm(previous_current);
		goto no_mod;
	}

	if (r13 != sos->prev_IA64_KR_CURRENT) {
		msg = "inconsistent previous current and r13";
		goto no_mod;
	}

	if (!mca_recover_range(ms->pmsa_iip)) {
		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent r12 and r13";
			goto no_mod;
		}
		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bspstore and r13";
			goto no_mod;
		}
		va.p = old_bspstore;
		if (va.f.reg < 5) {
			msg = "old_bspstore is in the wrong region";
			goto no_mod;
		}
		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
			msg = "inconsistent ar.bsp and r13";
			goto no_mod;
		}
		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
		if (ar_bspstore + size > r12) {
			msg = "no room for blocked state";
			goto no_mod;
		}
	}

	ia64_mca_modify_comm(previous_current);

	/* Make the original task look blocked.  First stack a struct pt_regs,
	 * describing the state at the time of interrupt.  mca_asm.S built a
	 * partial pt_regs, copy it and fill in the blanks using minstate.
	 */
	p = (char *)r12 - sizeof(*regs);
	old_regs = (struct pt_regs *)p;
	memcpy(old_regs, regs, sizeof(*regs));
	old_regs->loadrs = loadrs;
	old_unat = old_regs->ar_unat;
	finish_pt_regs(old_regs, sos, &old_unat);

	/* Next stack a struct switch_stack.  mca_asm.S built a partial
	 * switch_stack, copy it and fill in the blanks using pt_regs and
	 * minstate.
	 *
	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
	 * ar.pfs is set to 0.
	 *
	 * unwind.c::unw_unwind() does special processing for interrupt frames.
	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
	 * switch_stack on the original stack so it will unwind correctly when
	 * unwind.c reads pt_regs.
	 *
	 * thread.ksp is updated to point to the synthesized switch_stack.
	 */
	p -= sizeof(struct switch_stack);
	old_sw = (struct switch_stack *)p;
	memcpy(old_sw, sw, sizeof(*sw));
	old_sw->caller_unat = old_unat;
	old_sw->ar_fpsr = old_regs->ar_fpsr;
	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
	old_sw->b0 = (u64)ia64_leave_kernel;
	old_sw->b1 = ms->pmsa_br1;
	old_sw->ar_pfs = 0;
	old_sw->ar_unat = old_unat;
	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
	previous_current->thread.ksp = (u64)p - 16;

	/* Finally copy the original stack's registers back to its RBS.
	 * Registers from ar.bspstore through ar.bsp at the time of the event
	 * are in the current RBS, copy them back to the original stack.  The
	 * copy must be done register by register because the original bspstore
	 * and the current one have different alignments, so the saved RNAT
	 * data occurs at different places.
	 *
	 * mca_asm does cover, so the old_bsp already includes all registers at
	 * the time of MCA/INIT.  It also does flushrs, so all registers before
	 * this function have been written to backing store on the MCA/INIT
	 * stack.
	 */
	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
	old_rnat = regs->ar_rnat;
	while (slots--) {
		if (ia64_rse_is_rnat_slot(new_bspstore)) {
			new_rnat = ia64_get_rnat(new_bspstore++);
		}
		if (ia64_rse_is_rnat_slot(old_bspstore)) {
			*old_bspstore++ = old_rnat;
			old_rnat = 0;
		}
		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
		*old_bspstore++ = *new_bspstore++;
	}
	old_sw->ar_bspstore = (unsigned long)old_bspstore;
	old_sw->ar_rnat = old_rnat;

	sos->prev_task = previous_current;
	return previous_current;

no_mod:
	mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
			smp_processor_id(), type, msg);
	old_unat = regs->ar_unat;
	finish_pt_regs(regs, sos, &old_unat);
	return previous_current;
}

/* The monarch/slave interaction is based on monarch_cpu and requires that all
 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
 * not entered rendezvous yet then wait a bit.  The assumption is that any
 * slave that has not rendezvoused after a reasonable time is never going to do
 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
 * interrupt, as well as cpus that receive the INIT slave event.
 */

static void
ia64_wait_for_slaves(int monarch, const char *type)
{
	int c, i, wait;

	/*
	 * wait 5 seconds total for slaves (arbitrary)
	 */
	for (i = 0; i < 5000; i++) {
		wait = 0;
		for_each_online_cpu(c) {
			if (c == monarch)
				continue;
			if (ia64_mc_info.imi_rendez_checkin[c]
					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
				udelay(1000);		/* short wait */
				wait = 1;
				break;
			}
		}
		if (!wait)
			goto all_in;
	}

	/*
	 * Some slave(s) may be dead.  Print buffered messages immediately.
	 */
	ia64_mlogbuf_finish(0);
	mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
	for_each_online_cpu(c) {
		if (c == monarch)
			continue;
		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
			mprintk(" %d", c);
	}
	mprintk("\n");
	return;

all_in:
	mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
	return;
}

/*  mca_insert_tr
 *
 *  Switch the region id while reloading a TR, if needed.
 *  iord: 0x1: itr, 0x2: dtr
 *
 */
static void mca_insert_tr(u64 iord)
{
	int i;
	u64 old_rr;
	struct ia64_tr_entry *p;
	unsigned long psr;
	int cpu = smp_processor_id();

	if (!ia64_idtrs[cpu])
		return;

	psr = ia64_clear_ic();
	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
		if (p->pte & 0x1) {
			old_rr = ia64_get_rr(p->ifa);
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, p->rr);
				ia64_srlz_d();
			}
			ia64_ptr(iord, p->ifa, p->itir >> 2);
			ia64_srlz_i();
			if (iord & 0x1) {
				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (iord & 0x2) {
				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
				ia64_srlz_i();
			}
			if (old_rr != p->rr) {
				ia64_set_rr(p->ifa, old_rr);
				ia64_srlz_d();
			}
		}
	}
	ia64_set_psr(psr);
}
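
/*
 * Usage sketch: when mca_asm has flagged that the tracked translation
 * registers need reinserting (per-cpu ia64_mca_tr_reload is set), the MCA
 * handler below reloads both classes:
 *
 *	mca_insert_tr(0x1);	// instruction TRs
 *	mca_insert_tr(0x2);	// data TRs
 */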

/*
 * ia64_mca_handler
 *
 *	This is the uncorrectable machine check handler called from OS_MCA
 *	dispatch code which is in turn called from SAL_CHECK().
 *	This is the place where the core of OS MCA handling is done.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format. This handler code is supposed to be run only on the
 *	monarch processor. Once the monarch is done with MCA handling
 *	further MCA logging is enabled by clearing logs.
 *	Monarch also has the duty of sending wakeup-IPIs to pull the
 *	slave processors out of rendezvous spinloop.
 *
 *	If multiple processors call into OS_MCA, the first will become
 *	the monarch.  Subsequent cpus will be recorded in the mca_cpu
 *	bitmask.  After the first monarch has processed its MCA, it
 *	will wake up the next cpu in the mca_cpu bitmask and then go
 *	into the rendezvous loop.  When all processors have serviced
 *	their MCA, the last monarch frees up the rest of the processors.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	int recover, cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
	static atomic_t mca_count;
	static cpumask_t mca_cpu;

	if (atomic_add_return(1, &mca_count) == 1) {
		monarch_cpu = cpu;
		sos->monarch = 1;
	} else {
		cpu_set(cpu, mca_cpu);
		sos->monarch = 0;
	}
	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");

	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
	if (sos->monarch) {
		ia64_wait_for_slaves(cpu, "MCA");

		/* Wakeup all the processors which are spinning in the
		 * rendezvous loop.  They will leave SAL, then spin in the OS
		 * with interrupts disabled until this monarch cpu leaves the
		 * MCA handler.  That gets control back to the OS so we can
		 * backtrace the other cpus, backtrace when spinning in SAL
		 * does not work.
		 */
		ia64_mca_wakeup_all();
	} else {
		while (cpu_isset(cpu, mca_cpu))
			cpu_relax();	/* spin until monarch wakes us */
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* MCA error recovery */
	recover = (ia64_mca_ucmc_extension
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	} else {
		/* Dump buffered message to console */
		ia64_mlogbuf_finish(1);
	}

	if (__get_cpu_var(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /* Reload dynamic itrs */
		mca_insert_tr(0x2); /* Reload dynamic dtrs */
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);

	if (atomic_dec_return(&mca_count) > 0) {
		int i;

		/* wake up the next monarch cpu,
		 * and put this cpu in the rendez loop.
		 */
		for_each_online_cpu(i) {
			if (cpu_isset(i, mca_cpu)) {
				monarch_cpu = i;
				cpu_clear(i, mca_cpu);	/* wake next cpu */
				while (monarch_cpu != -1)
					cpu_relax();	/* spin until last cpu leaves */
				set_curr_task(cpu, previous_current);
				ia64_mc_info.imi_rendez_checkin[cpu]
						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
				return;
			}
		}
	}
	set_curr_task(cpu, previous_current);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
}

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);

/*
 * ia64_mca_cmc_int_handler
 *
 *  This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *      interrupt number
 *      client data arg ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			/* If we're being hit with CMC interrupts, we won't
			 * ever execute the schedule_work() below.  Need to
			 * disable CMC interrupts on this processor now.
			 */
			ia64_mca_cmc_vector_disable(NULL);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
out:
	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);
	return IRQ_HANDLED;
}
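
/*
 * Illustrative only: with CMC_HISTORY_LENGTH == 5, five corrected machine
 * checks landing within one second (HZ jiffies) of each other trip the
 * threshold above, and the handler flips from interrupt-driven mode to
 * polling every CMC_POLL_INTERVAL (one minute).
 */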

/*
 *  ia64_mca_cmc_int_caller
 *
 * 	Triggered by sw interrupt from CMC polling routine.  Calls
 * 	real interrupt handler and either triggers a sw interrupt
 * 	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 * 	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg);

	cpuid = cpumask_next(cpuid+1, cpu_online_mask);

	if (cpuid < nr_cpu_ids) {
		platform_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}

/*
 *  ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs   :   dummy(unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cmc_poll (unsigned long dummy)
{
	/* Trigger a CMC interrupt cascade  */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 *  ia64_mca_cpe_int_caller
 *
 * 	Triggered by sw interrupt from CPE polling routine.  Calls
 * 	real interrupt handler and either triggers a sw interrupt
 * 	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 * 	handled
 */
#ifdef CONFIG_ACPI

static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg);

	cpuid = cpumask_next(cpuid+1, cpu_online_mask);

	if (cpuid < NR_CPUS) {
		platform_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, backoff or return to interrupt mode.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}
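
/*
 * Illustrative only: the CPE poll interval adapts between
 * MIN_CPE_POLL_INTERVAL (2 minutes) and MAX_CPE_POLL_INTERVAL (15 minutes).
 * It is halved whenever a poll turns up a new record, doubled when a poll
 * comes up empty and there is no CPEI to fall back on, and if a CPEI does
 * exist an empty poll switches back to interrupt mode entirely.
 */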

/*
 *  ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs), trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs   :   dummy(unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cpe_poll (unsigned long dummy)
{
	/* Trigger a CPE interrupt cascade  */
	platform_send_ipi(first_cpu(cpu_online_map), IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
1593}
1594
1595#endif /* CONFIG_ACPI */
1596
1597static int
1598default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
1599{
1600	int c;
1601	struct task_struct *g, *t;
1602	if (val != DIE_INIT_MONARCH_PROCESS)
1603		return NOTIFY_DONE;
1604#ifdef CONFIG_KEXEC
1605	if (atomic_read(&kdump_in_progress))
1606		return NOTIFY_DONE;
1607#endif
1608
1609	/*
1610	 * FIXME: mlogbuf will brim over with INIT stack dumps.
1611	 * To enable show_stack from INIT, we use oops_in_progress which should
1612	 * be used in real oops. This would cause something wrong after INIT.
1613	 */
1614	BREAK_LOGLEVEL(console_loglevel);
1615	ia64_mlogbuf_dump_from_init();
1616
1617	printk(KERN_ERR "Processes interrupted by INIT -");
1618	for_each_online_cpu(c) {
1619		struct ia64_sal_os_state *s;
1620		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
1621		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
1622		g = s->prev_task;
1623		if (g) {
1624			if (g->pid)
1625				printk(" %d", g->pid);
1626			else
1627				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
1628		}
1629	}
1630	printk("\n\n");
1631	if (read_trylock(&tasklist_lock)) {
1632		do_each_thread (g, t) {
1633			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
1634			show_stack(t, NULL);
1635		} while_each_thread (g, t);
1636		read_unlock(&tasklist_lock);
1637	}
1638	/* FIXME: This will not restore zapped printk locks. */
1639	RESTORE_LOGLEVEL(console_loglevel);
1640	return NOTIFY_DONE;
1641}
1642
1643/*
1644 * C portion of the OS INIT handler
1645 *
1646 * Called from ia64_os_init_dispatch
1647 *
1648 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state for
1649 * this event.  This code is used for both monarch and slave INIT events, see
1650 * sos->monarch.
1651 *
1652 * All INIT events switch to the INIT stack and change the previous process to
1653 * blocked status.  If one of the INIT events is the monarch then we are
1654 * probably processing the nmi button/command.  Use the monarch cpu to dump all
1655 * the processes.  The slave INIT events all spin until the monarch cpu
1656 * returns.  We can also get INIT slave events for MCA, in which case the MCA
1657 * process is the monarch.
1658 */
1659
1660void
1661ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
1662		  struct ia64_sal_os_state *sos)
1663{
1664	static atomic_t slaves;
1665	static atomic_t monarchs;
1666	struct task_struct *previous_current;
1667	int cpu = smp_processor_id();
1668	struct ia64_mca_notify_die nd =
1669		{ .sos = sos, .monarch_cpu = &monarch_cpu };
1670
1671	NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
1672
1673	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
1674		sos->proc_state_param, cpu, sos->monarch);
1675	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);
1676
1677	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
1678	sos->os_status = IA64_INIT_RESUME;
1679
1680	/* FIXME: Workaround for broken proms that drive all INIT events as
1681	 * slaves.  The last slave that enters is promoted to be a monarch.
1682	 * Remove this code in September 2006, that gives platforms a year to
1683	 * fix their proms and get their customers updated.
1684	 */
1685	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
1686		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
1687		        __func__, cpu);
1688		atomic_dec(&slaves);
1689		sos->monarch = 1;
1690	}
1691
1692	/* FIXME: Workaround for broken proms that drive all INIT events as
1693	 * monarchs.  Second and subsequent monarchs are demoted to slaves.
1694	 * Remove this code in September 2006, that gives platforms a year to
1695	 * fix their proms and get their customers updated.
1696	 */
1697	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
1698		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
1699			       __func__, cpu);
1700		atomic_dec(&monarchs);
1701		sos->monarch = 0;
1702	}
1703
1704	if (!sos->monarch) {
1705		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
1706
1707#ifdef CONFIG_KEXEC
1708		while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
1709			udelay(1000);
1710#else
1711		while (monarch_cpu == -1)
1712			cpu_relax();	/* spin until monarch enters */
1713#endif
1714
1715		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
1716		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
1717
1718#ifdef CONFIG_KEXEC
1719		while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
1720			udelay(1000);
1721#else
1722		while (monarch_cpu != -1)
1723			cpu_relax();	/* spin until monarch leaves */
1724#endif
1725
1726		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
1727
1728		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
1729		set_curr_task(cpu, previous_current);
1730		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1731		atomic_dec(&slaves);
1732		return;
1733	}
1734
1735	monarch_cpu = cpu;
1736	NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
1737
1738	/*
1739	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000), INIT can be
1740	 * generated via the BMC's command-line interface, but since the console is on the
1741	 * same serial line, the user will need some time to switch out of the BMC before
1742	 * the dump begins.
1743	 */
1744	mprintk("Delaying for 5 seconds...\n");
1745	udelay(5*1000000);
1746	ia64_wait_for_slaves(cpu, "INIT");
1747	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
1748	 * to default_monarch_init_process() above and just print all the
1749	 * tasks.
1750	 */
1751	NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
1752	NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
1753
1754	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
1755	atomic_dec(&monarchs);
1756	set_curr_task(cpu, previous_current);
1757	monarch_cpu = -1;
1758	return;
1759}
1760
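The whole monarch/slave handshake above boils down to two shared variables: slaves spin until the monarch publishes its cpu id, check in, then spin again until the monarch writes -1. A minimal userspace model of that protocol (a sketch only; pthreads stand in for cpus, and all names below are the sketch's own, not kernel symbols):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NSLAVES 3

static atomic_int monarch = -1;		/* models monarch_cpu */
static atomic_int checked_in;		/* models the rendezvous checkin flags */

static void *slave(void *arg)
{
	long id = (long)arg;

	while (atomic_load(&monarch) == -1)
		;				/* spin until monarch enters */
	atomic_fetch_add(&checked_in, 1);	/* rendezvous checkin */
	while (atomic_load(&monarch) != -1)
		;				/* spin until monarch leaves */
	printf("slave %ld back to normal service\n", id);
	return NULL;
}

int main(void)
{
	pthread_t t[NSLAVES];
	long i;

	for (i = 0; i < NSLAVES; i++)
		pthread_create(&t[i], NULL, slave, (void *)i);
	atomic_store(&monarch, 0);		/* monarch enters */
	while (atomic_load(&checked_in) < NSLAVES)
		;				/* ia64_wait_for_slaves() analogue */
	atomic_store(&monarch, -1);		/* monarch leaves, frees slaves */
	for (i = 0; i < NSLAVES; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Note how the monarch must wait for all checkins before clearing the flag; without that wait a slave could miss the published id entirely and spin forever, which is exactly why the real handler calls ia64_wait_for_slaves() before leaving.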
1761static int __init
1762ia64_mca_disable_cpe_polling(char *str)
1763{
1764	cpe_poll_enabled = 0;
1765	return 1;
1766}
1767
1768__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
1769
1770static struct irqaction cmci_irqaction = {
1771	.handler =	ia64_mca_cmc_int_handler,
1772	.flags =	IRQF_DISABLED,
1773	.name =		"cmc_hndlr"
1774};
1775
1776static struct irqaction cmcp_irqaction = {
1777	.handler =	ia64_mca_cmc_int_caller,
1778	.flags =	IRQF_DISABLED,
1779	.name =		"cmc_poll"
1780};
1781
1782static struct irqaction mca_rdzv_irqaction = {
1783	.handler =	ia64_mca_rendez_int_handler,
1784	.flags =	IRQF_DISABLED,
1785	.name =		"mca_rdzv"
1786};
1787
1788static struct irqaction mca_wkup_irqaction = {
1789	.handler =	ia64_mca_wakeup_int_handler,
1790	.flags =	IRQF_DISABLED,
1791	.name =		"mca_wkup"
1792};
1793
1794#ifdef CONFIG_ACPI
1795static struct irqaction mca_cpe_irqaction = {
1796	.handler =	ia64_mca_cpe_int_handler,
1797	.flags =	IRQF_DISABLED,
1798	.name =		"cpe_hndlr"
1799};
1800
1801static struct irqaction mca_cpep_irqaction = {
1802	.handler =	ia64_mca_cpe_int_caller,
1803	.flags =	IRQF_DISABLED,
1804	.name =		"cpe_poll"
1805};
1806#endif /* CONFIG_ACPI */
1807
1808/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
1809 * these stacks can never sleep, cannot return from the kernel to user
1810 * space, and do not appear in a normal ps listing.  So there is no need to
1811 * format most of the fields.
1812 */
1813
1814static void __cpuinit
1815format_mca_init_stack(void *mca_data, unsigned long offset,
1816		const char *type, int cpu)
1817{
1818	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
1819	struct thread_info *ti;
1820	memset(p, 0, KERNEL_STACK_SIZE);
1821	ti = task_thread_info(p);
1822	ti->flags = _TIF_MCA_INIT;
1823	ti->preempt_count = 1;
1824	ti->task = p;
1825	ti->cpu = cpu;
1826	p->stack = ti;
1827	p->state = TASK_UNINTERRUPTIBLE;
1828	cpu_set(cpu, p->cpus_allowed);
1829	INIT_LIST_HEAD(&p->tasks);
1830	p->parent = p->real_parent = p->group_leader = p;
1831	INIT_LIST_HEAD(&p->children);
1832	INIT_LIST_HEAD(&p->sibling);
1833	strncpy(p->comm, type, sizeof(p->comm)-1);
1834}
1835
1836/* Caller prevents this from being called after init */
1837static void * __init_refok mca_bootmem(void)
1838{
1839	return __alloc_bootmem(sizeof(struct ia64_mca_cpu),
1840	                    KERNEL_STACK_SIZE, 0);
1841}
1842
1843/* Do per-CPU MCA-related initialization.  */
1844void __cpuinit
1845ia64_mca_cpu_init(void *cpu_data)
1846{
1847	void *pal_vaddr;
1848	void *data;
1849	long sz = sizeof(struct ia64_mca_cpu);
1850	int cpu = smp_processor_id();
1851	static int first_time = 1;
1852
1853	/*
1854	 * Structure will already be allocated if cpu has been online,
1855	 * then offlined.
1856	 */
1857	if (__per_cpu_mca[cpu]) {
1858		data = __va(__per_cpu_mca[cpu]);
1859	} else {
1860		if (first_time) {
1861			data = mca_bootmem();
1862			first_time = 0;
1863		} else
1864			data = (void *)__get_free_pages(GFP_KERNEL,
1865							get_order(sz));
1866		if (!data)
1867			panic("Could not allocate MCA memory for cpu %d\n",
1868					cpu);
1869	}
1870	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
1871		"MCA", cpu);
1872	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
1873		"INIT", cpu);
1874	__get_cpu_var(ia64_mca_data) = __per_cpu_mca[cpu] = __pa(data);
1875
1876	/*
1877	 * Stash away a copy of the PTE needed to map the per-CPU page.
1878	 * We may need it during MCA recovery.
1879	 */
1880	__get_cpu_var(ia64_mca_per_cpu_pte) =
1881		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL));
1882
1883	/*
1884	 * Also, stash away a copy of the PAL address and the PTE
1885	 * needed to map it.
1886	 */
1887	pal_vaddr = efi_get_pal_addr();
1888	if (!pal_vaddr)
1889		return;
1890	__get_cpu_var(ia64_mca_pal_base) =
1891		GRANULEROUNDDOWN((unsigned long) pal_vaddr);
1892	__get_cpu_var(ia64_mca_pal_pte) = pte_val(mk_pte_phys(__pa(pal_vaddr),
1893							      PAGE_KERNEL));
1894}
1895
1896static void __cpuinit ia64_mca_cmc_vector_adjust(void *dummy)
1897{
1898	unsigned long flags;
1899
1900	local_irq_save(flags);
1901	if (!cmc_polling_enabled)
1902		ia64_mca_cmc_vector_enable(NULL);
1903	local_irq_restore(flags);
1904}
1905
1906static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
1907				      unsigned long action,
1908				      void *hcpu)
1909{
1910	int hotcpu = (unsigned long) hcpu;
1911
1912	switch (action) {
1913	case CPU_ONLINE:
1914	case CPU_ONLINE_FROZEN:
1915		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
1916					 NULL, 0);
1917		break;
1918	}
1919	return NOTIFY_OK;
1920}
1921
1922static struct notifier_block mca_cpu_notifier __cpuinitdata = {
1923	.notifier_call = mca_cpu_callback
1924};
1925
1926/*
1927 * ia64_mca_init
1928 *
1929 *  Do all the system level mca specific initialization.
1930 *
1931 *	1. Register spinloop and wakeup request interrupt vectors
1932 *
1933 *	2. Register OS_MCA handler entry point
1934 *
1935 *	3. Register OS_INIT handler entry point
1936 *
1937 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
1938 *
1939 *  Note that this initialization is done very early before some kernel
1940 *  services are available.
1941 *
1942 *  Inputs  :   None
1943 *
1944 *  Outputs :   None
1945 */
1946void __init
1947ia64_mca_init(void)
1948{
1949	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
1950	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
1951	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
1952	int i;
1953	long rc;
1954	struct ia64_sal_retval isrv;
1955	unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
1956	static struct notifier_block default_init_monarch_nb = {
1957		.notifier_call = default_monarch_init_process,
1958		.priority = 0, /* we need to be notified last */
1959	};
1960
1961	IA64_MCA_DEBUG("%s: begin\n", __func__);
1962
1963	/* Clear the Rendez checkin flag for all cpus */
1964	for (i = 0; i < NR_CPUS; i++)
1965		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
1966
1967	/*
1968	 * Register the rendezvous spinloop and wakeup mechanism with SAL
1969	 */
1970
1971	/* Register the rendezvous interrupt vector with SAL */
1972	while (1) {
1973		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
1974					      SAL_MC_PARAM_MECHANISM_INT,
1975					      IA64_MCA_RENDEZ_VECTOR,
1976					      timeout,
1977					      SAL_MC_PARAM_RZ_ALWAYS);
1978		rc = isrv.status;
1979		if (rc == 0)
1980			break;
1981		if (rc == -2) {
1982			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
1983				"%ld to %ld milliseconds\n", timeout, isrv.v0);
1984			timeout = isrv.v0;
1985			NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
1986			continue;
1987		}
1988		printk(KERN_ERR "Failed to register rendezvous interrupt "
1989		       "with SAL (status %ld)\n", rc);
1990		return;
1991	}
1992
1993	/* Register the wakeup interrupt vector with SAL */
1994	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
1995				      SAL_MC_PARAM_MECHANISM_INT,
1996				      IA64_MCA_WAKEUP_VECTOR,
1997				      0, 0);
1998	rc = isrv.status;
1999	if (rc) {
2000		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
2001		       "(status %ld)\n", rc);
2002		return;
2003	}
2004
2005	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);
2006
2007	ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
2008	/*
2009	 * XXX - disable SAL checksum by setting size to 0; should be
2010	 *	ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
2011	 */
2012	ia64_mc_info.imi_mca_handler_size	= 0;
2013
2014	/* Register the os mca handler with SAL */
2015	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
2016				       ia64_mc_info.imi_mca_handler,
2017				       ia64_tpa(mca_hldlr_ptr->gp),
2018				       ia64_mc_info.imi_mca_handler_size,
2019				       0, 0, 0)))
2020	{
2021		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
2022		       "(status %ld)\n", rc);
2023		return;
2024	}
2025
2026	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
2027		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));
2028
2029	/*
2030	 * XXX - disable SAL checksum by setting size to 0, should be
2031	 * size of the actual init handler in mca_asm.S.
2032	 */
2033	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(init_hldlr_ptr_monarch->fp);
2034	ia64_mc_info.imi_monarch_init_handler_size	= 0;
2035	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(init_hldlr_ptr_slave->fp);
2036	ia64_mc_info.imi_slave_init_handler_size	= 0;
2037
2038	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
2039		       ia64_mc_info.imi_monarch_init_handler);
2040
2041	/* Register the os init handler with SAL */
2042	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
2043				       ia64_mc_info.imi_monarch_init_handler,
2044				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
2045				       ia64_mc_info.imi_monarch_init_handler_size,
2046				       ia64_mc_info.imi_slave_init_handler,
2047				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
2048				       ia64_mc_info.imi_slave_init_handler_size)))
2049	{
2050		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
2051		       "(status %ld)\n", rc);
2052		return;
2053	}
2054	if (register_die_notifier(&default_init_monarch_nb)) {
2055		printk(KERN_ERR "Failed to register default monarch INIT process\n");
2056		return;
2057	}
2058
2059	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);
2060
2061	/* Initialize the areas set aside by the OS to buffer the
2062	 * platform/processor error states for MCA/INIT/CMC
2063	 * handling.
2064	 */
2065	ia64_log_init(SAL_INFO_TYPE_MCA);
2066	ia64_log_init(SAL_INFO_TYPE_INIT);
2067	ia64_log_init(SAL_INFO_TYPE_CMC);
2068	ia64_log_init(SAL_INFO_TYPE_CPE);
2069
2070	mca_init = 1;
2071	printk(KERN_INFO "MCA related initialization done\n");
2072}
2073
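The rendezvous registration in ia64_mca_init() is a negotiate-and-retry loop: status -2 is not a hard error but SAL's counter-offer of a larger timeout, which the OS adopts before retrying. Reduced to its shape (a sketch; fw_set_rendez_timeout() is a hypothetical stand-in for ia64_sal_mc_set_params()):

#include <stdio.h>

/* Toy model of SAL's behaviour: reject timeouts below 200ms with
 * status -2 and a suggested minimum, mirroring the rc == -2 path
 * in ia64_mca_init() above. */
static long fw_set_rendez_timeout(long timeout, long *suggested)
{
	if (timeout < 200) {
		*suggested = 200;
		return -2;
	}
	return 0;
}

static int register_rendez(long timeout)
{
	long suggested, rc;

	for (;;) {
		rc = fw_set_rendez_timeout(timeout, &suggested);
		if (rc == 0)
			return 0;			/* registered */
		if (rc != -2)
			return -1;			/* hard failure */
		printf("increasing timeout %ld -> %ld ms\n",
		       timeout, suggested);
		timeout = suggested;			/* adopt counter-offer */
	}
}

int main(void)
{
	return register_rendez(100);
}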
2074/*
2075 * ia64_mca_late_init
2076 *
2077 *	Opportunity to setup things that require initialization later
2078 *	than ia64_mca_init.  Setup a timer to poll for CPEs if the
2079 *	platform doesn't support an interrupt driven mechanism.
2080 *
2081 *  Inputs  :   None
2082 *  Outputs :   Status
2083 */
2084static int __init
2085ia64_mca_late_init(void)
2086{
2087	if (!mca_init)
2088		return 0;
2089
2090	/*
2091	 *  Configure the CMCI/P vector and handler. Interrupts for CMC are
2092	 *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
2093	 */
2094	register_percpu_irq(IA64_CMC_VECTOR, &cmci_irqaction);
2095	register_percpu_irq(IA64_CMCP_VECTOR, &cmcp_irqaction);
2096	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */
2097
2098	/* Setup the MCA rendezvous interrupt vector */
2099	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, &mca_rdzv_irqaction);
2100
2101	/* Setup the MCA wakeup interrupt vector */
2102	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
2103
2104#ifdef CONFIG_ACPI
2105	/* Setup the CPEI/P handler */
2106	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
2107#endif
2108
2109	register_hotcpu_notifier(&mca_cpu_notifier);
2110
2111	/* Setup the CMCI/P vector and handler */
2112	init_timer(&cmc_poll_timer);
2113	cmc_poll_timer.function = ia64_mca_cmc_poll;
2114
2115	/* Unmask/enable the vector */
2116	cmc_polling_enabled = 0;
2117	schedule_work(&cmc_enable_work);
2118
2119	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);
2120
2121#ifdef CONFIG_ACPI
2122	/* Setup the CPEI/P vector and handler */
2123	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
2124	init_timer(&cpe_poll_timer);
2125	cpe_poll_timer.function = ia64_mca_cpe_poll;
2126
2127	{
2128		unsigned int irq;
2129
2130		if (cpe_vector >= 0) {
2131			/* If platform supports CPEI, enable the irq. */
2132			irq = local_vector_to_irq(cpe_vector);
2133			if (irq > 0) {
2134				cpe_poll_enabled = 0;
2135				irq_set_status_flags(irq, IRQ_PER_CPU);
2136				setup_irq(irq, &mca_cpe_irqaction);
2137				ia64_cpe_irq = irq;
2138				ia64_mca_register_cpev(cpe_vector);
2139				IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
2140					__func__);
2141				return 0;
2142			}
2143			printk(KERN_ERR "%s: Failed to find irq for CPE "
2144					"interrupt handler, vector %d\n",
2145					__func__, cpe_vector);
2146		}
2147		/* If platform doesn't support CPEI, get the timer going. */
2148		if (cpe_poll_enabled) {
2149			ia64_mca_cpe_poll(0UL);
2150			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
2151		}
2152	}
2153#endif
2154
2155	return 0;
2156}
2157
2158device_initcall(ia64_mca_late_init);
v5.9
   1// SPDX-License-Identifier: GPL-2.0-only
  73#include <linux/jiffies.h>
  74#include <linux/types.h>
  75#include <linux/init.h>
  76#include <linux/sched/signal.h>
  77#include <linux/sched/debug.h>
  78#include <linux/sched/task.h>
  79#include <linux/interrupt.h>
  80#include <linux/irq.h>
  81#include <linux/memblock.h>
  82#include <linux/acpi.h>
  83#include <linux/timer.h>
  84#include <linux/module.h>
  85#include <linux/kernel.h>
  86#include <linux/smp.h>
  87#include <linux/workqueue.h>
  88#include <linux/cpumask.h>
  89#include <linux/kdebug.h>
  90#include <linux/cpu.h>
  91#include <linux/gfp.h>
  92
  93#include <asm/delay.h>
  94#include <asm/meminit.h>
  95#include <asm/page.h>
  96#include <asm/ptrace.h>
  97#include <asm/sal.h>
  98#include <asm/mca.h>
  99#include <asm/kexec.h>
 100
 101#include <asm/irq.h>
 102#include <asm/hw_irq.h>
 103#include <asm/tlb.h>
 104
 105#include "mca_drv.h"
 106#include "entry.h"
 107#include "irq.h"
 108
 109#if defined(IA64_MCA_DEBUG_INFO)
 110# define IA64_MCA_DEBUG(fmt...)	printk(fmt)
 111#else
 112# define IA64_MCA_DEBUG(fmt...)
 113#endif
 114
 115#define NOTIFY_INIT(event, regs, arg, spin)				\
 116do {									\
 117	if ((notify_die((event), "INIT", (regs), (arg), 0, 0)		\
 118			== NOTIFY_STOP) && ((spin) == 1))		\
 119		ia64_mca_spin(__func__);				\
 120} while (0)
 121
 122#define NOTIFY_MCA(event, regs, arg, spin)				\
 123do {									\
 124	if ((notify_die((event), "MCA", (regs), (arg), 0, 0)		\
 125			== NOTIFY_STOP) && ((spin) == 1))		\
 126		ia64_mca_spin(__func__);				\
 127} while (0)
 128
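Both macros route through notify_die(), so any registered die notifier can claim an event; returning NOTIFY_STOP from a monarch event with spin == 1 makes the macro spin in ia64_mca_spin() instead of returning to SAL. A sketch of such a hook, modelled on the default_init_monarch_nb registration later in this file (illustrative only, not code from this file):

#include <linux/kdebug.h>
#include <linux/notifier.h>

static int my_init_notify(struct notifier_block *nb, unsigned long val,
			  void *data)
{
	if (val == DIE_INIT_MONARCH_PROCESS)
		/* claim the event; the NOTIFY_INIT() macro then spins
		 * in ia64_mca_spin() instead of continuing */
		return NOTIFY_STOP;
	return NOTIFY_DONE;
}

static struct notifier_block my_init_nb = {
	.notifier_call	= my_init_notify,
	.priority	= 1,	/* ahead of the default priority-0 handler */
};

/* at init time: register_die_notifier(&my_init_nb); */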
 129/* Used by mca_asm.S */
 130DEFINE_PER_CPU(u64, ia64_mca_data); /* == __per_cpu_mca[smp_processor_id()] */
 131DEFINE_PER_CPU(u64, ia64_mca_per_cpu_pte); /* PTE to map per-CPU area */
 132DEFINE_PER_CPU(u64, ia64_mca_pal_pte);	    /* PTE to map PAL code */
 133DEFINE_PER_CPU(u64, ia64_mca_pal_base);    /* vaddr PAL code granule */
 134DEFINE_PER_CPU(u64, ia64_mca_tr_reload);   /* Flag for TR reload */
 135
 136unsigned long __per_cpu_mca[NR_CPUS];
 137
 138/* In mca_asm.S */
 139extern void			ia64_os_init_dispatch_monarch (void);
 140extern void			ia64_os_init_dispatch_slave (void);
 141
 142static int monarch_cpu = -1;
 143
 144static ia64_mc_info_t		ia64_mc_info;
 145
 146#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
 147#define MIN_CPE_POLL_INTERVAL (2*60*HZ)  /* 2 minutes */
 148#define CMC_POLL_INTERVAL     (1*60*HZ)  /* 1 minute */
 149#define CPE_HISTORY_LENGTH    5
 150#define CMC_HISTORY_LENGTH    5
 151
 152static struct timer_list cpe_poll_timer;
 153static struct timer_list cmc_poll_timer;
 154/*
 155 * This variable tells whether we are currently in polling mode.
 156 * Start with this in the wrong state so we won't play w/ timers
 157 * before the system is ready.
 158 */
 159static int cmc_polling_enabled = 1;
 160
 161/*
 162 * Clearing this variable prevents CPE polling from getting activated
 163 * in ia64_mca_late_init.  Use it if your system doesn't provide a CPEI,
 164 * but encounters problems retrieving CPE logs.  This should only be
 165 * necessary for debugging.
 166 */
 167static int cpe_poll_enabled = 1;
 168
 169extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
 170
 171static int mca_init __initdata;
 172
 173/*
 174 * limited & delayed printing support for MCA/INIT handler
 175 */
 176
 177#define mprintk(fmt...) ia64_mca_printk(fmt)
 178
 179#define MLOGBUF_SIZE (512+256*NR_CPUS)
 180#define MLOGBUF_MSGMAX 256
 181static char mlogbuf[MLOGBUF_SIZE];
 182static DEFINE_SPINLOCK(mlogbuf_wlock);	/* mca context only */
 183static DEFINE_SPINLOCK(mlogbuf_rlock);	/* normal context only */
 184static unsigned long mlogbuf_start;
 185static unsigned long mlogbuf_end;
 186static unsigned int mlogbuf_finished = 0;
 187static unsigned long mlogbuf_timestamp = 0;
 188
 189static int loglevel_save = -1;
 190#define BREAK_LOGLEVEL(__console_loglevel)		\
 191	oops_in_progress = 1;				\
 192	if (loglevel_save < 0)				\
 193		loglevel_save = __console_loglevel;	\
 194	__console_loglevel = 15;
 195
 196#define RESTORE_LOGLEVEL(__console_loglevel)		\
 197	if (loglevel_save >= 0) {			\
 198		__console_loglevel = loglevel_save;	\
 199		loglevel_save = -1;			\
 200	}						\
 201	mlogbuf_finished = 0;				\
 202	oops_in_progress = 0;
 203
 204/*
 205 * Push messages into buffer, print them later if not urgent.
 206 */
 207void ia64_mca_printk(const char *fmt, ...)
 208{
 209	va_list args;
 210	int printed_len;
 211	char temp_buf[MLOGBUF_MSGMAX];
 212	char *p;
 213
 214	va_start(args, fmt);
 215	printed_len = vscnprintf(temp_buf, sizeof(temp_buf), fmt, args);
 216	va_end(args);
 217
 218	/* Copy the output into mlogbuf */
 219	if (oops_in_progress) {
 220		/* mlogbuf was abandoned, use printk directly instead. */
 221		printk("%s", temp_buf);
 222	} else {
 223		spin_lock(&mlogbuf_wlock);
 224		for (p = temp_buf; *p; p++) {
 225			unsigned long next = (mlogbuf_end + 1) % MLOGBUF_SIZE;
 226			if (next != mlogbuf_start) {
 227				mlogbuf[mlogbuf_end] = *p;
 228				mlogbuf_end = next;
 229			} else {
 230				/* buffer full */
 231				break;
 232			}
 233		}
 234		mlogbuf[mlogbuf_end] = '\0';
 235		spin_unlock(&mlogbuf_wlock);
 236	}
 237}
 238EXPORT_SYMBOL(ia64_mca_printk);
 239
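mlogbuf is a classic one-slot-open ring buffer: the writer advances mlogbuf_end modulo the buffer size and treats the buffer as full when the next slot would collide with mlogbuf_start, so writer and reader never have to exchange a count. The same index arithmetic as a standalone model (a sketch; names are the sketch's own):

#include <stdio.h>

#define RBUF_SIZE 8

static char rbuf[RBUF_SIZE];
static unsigned long rstart, rend;	/* reader and writer indices */

/* mirror of the producer loop in ia64_mca_printk() */
static void rbuf_put(const char *s)
{
	for (; *s; s++) {
		unsigned long next = (rend + 1) % RBUF_SIZE;
		if (next == rstart)
			break;			/* full: one slot stays empty */
		rbuf[rend] = *s;
		rend = next;
	}
}

/* mirror of the consumer loop in ia64_mlogbuf_dump() */
static void rbuf_dump(void)
{
	while (rstart != rend) {
		putchar(rbuf[rstart]);
		rstart = (rstart + 1) % RBUF_SIZE;
	}
	putchar('\n');
}

int main(void)
{
	rbuf_put("hello, world");	/* only 7 chars fit in an 8-byte ring */
	rbuf_dump();
	return 0;
}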
 240/*
 241 * Print buffered messages.
 242 *  NOTE: call this after returning to normal context (e.g. from salinfod).
 243 */
 244void ia64_mlogbuf_dump(void)
 245{
 246	char temp_buf[MLOGBUF_MSGMAX];
 247	char *p;
 248	unsigned long index;
 249	unsigned long flags;
 250	unsigned int printed_len;
 251
 252	/* Get output from mlogbuf */
 253	while (mlogbuf_start != mlogbuf_end) {
 254		temp_buf[0] = '\0';
 255		p = temp_buf;
 256		printed_len = 0;
 257
 258		spin_lock_irqsave(&mlogbuf_rlock, flags);
 259
 260		index = mlogbuf_start;
 261		while (index != mlogbuf_end) {
 262			*p = mlogbuf[index];
 263			index = (index + 1) % MLOGBUF_SIZE;
 264			if (!*p)
 265				break;
 266			p++;
 267			if (++printed_len >= MLOGBUF_MSGMAX - 1)
 268				break;
 269		}
 270		*p = '\0';
 271		if (temp_buf[0])
 272			printk("%s", temp_buf);
 273		mlogbuf_start = index;
 274
 275		mlogbuf_timestamp = 0;
 276		spin_unlock_irqrestore(&mlogbuf_rlock, flags);
 277	}
 278}
 279EXPORT_SYMBOL(ia64_mlogbuf_dump);
 280
 281/*
 282 * Call this if the system is going down or if messages must be flushed to
 283 * the console immediately (e.g. recovery failed, a crash dump is about to
 284 * be invoked, a long-wait rendezvous, etc.)
 285 *  NOTE: this should be called from the monarch.
 286 */
 287static void ia64_mlogbuf_finish(int wait)
 288{
 289	BREAK_LOGLEVEL(console_loglevel);
 290
 291	spin_lock_init(&mlogbuf_rlock);
 292	ia64_mlogbuf_dump();
 293	printk(KERN_EMERG "mlogbuf_finish: printing switched to urgent mode, "
 294		"MCA/INIT might be dodgy or fail.\n");
 295
 296	if (!wait)
 297		return;
 298
 299	/* wait for console */
 300	printk("Delaying for 5 seconds...\n");
 301	udelay(5*1000000);
 302
 303	mlogbuf_finished = 1;
 304}
 305
 306/*
 307 * Print buffered messages from INIT context.
 308 */
 309static void ia64_mlogbuf_dump_from_init(void)
 310{
 311	if (mlogbuf_finished)
 312		return;
 313
 314	if (mlogbuf_timestamp &&
 315			time_before(jiffies, mlogbuf_timestamp + 30 * HZ)) {
 316		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT "
 317			"and the system seems to be messed up.\n");
 318		ia64_mlogbuf_finish(0);
 319		return;
 320	}
 321
 322	if (!spin_trylock(&mlogbuf_rlock)) {
 323		printk(KERN_ERR "INIT: mlogbuf_dump is interrupted by INIT. "
 324			"Generated messages other than stack dump will be "
 325			"buffered to mlogbuf and will be printed later.\n");
 326		printk(KERN_ERR "INIT: If messages are not printed after "
 327			"this INIT, wait 30 seconds and assert INIT again.\n");
 328		if (!mlogbuf_timestamp)
 329			mlogbuf_timestamp = jiffies;
 330		return;
 331	}
 332	spin_unlock(&mlogbuf_rlock);
 333	ia64_mlogbuf_dump();
 334}
 335
 336static inline void
 337ia64_mca_spin(const char *func)
 338{
 339	if (monarch_cpu == smp_processor_id())
 340		ia64_mlogbuf_finish(0);
 341	mprintk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
 342	while (1)
 343		cpu_relax();
 344}
 345/*
 346 * IA64_MCA log support
 347 */
 348#define IA64_MAX_LOGS		2	/* Double-buffering for nested MCAs */
 349#define IA64_MAX_LOG_TYPES      4   /* MCA, INIT, CMC, CPE */
 350
 351typedef struct ia64_state_log_s
 352{
 353	spinlock_t	isl_lock;
 354	int		isl_index;
 355	unsigned long	isl_count;
 356	ia64_err_rec_t  *isl_log[IA64_MAX_LOGS]; /* need space to store header + error log */
 357} ia64_state_log_t;
 358
 359static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES];
 360
 361#define IA64_LOG_LOCK_INIT(it) spin_lock_init(&ia64_state_log[it].isl_lock)
 362#define IA64_LOG_LOCK(it)      spin_lock_irqsave(&ia64_state_log[it].isl_lock, s)
 363#define IA64_LOG_UNLOCK(it)    spin_unlock_irqrestore(&ia64_state_log[it].isl_lock,s)
 364#define IA64_LOG_NEXT_INDEX(it)    ia64_state_log[it].isl_index
 365#define IA64_LOG_CURR_INDEX(it)    1 - ia64_state_log[it].isl_index
 366#define IA64_LOG_INDEX_INC(it) \
 367    {ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index; \
 368    ia64_state_log[it].isl_count++;}
 369#define IA64_LOG_INDEX_DEC(it) \
 370    ia64_state_log[it].isl_index = 1 - ia64_state_log[it].isl_index
 371#define IA64_LOG_NEXT_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)]))
 372#define IA64_LOG_CURR_BUFFER(it)   (void *)((ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)]))
 373#define IA64_LOG_COUNT(it)         ia64_state_log[it].isl_count
 374
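With exactly two buffers per log type, "current" and "next" collapse into one bit: isl_index names the next buffer, 1 - isl_index the current one, and IA64_LOG_INDEX_INC() flips between them so a record being filled never overwrites the record still being read. The pattern in miniature (a sketch, not the kernel structures):

#include <assert.h>

struct twolog {
	int idx;	/* next buffer; current is 1 - idx */
	char *buf[2];
};

static char a[16], b[16];

int main(void)
{
	struct twolog log = { .idx = 0, .buf = { a, b } };

	char *next = log.buf[log.idx];		/* IA64_LOG_NEXT_BUFFER */
	char *curr = log.buf[1 - log.idx];	/* IA64_LOG_CURR_BUFFER */
	assert(next != curr);

	log.idx = 1 - log.idx;			/* IA64_LOG_INDEX_INC */
	assert(log.buf[1 - log.idx] == next);	/* just-filled record is now current */
	return 0;
}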
 375static inline void ia64_log_allocate(int it, u64 size)
 376{
 377	ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)] =
 378		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
 379	if (!ia64_state_log[it].isl_log[IA64_LOG_CURR_INDEX(it)])
 380		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
 381
 382	ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)] =
 383		(ia64_err_rec_t *)memblock_alloc(size, SMP_CACHE_BYTES);
 384	if (!ia64_state_log[it].isl_log[IA64_LOG_NEXT_INDEX(it)])
 385		panic("%s: Failed to allocate %llu bytes\n", __func__, size);
 386}
 387
 388/*
 389 * ia64_log_init
 390 *	Reset the OS ia64 log buffer
 391 * Inputs   :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 392 * Outputs	:	None
 393 */
 394static void __init
 395ia64_log_init(int sal_info_type)
 396{
 397	u64	max_size = 0;
 398
 399	IA64_LOG_NEXT_INDEX(sal_info_type) = 0;
 400	IA64_LOG_LOCK_INIT(sal_info_type);
 401
 402	// SAL will tell us the maximum size of any error record of this type
 403	max_size = ia64_sal_get_state_info_size(sal_info_type);
 404	if (!max_size)
 405		/* memblock_alloc() doesn't like zero-sized allocations! */
 406		return;
 407
 408	// set up OS data structures to hold error info
 409	ia64_log_allocate(sal_info_type, max_size);
 410}
 411
 412/*
 413 * ia64_log_get
 414 *
 415 *	Get the current MCA log from SAL and copy it into the OS log buffer.
 416 *
 417 *  Inputs  :   info_type   (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE})
 418 *              irq_safe    whether you can use printk at this point
 419 *  Outputs :   size        (total record length)
 420 *              *buffer     (ptr to error record)
 421 *
 422 */
 423static u64
 424ia64_log_get(int sal_info_type, u8 **buffer, int irq_safe)
 425{
 426	sal_log_record_header_t     *log_buffer;
 427	u64                         total_len = 0;
 428	unsigned long               s;
 429
 430	IA64_LOG_LOCK(sal_info_type);
 431
 432	/* Get the process state information */
 433	log_buffer = IA64_LOG_NEXT_BUFFER(sal_info_type);
 434
 435	total_len = ia64_sal_get_state_info(sal_info_type, (u64 *)log_buffer);
 436
 437	if (total_len) {
 438		IA64_LOG_INDEX_INC(sal_info_type);
 439		IA64_LOG_UNLOCK(sal_info_type);
 440		if (irq_safe) {
 441			IA64_MCA_DEBUG("%s: SAL error record type %d retrieved. Record length = %ld\n",
 442				       __func__, sal_info_type, total_len);
 443		}
 444		*buffer = (u8 *) log_buffer;
 445		return total_len;
 446	} else {
 447		IA64_LOG_UNLOCK(sal_info_type);
 448		return 0;
 449	}
 450}
 451
 452/*
 453 *  ia64_mca_log_sal_error_record
 454 *
 455 *  This function retrieves a specified error record type from SAL
 456 *  and wakes up any processes waiting for error records.
 457 *
 458 *  Inputs  :   sal_info_type   (Type of error record MCA/CMC/CPE)
 459 *              FIXME: remove MCA and irq_safe.
 460 */
 461static void
 462ia64_mca_log_sal_error_record(int sal_info_type)
 463{
 464	u8 *buffer;
 465	sal_log_record_header_t *rh;
 466	u64 size;
 467	int irq_safe = sal_info_type != SAL_INFO_TYPE_MCA;
 468#ifdef IA64_MCA_DEBUG_INFO
 469	static const char * const rec_name[] = { "MCA", "INIT", "CMC", "CPE" };
 470#endif
 471
 472	size = ia64_log_get(sal_info_type, &buffer, irq_safe);
 473	if (!size)
 474		return;
 475
 476	salinfo_log_wakeup(sal_info_type, buffer, size, irq_safe);
 477
 478	if (irq_safe)
 479		IA64_MCA_DEBUG("CPU %d: SAL log contains %s error record\n",
 480			smp_processor_id(),
 481			sal_info_type < ARRAY_SIZE(rec_name) ? rec_name[sal_info_type] : "UNKNOWN");
 482
 483	/* Clear logs from corrected errors in case there's no user-level logger */
 484	rh = (sal_log_record_header_t *)buffer;
 485	if (rh->severity == sal_log_severity_corrected)
 486		ia64_sal_clear_state_info(sal_info_type);
 487}
 488
 489/*
 490 * search_mca_table
 491 *  See if the MCA surfaced in an instruction range
 492 *  that has been tagged as recoverable.
 493 *
 494 *  Inputs
 495 *	first	First address range to check
 496 *	last	Last address range to check
 497 *	ip	Instruction pointer, address we are looking for
 498 *
 499 * Return value:
 500 *      1 on success (in the table) / 0 on failure (not in the table)
 501 */
 502int
 503search_mca_table (const struct mca_table_entry *first,
 504		  const struct mca_table_entry *last,
 505		  unsigned long ip)
 506{
 507	const struct mca_table_entry *curr;
 508	u64 curr_start, curr_end;
 509
 510	curr = first;
 511	while (curr <= last) {
 512		curr_start = (u64) &curr->start_addr + curr->start_addr;
 513		curr_end = (u64) &curr->end_addr + curr->end_addr;
 514
 515		if ((ip >= curr_start) && (ip <= curr_end)) {
 516			return 1;
 517		}
 518		curr++;
 519	}
 520	return 0;
 521}
 522
 523/* Given an address, look for it in the mca tables. */
 524int mca_recover_range(unsigned long addr)
 525{
 526	extern struct mca_table_entry __start___mca_table[];
 527	extern struct mca_table_entry __stop___mca_table[];
 528
 529	return search_mca_table(__start___mca_table, __stop___mca_table-1, addr);
 530}
 531EXPORT_SYMBOL_GPL(mca_recover_range);
 532
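search_mca_table() decodes self-relative entries: the target address is the address of the field plus the offset stored in the field, which keeps the table valid wherever the image is loaded. A userspace model of the same decoding (a sketch; the field types here are illustrative, the kernel's mca_table_entry layout may differ):

#include <stdint.h>
#include <stdio.h>

struct rel_entry {
	int32_t start_off;	/* target = &start_off + start_off */
	int32_t end_off;	/* target = &end_off + end_off */
};

static int in_range(const struct rel_entry *e, uintptr_t ip)
{
	uintptr_t start = (uintptr_t)&e->start_off + e->start_off;
	uintptr_t end   = (uintptr_t)&e->end_off + e->end_off;

	return ip >= start && ip <= end;
}

static char code[64];	/* stands in for a recoverable text range */

int main(void)
{
	struct rel_entry e = {
		.start_off = (int32_t)(code - (char *)&e.start_off),
		.end_off   = (int32_t)(code + 64 - (char *)&e.end_off),
	};

	printf("%d\n", in_range(&e, (uintptr_t)(code + 10)));	/* 1 */
	printf("%d\n", in_range(&e, (uintptr_t)code - 1));	/* 0 */
	return 0;
}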
 533int cpe_vector = -1;
 534int ia64_cpe_irq = -1;
 535
 536static irqreturn_t
 537ia64_mca_cpe_int_handler (int cpe_irq, void *arg)
 538{
 539	static unsigned long	cpe_history[CPE_HISTORY_LENGTH];
 540	static int		index;
 541	static DEFINE_SPINLOCK(cpe_history_lock);
 542
 543	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
 544		       __func__, cpe_irq, smp_processor_id());
 545
 546	/* SAL spec states this should run w/ interrupts enabled */
 547	local_irq_enable();
 548
 549	spin_lock(&cpe_history_lock);
 550	if (!cpe_poll_enabled && cpe_vector >= 0) {
 551
 552		int i, count = 1; /* we know 1 happened now */
 553		unsigned long now = jiffies;
 554
 555		for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
 556			if (now - cpe_history[i] <= HZ)
 557				count++;
 558		}
 559
 560		IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
 561		if (count >= CPE_HISTORY_LENGTH) {
 562
 563			cpe_poll_enabled = 1;
 564			spin_unlock(&cpe_history_lock);
 565			disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
 566
 567			/*
 568			 * Corrected errors will still be corrected, but
 569			 * make sure there's a log somewhere that indicates
 570			 * something is generating more than we can handle.
 571			 */
 572			printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
 573
 574			mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
 575
 576			/* lock already released, get out now */
 577			goto out;
 578		} else {
 579			cpe_history[index++] = now;
 580			if (index == CPE_HISTORY_LENGTH)
 581				index = 0;
 582		}
 583	}
 584	spin_unlock(&cpe_history_lock);
 585out:
 586	/* Get the CPE error record and log it */
 587	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
 588
 589	local_irq_disable();
 590
 591	return IRQ_HANDLED;
 592}
 593
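The CPE handler above throttles interrupt storms with a short ring of timestamps: once CPE_HISTORY_LENGTH interrupts land within one second, it disables the vector and falls back to timer polling. The detection logic in isolation (a sketch mirroring the loop above):

#include <stdio.h>

#define HIST_LEN 5	/* CPE_HISTORY_LENGTH analogue */
#define HZ 100		/* ticks per second */

static unsigned long history[HIST_LEN];
static int slot;

/* Returns 1 when the HIST_LEN'th event within one second arrives,
 * i.e. when the caller should disable the vector and poll instead. */
static int storm(unsigned long now)
{
	int i, count = 1;	/* we know 1 happened now */

	for (i = 0; i < HIST_LEN; i++)
		if (now - history[i] <= HZ)
			count++;
	if (count >= HIST_LEN)
		return 1;
	history[slot++] = now;
	if (slot == HIST_LEN)
		slot = 0;
	return 0;
}

int main(void)
{
	unsigned long t;

	for (t = 1000; ; t += 10)	/* a burst: one event every 10 ticks */
		if (storm(t)) {
			printf("storm detected at tick %lu, switch to polling\n", t);
			return 0;
		}
}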
 594/*
 595 * ia64_mca_register_cpev
 596 *
 597 *  Register the corrected platform error vector with SAL.
 598 *
 599 *  Inputs
 600 *      cpev        Corrected Platform Error Vector number
 601 *
 602 *  Outputs
 603 *      None
 604 */
 605void
 606ia64_mca_register_cpev (int cpev)
 607{
 608	/* Register the CPE interrupt vector with SAL */
 609	struct ia64_sal_retval isrv;
 610
 611	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_CPE_INT, SAL_MC_PARAM_MECHANISM_INT, cpev, 0, 0);
 612	if (isrv.status) {
 613		printk(KERN_ERR "Failed to register Corrected Platform "
 614		       "Error interrupt vector with SAL (status %ld)\n", isrv.status);
 615		return;
 616	}
 617
 618	IA64_MCA_DEBUG("%s: corrected platform error "
 619		       "vector %#x registered\n", __func__, cpev);
 620}
 621
 622/*
 623 * ia64_mca_cmc_vector_setup
 624 *
 625 *  Setup the corrected machine check vector register in the processor.
 626 *  (The interrupt is masked on boot; ia64_mca_late_init() unmasks it.)
 627 *  This function is invoked on a per-processor basis.
 628 *
 629 * Inputs
 630 *      None
 631 *
 632 * Outputs
 633 *	None
 634 */
 635void
 636ia64_mca_cmc_vector_setup (void)
 637{
 638	cmcv_reg_t	cmcv;
 639
 640	cmcv.cmcv_regval	= 0;
 641	cmcv.cmcv_mask		= 1;        /* Mask/disable interrupt at first */
 642	cmcv.cmcv_vector	= IA64_CMC_VECTOR;
 643	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 644
 645	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x registered.\n",
 646		       __func__, smp_processor_id(), IA64_CMC_VECTOR);
 647
 648	IA64_MCA_DEBUG("%s: CPU %d CMCV = %#016lx\n",
 649		       __func__, smp_processor_id(), ia64_getreg(_IA64_REG_CR_CMCV));
 650}
 651
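cmcv_reg_t overlays named bitfields on the raw 64-bit CMCV register, so code can flip cmcv_mask or set cmcv_vector by name and write the whole register back in one ia64_setreg(). The idiom in general form (a sketch; the field layout below is illustrative, not the real CMCV layout):

#include <stdint.h>
#include <stdio.h>

typedef union {
	uint64_t regval;			/* raw register image */
	struct {
		uint64_t vector   : 8;		/* illustrative layout only */
		uint64_t reserved : 47;
		uint64_t mask     : 1;
		uint64_t pad      : 8;
	} f;
} ctrl_reg_t;

int main(void)
{
	ctrl_reg_t r = { .regval = 0 };

	r.f.vector = 0x1f;	/* cmcv_vector analogue */
	r.f.mask = 1;		/* cmcv_mask analogue: interrupt disabled */
	printf("%#llx\n", (unsigned long long)r.regval);
	return 0;
}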
 652/*
 653 * ia64_mca_cmc_vector_disable
 654 *
 655 *  Mask the corrected machine check vector register in the processor.
 656 *  This function is invoked on a per-processor basis.
 657 *
 658 * Inputs
 659 *      dummy(unused)
 660 *
 661 * Outputs
 662 *	None
 663 */
 664static void
 665ia64_mca_cmc_vector_disable (void *dummy)
 666{
 667	cmcv_reg_t	cmcv;
 668
 669	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
 670
 671	cmcv.cmcv_mask = 1; /* Mask/disable interrupt */
 672	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 673
 674	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x disabled.\n",
 675		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 676}
 677
 678/*
 679 * ia64_mca_cmc_vector_enable
 680 *
 681 *  Unmask the corrected machine check vector register in the processor.
 682 *  This function is invoked on a per-processor basis.
 683 *
 684 * Inputs
 685 *      dummy(unused)
 686 *
 687 * Outputs
 688 *	None
 689 */
 690static void
 691ia64_mca_cmc_vector_enable (void *dummy)
 692{
 693	cmcv_reg_t	cmcv;
 694
 695	cmcv.cmcv_regval = ia64_getreg(_IA64_REG_CR_CMCV);
 696
 697	cmcv.cmcv_mask = 0; /* Unmask/enable interrupt */
 698	ia64_setreg(_IA64_REG_CR_CMCV, cmcv.cmcv_regval);
 699
 700	IA64_MCA_DEBUG("%s: CPU %d corrected machine check vector %#x enabled.\n",
 701		       __func__, smp_processor_id(), cmcv.cmcv_vector);
 702}
 703
 704/*
 705 * ia64_mca_cmc_vector_disable_keventd
 706 *
 707 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 708 * disable the cmc interrupt vector.
 709 */
 710static void
 711ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 712{
 713	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 714}
 715
 716/*
 717 * ia64_mca_cmc_vector_enable_keventd
 718 *
 719 * Called via keventd (smp_call_function() is not safe in interrupt context) to
 720 * enable the cmc interrupt vector.
 721 */
 722static void
 723ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 724{
 725	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 726}
 727
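These two helpers exist only to move the cross-CPU call out of interrupt context: the CMC interrupt handler queues work, and keventd later runs on_each_cpu() from process context, where smp_call_function() is legal. The deferral pattern, reduced to its shape (a sketch of the kernel idiom, not a complete module; my_* names are the sketch's own):

#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static void my_percpu_op(void *unused)
{
	/* per-cpu action, e.g. mask or unmask a vector */
}

static void my_vector_work(struct work_struct *unused)
{
	/* process context: cross-CPU calls are safe here */
	on_each_cpu(my_percpu_op, NULL, 0);
}
static DECLARE_WORK(my_work, my_vector_work);

static irqreturn_t my_int_handler(int irq, void *arg)
{
	schedule_work(&my_work);	/* defer; never on_each_cpu() here */
	return IRQ_HANDLED;
}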
 728/*
 729 * ia64_mca_wakeup
 730 *
 731 *	Send an inter-cpu interrupt to wake up a particular cpu.
 732 *
 733 *  Inputs  :   cpuid
 734 *  Outputs :   None
 735 */
 736static void
 737ia64_mca_wakeup(int cpu)
 738{
 739	ia64_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
 740}
 741
 742/*
 743 * ia64_mca_wakeup_all
 744 *
 745 *	Wakeup all the slave cpus which have rendez'ed previously.
 746 *
 747 *  Inputs  :   None
 748 *  Outputs :   None
 749 */
 750static void
 751ia64_mca_wakeup_all(void)
 752{
 753	int cpu;
 754
 755	/* Clear the Rendez checkin flag for all cpus */
 756	for_each_online_cpu(cpu) {
 757		if (ia64_mc_info.imi_rendez_checkin[cpu] == IA64_MCA_RENDEZ_CHECKIN_DONE)
 758			ia64_mca_wakeup(cpu);
 759	}
 760
 761}
 762
 763/*
 764 * ia64_mca_rendez_interrupt_handler
 765 *
 766 *	This is the handler used to put slave processors into a spinloop
 767 *	while the monarch processor does the mca handling and later
 768 *	wakes each slave up once the monarch is done.  The state
 769 *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
 770 *	in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
 771 *	the cpu has come out of OS rendezvous.
 772 *
 773 *  Inputs  :   None
 774 *  Outputs :   None
 775 */
 776static irqreturn_t
 777ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 778{
 779	unsigned long flags;
 780	int cpu = smp_processor_id();
 781	struct ia64_mca_notify_die nd =
 782		{ .sos = NULL, .monarch_cpu = &monarch_cpu };
 783
 784	/* Mask all interrupts */
 785	local_irq_save(flags);
 786
 787	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
 788
 789	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
 790	/* Register with the SAL monarch that the slave has
 791	 * reached SAL
 792	 */
 793	ia64_sal_mc_rendez();
 794
 795	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
 796
 797	/* Wait for the monarch cpu to exit. */
 798	while (monarch_cpu != -1)
 799	       cpu_relax();	/* spin until monarch leaves */
 800
 801	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
 802
 803	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 804	/* Enable all interrupts */
 805	local_irq_restore(flags);
 806	return IRQ_HANDLED;
 807}
 808
 809/*
 810 * ia64_mca_wakeup_int_handler
 811 *
 812 *	The interrupt handler for processing the inter-cpu interrupt to the
 813 *	slave cpu which was spinning in the rendez loop.
 814 *	Since this spinning is done by turning off the interrupts and
 815 *	polling on the wakeup-interrupt bit in the IRR, there is
 816 *	nothing useful to be done in the handler.
 817 *
 818 *  Inputs  :   wakeup_irq  (Wakeup-interrupt bit)
 819 *	arg		(Interrupt handler specific argument)
 820 *  Outputs :   None
 821 *
 822 */
 823static irqreturn_t
 824ia64_mca_wakeup_int_handler(int wakeup_irq, void *arg)
 825{
 826	return IRQ_HANDLED;
 827}
 828
 829/* Function pointer for extra MCA recovery */
 830int (*ia64_mca_ucmc_extension)
 831	(void *, struct ia64_sal_os_state *)
 832	= NULL;
 833
 834int
 835ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *))
 836{
 837	if (ia64_mca_ucmc_extension)
 838		return 1;
 839
 840	ia64_mca_ucmc_extension = fn;
 841	return 0;
 842}
 843
 844void
 845ia64_unreg_MCA_extension(void)
 846{
 847	if (ia64_mca_ucmc_extension)
 848		ia64_mca_ucmc_extension = NULL;
 849}
 850
 851EXPORT_SYMBOL(ia64_reg_MCA_extension);
 852EXPORT_SYMBOL(ia64_unreg_MCA_extension);
 853
 854
 855static inline void
 856copy_reg(const u64 *fr, u64 fnat, unsigned long *tr, unsigned long *tnat)
 857{
 858	u64 fslot, tslot, nat;
 859	*tr = *fr;
 860	fslot = ((unsigned long)fr >> 3) & 63;
 861	tslot = ((unsigned long)tr >> 3) & 63;
 862	*tnat &= ~(1UL << tslot);
 863	nat = (fnat >> fslot) & 1;
 864	*tnat |= (nat << tslot);
 865}
 866
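copy_reg() moves a register value and its NaT bit as a pair: the NaT bit number for a 64-bit save slot is just bits 8:3 of the slot's address, its doubleword index within a 512-byte window, so 64 slots share one 64-bit NaT word. The bit bookkeeping on its own (a standalone sketch of the same arithmetic):

#include <assert.h>
#include <stdint.h>

/* NaT bit index for a 64-bit save slot at address a: bits 8:3. */
static unsigned slot_of(const uint64_t *a)
{
	return ((uintptr_t)a >> 3) & 63;
}

static void copy_with_nat(const uint64_t *from, uint64_t fnat,
			  uint64_t *to, uint64_t *tnat)
{
	uint64_t nat = (fnat >> slot_of(from)) & 1;	/* source NaT bit */

	*to = *from;
	*tnat &= ~(1ULL << slot_of(to));		/* clear old bit */
	*tnat |= nat << slot_of(to);			/* install new bit */
}

int main(void)
{
	uint64_t src = 42, dst = 0, fnat, tnat = 0;

	fnat = 1ULL << slot_of(&src);			/* mark src as NaT */
	copy_with_nat(&src, fnat, &dst, &tnat);
	assert(dst == 42 && ((tnat >> slot_of(&dst)) & 1) == 1);
	return 0;
}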
 867/* Change the comm field on the MCA/INIT task to include the pid that
 868 * was interrupted; it makes for easier debugging.  If that pid was 0
 869 * (swapper or nested MCA/INIT) then use the start of the previous comm
 870 * field suffixed with its cpu.
 871 */
 872
 873static void
 874ia64_mca_modify_comm(const struct task_struct *previous_current)
 875{
 876	char *p, comm[sizeof(current->comm)];
 877	if (previous_current->pid)
 878		snprintf(comm, sizeof(comm), "%s %d",
 879			current->comm, previous_current->pid);
 880	else {
 881		int l;
 882		if ((p = strchr(previous_current->comm, ' ')))
 883			l = p - previous_current->comm;
 884		else
 885			l = strlen(previous_current->comm);
 886		snprintf(comm, sizeof(comm), "%s %*s %d",
 887			current->comm, l, previous_current->comm,
 888			task_thread_info(previous_current)->cpu);
 889	}
 890	memcpy(current->comm, comm, sizeof(current->comm));
 891}
 892
 893static void
 894finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
 895		unsigned long *nat)
 896{
 897	const pal_min_state_area_t *ms = sos->pal_min_state;
 898	const u64 *bank;
 899
 900	/* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
 901	 * pmsa_{xip,xpsr,xfs}
 902	 */
 903	if (ia64_psr(regs)->ic) {
 904		regs->cr_iip = ms->pmsa_iip;
 905		regs->cr_ipsr = ms->pmsa_ipsr;
 906		regs->cr_ifs = ms->pmsa_ifs;
 907	} else {
 908		regs->cr_iip = ms->pmsa_xip;
 909		regs->cr_ipsr = ms->pmsa_xpsr;
 910		regs->cr_ifs = ms->pmsa_xfs;
 911
 912		sos->iip = ms->pmsa_iip;
 913		sos->ipsr = ms->pmsa_ipsr;
 914		sos->ifs = ms->pmsa_ifs;
 915	}
 916	regs->pr = ms->pmsa_pr;
 917	regs->b0 = ms->pmsa_br0;
 918	regs->ar_rsc = ms->pmsa_rsc;
 919	copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
 920	copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
 921	copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
 922	copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
 923	copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
 924	copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
 925	copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
 926	copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
 927	copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
 928	copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
 929	copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
 930	if (ia64_psr(regs)->bn)
 931		bank = ms->pmsa_bank1_gr;
 932	else
 933		bank = ms->pmsa_bank0_gr;
 934	copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
 935	copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
 936	copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
 937	copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
 938	copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
 939	copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
 940	copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
 941	copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
 942	copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
 943	copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
 944	copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
 945	copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
 946	copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
 947	copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
 948	copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
 949	copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
 950}
 951
 952/* On entry to this routine, we are running on the per cpu stack, see
 953 * mca_asm.h.  The original stack has not been touched by this event.  Some of
 954 * the original stack's registers will be in the RBS on this stack.  This stack
 955 * also contains a partial pt_regs and switch_stack, the rest of the data is in
 956 * PAL minstate.
 957 *
 958 * The first thing to do is modify the original stack to look like a blocked
 959 * task so we can run backtrace on the original task.  Also mark the per cpu
 960 * stack as current to ensure that we use the correct task state, it also means
 961 * that we can do backtrace on the MCA/INIT handler code itself.
 962 */
 963
 964static struct task_struct *
 965ia64_mca_modify_original_stack(struct pt_regs *regs,
 966		const struct switch_stack *sw,
 967		struct ia64_sal_os_state *sos,
 968		const char *type)
 969{
 970	char *p;
 971	ia64_va va;
 972	extern char ia64_leave_kernel[];	/* Need asm address, not function descriptor */
 973	const pal_min_state_area_t *ms = sos->pal_min_state;
 974	struct task_struct *previous_current;
 975	struct pt_regs *old_regs;
 976	struct switch_stack *old_sw;
 977	unsigned size = sizeof(struct pt_regs) +
 978			sizeof(struct switch_stack) + 16;
 979	unsigned long *old_bspstore, *old_bsp;
 980	unsigned long *new_bspstore, *new_bsp;
 981	unsigned long old_unat, old_rnat, new_rnat, nat;
 982	u64 slots, loadrs = regs->loadrs;
 983	u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
 984	u64 ar_bspstore = regs->ar_bspstore;
 985	u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
 986	const char *msg;
 987	int cpu = smp_processor_id();
 988
 989	previous_current = curr_task(cpu);
 990	ia64_set_curr_task(cpu, current);
 991	if ((p = strchr(current->comm, ' ')))
 992		*p = '\0';
 993
 994	/* Best effort attempt to cope with MCA/INIT delivered while in
 995	 * physical mode.
 996	 */
 997	regs->cr_ipsr = ms->pmsa_ipsr;
 998	if (ia64_psr(regs)->dt == 0) {
 999		va.l = r12;
1000		if (va.f.reg == 0) {
1001			va.f.reg = 7;
1002			r12 = va.l;
1003		}
1004		va.l = r13;
1005		if (va.f.reg == 0) {
1006			va.f.reg = 7;
1007			r13 = va.l;
1008		}
1009	}
1010	if (ia64_psr(regs)->rt == 0) {
1011		va.l = ar_bspstore;
1012		if (va.f.reg == 0) {
1013			va.f.reg = 7;
1014			ar_bspstore = va.l;
1015		}
1016		va.l = ar_bsp;
1017		if (va.f.reg == 0) {
1018			va.f.reg = 7;
1019			ar_bsp = va.l;
1020		}
1021	}
1022
1023	/* mca_asm.S ia64_old_stack() cannot assume that the dirty registers
1024	 * have been copied to the old stack, the old stack may fail the
1025	 * validation tests below.  So ia64_old_stack() must restore the dirty
1026	 * registers from the new stack.  The old and new bspstore probably
1027	 * have different alignments, so loadrs calculated on the old bsp
1028	 * cannot be used to restore from the new bsp.  Calculate a suitable
1029	 * loadrs for the new stack and save it in the new pt_regs, where
1030	 * ia64_old_stack() can get it.
1031	 */
1032	old_bspstore = (unsigned long *)ar_bspstore;
1033	old_bsp = (unsigned long *)ar_bsp;
1034	slots = ia64_rse_num_regs(old_bspstore, old_bsp);
1035	new_bspstore = (unsigned long *)((u64)current + IA64_RBS_OFFSET);
1036	new_bsp = ia64_rse_skip_regs(new_bspstore, slots);
1037	regs->loadrs = (new_bsp - new_bspstore) * 8 << 16;
1038
1039	/* Verify the previous stack state before we change it */
1040	if (user_mode(regs)) {
1041		msg = "occurred in user space";
1042		/* previous_current is guaranteed to be valid when the task was
1043		 * in user space, so ...
1044		 */
1045		ia64_mca_modify_comm(previous_current);
1046		goto no_mod;
1047	}
1048
1049	if (r13 != sos->prev_IA64_KR_CURRENT) {
1050		msg = "inconsistent previous current and r13";
1051		goto no_mod;
1052	}
1053
1054	if (!mca_recover_range(ms->pmsa_iip)) {
1055		if ((r12 - r13) >= KERNEL_STACK_SIZE) {
1056			msg = "inconsistent r12 and r13";
1057			goto no_mod;
1058		}
1059		if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) {
1060			msg = "inconsistent ar.bspstore and r13";
1061			goto no_mod;
1062		}
1063		va.p = old_bspstore;
1064		if (va.f.reg < 5) {
1065			msg = "old_bspstore is in the wrong region";
1066			goto no_mod;
1067		}
1068		if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) {
1069			msg = "inconsistent ar.bsp and r13";
1070			goto no_mod;
1071		}
1072		size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8;
1073		if (ar_bspstore + size > r12) {
1074			msg = "no room for blocked state";
1075			goto no_mod;
1076		}
1077	}
1078
1079	ia64_mca_modify_comm(previous_current);
1080
1081	/* Make the original task look blocked.  First stack a struct pt_regs,
1082	 * describing the state at the time of interrupt.  mca_asm.S built a
1083	 * partial pt_regs, copy it and fill in the blanks using minstate.
1084	 */
1085	p = (char *)r12 - sizeof(*regs);
1086	old_regs = (struct pt_regs *)p;
1087	memcpy(old_regs, regs, sizeof(*regs));
1088	old_regs->loadrs = loadrs;
1089	old_unat = old_regs->ar_unat;
1090	finish_pt_regs(old_regs, sos, &old_unat);
1091
1092	/* Next stack a struct switch_stack.  mca_asm.S built a partial
1093	 * switch_stack, copy it and fill in the blanks using pt_regs and
1094	 * minstate.
1095	 *
1096	 * In the synthesized switch_stack, b0 points to ia64_leave_kernel,
1097	 * ar.pfs is set to 0.
1098	 *
1099	 * unwind.c::unw_unwind() does special processing for interrupt frames.
1100	 * It checks if the PRED_NON_SYSCALL predicate is set, if the predicate
1101	 * is clear then unw_unwind() does _not_ adjust bsp over pt_regs.  Not
1102	 * that this is documented, of course.  Set PRED_NON_SYSCALL in the
1103	 * switch_stack on the original stack so it will unwind correctly when
1104	 * unwind.c reads pt_regs.
1105	 *
1106	 * thread.ksp is updated to point to the synthesized switch_stack.
1107	 */
1108	p -= sizeof(struct switch_stack);
1109	old_sw = (struct switch_stack *)p;
1110	memcpy(old_sw, sw, sizeof(*sw));
1111	old_sw->caller_unat = old_unat;
1112	old_sw->ar_fpsr = old_regs->ar_fpsr;
1113	copy_reg(&ms->pmsa_gr[4-1], ms->pmsa_nat_bits, &old_sw->r4, &old_unat);
1114	copy_reg(&ms->pmsa_gr[5-1], ms->pmsa_nat_bits, &old_sw->r5, &old_unat);
1115	copy_reg(&ms->pmsa_gr[6-1], ms->pmsa_nat_bits, &old_sw->r6, &old_unat);
1116	copy_reg(&ms->pmsa_gr[7-1], ms->pmsa_nat_bits, &old_sw->r7, &old_unat);
1117	old_sw->b0 = (u64)ia64_leave_kernel;
1118	old_sw->b1 = ms->pmsa_br1;
1119	old_sw->ar_pfs = 0;
1120	old_sw->ar_unat = old_unat;
1121	old_sw->pr = old_regs->pr | (1UL << PRED_NON_SYSCALL);
1122	previous_current->thread.ksp = (u64)p - 16;
1123
1124	/* Finally copy the original stack's registers back to its RBS.
1125	 * Registers from ar.bspstore through ar.bsp at the time of the event
1126	 * are in the current RBS, copy them back to the original stack.  The
1127	 * copy must be done register by register because the original bspstore
1128	 * and the current one have different alignments, so the saved RNAT
1129	 * data occurs at different places.
1130	 *
1131	 * mca_asm does cover, so the old_bsp already includes all registers at
1132	 * the time of MCA/INIT.  It also does flushrs, so all registers before
1133	 * this function have been written to backing store on the MCA/INIT
1134	 * stack.
1135	 */
1136	new_rnat = ia64_get_rnat(ia64_rse_rnat_addr(new_bspstore));
1137	old_rnat = regs->ar_rnat;
1138	while (slots--) {
1139		if (ia64_rse_is_rnat_slot(new_bspstore)) {
1140			new_rnat = ia64_get_rnat(new_bspstore++);
1141		}
1142		if (ia64_rse_is_rnat_slot(old_bspstore)) {
1143			*old_bspstore++ = old_rnat;
1144			old_rnat = 0;
1145		}
1146		nat = (new_rnat >> ia64_rse_slot_num(new_bspstore)) & 1UL;
1147		old_rnat &= ~(1UL << ia64_rse_slot_num(old_bspstore));
1148		old_rnat |= (nat << ia64_rse_slot_num(old_bspstore));
1149		*old_bspstore++ = *new_bspstore++;
1150	}
1151	old_sw->ar_bspstore = (unsigned long)old_bspstore;
1152	old_sw->ar_rnat = old_rnat;
1153
1154	sos->prev_task = previous_current;
1155	return previous_current;
1156
1157no_mod:
1158	mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
1159			smp_processor_id(), type, msg);
1160	old_unat = regs->ar_unat;
1161	finish_pt_regs(regs, sos, &old_unat);
1162	return previous_current;
1163}
1164
1165/* The monarch/slave interaction is based on monarch_cpu and requires that all
1166 * slaves have entered rendezvous before the monarch leaves.  If any cpu has
1167 * not entered rendezvous yet then wait a bit.  The assumption is that any
1168 * slave that has not rendezvoused after a reasonable time is never going to do
1169 * so.  In this context, slave includes cpus that respond to the MCA rendezvous
1170 * interrupt, as well as cpus that receive the INIT slave event.
1171 */
1172
1173static void
1174ia64_wait_for_slaves(int monarch, const char *type)
1175{
1176	int c, i, wait;
1177
1178	/*
1179	 * wait 5 seconds total for slaves (arbitrary)
1180	 */
1181	for (i = 0; i < 5000; i++) {
1182		wait = 0;
1183		for_each_online_cpu(c) {
1184			if (c == monarch)
1185				continue;
1186			if (ia64_mc_info.imi_rendez_checkin[c]
1187					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
1188				udelay(1000);		/* short wait */
1189				wait = 1;
1190				break;
1191			}
1192		}
1193		if (!wait)
1194			goto all_in;
1195	}
1196
1197	/*
1198	 * Maybe slave(s) dead. Print buffered messages immediately.
1199	 */
1200	ia64_mlogbuf_finish(0);
1201	mprintk(KERN_INFO "OS %s slave did not rendezvous on cpu", type);
1202	for_each_online_cpu(c) {
1203		if (c == monarch)
1204			continue;
1205		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
1206			mprintk(" %d", c);
1207	}
1208	mprintk("\n");
1209	return;
1210
1211all_in:
1212	mprintk(KERN_INFO "All OS %s slaves have reached rendezvous\n", type);
1213	return;
1214}
1215
1216/*  mca_insert_tr
1217 *
1218 *  Switch rid when a TR reload is needed.
1219 *  iord: 1: itr, 2: dtr
1220 *
1221 */
1222static void mca_insert_tr(u64 iord)
1223{
1224
1225	int i;
1226	u64 old_rr;
1227	struct ia64_tr_entry *p;
1228	unsigned long psr;
1229	int cpu = smp_processor_id();
1230
1231	if (!ia64_idtrs[cpu])
1232		return;
1233
1234	psr = ia64_clear_ic();
1235	for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
1236		p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
1237		if (p->pte & 0x1) {
1238			old_rr = ia64_get_rr(p->ifa);
1239			if (old_rr != p->rr) {
1240				ia64_set_rr(p->ifa, p->rr);
1241				ia64_srlz_d();
1242			}
1243			ia64_ptr(iord, p->ifa, p->itir >> 2);
1244			ia64_srlz_i();
1245			if (iord & 0x1) {
1246				ia64_itr(0x1, i, p->ifa, p->pte, p->itir >> 2);
1247				ia64_srlz_i();
1248			}
1249			if (iord & 0x2) {
1250				ia64_itr(0x2, i, p->ifa, p->pte, p->itir >> 2);
1251				ia64_srlz_i();
1252			}
1253			if (old_rr != p->rr) {
1254				ia64_set_rr(p->ifa, old_rr);
1255				ia64_srlz_d();
1256			}
1257		}
1258	}
1259	ia64_set_psr(psr);
1260}

/*
 * ia64_mca_handler
 *
 *	This is the uncorrectable machine check handler, called from the
 *	OS_MCA dispatch code, which is in turn called from SAL_CHECK().
 *	This is where the core of OS MCA handling is done.  Right now the
 *	logs are extracted and displayed in a well-defined format.  This
 *	handler code is supposed to run only on the monarch processor.
 *	Once the monarch is done with MCA handling, further MCA logging is
 *	enabled by clearing the logs.  The monarch also has the duty of
 *	sending wakeup-IPIs to pull the slave processors out of the
 *	rendezvous spinloop.
 *
 *	If multiple processors call into OS_MCA, the first will become
 *	the monarch.  Subsequent cpus will be recorded in the mca_cpu
 *	bitmask.  After the first monarch has processed its MCA, it
 *	will wake up the next cpu in the mca_cpu bitmask and then go
 *	into the rendezvous loop.  When all processors have serviced
 *	their MCA, the last monarch frees up the rest of the processors.
 */
void
ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
		 struct ia64_sal_os_state *sos)
{
	int recover, cpu = smp_processor_id();
	struct task_struct *previous_current;
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu, .data = &recover };
	static atomic_t mca_count;
	static cpumask_t mca_cpu;

	if (atomic_add_return(1, &mca_count) == 1) {
		monarch_cpu = cpu;
		sos->monarch = 1;
	} else {
		cpumask_set_cpu(cpu, &mca_cpu);
		sos->monarch = 0;
	}
	mprintk(KERN_INFO "Entered OS MCA handler. PSP=%lx cpu=%d "
		"monarch=%ld\n", sos->proc_state_param, cpu, sos->monarch);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");

	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);

	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
	if (sos->monarch) {
		ia64_wait_for_slaves(cpu, "MCA");

		/* Wake up all the processors which are spinning in the
		 * rendezvous loop.  They will leave SAL, then spin in the OS
		 * with interrupts disabled until this monarch cpu leaves the
		 * MCA handler.  That gets control back to the OS so we can
		 * backtrace the other cpus; backtracing while they spin in
		 * SAL does not work.
		 */
		ia64_mca_wakeup_all();
	} else {
		while (cpumask_test_cpu(cpu, &mca_cpu))
			cpu_relax();	/* spin until monarch wakes us */
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);

	/* Get the MCA error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);

	/* MCA error recovery */
	recover = (ia64_mca_ucmc_extension
		&& ia64_mca_ucmc_extension(
			IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA),
			sos));

	if (recover) {
		sal_log_record_header_t *rh = IA64_LOG_CURR_BUFFER(SAL_INFO_TYPE_MCA);
		rh->severity = sal_log_severity_corrected;
		ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
		sos->os_status = IA64_MCA_CORRECTED;
	} else {
		/* Dump buffered message to console */
		ia64_mlogbuf_finish(1);
	}

	if (__this_cpu_read(ia64_mca_tr_reload)) {
		mca_insert_tr(0x1); /* Reload dynamic itrs */
		mca_insert_tr(0x2); /* Reload dynamic dtrs */
	}

	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);

	if (atomic_dec_return(&mca_count) > 0) {
		int i;

		/* wake up the next monarch cpu,
		 * and put this cpu in the rendez loop.
		 */
		for_each_online_cpu(i) {
			if (cpumask_test_cpu(i, &mca_cpu)) {
				monarch_cpu = i;
				cpumask_clear_cpu(i, &mca_cpu);	/* wake next cpu */
				while (monarch_cpu != -1)
					cpu_relax();	/* spin until last cpu leaves */
				ia64_set_curr_task(cpu, previous_current);
				ia64_mc_info.imi_rendez_checkin[cpu]
						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
				return;
			}
		}
	}
	ia64_set_curr_task(cpu, previous_current);
	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
}
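/*
 * The multiple-monarch hand-off above, in miniature.  Suppose cpus A and
 * B both take an MCA and A wins the atomic_add_return() race:
 *
 *	A: monarch_cpu = A; B records itself in mca_cpu
 *	B: spins in cpumask_test_cpu(B, &mca_cpu)
 *	A: handles its MCA, then sees mca_count > 0: sets monarch_cpu = B,
 *	   clears B from mca_cpu and spins until monarch_cpu == -1
 *	B: handles its MCA, sees mca_count drop to 0, sets monarch_cpu = -1,
 *	   releasing A and the rendezvoused slaves
 */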

static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
static DECLARE_WORK(cmc_enable_work, ia64_mca_cmc_vector_enable_keventd);

/*
 * ia64_mca_cmc_int_handler
 *
 *	This is the corrected machine check interrupt handler.
 *	Right now the logs are extracted and displayed in a well-defined
 *	format.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 *
 * Outputs
 *	None
 */
static irqreturn_t
ia64_mca_cmc_int_handler(int cmc_irq, void *arg)
{
	static unsigned long	cmc_history[CMC_HISTORY_LENGTH];
	static int		index;
	static DEFINE_SPINLOCK(cmc_history_lock);

	IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
		       __func__, cmc_irq, smp_processor_id());

	/* SAL spec states this should run w/ interrupts enabled */
	local_irq_enable();

	spin_lock(&cmc_history_lock);
	if (!cmc_polling_enabled) {
		int i, count = 1; /* we know 1 happened now */
		unsigned long now = jiffies;

		for (i = 0; i < CMC_HISTORY_LENGTH; i++) {
			if (now - cmc_history[i] <= HZ)
				count++;
		}

		IA64_MCA_DEBUG(KERN_INFO "CMC threshold %d/%d\n", count, CMC_HISTORY_LENGTH);
		if (count >= CMC_HISTORY_LENGTH) {

			cmc_polling_enabled = 1;
			spin_unlock(&cmc_history_lock);
			/* If we're being hit with CMC interrupts, we won't
			 * ever execute the schedule_work() below.  Need to
			 * disable CMC interrupts on this processor now.
			 */
			ia64_mca_cmc_vector_disable(NULL);
			schedule_work(&cmc_disable_work);

			/*
			 * Corrected errors will still be corrected, but
			 * make sure there's a log somewhere that indicates
			 * something is generating more than we can handle.
			 */
			printk(KERN_WARNING "WARNING: Switching to polling CMC handler; error records may be lost\n");

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);

			/* lock already released, get out now */
			goto out;
		} else {
			cmc_history[index++] = now;
			if (index == CMC_HISTORY_LENGTH)
				index = 0;
		}
	}
	spin_unlock(&cmc_history_lock);
out:
	/* Get the CMC error record and log it */
	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CMC);

	local_irq_disable();

	return IRQ_HANDLED;
}
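/*
 * Worked example of the threshold logic above: the handler keeps the
 * timestamps of the last CMC_HISTORY_LENGTH interrupts.  Counting the
 * one currently being handled (count starts at 1), if
 * CMC_HISTORY_LENGTH or more CMCs land within one second (HZ jiffies),
 * the rate is deemed too high: the CMC vector is masked on every cpu
 * and the poll timer takes over at CMC_POLL_INTERVAL granularity.
 */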

/*
 *  ia64_mca_cmc_int_caller
 *
 *	Triggered by sw interrupt from CMC polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 *	handled
 */
static irqreturn_t
ia64_mca_cmc_int_caller(int cmc_irq, void *arg)
{
	static int start_count = -1;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);

	ia64_mca_cmc_int_handler(cmc_irq, arg);

	/* Pass the interrupt on to the next online cpu in the cascade */
	cpuid = cpumask_next(cpuid, cpu_online_mask);

	if (cpuid < nr_cpu_ids) {
		ia64_send_ipi(cpuid, IA64_CMCP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/* If no log record, switch out of polling mode */
		if (start_count == IA64_LOG_COUNT(SAL_INFO_TYPE_CMC)) {

			printk(KERN_WARNING "Returning to interrupt driven CMC handler\n");
			schedule_work(&cmc_enable_work);
			cmc_polling_enabled = 0;

		} else {

			mod_timer(&cmc_poll_timer, jiffies + CMC_POLL_INTERVAL);
		}

		start_count = -1;
	}

	return IRQ_HANDLED;
}
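/*
 * The cascade in effect serializes polling across cpus:
 *
 *	timer -> IPI(first online cpu) -> handler -> IPI(next cpu) -> ...
 *	      -> handler on last cpu -> re-arm timer or re-enable vector
 *
 * Only the first caller samples start_count; the last caller compares it
 * with the current SAL log count to decide whether any new records
 * arrived during the sweep.
 */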

/*
 *  ia64_mca_cmc_poll
 *
 *	Poll for Corrected Machine Checks (CMCs)
 *
 * Inputs   :   dummy (unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cmc_poll (struct timer_list *unused)
{
	/* Trigger a CMC interrupt cascade */
	ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CMCP_VECTOR,
							IA64_IPI_DM_INT, 0);
}

/*
 *  ia64_mca_cpe_int_caller
 *
 *	Triggered by sw interrupt from CPE polling routine.  Calls
 *	real interrupt handler and either triggers a sw interrupt
 *	on the next cpu or does cleanup at the end.
 *
 * Inputs
 *	interrupt number
 *	client data arg ptr
 * Outputs
 *	handled
 */
static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg)
{
	static int start_count = -1;
	static int poll_time = MIN_CPE_POLL_INTERVAL;
	unsigned int cpuid;

	cpuid = smp_processor_id();

	/* If first cpu, update count */
	if (start_count == -1)
		start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CPE);

	ia64_mca_cpe_int_handler(cpe_irq, arg);

	/* Pass the interrupt on to the next online cpu in the cascade */
	cpuid = cpumask_next(cpuid, cpu_online_mask);

	if (cpuid < nr_cpu_ids) {
		ia64_send_ipi(cpuid, IA64_CPEP_VECTOR, IA64_IPI_DM_INT, 0);
	} else {
		/*
		 * If a log was recorded, increase our polling frequency,
		 * otherwise, back off or return to interrupt mode.
		 */
		if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
			poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
		} else if (cpe_vector < 0) {
			poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
		} else {
			poll_time = MIN_CPE_POLL_INTERVAL;

			printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
			enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
			cpe_poll_enabled = 0;
		}

		if (cpe_poll_enabled)
			mod_timer(&cpe_poll_timer, jiffies + poll_time);
		start_count = -1;
	}

	return IRQ_HANDLED;
}
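/*
 * The poll interval thus adapts geometrically between
 * MIN_CPE_POLL_INTERVAL and MAX_CPE_POLL_INTERVAL (defined earlier in
 * this file): a sweep that finds new CPE records halves the interval,
 * while an idle sweep on a platform without CPEI doubles it, so a quiet
 * machine converges on the slowest polling rate.
 */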

/*
 *  ia64_mca_cpe_poll
 *
 *	Poll for Corrected Platform Errors (CPEs); trigger interrupt
 *	on first cpu, from there it will trickle through all the cpus.
 *
 * Inputs   :   dummy (unused)
 * Outputs  :   None
 *
 */
static void
ia64_mca_cpe_poll (struct timer_list *unused)
{
	/* Trigger a CPE interrupt cascade */
	ia64_send_ipi(cpumask_first(cpu_online_mask), IA64_CPEP_VECTOR,
							IA64_IPI_DM_INT, 0);
}
static int
default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
{
	int c;
	struct task_struct *g, *t;

	if (val != DIE_INIT_MONARCH_PROCESS)
		return NOTIFY_DONE;
#ifdef CONFIG_KEXEC
	if (atomic_read(&kdump_in_progress))
		return NOTIFY_DONE;
#endif

	/*
	 * FIXME: mlogbuf will brim over with INIT stack dumps.
	 * To enable show_stack() from INIT we reuse oops_in_progress,
	 * which is really meant for genuine oopses; this may cause
	 * problems after INIT.
	 */
	BREAK_LOGLEVEL(console_loglevel);
	ia64_mlogbuf_dump_from_init();

	printk(KERN_ERR "Processes interrupted by INIT -");
	for_each_online_cpu(c) {
		struct ia64_sal_os_state *s;
		t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
		s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
		g = s->prev_task;
		if (g) {
			if (g->pid)
				printk(" %d", g->pid);
			else
				printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
		}
	}
	printk("\n\n");
	if (read_trylock(&tasklist_lock)) {
		do_each_thread (g, t) {
			printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
			show_stack(t, NULL, KERN_DEFAULT);
		} while_each_thread (g, t);
		read_unlock(&tasklist_lock);
	}
	/* FIXME: This will not restore zapped printk locks. */
	RESTORE_LOGLEVEL(console_loglevel);
	return NOTIFY_DONE;
}
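/*
 * A minimal sketch of how another notifier could hook the same event at
 * higher priority and pre-empt this default dump (the callback and
 * notifier names here are illustrative only):
 *
 *	static int my_init_monarch(struct notifier_block *self,
 *				   unsigned long val, void *data)
 *	{
 *		if (val != DIE_INIT_MONARCH_PROCESS)
 *			return NOTIFY_DONE;
 *		... handle the INIT event ...
 *		return NOTIFY_STOP;	(stops the chain before this handler)
 *	}
 *	static struct notifier_block my_nb = {
 *		.notifier_call	= my_init_monarch,
 *		.priority	= 1,
 *	};
 *	register_die_notifier(&my_nb);
 */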

/*
 * C portion of the OS INIT handler
 *
 * Called from ia64_os_init_dispatch
 *
 * Inputs: pointer to pt_regs where processor info was saved.  SAL/OS state
 * for this event.  This code is used for both monarch and slave INIT events,
 * see sos->monarch.
 *
 * All INIT events switch to the INIT stack and change the previous process to
 * blocked status.  If one of the INIT events is the monarch then we are
 * probably processing the NMI button/command.  Use the monarch cpu to dump
 * all the processes.  The slave INIT events all spin until the monarch cpu
 * returns.  We can also get INIT slave events for MCA, in which case the MCA
 * process is the monarch.
 */

void
ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
		  struct ia64_sal_os_state *sos)
{
	static atomic_t slaves;
	static atomic_t monarchs;
	struct task_struct *previous_current;
	int cpu = smp_processor_id();
	struct ia64_mca_notify_die nd =
		{ .sos = sos, .monarch_cpu = &monarch_cpu };

	NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);

	mprintk(KERN_INFO "Entered OS INIT handler. PSP=%lx cpu=%d monarch=%ld\n",
		sos->proc_state_param, cpu, sos->monarch);
	salinfo_log_wakeup(SAL_INFO_TYPE_INIT, NULL, 0, 0);

	previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "INIT");
	sos->os_status = IA64_INIT_RESUME;

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * slaves.  The last slave that enters is promoted to be a monarch.
	 * Remove this code in September 2006; that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (!sos->monarch && atomic_add_return(1, &slaves) == num_online_cpus()) {
		mprintk(KERN_WARNING "%s: Promoting cpu %d to monarch.\n",
		        __func__, cpu);
		atomic_dec(&slaves);
		sos->monarch = 1;
	}

	/* FIXME: Workaround for broken proms that drive all INIT events as
	 * monarchs.  Second and subsequent monarchs are demoted to slaves.
	 * Remove this code in September 2006; that gives platforms a year to
	 * fix their proms and get their customers updated.
	 */
	if (sos->monarch && atomic_add_return(1, &monarchs) > 1) {
		mprintk(KERN_WARNING "%s: Demoting cpu %d to slave.\n",
			       __func__, cpu);
		atomic_dec(&monarchs);
		sos->monarch = 0;
	}

	if (!sos->monarch) {
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;

#ifdef CONFIG_KEXEC
		while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress))
			udelay(1000);
#else
		while (monarch_cpu == -1)
			cpu_relax();	/* spin until monarch enters */
#endif

		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);

#ifdef CONFIG_KEXEC
		while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress))
			udelay(1000);
#else
		while (monarch_cpu != -1)
			cpu_relax();	/* spin until monarch leaves */
#endif

		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);

		mprintk("Slave on cpu %d returning to normal service.\n", cpu);
		ia64_set_curr_task(cpu, previous_current);
		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
		atomic_dec(&slaves);
		return;
	}

	monarch_cpu = cpu;
	NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);

	/*
	 * Wait for a bit.  On some machines (e.g., HP's zx2000 and zx6000),
	 * INIT can be generated via the BMC's command-line interface, but
	 * since the console is on the same serial line, the user will need
	 * some time to switch out of the BMC before the dump begins.
	 */
	mprintk("Delaying for 5 seconds...\n");
	udelay(5*1000000);
	ia64_wait_for_slaves(cpu, "INIT");
	/* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
	 * to default_monarch_init_process() above and just print all the
	 * tasks.
	 */
	NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
	NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);

	mprintk("\nINIT dump complete.  Monarch on cpu %d returning to normal service.\n", cpu);
	atomic_dec(&monarchs);
	ia64_set_curr_task(cpu, previous_current);
	monarch_cpu = -1;
	return;
}

static int __init
ia64_mca_disable_cpe_polling(char *str)
{
	cpe_poll_enabled = 0;
	return 1;
}

__setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
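/*
 * Usage: booting with "disable_cpe_poll" on the kernel command line
 * clears cpe_poll_enabled, so ia64_mca_late_init() below will neither
 * start the CPE poll timer nor fall back to polling when the platform
 * lacks a CPEI vector.  Interrupt-driven CPEI, where available, is
 * unaffected.
 */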
/* Minimal format of the MCA/INIT stacks.  The pseudo processes that run on
 * these stacks can never sleep, they cannot return from the kernel to user
 * space, and they do not appear in a normal ps listing.  So there is no need
 * to format most of the fields.
 */

static void
format_mca_init_stack(void *mca_data, unsigned long offset,
		const char *type, int cpu)
{
	struct task_struct *p = (struct task_struct *)((char *)mca_data + offset);
	struct thread_info *ti;

	memset(p, 0, KERNEL_STACK_SIZE);
	ti = task_thread_info(p);
	ti->flags = _TIF_MCA_INIT;
	ti->preempt_count = 1;
	ti->task = p;
	ti->cpu = cpu;
	p->stack = ti;
	p->state = TASK_UNINTERRUPTIBLE;
	cpumask_set_cpu(cpu, &p->cpus_mask);
	INIT_LIST_HEAD(&p->tasks);
	p->parent = p->real_parent = p->group_leader = p;
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	strncpy(p->comm, type, sizeof(p->comm)-1);
}
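/*
 * Resulting per-cpu layout, assuming struct ia64_mca_cpu is essentially
 * two KERNEL_STACK_SIZE regions (a sketch; see the mca_stack/init_stack
 * offsets used by ia64_mca_cpu_init() below):
 *
 *	__per_cpu_mca[cpu] -> +---------------------------+
 *	                      | mca_stack: "MCA" pseudo   |
 *	                      | task + thread_info, stack |
 *	                      +---------------------------+
 *	                      | init_stack: "INIT" pseudo |
 *	                      | task + thread_info, stack |
 *	                      +---------------------------+
 *
 * Each pseudo task lives at the bottom of its region in the usual ia64
 * task-plus-stack arrangement, which is why format_mca_init_stack() can
 * cast the region base straight to a struct task_struct pointer.
 */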

/* Caller prevents this from being called after init */
static void * __ref mca_bootmem(void)
{
	return memblock_alloc(sizeof(struct ia64_mca_cpu), KERNEL_STACK_SIZE);
}

/* Do per-CPU MCA-related initialization.  */
void
ia64_mca_cpu_init(void *cpu_data)
{
	void *pal_vaddr;
	void *data;
	long sz = sizeof(struct ia64_mca_cpu);
	int cpu = smp_processor_id();
	static int first_time = 1;

	/*
	 * Structure will already be allocated if cpu has been online,
	 * then offlined.
	 */
	if (__per_cpu_mca[cpu]) {
		data = __va(__per_cpu_mca[cpu]);
	} else {
		if (first_time) {
			data = mca_bootmem();
			first_time = 0;
		} else
			data = (void *)__get_free_pages(GFP_KERNEL,
							get_order(sz));
		if (!data)
			panic("Could not allocate MCA memory for cpu %d\n",
					cpu);
	}
	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, mca_stack),
		"MCA", cpu);
	format_mca_init_stack(data, offsetof(struct ia64_mca_cpu, init_stack),
		"INIT", cpu);
	__this_cpu_write(ia64_mca_data, (__per_cpu_mca[cpu] = __pa(data)));

	/*
	 * Stash away a copy of the PTE needed to map the per-CPU page.
	 * We may need it during MCA recovery.
	 */
	__this_cpu_write(ia64_mca_per_cpu_pte,
		pte_val(mk_pte_phys(__pa(cpu_data), PAGE_KERNEL)));

	/*
	 * Also, stash away a copy of the PAL address and the PTE
	 * needed to map it.
	 */
	pal_vaddr = efi_get_pal_addr();
	if (!pal_vaddr)
		return;
	__this_cpu_write(ia64_mca_pal_base,
		GRANULEROUNDDOWN((unsigned long) pal_vaddr));
	__this_cpu_write(ia64_mca_pal_pte, pte_val(mk_pte_phys(__pa(pal_vaddr),
							      PAGE_KERNEL)));
}

static int ia64_mca_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	if (!cmc_polling_enabled)
		ia64_mca_cmc_vector_enable(NULL);
	local_irq_restore(flags);
	return 0;
}
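/*
 * This callback is wired up via cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
 * "ia64/mca:online", ...) in ia64_mca_late_init() below, so each cpu
 * that comes online re-enables its CMC vector unless the system has
 * already fallen back to polling.
 */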
/*
 * ia64_mca_init
 *
 *  Do all the system level mca specific initialization.
 *
 *	1. Register spinloop and wakeup request interrupt vectors
 *
 *	2. Register OS_MCA handler entry point
 *
 *	3. Register OS_INIT handler entry point
 *
 *	4. Initialize MCA/CMC/INIT related log buffers maintained by the OS.
 *
 *  Note that this initialization is done very early, before some kernel
 *  services are available.
 *
 *  Inputs  :   None
 *
 *  Outputs :   None
 */
void __init
ia64_mca_init(void)
{
	ia64_fptr_t *init_hldlr_ptr_monarch = (ia64_fptr_t *)ia64_os_init_dispatch_monarch;
	ia64_fptr_t *init_hldlr_ptr_slave = (ia64_fptr_t *)ia64_os_init_dispatch_slave;
	ia64_fptr_t *mca_hldlr_ptr = (ia64_fptr_t *)ia64_os_mca_dispatch;
	int i;
	long rc;
	struct ia64_sal_retval isrv;
	unsigned long timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
	static struct notifier_block default_init_monarch_nb = {
		.notifier_call = default_monarch_init_process,
		.priority = 0,	/* we need to be notified last */
	};

	IA64_MCA_DEBUG("%s: begin\n", __func__);

	/* Clear the Rendez checkin flag for all cpus */
	for (i = 0; i < NR_CPUS; i++)
		ia64_mc_info.imi_rendez_checkin[i] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;

	/*
	 * Register the rendezvous spinloop and wakeup mechanism with SAL
	 */

	/* Register the rendezvous interrupt vector with SAL */
	while (1) {
		isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_INT,
					      SAL_MC_PARAM_MECHANISM_INT,
					      IA64_MCA_RENDEZ_VECTOR,
					      timeout,
					      SAL_MC_PARAM_RZ_ALWAYS);
		rc = isrv.status;
		if (rc == 0)
			break;
		if (rc == -2) {
			printk(KERN_INFO "Increasing MCA rendezvous timeout from "
				"%ld to %ld milliseconds\n", timeout, isrv.v0);
			timeout = isrv.v0;
			NOTIFY_MCA(DIE_MCA_NEW_TIMEOUT, NULL, timeout, 0);
			continue;
		}
		printk(KERN_ERR "Failed to register rendezvous interrupt "
		       "with SAL (status %ld)\n", rc);
		return;
	}

	/* Register the wakeup interrupt vector with SAL */
	isrv = ia64_sal_mc_set_params(SAL_MC_PARAM_RENDEZ_WAKEUP,
				      SAL_MC_PARAM_MECHANISM_INT,
				      IA64_MCA_WAKEUP_VECTOR,
				      0, 0);
	rc = isrv.status;
	if (rc) {
		printk(KERN_ERR "Failed to register wakeup interrupt with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered MCA rendezvous spinloop and wakeup mech.\n", __func__);

	ia64_mc_info.imi_mca_handler        = ia64_tpa(mca_hldlr_ptr->fp);
	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 *	ia64_tpa(ia64_os_mca_dispatch_end) - ia64_tpa(ia64_os_mca_dispatch);
	 */
	ia64_mc_info.imi_mca_handler_size	= 0;

	/* Register the os mca handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_MCA,
				       ia64_mc_info.imi_mca_handler,
				       ia64_tpa(mca_hldlr_ptr->gp),
				       ia64_mc_info.imi_mca_handler_size,
				       0, 0, 0)))
	{
		printk(KERN_ERR "Failed to register OS MCA handler with SAL "
		       "(status %ld)\n", rc);
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS MCA handler with SAL at 0x%lx, gp = 0x%lx\n", __func__,
		       ia64_mc_info.imi_mca_handler, ia64_tpa(mca_hldlr_ptr->gp));

	/*
	 * XXX - disable SAL checksum by setting size to 0; should be
	 * size of the actual init handler in mca_asm.S.
	 */
	ia64_mc_info.imi_monarch_init_handler		= ia64_tpa(init_hldlr_ptr_monarch->fp);
	ia64_mc_info.imi_monarch_init_handler_size	= 0;
	ia64_mc_info.imi_slave_init_handler		= ia64_tpa(init_hldlr_ptr_slave->fp);
	ia64_mc_info.imi_slave_init_handler_size	= 0;

	IA64_MCA_DEBUG("%s: OS INIT handler at %lx\n", __func__,
		       ia64_mc_info.imi_monarch_init_handler);

	/* Register the os init handler with SAL */
	if ((rc = ia64_sal_set_vectors(SAL_VECTOR_OS_INIT,
				       ia64_mc_info.imi_monarch_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_monarch_init_handler_size,
				       ia64_mc_info.imi_slave_init_handler,
				       ia64_tpa(ia64_getreg(_IA64_REG_GP)),
				       ia64_mc_info.imi_slave_init_handler_size)))
	{
		printk(KERN_ERR "Failed to register m/s INIT handlers with SAL "
		       "(status %ld)\n", rc);
		return;
	}
	if (register_die_notifier(&default_init_monarch_nb)) {
		printk(KERN_ERR "Failed to register default monarch INIT process\n");
		return;
	}

	IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __func__);

	/* Initialize the areas set aside by the OS to buffer the
	 * platform/processor error states for MCA/INIT/CMC
	 * handling.
	 */
	ia64_log_init(SAL_INFO_TYPE_MCA);
	ia64_log_init(SAL_INFO_TYPE_INIT);
	ia64_log_init(SAL_INFO_TYPE_CMC);
	ia64_log_init(SAL_INFO_TYPE_CPE);

	mca_init = 1;
	printk(KERN_INFO "MCA related initialization done\n");
}

/*
 * These pieces cannot be done in ia64_mca_init() because it is called before
 * early_irq_init(), which would wipe out our percpu irq registrations.  But
 * we cannot leave them until ia64_mca_late_init() because by then all the
 * other processors have been brought online and have set their own CMC
 * vectors to point at a non-existent action.  Called from
 * arch_early_irq_init().
 */
void __init ia64_mca_irq_init(void)
{
	/*
	 *  Configure the CMCI/P vector and handler. Interrupts for CMC are
	 *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c).
	 */
	register_percpu_irq(IA64_CMC_VECTOR, ia64_mca_cmc_int_handler, 0,
			    "cmc_hndlr");
	register_percpu_irq(IA64_CMCP_VECTOR, ia64_mca_cmc_int_caller, 0,
			    "cmc_poll");
	ia64_mca_cmc_vector_setup();       /* Setup vector on BSP */

	/* Setup the MCA rendezvous interrupt vector */
	register_percpu_irq(IA64_MCA_RENDEZ_VECTOR, ia64_mca_rendez_int_handler,
			    0, "mca_rdzv");

	/* Setup the MCA wakeup interrupt vector */
	register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, ia64_mca_wakeup_int_handler,
			    0, "mca_wkup");

	/* Setup the CPEI/P handler */
	register_percpu_irq(IA64_CPEP_VECTOR, ia64_mca_cpe_int_caller, 0,
			    "cpe_poll");
}

/*
 * ia64_mca_late_init
 *
 *	Opportunity to setup things that require initialization later
 *	than ia64_mca_init.  Setup a timer to poll for CPEs if the
 *	platform doesn't support an interrupt driven mechanism.
 *
 *  Inputs  :   None
 *  Outputs :   Status
 */
static int __init
ia64_mca_late_init(void)
{
	if (!mca_init)
		return 0;

	/* Setup the CMC poll timer */
	timer_setup(&cmc_poll_timer, ia64_mca_cmc_poll, 0);

	/* Unmask/enable the vector */
	cmc_polling_enabled = 0;
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ia64/mca:online",
			  ia64_mca_cpu_online, NULL);
	IA64_MCA_DEBUG("%s: CMCI/P setup and enabled.\n", __func__);

	/* Setup the CPEI vector and the CPE poll timer */
	cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
	timer_setup(&cpe_poll_timer, ia64_mca_cpe_poll, 0);

	{
		unsigned int irq;

		if (cpe_vector >= 0) {
			/* If platform supports CPEI, enable the irq. */
			irq = local_vector_to_irq(cpe_vector);
			if (irq > 0) {
				cpe_poll_enabled = 0;
				irq_set_status_flags(irq, IRQ_PER_CPU);
				if (request_irq(irq, ia64_mca_cpe_int_handler,
						0, "cpe_hndlr", NULL))
					pr_err("Failed to register cpe_hndlr interrupt\n");
				ia64_cpe_irq = irq;
				ia64_mca_register_cpev(cpe_vector);
				IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n",
					__func__);
				return 0;
			}
			printk(KERN_ERR "%s: Failed to find irq for CPE "
					"interrupt handler, vector %d\n",
					__func__, cpe_vector);
		}
		/* If platform doesn't support CPEI, get the timer going. */
		if (cpe_poll_enabled) {
			ia64_mca_cpe_poll(NULL);
			IA64_MCA_DEBUG("%s: CPEP setup and enabled.\n", __func__);
		}
	}

	return 0;
}

device_initcall(ia64_mca_late_init);