Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.17.
   1/*
   2 * Set up the interrupt priorities
   3 *
   4 * Copyright  2004-2009 Analog Devices Inc.
   5 *                 2003 Bas Vermeulen <bas@buyways.nl>
   6 *                 2002 Arcturus Networks Inc. MaTed <mated@sympatico.ca>
 *            2000-2001 Lineo, Inc. D. Jeff Dionne <jeff@lineo.ca>
   8 *                 1999 D. Jeff Dionne <jeff@uclinux.org>
   9 *                 1996 Roman Zippel
  10 *
  11 * Licensed under the GPL-2
  12 */
  13
  14#include <linux/module.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/seq_file.h>
  17#include <linux/irq.h>
  18#include <linux/sched.h>
  19#include <linux/syscore_ops.h>
  20#include <linux/gpio.h>
  21#include <asm/delay.h>
  22#ifdef CONFIG_IPIPE
  23#include <linux/ipipe.h>
  24#endif
  25#include <asm/traps.h>
  26#include <asm/blackfin.h>
  27#include <asm/irq_handler.h>
  28#include <asm/dpmc.h>
  29#include <asm/traps.h>
  30
  31/*
  32 * NOTES:
  33 * - we have separated the physical Hardware interrupt from the
  34 * levels that the LINUX kernel sees (see the description in irq.h)
  35 * -
  36 */
  37
#ifndef CONFIG_SMP
/* Initialize this to an actual value to force it into the .data
 * section so that we know it is properly initialized at entry into
 * the kernel but before bss is initialized to zero (which is where
 * it would live otherwise).  The 0x1f magic represents the IRQs we
 * cannot actually mask out in hardware.
 */
unsigned long bfin_irq_flags = 0x1f;
EXPORT_SYMBOL(bfin_irq_flags);
#endif

#ifdef CONFIG_PM
/* Shadow of the wakeup-enable bits, maintained by bfin_internal_set_wake(). */
unsigned long bfin_sic_iwr[3];	/* Up to 3 SIC_IWRx registers */
/* Accumulated voltage-regulator wakeup sources (WAKE/CANWE/USBWE/ROTWE). */
unsigned vr_wakeup;
#endif
  53
  54#ifndef SEC_GCTL
/* One entry per peripheral interrupt, ordered by IVG level by search_IAR(). */
static struct ivgx {
	/* irq number for request_irq, available in mach-bf5xx/irq.h */
	unsigned int irqno;
	/* corresponding bit in the SIC_ISR register */
	unsigned int isrflag;
} ivg_table[NR_PERI_INTS];

/* Per-IVG-level slice of ivg_table, filled in by search_IAR(). */
static struct ivg_slice {
	/* position of first irq in ivg_table for given ivg */
	struct ivgx *ifirst;
	struct ivgx *istop;
} ivg7_13[IVG13 - IVG7 + 1];
  67
  68
/*
 * Search SIC_IAR and fill tables with the irqvalues
 * and their positions in the SIC_ISR register.
 */
static void __init search_IAR(void)
{
	unsigned ivg, irq_pos = 0;
	for (ivg = 0; ivg <= IVG13 - IVG7; ivg++) {
		int irqN;

		/* This IVG's slice starts (empty) at the current fill point. */
		ivg7_13[ivg].istop = ivg7_13[ivg].ifirst = &ivg_table[irq_pos];

		for (irqN = 0; irqN < NR_PERI_INTS; irqN += 4) {
			int irqn;
			/* Each IAR word holds eight 4-bit IVG assignments
			 * (see the 0xf mask and *4 shift below); some parts
			 * lay the IAR banks out non-contiguously.
			 */
			u32 iar =
				bfin_read32((unsigned long *)SIC_IAR0 +
#if defined(CONFIG_BF51x) || defined(CONFIG_BF52x) || \
	defined(CONFIG_BF538) || defined(CONFIG_BF539)
				((irqN % 32) >> 3) + ((irqN / 32) * ((SIC_IAR4 - SIC_IAR0) / 4))
#else
				(irqN >> 3)
#endif
				);
			for (irqn = irqN; irqn < irqN + 4; ++irqn) {
				int iar_shift = (irqn & 7) * 4;
				if (ivg == (0xf & (iar >> iar_shift))) {
					ivg_table[irq_pos].irqno = IVG7 + irqn;
					ivg_table[irq_pos].isrflag = 1 << (irqn % 32);
					ivg7_13[ivg].istop++;
					irq_pos++;
				}
			}
		}
	}
}
 104#endif
 105
 106/*
 107 * This is for core internal IRQs
 108 */
 109void bfin_ack_noop(struct irq_data *d)
 110{
 111	/* Dummy function.  */
 112}
 113
 114static void bfin_core_mask_irq(struct irq_data *d)
 115{
 116	bfin_irq_flags &= ~(1 << d->irq);
 117	if (!hard_irqs_disabled())
 118		hard_local_irq_enable();
 119}
 120
 121static void bfin_core_unmask_irq(struct irq_data *d)
 122{
 123	bfin_irq_flags |= 1 << d->irq;
 124	/*
 125	 * If interrupts are enabled, IMASK must contain the same value
 126	 * as bfin_irq_flags.  Make sure that invariant holds.  If interrupts
 127	 * are currently disabled we need not do anything; one of the
 128	 * callers will take care of setting IMASK to the proper value
 129	 * when reenabling interrupts.
 130	 * local_irq_enable just does "STI bfin_irq_flags", so it's exactly
 131	 * what we need.
 132	 */
 133	if (!hard_irqs_disabled())
 134		hard_local_irq_enable();
 135	return;
 136}
 137
 138#ifndef SEC_GCTL
/*
 * Mask a peripheral (SIC-routed) interrupt by clearing its bit in the
 * relevant SIC_IMASK register; with SMP/ICC the second core's SIC is
 * updated as well.
 */
void bfin_internal_mask_irq(unsigned int irq)
{
	unsigned long flags = hard_local_irq_save();
#ifdef SIC_IMASK0
	unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
	unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
	bfin_write_SIC_IMASK(mask_bank, bfin_read_SIC_IMASK(mask_bank) &
			~(1 << mask_bit));
# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
	/* Mirror the change on core B's SIC. */
	bfin_write_SICB_IMASK(mask_bank, bfin_read_SICB_IMASK(mask_bank) &
			~(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() &
			~(1 << BFIN_SYSIRQ(irq)));
#endif /* end of SIC_IMASK0 */
	hard_local_irq_restore(flags);
}
 157
/* irq_chip .irq_mask hook: unwrap irq_data to the plain irq number. */
static void bfin_internal_mask_irq_chip(struct irq_data *d)
{
	bfin_internal_mask_irq(d->irq);
}
 162
#ifdef CONFIG_SMP
/*
 * Unmask a peripheral interrupt, honouring the requested CPU affinity:
 * the bit is set in SIC_IMASK (core A) and/or SICB_IMASK (core B).
 */
void bfin_internal_unmask_irq_affinity(unsigned int irq,
		const struct cpumask *affinity)
#else
/* Unmask a peripheral interrupt in the SIC. */
void bfin_internal_unmask_irq(unsigned int irq)
#endif
{
	unsigned long flags = hard_local_irq_save();

#ifdef SIC_IMASK0
	unsigned mask_bank = BFIN_SYSIRQ(irq) / 32;
	unsigned mask_bit = BFIN_SYSIRQ(irq) % 32;
# ifdef CONFIG_SMP
	if (cpumask_test_cpu(0, affinity))
# endif
		bfin_write_SIC_IMASK(mask_bank,
				bfin_read_SIC_IMASK(mask_bank) |
				(1 << mask_bit));
# ifdef CONFIG_SMP
	if (cpumask_test_cpu(1, affinity))
		bfin_write_SICB_IMASK(mask_bank,
				bfin_read_SICB_IMASK(mask_bank) |
				(1 << mask_bit));
# endif
#else
	bfin_write_SIC_IMASK(bfin_read_SIC_IMASK() |
			(1 << BFIN_SYSIRQ(irq)));
#endif
	hard_local_irq_restore(flags);
}
 193
#ifdef CONFIG_SMP
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq_affinity(d->irq,
					  irq_data_get_affinity_mask(d));
}

/* Re-route an irq: mask it, then unmask it only on the CPUs in @mask. */
static int bfin_internal_set_affinity(struct irq_data *d,
				      const struct cpumask *mask, bool force)
{
	bfin_internal_mask_irq(d->irq);
	bfin_internal_unmask_irq_affinity(d->irq, mask);

	return 0;
}
#else
static void bfin_internal_unmask_irq_chip(struct irq_data *d)
{
	bfin_internal_unmask_irq(d->irq);
}
#endif
 215
#if defined(CONFIG_PM)
/*
 * Record whether @irq may wake the system: sets or clears its bit in
 * the bfin_sic_iwr[] shadow and folds the matching VR wakeup source
 * into vr_wakeup.  NOTE(review): only the shadows are updated here —
 * presumably the PM/suspend path writes them to hardware; confirm.
 */
int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	u32 bank, bit, wakeup = 0;
	unsigned long flags;
	bank = BFIN_SYSIRQ(irq) / 32;
	bit = BFIN_SYSIRQ(irq) % 32;

	/* Map the irq to the voltage-regulator wakeup source it needs. */
	switch (irq) {
#ifdef IRQ_RTC
	case IRQ_RTC:
	wakeup |= WAKE;
	break;
#endif
#ifdef IRQ_CAN0_RX
	case IRQ_CAN0_RX:
	wakeup |= CANWE;
	break;
#endif
#ifdef IRQ_CAN1_RX
	case IRQ_CAN1_RX:
	wakeup |= CANWE;
	break;
#endif
#ifdef IRQ_USB_INT0
	case IRQ_USB_INT0:
	wakeup |= USBWE;
	break;
#endif
#ifdef CONFIG_BF54x
	case IRQ_CNT:
	wakeup |= ROTWE;
	break;
#endif
	default:
	break;
	}

	flags = hard_local_irq_save();

	if (state) {
		bfin_sic_iwr[bank] |= (1 << bit);
		vr_wakeup  |= wakeup;

	} else {
		bfin_sic_iwr[bank] &= ~(1 << bit);
		vr_wakeup  &= ~wakeup;
	}

	hard_local_irq_restore(flags);

	return 0;
}

/* irq_chip .irq_set_wake hook. */
static int bfin_internal_set_wake_chip(struct irq_data *d, unsigned int state)
{
	return bfin_internal_set_wake(d->irq, state);
}
#else
inline int bfin_internal_set_wake(unsigned int irq, unsigned int state)
{
	return 0;
}
# define bfin_internal_set_wake_chip NULL
#endif
 281
 282#else /* SEC_GCTL */
 283static void bfin_sec_preflow_handler(struct irq_data *d)
 284{
 285	unsigned long flags = hard_local_irq_save();
 286	unsigned int sid = BFIN_SYSIRQ(d->irq);
 287
 288	bfin_write_SEC_SCI(0, SEC_CSID, sid);
 289
 290	hard_local_irq_restore(flags);
 291}
 292
 293static void bfin_sec_mask_ack_irq(struct irq_data *d)
 294{
 295	unsigned long flags = hard_local_irq_save();
 296	unsigned int sid = BFIN_SYSIRQ(d->irq);
 297
 298	bfin_write_SEC_SCI(0, SEC_CSID, sid);
 299
 300	hard_local_irq_restore(flags);
 301}
 302
 303static void bfin_sec_unmask_irq(struct irq_data *d)
 304{
 305	unsigned long flags = hard_local_irq_save();
 306	unsigned int sid = BFIN_SYSIRQ(d->irq);
 307
 308	bfin_write32(SEC_END, sid);
 309
 310	hard_local_irq_restore(flags);
 311}
 312
 313static void bfin_sec_enable_ssi(unsigned int sid)
 314{
 315	unsigned long flags = hard_local_irq_save();
 316	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 317
 318	reg_sctl |= SEC_SCTL_SRC_EN;
 319	bfin_write_SEC_SCTL(sid, reg_sctl);
 320
 321	hard_local_irq_restore(flags);
 322}
 323
 324static void bfin_sec_disable_ssi(unsigned int sid)
 325{
 326	unsigned long flags = hard_local_irq_save();
 327	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 328
 329	reg_sctl &= ((uint32_t)~SEC_SCTL_SRC_EN);
 330	bfin_write_SEC_SCTL(sid, reg_sctl);
 331
 332	hard_local_irq_restore(flags);
 333}
 334
 335static void bfin_sec_set_ssi_coreid(unsigned int sid, unsigned int coreid)
 336{
 337	unsigned long flags = hard_local_irq_save();
 338	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 339
 340	reg_sctl &= ((uint32_t)~SEC_SCTL_CTG);
 341	bfin_write_SEC_SCTL(sid, reg_sctl | ((coreid << 20) & SEC_SCTL_CTG));
 342
 343	hard_local_irq_restore(flags);
 344}
 345
 346static void bfin_sec_enable_sci(unsigned int sid)
 347{
 348	unsigned long flags = hard_local_irq_save();
 349	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 350
 351	if (sid == BFIN_SYSIRQ(IRQ_WATCH0))
 352		reg_sctl |= SEC_SCTL_FAULT_EN;
 353	else
 354		reg_sctl |= SEC_SCTL_INT_EN;
 355	bfin_write_SEC_SCTL(sid, reg_sctl);
 356
 357	hard_local_irq_restore(flags);
 358}
 359
 360static void bfin_sec_disable_sci(unsigned int sid)
 361{
 362	unsigned long flags = hard_local_irq_save();
 363	uint32_t reg_sctl = bfin_read_SEC_SCTL(sid);
 364
 365	reg_sctl &= ((uint32_t)~SEC_SCTL_INT_EN);
 366	bfin_write_SEC_SCTL(sid, reg_sctl);
 367
 368	hard_local_irq_restore(flags);
 369}
 370
 371static void bfin_sec_enable(struct irq_data *d)
 372{
 373	unsigned long flags = hard_local_irq_save();
 374	unsigned int sid = BFIN_SYSIRQ(d->irq);
 375
 376	bfin_sec_enable_sci(sid);
 377	bfin_sec_enable_ssi(sid);
 378
 379	hard_local_irq_restore(flags);
 380}
 381
 382static void bfin_sec_disable(struct irq_data *d)
 383{
 384	unsigned long flags = hard_local_irq_save();
 385	unsigned int sid = BFIN_SYSIRQ(d->irq);
 386
 387	bfin_sec_disable_sci(sid);
 388	bfin_sec_disable_ssi(sid);
 389
 390	hard_local_irq_restore(flags);
 391}
 392
 393static void bfin_sec_set_priority(unsigned int sec_int_levels, u8 *sec_int_priority)
 394{
 395	unsigned long flags = hard_local_irq_save();
 396	uint32_t reg_sctl;
 397	int i;
 398
 399	bfin_write_SEC_SCI(0, SEC_CPLVL, sec_int_levels);
 400
 401	for (i = 0; i < SYS_IRQS - BFIN_IRQ(0); i++) {
 402		reg_sctl = bfin_read_SEC_SCTL(i) & ~SEC_SCTL_PRIO;
 403		reg_sctl |= sec_int_priority[i] << SEC_SCTL_PRIO_OFFSET;
 404		bfin_write_SEC_SCTL(i, reg_sctl);
 405	}
 406
 407	hard_local_irq_restore(flags);
 408}
 409
 410void bfin_sec_raise_irq(unsigned int irq)
 411{
 412	unsigned long flags = hard_local_irq_save();
 413	unsigned int sid = BFIN_SYSIRQ(irq);
 414
 415	bfin_write32(SEC_RAISE, sid);
 416
 417	hard_local_irq_restore(flags);
 418}
 419
/*
 * Configure the software-driven SEC sources 34-37: the even ids are
 * routed to core 0 and the odd ids to core 1; only the core-1 sources
 * (35 and 37) are enabled here.
 */
static void init_software_driven_irq(void)
{
	bfin_sec_set_ssi_coreid(34, 0);
	bfin_sec_set_ssi_coreid(35, 1);

	bfin_sec_enable_sci(35);
	bfin_sec_enable_ssi(35);
	bfin_sec_set_ssi_coreid(36, 0);
	bfin_sec_set_ssi_coreid(37, 1);
	bfin_sec_enable_sci(37);
	bfin_sec_enable_ssi(37);
}
 432
 433void handle_sec_sfi_fault(uint32_t gstat)
 434{
 435
 436}
 437
 438void handle_sec_sci_fault(uint32_t gstat)
 439{
 440	uint32_t core_id;
 441	uint32_t cstat;
 442
 443	core_id = gstat & SEC_GSTAT_SCI;
 444	cstat = bfin_read_SEC_SCI(core_id, SEC_CSTAT);
 445	if (cstat & SEC_CSTAT_ERR) {
 446		switch (cstat & SEC_CSTAT_ERRC) {
 447		case SEC_CSTAT_ACKERR:
 448			printk(KERN_DEBUG "sec ack err\n");
 449			break;
 450		default:
 451			printk(KERN_DEBUG "sec sci unknown err\n");
 452		}
 453	}
 454
 455}
 456
/* SSI (source interface) error: read out the faulting source's status.
 * NOTE(review): sstat is read but never used — presumably the read
 * itself latches/clears the status in hardware; confirm before removing.
 */
void handle_sec_ssi_fault(uint32_t gstat)
{
	uint32_t sid;
	uint32_t sstat;

	sid = gstat & SEC_GSTAT_SID;
	sstat = bfin_read_SEC_SSTAT(sid);

}
 466
 467void handle_sec_fault(uint32_t sec_gstat)
 468{
 469	if (sec_gstat & SEC_GSTAT_ERR) {
 470
 471		switch (sec_gstat & SEC_GSTAT_ERRC) {
 472		case 0:
 473			handle_sec_sfi_fault(sec_gstat);
 474			break;
 475		case SEC_GSTAT_SCIERR:
 476			handle_sec_sci_fault(sec_gstat);
 477			break;
 478		case SEC_GSTAT_SSIERR:
 479			handle_sec_ssi_fault(sec_gstat);
 480			break;
 481		}
 482
 483
 484	}
 485}
 486
/* Shared irqaction for the core fault vectors; .handler is assigned
 * (bfin_fault_routine) by init_arch_irq() before setup_irq(). */
static struct irqaction bfin_fault_irq = {
	.name = "Blackfin fault",
};
 490
 491static irqreturn_t bfin_fault_routine(int irq, void *data)
 492{
 493	struct pt_regs *fp = get_irq_regs();
 494
 495	switch (irq) {
 496	case IRQ_C0_DBL_FAULT:
 497		double_fault_c(fp);
 498		break;
 499	case IRQ_C0_HW_ERR:
 500		dump_bfin_process(fp);
 501		dump_bfin_mem(fp);
 502		show_regs(fp);
 503		printk(KERN_NOTICE "Kernel Stack\n");
 504		show_stack(current, NULL);
 505		print_modules();
 506		panic("Core 0 hardware error");
 507		break;
 508	case IRQ_C0_NMI_L1_PARITY_ERR:
 509		panic("Core 0 NMI L1 parity error");
 510		break;
 511	case IRQ_SEC_ERR:
 512		pr_err("SEC error\n");
 513		handle_sec_fault(bfin_read32(SEC_GSTAT));
 514		break;
 515	default:
 516		panic("Unknown fault %d", irq);
 517	}
 518
 519	return IRQ_HANDLED;
 520}
 521#endif /* SEC_GCTL */
 522
/* Chip for the core event controller interrupts. */
static struct irq_chip bfin_core_irqchip = {
	.name = "CORE",
	.irq_mask = bfin_core_mask_irq,
	.irq_unmask = bfin_core_unmask_irq,
};

#ifndef SEC_GCTL
/* Chip for peripheral interrupts routed through the SIC. */
static struct irq_chip bfin_internal_irqchip = {
	.name = "INTN",
	.irq_mask = bfin_internal_mask_irq_chip,
	.irq_unmask = bfin_internal_unmask_irq_chip,
	.irq_disable = bfin_internal_mask_irq_chip,
	.irq_enable = bfin_internal_unmask_irq_chip,
#ifdef CONFIG_SMP
	.irq_set_affinity = bfin_internal_set_affinity,
#endif
	.irq_set_wake = bfin_internal_set_wake_chip,
};
#else
/* Chip for peripheral interrupts routed through the SEC. */
static struct irq_chip bfin_sec_irqchip = {
	.name = "SEC",
	.irq_mask_ack = bfin_sec_mask_ack_irq,
	.irq_mask = bfin_sec_mask_ack_irq,
	.irq_unmask = bfin_sec_unmask_irq,
	.irq_eoi = bfin_sec_unmask_irq,
	.irq_disable = bfin_sec_disable,
	.irq_enable = bfin_sec_enable,
};
#endif
 552
/* Deliver one demultiplexed irq, via the I-pipe when it is enabled. */
void bfin_handle_irq(unsigned irq)
{
#ifdef CONFIG_IPIPE
	struct pt_regs regs;    /* Contents not used. */
	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, &regs);
	ipipe_trace_irq_exit(irq);
#else /* !CONFIG_IPIPE */
	generic_handle_irq(irq);
#endif  /* !CONFIG_IPIPE */
}
 564
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
/* Bit N set = MAC status source IRQ_MAC_PHYINT + N is unmasked. */
static int mac_stat_int_mask;

/* Clear the hardware status that raised the given MAC status irq. */
static void bfin_mac_status_ack_irq(unsigned int irq)
{
	switch (irq) {
	case IRQ_MAC_MMCINT:
		/* Write the pending (enabled & sticky) bits back to clear. */
		bfin_write_EMAC_MMC_TIRQS(
			bfin_read_EMAC_MMC_TIRQE() &
			bfin_read_EMAC_MMC_TIRQS());
		bfin_write_EMAC_MMC_RIRQS(
			bfin_read_EMAC_MMC_RIRQE() &
			bfin_read_EMAC_MMC_RIRQS());
		break;
	case IRQ_MAC_RXFSINT:
		bfin_write_EMAC_RX_STKY(
			bfin_read_EMAC_RX_IRQE() &
			bfin_read_EMAC_RX_STKY());
		break;
	case IRQ_MAC_TXFSINT:
		bfin_write_EMAC_TX_STKY(
			bfin_read_EMAC_TX_IRQE() &
			bfin_read_EMAC_TX_STKY());
		break;
	case IRQ_MAC_WAKEDET:
		 bfin_write_EMAC_WKUP_CTL(
			bfin_read_EMAC_WKUP_CTL() | MPKS | RWKS);
		break;
	default:
		/* These bits are W1C */
		bfin_write_EMAC_SYSTAT(1L << (irq - IRQ_MAC_PHYINT));
		break;
	}
}
 599
/* Mask one MAC status source; on non-BF537 parts also mask the shared
 * IRQ_MAC_ERROR system irq once no sources remain enabled. */
static void bfin_mac_status_mask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

	mac_stat_int_mask &= ~(1L << (irq - IRQ_MAC_PHYINT));
#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() & ~PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_mask_irq(IRQ_MAC_ERROR);
#endif
	bfin_mac_status_ack_irq(irq);
}
 619
/* Unmask one MAC status source; on non-BF537 parts also unmask the
 * shared IRQ_MAC_ERROR system irq when going from zero enabled sources. */
static void bfin_mac_status_unmask_irq(struct irq_data *d)
{
	unsigned int irq = d->irq;

#ifdef BF537_FAMILY
	switch (irq) {
	case IRQ_MAC_PHYINT:
		bfin_write_EMAC_SYSCTL(bfin_read_EMAC_SYSCTL() | PHYIE);
		break;
	default:
		break;
	}
#else
	if (!mac_stat_int_mask)
		bfin_internal_unmask_irq(IRQ_MAC_ERROR);
#endif
	mac_stat_int_mask |= 1L << (irq - IRQ_MAC_PHYINT);
}
 638
#ifdef CONFIG_PM
/* Forward wake configuration to the system irq that the MAC status
 * sources are demultiplexed from. */
int bfin_mac_status_set_wake(struct irq_data *d, unsigned int state)
{
#ifdef BF537_FAMILY
	return bfin_internal_set_wake(IRQ_GENERIC_ERROR, state);
#else
	return bfin_internal_set_wake(IRQ_MAC_ERROR, state);
#endif
}
#else
# define bfin_mac_status_set_wake NULL
#endif

/* Chip for the demultiplexed MAC status interrupts. */
static struct irq_chip bfin_mac_status_irqchip = {
	.name = "MACST",
	.irq_mask = bfin_mac_status_mask_irq,
	.irq_unmask = bfin_mac_status_unmask_irq,
	.irq_set_wake = bfin_mac_status_set_wake,
};
 658
 659void bfin_demux_mac_status_irq(struct irq_desc *inta_desc)
 660{
 661	int i, irq = 0;
 662	u32 status = bfin_read_EMAC_SYSTAT();
 663
 664	for (i = 0; i <= (IRQ_MAC_STMDONE - IRQ_MAC_PHYINT); i++)
 665		if (status & (1L << i)) {
 666			irq = IRQ_MAC_PHYINT + i;
 667			break;
 668		}
 669
 670	if (irq) {
 671		if (mac_stat_int_mask & (1L << (irq - IRQ_MAC_PHYINT))) {
 672			bfin_handle_irq(irq);
 673		} else {
 674			bfin_mac_status_ack_irq(irq);
 675			pr_debug("IRQ %d:"
 676					" MASKED MAC ERROR INTERRUPT ASSERTED\n",
 677					irq);
 678		}
 679	} else
 680		printk(KERN_ERR
 681				"%s : %s : LINE %d :\nIRQ ?: MAC ERROR"
 682				" INTERRUPT ASSERTED BUT NO SOURCE FOUND"
 683				"(EMAC_SYSTAT=0x%X)\n",
 684				__func__, __FILE__, __LINE__, status);
 685}
 686#endif
 687
/* Install @handle as the flow handler; under the I-pipe every irq is
 * forced to the level-type handler. */
static inline void bfin_set_irq_handler(struct irq_data *d, irq_flow_handler_t handle)
{
#ifdef CONFIG_IPIPE
	handle = handle_level_irq;
#endif
	irq_set_handler_locked(d, handle);
}
 695
#ifdef CONFIG_GPIO_ADI

/* One bit per GPIO, set once the pin has been claimed/prepared as an irq. */
static DECLARE_BITMAP(gpio_enabled, MAX_BLACKFIN_GPIOS);

static void bfin_gpio_ack_irq(struct irq_data *d)
{
	/* AFAIK ack_irq in case mask_ack is provided
	 * gets only called for edge sense irqs
	 */
	set_gpio_data(irq_to_gpio(d->irq), 0);
}
 707
 708static void bfin_gpio_mask_ack_irq(struct irq_data *d)
 709{
 710	unsigned int irq = d->irq;
 711	u32 gpionr = irq_to_gpio(irq);
 712
 713	if (!irqd_is_level_type(d))
 714		set_gpio_data(gpionr, 0);
 715
 716	set_gpio_maska(gpionr, 0);
 717}
 718
 719static void bfin_gpio_mask_irq(struct irq_data *d)
 720{
 721	set_gpio_maska(irq_to_gpio(d->irq), 0);
 722}
 723
 724static void bfin_gpio_unmask_irq(struct irq_data *d)
 725{
 726	set_gpio_maska(irq_to_gpio(d->irq), 1);
 727}
 728
 729static unsigned int bfin_gpio_irq_startup(struct irq_data *d)
 730{
 731	u32 gpionr = irq_to_gpio(d->irq);
 732
 733	if (__test_and_set_bit(gpionr, gpio_enabled))
 734		bfin_gpio_irq_prepare(gpionr);
 735
 736	bfin_gpio_unmask_irq(d);
 737
 738	return 0;
 739}
 740
 741static void bfin_gpio_irq_shutdown(struct irq_data *d)
 742{
 743	u32 gpionr = irq_to_gpio(d->irq);
 744
 745	bfin_gpio_mask_irq(d);
 746	__clear_bit(gpionr, gpio_enabled);
 747	bfin_gpio_irq_free(gpionr);
 748}
 749
/*
 * Configure a GPIO pin for the requested interrupt trigger type:
 * claims the pin, then programs direction, polarity, edge/level mode
 * and the matching flow handler.  Returns 0 or a claim error.
 */
static int bfin_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = d->irq;
	int ret;
	char buf[16];
	u32 gpionr = irq_to_gpio(irq);

	if (type == IRQ_TYPE_PROBE) {
		/* only probe unenabled GPIO interrupt lines */
		if (test_bit(gpionr, gpio_enabled))
			return 0;
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING |
		    IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) {

		/* Claim the pin from the GPIO layer before reprogramming it. */
		snprintf(buf, 16, "gpio-irq%d", irq);
		ret = bfin_gpio_irq_request(gpionr, buf);
		if (ret)
			return ret;

		if (__test_and_set_bit(gpionr, gpio_enabled))
			bfin_gpio_irq_prepare(gpionr);

	} else {
		/* IRQ_TYPE_NONE: disable and release our bookkeeping bit. */
		__clear_bit(gpionr, gpio_enabled);
		return 0;
	}

	/* Input-disable while reconfiguring; pin is an input (dir = 0). */
	set_gpio_inen(gpionr, 0);
	set_gpio_dir(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
	    == (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		set_gpio_both(gpionr, 1);
	else
		set_gpio_both(gpionr, 0);

	if ((type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW)))
		set_gpio_polar(gpionr, 1);	/* low or falling edge denoted by one */
	else
		set_gpio_polar(gpionr, 0);	/* high or rising edge denoted by zero */

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		set_gpio_edge(gpionr, 1);
		set_gpio_inen(gpionr, 1);
		/* Clear any stale latched edge before re-enabling. */
		set_gpio_data(gpionr, 0);

	} else {
		set_gpio_edge(gpionr, 0);
		set_gpio_inen(gpionr, 1);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		bfin_set_irq_handler(d, handle_edge_irq);
	else
		bfin_set_irq_handler(d, handle_level_irq);

	return 0;
}
 811
 812static void bfin_demux_gpio_block(unsigned int irq)
 813{
 814	unsigned int gpio, mask;
 815
 816	gpio = irq_to_gpio(irq);
 817	mask = get_gpiop_data(gpio) & get_gpiop_maska(gpio);
 818
 819	while (mask) {
 820		if (mask & 1)
 821			bfin_handle_irq(irq);
 822		irq++;
 823		mask >>= 1;
 824	}
 825}
 826
/* Chained handler: map the chip-specific INTA line to the first GPIO irq
 * of the bank(s) it serves and demultiplex that block. */
void bfin_demux_gpio_irq(struct irq_desc *desc)
{
	unsigned int inta_irq = irq_desc_get_irq(desc);
	unsigned int irq;

	switch (inta_irq) {
#if defined(BF537_FAMILY)
	case IRQ_PF_INTA_PG_INTA:
		/* This line serves two banks: demux PF here, PG below. */
		bfin_demux_gpio_block(IRQ_PF0);
		irq = IRQ_PG0;
		break;
	case IRQ_PH_INTA_MAC_RX:
		irq = IRQ_PH0;
		break;
#elif defined(BF533_FAMILY)
	case IRQ_PROG_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(BF538_FAMILY)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
	case IRQ_PORTF_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PORTG_INTA:
		irq = IRQ_PG0;
		break;
	case IRQ_PORTH_INTA:
		irq = IRQ_PH0;
		break;
#elif defined(CONFIG_BF561)
	case IRQ_PROG0_INTA:
		irq = IRQ_PF0;
		break;
	case IRQ_PROG1_INTA:
		irq = IRQ_PF16;
		break;
	case IRQ_PROG2_INTA:
		irq = IRQ_PF32;
		break;
#endif
	default:
		BUG();
		return;
	}

	bfin_demux_gpio_block(irq);
}
 877
#ifdef CONFIG_PM

/* Route GPIO irq wake requests to the pin wakeup control layer. */
static int bfin_gpio_set_wake(struct irq_data *d, unsigned int state)
{
	return bfin_gpio_pm_wakeup_ctrl(irq_to_gpio(d->irq), state);
}

#else

# define bfin_gpio_set_wake NULL

#endif
 890
/* Chip for the demultiplexed GPIO interrupts. */
static struct irq_chip bfin_gpio_irqchip = {
	.name = "GPIO",
	.irq_ack = bfin_gpio_ack_irq,
	.irq_mask = bfin_gpio_mask_irq,
	.irq_mask_ack = bfin_gpio_mask_ack_irq,
	.irq_unmask = bfin_gpio_unmask_irq,
	.irq_disable = bfin_gpio_mask_irq,
	.irq_enable = bfin_gpio_unmask_irq,
	.irq_set_type = bfin_gpio_irq_type,
	.irq_startup = bfin_gpio_irq_startup,
	.irq_shutdown = bfin_gpio_irq_shutdown,
	.irq_set_wake = bfin_gpio_set_wake,
};
 904
 905#endif
 906
 907#ifdef CONFIG_PM
 908
 909#ifdef SEC_GCTL
 910static u32 save_pint_sec_ctl[NR_PINT_SYS_IRQS];
 911
 912static int sec_suspend(void)
 913{
 914	u32 bank;
 915
 916	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
 917		save_pint_sec_ctl[bank] = bfin_read_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0));
 918	return 0;
 919}
 920
 921static void sec_resume(void)
 922{
 923	u32 bank;
 924
 925	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
 926	udelay(100);
 927	bfin_write_SEC_GCTL(SEC_GCTL_EN);
 928	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
 929
 930	for (bank = 0; bank < NR_PINT_SYS_IRQS; bank++)
 931		bfin_write_SEC_SCTL(bank + BFIN_SYSIRQ(IRQ_PINT0), save_pint_sec_ctl[bank]);
 932}
 933
/* Save/restore SEC PINT state across system suspend/resume. */
static struct syscore_ops sec_pm_syscore_ops = {
	.suspend = sec_suspend,
	.resume = sec_resume,
};
 938#endif
 939
 940#endif
 941
/* Program the core event vector table (EVT2..EVT15; EVT4 is skipped). */
void init_exception_vectors(void)
{
	/* cannot program in software:
	 * evt0 - emulation (jtag)
	 * evt1 - reset
	 */
	bfin_write_EVT2(evt_nmi);
	bfin_write_EVT3(trap);
	bfin_write_EVT5(evt_ivhw);
	bfin_write_EVT6(evt_timer);
	bfin_write_EVT7(evt_evt7);
	bfin_write_EVT8(evt_evt8);
	bfin_write_EVT9(evt_evt9);
	bfin_write_EVT10(evt_evt10);
	bfin_write_EVT11(evt_evt11);
	bfin_write_EVT12(evt_evt12);
	bfin_write_EVT13(evt_evt13);
	bfin_write_EVT14(evt_evt14);
	bfin_write_EVT15(evt_system_call);
	CSYNC();
}
 963
 964#ifndef SEC_GCTL
/*
 * This function should be called during kernel startup to initialize
 * the BFin IRQ handling routines.
 */

int __init init_arch_irq(void)
{
	int irq;
	unsigned long ilat = 0;

	/*  Disable all the peripheral intrs  - page 4-29 HW Ref manual */
#ifdef SIC_IMASK0
	bfin_write_SIC_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SIC_IMASK1(SIC_UNMASK_ALL);
# ifdef SIC_IMASK2
	bfin_write_SIC_IMASK2(SIC_UNMASK_ALL);
# endif
# if defined(CONFIG_SMP) || defined(CONFIG_ICC)
	bfin_write_SICB_IMASK0(SIC_UNMASK_ALL);
	bfin_write_SICB_IMASK1(SIC_UNMASK_ALL);
# endif
#else
	bfin_write_SIC_IMASK(SIC_UNMASK_ALL);
#endif

	local_irq_disable();

	/* Install chips and flow handlers for every system irq. */
	for (irq = 0; irq <= SYS_IRQS; irq++) {
		if (irq <= IRQ_CORETMR)
			irq_set_chip(irq, &bfin_core_irqchip);
		else
			irq_set_chip(irq, &bfin_internal_irqchip);

		switch (irq) {
#if !BFIN_GPIO_PINT
#if defined(BF537_FAMILY)
		case IRQ_PH_INTA_MAC_RX:
		case IRQ_PF_INTA_PG_INTA:
#elif defined(BF533_FAMILY)
		case IRQ_PROG_INTA:
#elif defined(CONFIG_BF52x) || defined(CONFIG_BF51x)
		case IRQ_PORTF_INTA:
		case IRQ_PORTG_INTA:
		case IRQ_PORTH_INTA:
#elif defined(CONFIG_BF561)
		case IRQ_PROG0_INTA:
		case IRQ_PROG1_INTA:
		case IRQ_PROG2_INTA:
#elif defined(BF538_FAMILY)
		case IRQ_PORTF_INTA:
#endif
			irq_set_chained_handler(irq, bfin_demux_gpio_irq);
			break;
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
		case IRQ_MAC_ERROR:
			irq_set_chained_handler(irq,
						bfin_demux_mac_status_irq);
			break;
#endif
#if defined(CONFIG_SMP) || defined(CONFIG_ICC)
		case IRQ_SUPPLE_0:
		case IRQ_SUPPLE_1:
			irq_set_handler(irq, handle_percpu_irq);
			break;
#endif

#ifdef CONFIG_TICKSOURCE_CORETMR
		case IRQ_CORETMR:
# ifdef CONFIG_SMP
			irq_set_handler(irq, handle_percpu_irq);
# else
			irq_set_handler(irq, handle_simple_irq);
# endif
			break;
#endif

#ifdef CONFIG_TICKSOURCE_GPTMR0
		case IRQ_TIMER0:
			irq_set_handler(irq, handle_simple_irq);
			break;
#endif

		default:
#ifdef CONFIG_IPIPE
			irq_set_handler(irq, handle_level_irq);
#else
			irq_set_handler(irq, handle_simple_irq);
#endif
			break;
		}
	}

	init_mach_irq();

#if (defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE))
	for (irq = IRQ_MAC_PHYINT; irq <= IRQ_MAC_STMDONE; irq++)
		irq_set_chip_and_handler(irq, &bfin_mac_status_irqchip,
					 handle_level_irq);
#endif
	/* if configured as edge, then will be changed to do_edge_IRQ */
#ifdef CONFIG_GPIO_ADI
	for (irq = GPIO_IRQ_BASE;
		irq < (GPIO_IRQ_BASE + MAX_BLACKFIN_GPIOS); irq++)
		irq_set_chip_and_handler(irq, &bfin_gpio_irqchip,
					 handle_level_irq);
#endif
	bfin_write_IMASK(0);
	CSYNC();
	/* Clear anything latched in ILAT while we were configuring. */
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");
	/* IMASK=xxx is equivalent to STI xx or bfin_irq_flags=xx,
	 * local_irq_enable()
	 */
	program_IAR();
	/* Therefore it's better to setup IARs before interrupts enabled */
	search_IAR();

	/* Enable interrupts IVG7-15 */
	bfin_irq_flags |= IMASK_IVG15 |
		IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
		IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;


	/* This implicitly covers ANOMALY_05000171
	 * Boot-ROM code modifies SICA_IWRx wakeup registers
	 */
#ifdef SIC_IWR0
	bfin_write_SIC_IWR0(IWR_DISABLE_ALL);
# ifdef SIC_IWR1
	/* BF52x/BF51x system reset does not properly reset SIC_IWR1 which
	 * will screw up the bootrom as it relies on MDMA0/1 waking it
	 * up from IDLE instructions.  See this report for more info:
	 * http://blackfin.uclinux.org/gf/tracker/4323
	 */
	if (ANOMALY_05000435)
		bfin_write_SIC_IWR1(IWR_ENABLE(10) | IWR_ENABLE(11));
	else
		bfin_write_SIC_IWR1(IWR_DISABLE_ALL);
# endif
# ifdef SIC_IWR2
	bfin_write_SIC_IWR2(IWR_DISABLE_ALL);
# endif
#else
	bfin_write_SIC_IWR(IWR_DISABLE_ALL);
#endif
	return 0;
}
1117
/*
 * Translate a core event vector into the pending system irq number by
 * scanning this vector's slice of ivg_table against the masked SIC
 * interrupt status.  Returns -1 when nothing is pending.
 */
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
static int vec_to_irq(int vec)
{
	struct ivgx *ivg = ivg7_13[vec - IVG7].ifirst;
	struct ivgx *ivg_stop = ivg7_13[vec - IVG7].istop;
	unsigned long sic_status[3];
	if (likely(vec == EVT_IVTMR_P))
		return IRQ_CORETMR;
#ifdef SIC_ISR
	sic_status[0] = bfin_read_SIC_IMASK() & bfin_read_SIC_ISR();
#else
	if (smp_processor_id()) {
# ifdef SICB_ISR0
		/* This will be optimized out in UP mode. */
		sic_status[0] = bfin_read_SICB_ISR0() & bfin_read_SICB_IMASK0();
		sic_status[1] = bfin_read_SICB_ISR1() & bfin_read_SICB_IMASK1();
# endif
	} else {
		sic_status[0] = bfin_read_SIC_ISR0() & bfin_read_SIC_IMASK0();
		sic_status[1] = bfin_read_SIC_ISR1() & bfin_read_SIC_IMASK1();
	}
#endif
#ifdef SIC_ISR2
	sic_status[2] = bfin_read_SIC_ISR2() & bfin_read_SIC_IMASK2();
#endif

	for (;; ivg++) {
		if (ivg >= ivg_stop)
			return -1;
#ifdef SIC_ISR
		if (sic_status[0] & ivg->isrflag)
#else
		if (sic_status[(ivg->irqno - IVG7) / 32] & ivg->isrflag)
#endif
			return ivg->irqno;
	}
}
1157
1158#else /* SEC_GCTL */
1159
/*
 * This function should be called during kernel startup to initialize
 * the BFin IRQ handling routines.
 */

int __init init_arch_irq(void)
{
	int irq;
	unsigned long ilat = 0;

	/* Reset the whole System Event Controller before configuring it. */
	bfin_write_SEC_GCTL(SEC_GCTL_RESET);

	local_irq_disable();

	for (irq = 0; irq <= SYS_IRQS; irq++) {
		if (irq <= IRQ_CORETMR) {
			irq_set_chip_and_handler(irq, &bfin_core_irqchip,
				handle_simple_irq);
#if defined(CONFIG_TICKSOURCE_CORETMR) && defined(CONFIG_SMP)
			if (irq == IRQ_CORETMR)
				irq_set_handler(irq, handle_percpu_irq);
#endif
		} else if (irq >= BFIN_IRQ(34) && irq <= BFIN_IRQ(37)) {
			/* Software-driven sources, see init_software_driven_irq(). */
			irq_set_chip_and_handler(irq, &bfin_sec_irqchip,
				handle_percpu_irq);
		} else {
			irq_set_chip(irq, &bfin_sec_irqchip);
			irq_set_handler(irq, handle_fasteoi_irq);
			__irq_set_preflow_handler(irq, bfin_sec_preflow_handler);
		}
	}

	bfin_write_IMASK(0);
	CSYNC();
	/* Clear anything latched in ILAT while we were configuring. */
	ilat = bfin_read_ILAT();
	CSYNC();
	bfin_write_ILAT(ilat);
	CSYNC();

	printk(KERN_INFO "Configuring Blackfin Priority Driven Interrupts\n");

	bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);

	/* Enable interrupts IVG7-15 */
	bfin_irq_flags |= IMASK_IVG15 |
	    IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
	    IMASK_IVG10 | IMASK_IVG9 | IMASK_IVG8 | IMASK_IVG7 | IMASK_IVGHW;


	/* Bring up the fault interface, watchdog source and both SCIs. */
	bfin_write_SEC_FCTL(SEC_FCTL_EN | SEC_FCTL_SYSRST_EN | SEC_FCTL_FLTIN_EN);
	bfin_sec_enable_sci(BFIN_SYSIRQ(IRQ_WATCH0));
	bfin_sec_enable_ssi(BFIN_SYSIRQ(IRQ_WATCH0));
	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_RESET);
	udelay(100);
	bfin_write_SEC_GCTL(SEC_GCTL_EN);
	bfin_write_SEC_SCI(0, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);
	bfin_write_SEC_SCI(1, SEC_CCTL, SEC_CCTL_EN | SEC_CCTL_NMI_EN);

	init_software_driven_irq();

#ifdef CONFIG_PM
	register_syscore_ops(&sec_pm_syscore_ops);
#endif

	/* Install the shared fault handler on the core fault vectors. */
	bfin_fault_irq.handler = bfin_fault_routine;
#ifdef CONFIG_L1_PARITY_CHECK
	setup_irq(IRQ_C0_NMI_L1_PARITY_ERR, &bfin_fault_irq);
#endif
	setup_irq(IRQ_C0_DBL_FAULT, &bfin_fault_irq);
	setup_irq(IRQ_SEC_ERR, &bfin_fault_irq);

	return 0;
}
1233
1234#ifdef CONFIG_DO_IRQ_L1
1235__attribute__((l1_text))
1236#endif
1237static int vec_to_irq(int vec)
1238{
1239	if (likely(vec == EVT_IVTMR_P))
1240		return IRQ_CORETMR;
1241
1242	return BFIN_IRQ(bfin_read_SEC_SCI(0, SEC_CSID));
1243}
1244#endif  /* SEC_GCTL */
1245
#ifdef CONFIG_DO_IRQ_L1
__attribute__((l1_text))
#endif
/*
 * Common interrupt entry: map the event vector to an IRQ number and
 * dispatch it.  Vectors with no pending source (-1) are ignored.
 */
void do_irq(int vec, struct pt_regs *fp)
{
	int irq = vec_to_irq(vec);

	if (irq != -1)
		asm_do_IRQ(irq, fp);
}
1256
1257#ifdef CONFIG_IPIPE
1258
/*
 * Report the core priority level (IVGx) at which the given IRQ is
 * delivered, for the I-pipe layer.  Falls back to IVG15 (the lowest
 * priority) when the IRQ is not found in the peripheral tables.
 */
int __ipipe_get_irq_priority(unsigned irq)
{
	int ient, prio;

	/* Core events map 1:1 onto their own priority level. */
	if (irq <= IRQ_CORETMR)
		return irq;

#ifdef SEC_GCTL
	/* With a SEC, all system interrupts are delivered at IVG11. */
	if (irq >= BFIN_IRQ(0))
		return IVG11;
#else
	/* Locate the IRQ's table entry, then find which IVG7..IVG13
	 * priority range [ifirst, istop) contains it.
	 */
	for (ient = 0; ient < NR_PERI_INTS; ient++) {
		struct ivgx *ivg = ivg_table + ient;
		if (ivg->irqno == irq) {
			for (prio = 0; prio <= IVG13-IVG7; prio++) {
				if (ivg7_13[prio].ifirst <= ivg &&
				    ivg7_13[prio].istop > ivg)
					return IVG7 + prio;
			}
		}
	}
#endif

	return IVG15;
}
1284
1285/* Hw interrupts are disabled on entry (check SAVE_CONTEXT). */
1286#ifdef CONFIG_DO_IRQ_L1
1287__attribute__((l1_text))
1288#endif
/*
 * I-pipe interrupt entry: log the incoming IRQ into the interrupt
 * pipeline instead of running Linux handlers at hardware priority.
 * A non-zero return presumably tells the low-level entry code that
 * the root stage log may be synced immediately -- confirm against
 * the SAVE_CONTEXT/entry assembly.
 */
asmlinkage int __ipipe_grab_irq(int vec, struct pt_regs *regs)
{
	struct ipipe_percpu_domain_data *p = ipipe_root_cpudom_ptr();
	struct ipipe_domain *this_domain = __ipipe_current_domain;
	int irq, s = 0;

	irq = vec_to_irq(vec);
	if (irq == -1)
		return 0;	/* no pending source for this vector */

	if (irq == IRQ_SYSTMR) {
#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_TICKSOURCE_GPTMR0)
		bfin_write_TIMER_STATUS(1); /* Latch TIMIL0 */
#endif
		/* This is basically what we need from the register frame. */
		__this_cpu_write(__ipipe_tick_regs.ipend, regs->ipend);
		__this_cpu_write(__ipipe_tick_regs.pc, regs->pc);
		/* Bit 0x10 in the saved IPEND reflects whether the root
		 * domain (Linux) was interrupted, so the tick handler can
		 * tell kernel from non-root context.
		 */
		if (this_domain != ipipe_root_domain)
			__this_cpu_and(__ipipe_tick_regs.ipend, ~0x10);
		else
			__this_cpu_or(__ipipe_tick_regs.ipend, 0x10);
	}

	/*
	 * We don't want Linux interrupt handlers to run at the
	 * current core priority level (i.e. < EVT15), since this
	 * might delay other interrupts handled by a high priority
	 * domain. Here is what we do instead:
	 *
	 * - we raise the SYNCDEFER bit to prevent
	 * __ipipe_handle_irq() to sync the pipeline for the root
	 * stage for the incoming interrupt. Upon return, that IRQ is
	 * pending in the interrupt log.
	 *
	 * - we raise the TIF_IRQ_SYNC bit for the current thread, so
	 * that _schedule_and_signal_from_int will eventually sync the
	 * pipeline from EVT15.
	 */
	if (this_domain == ipipe_root_domain) {
		/* s remembers whether SYNCDEFER was already set, so we
		 * only clear it below if we were the ones to set it.
		 */
		s = __test_and_set_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
		barrier();
	}

	ipipe_trace_irq_entry(irq);
	__ipipe_handle_irq(irq, regs);
	ipipe_trace_irq_exit(irq);

	if (user_mode(regs) &&
	    !ipipe_test_foreign_stack() &&
	    (current->ipipe_flags & PF_EVTRET) != 0) {
		/*
		 * Testing for user_regs() does NOT fully eliminate
		 * foreign stack contexts, because of the forged
		 * interrupt returns we do through
		 * __ipipe_call_irqtail. In that case, we might have
		 * preempted a foreign stack context in a high
		 * priority domain, with a single interrupt level now
		 * pending after the irqtail unwinding is done. In
		 * which case user_mode() is now true, and the event
		 * gets dispatched spuriously.
		 */
		current->ipipe_flags &= ~PF_EVTRET;
		__ipipe_dispatch_event(IPIPE_EVENT_RETURN, regs);
	}

	if (this_domain == ipipe_root_domain) {
		set_thread_flag(TIF_IRQ_SYNC);
		if (!s) {
			/* We set SYNCDEFER above: drop it and report
			 * whether the root stage is currently unstalled.
			 */
			__clear_bit(IPIPE_SYNCDEFER_FLAG, &p->status);
			return !test_bit(IPIPE_STALL_FLAG, &p->status);
		}
	}

	return 0;
}
1364
1365#endif /* CONFIG_IPIPE */