   1/*
   2 * linux/kernel/irq/chip.c
   3 *
   4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   5 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
   6 *
   7 * This file contains the core interrupt handling code, for irq-chip
   8 * based architectures.
   9 *
  10 * Detailed information is available in Documentation/DocBook/genericirq
  11 */
  12
  13#include <linux/irq.h>
  14#include <linux/msi.h>
  15#include <linux/module.h>
  16#include <linux/interrupt.h>
  17#include <linux/kernel_stat.h>
  18#include <linux/irqdomain.h>
  19
  20#include <trace/events/irq.h>
  21
  22#include "internals.h"
  23
  24static irqreturn_t bad_chained_irq(int irq, void *dev_id)
  25{
  26	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
  27	return IRQ_NONE;
  28}
  29
  30/*
  31 * Chained handlers should never call action on their IRQ. This default
   32 * action will emit a warning if such a thing happens.
  33 */
  34struct irqaction chained_action = {
  35	.handler = bad_chained_irq,
  36};
  37
  38/**
  39 *	irq_set_chip - set the irq chip for an irq
  40 *	@irq:	irq number
  41 *	@chip:	pointer to irq chip description structure
  42 */
  43int irq_set_chip(unsigned int irq, struct irq_chip *chip)
  44{
  45	unsigned long flags;
  46	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  47
  48	if (!desc)
  49		return -EINVAL;
  50
  51	if (!chip)
  52		chip = &no_irq_chip;
  53
  54	desc->irq_data.chip = chip;
  55	irq_put_desc_unlock(desc, flags);
  56	/*
  57	 * For !CONFIG_SPARSE_IRQ make the irq show up in
  58	 * allocated_irqs.
  59	 */
  60	irq_mark_irq(irq);
  61	return 0;
  62}
  63EXPORT_SYMBOL(irq_set_chip);
  64
  65/**
   66 *	irq_set_irq_type - set the irq trigger type for an irq
  67 *	@irq:	irq number
  68 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
  69 */
  70int irq_set_irq_type(unsigned int irq, unsigned int type)
  71{
  72	unsigned long flags;
  73	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  74	int ret = 0;
  75
  76	if (!desc)
  77		return -EINVAL;
  78
  79	type &= IRQ_TYPE_SENSE_MASK;
  80	ret = __irq_set_trigger(desc, type);
  81	irq_put_desc_busunlock(desc, flags);
  82	return ret;
  83}
  84EXPORT_SYMBOL(irq_set_irq_type);
  85
  86/**
  87 *	irq_set_handler_data - set irq handler data for an irq
  88 *	@irq:	Interrupt number
  89 *	@data:	Pointer to interrupt specific data
  90 *
  91 *	Set the hardware irq controller data for an irq
  92 */
  93int irq_set_handler_data(unsigned int irq, void *data)
  94{
  95	unsigned long flags;
  96	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  97
  98	if (!desc)
  99		return -EINVAL;
 100	desc->irq_common_data.handler_data = data;
 101	irq_put_desc_unlock(desc, flags);
 102	return 0;
 103}
 104EXPORT_SYMBOL(irq_set_handler_data);
 105
 106/**
 107 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 108 *	@irq_base:	Interrupt number base
 109 *	@irq_offset:	Interrupt number offset
 110 *	@entry:		Pointer to MSI descriptor data
 111 *
 112 *	Set the MSI descriptor entry for an irq at offset
 113 */
 114int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
 115			 struct msi_desc *entry)
 116{
 117	unsigned long flags;
 118	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 119
 120	if (!desc)
 121		return -EINVAL;
 122	desc->irq_common_data.msi_desc = entry;
 123	if (entry && !irq_offset)
 124		entry->irq = irq_base;
 125	irq_put_desc_unlock(desc, flags);
 126	return 0;
 127}
 128
 129/**
 130 *	irq_set_msi_desc - set MSI descriptor data for an irq
 131 *	@irq:	Interrupt number
 132 *	@entry:	Pointer to MSI descriptor data
 133 *
 134 *	Set the MSI descriptor entry for an irq
 135 */
 136int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 137{
 138	return irq_set_msi_desc_off(irq, 0, entry);
 139}
 140
 141/**
 142 *	irq_set_chip_data - set irq chip data for an irq
 143 *	@irq:	Interrupt number
 144 *	@data:	Pointer to chip specific data
 145 *
 146 *	Set the hardware irq chip data for an irq
 147 */
 148int irq_set_chip_data(unsigned int irq, void *data)
 149{
 150	unsigned long flags;
 151	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 152
 153	if (!desc)
 154		return -EINVAL;
 155	desc->irq_data.chip_data = data;
 156	irq_put_desc_unlock(desc, flags);
 157	return 0;
 158}
 159EXPORT_SYMBOL(irq_set_chip_data);
 160
 161struct irq_data *irq_get_irq_data(unsigned int irq)
 162{
 163	struct irq_desc *desc = irq_to_desc(irq);
 164
 165	return desc ? &desc->irq_data : NULL;
 166}
 167EXPORT_SYMBOL_GPL(irq_get_irq_data);
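/*
 * Example (illustrative sketch, not part of chip.c): how a hypothetical
 * interrupt controller driver might use the setters above to wire up one
 * of its Linux interrupt numbers. All foo_* names are invented for the
 * illustration; only irq_set_chip(), irq_set_chip_data() and
 * irq_set_irq_type() are the real APIs from this file.
 */

#include <linux/irq.h>

struct foo_priv {
	void __iomem *regs;
};

static void foo_mask(struct irq_data *d)   { /* set the per-line mask bit */ }
static void foo_unmask(struct irq_data *d) { /* clear the per-line mask bit */ }
static void foo_ack(struct irq_data *d)    { /* clear the latched status bit */ }

static struct irq_chip foo_chip = {
	.name		= "foo",
	.irq_mask	= foo_mask,
	.irq_unmask	= foo_unmask,
	.irq_ack	= foo_ack,
};

static int foo_setup_one_irq(struct foo_priv *priv, unsigned int irq)
{
	int ret;

	ret = irq_set_chip(irq, &foo_chip);
	if (ret)
		return ret;

	/* Stash driver state where the chip callbacks can retrieve it */
	ret = irq_set_chip_data(irq, priv);
	if (ret)
		return ret;

	/* Program the default trigger, routed through __irq_set_trigger() */
	return irq_set_irq_type(irq, IRQ_TYPE_LEVEL_HIGH);
}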
 168
 169static void irq_state_clr_disabled(struct irq_desc *desc)
 170{
 171	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
 172}
 173
 174static void irq_state_set_disabled(struct irq_desc *desc)
 175{
 176	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
 177}
 178
 179static void irq_state_clr_masked(struct irq_desc *desc)
 180{
 181	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
 182}
 183
 184static void irq_state_set_masked(struct irq_desc *desc)
 185{
 186	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
 187}
 188
 189int irq_startup(struct irq_desc *desc, bool resend)
 190{
 191	int ret = 0;
 192
 193	irq_state_clr_disabled(desc);
 194	desc->depth = 0;
 195
 196	irq_domain_activate_irq(&desc->irq_data);
 197	if (desc->irq_data.chip->irq_startup) {
 198		ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
 199		irq_state_clr_masked(desc);
 200	} else {
 201		irq_enable(desc);
 202	}
 203	if (resend)
 204		check_irq_resend(desc);
 205	return ret;
 206}
 207
 208void irq_shutdown(struct irq_desc *desc)
 209{
 210	irq_state_set_disabled(desc);
 211	desc->depth = 1;
 212	if (desc->irq_data.chip->irq_shutdown)
 213		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
 214	else if (desc->irq_data.chip->irq_disable)
 215		desc->irq_data.chip->irq_disable(&desc->irq_data);
 216	else
 217		desc->irq_data.chip->irq_mask(&desc->irq_data);
 218	irq_domain_deactivate_irq(&desc->irq_data);
 219	irq_state_set_masked(desc);
 220}
 221
 222void irq_enable(struct irq_desc *desc)
 223{
 224	irq_state_clr_disabled(desc);
 225	if (desc->irq_data.chip->irq_enable)
 226		desc->irq_data.chip->irq_enable(&desc->irq_data);
 227	else
 228		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 229	irq_state_clr_masked(desc);
 230}
 231
 232/**
 233 * irq_disable - Mark interrupt disabled
 234 * @desc:	irq descriptor which should be disabled
 235 *
 236 * If the chip does not implement the irq_disable callback, we
 237 * use a lazy disable approach. That means we mark the interrupt
 238 * disabled, but leave the hardware unmasked. That's an
 239 * optimization because we avoid the hardware access for the
 240 * common case where no interrupt happens after we marked it
 241 * disabled. If an interrupt happens, then the interrupt flow
 242 * handler masks the line at the hardware level and marks it
 243 * pending.
 244 *
 245 * If the interrupt chip does not implement the irq_disable callback,
 246 * a driver can disable the lazy approach for a particular irq line by
 247 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 248 * be used for devices which cannot disable the interrupt at the
 249 * device level under certain circumstances and have to use
 250 * disable_irq[_nosync] instead.
 251 */
 252void irq_disable(struct irq_desc *desc)
 253{
 254	irq_state_set_disabled(desc);
 255	if (desc->irq_data.chip->irq_disable) {
 256		desc->irq_data.chip->irq_disable(&desc->irq_data);
 257		irq_state_set_masked(desc);
 258	} else if (irq_settings_disable_unlazy(desc)) {
 259		mask_irq(desc);
 260	}
 261}
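/*
 * Example (sketch, not part of chip.c): a driver whose device cannot stop
 * raising the line at the device level, and therefore cannot live with the
 * lazy approach described above, can opt out per interrupt. The irq number
 * is assumed to come from whatever platform/DT lookup the driver already does.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>

static void foo_quiesce(unsigned int irq)
{
	/* Force irq_disable()/mask_irq() to hit the hardware immediately */
	irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
	disable_irq(irq);
}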
 262
 263void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
 264{
 265	if (desc->irq_data.chip->irq_enable)
 266		desc->irq_data.chip->irq_enable(&desc->irq_data);
 267	else
 268		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 269	cpumask_set_cpu(cpu, desc->percpu_enabled);
 270}
 271
 272void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
 273{
 274	if (desc->irq_data.chip->irq_disable)
 275		desc->irq_data.chip->irq_disable(&desc->irq_data);
 276	else
 277		desc->irq_data.chip->irq_mask(&desc->irq_data);
 278	cpumask_clear_cpu(cpu, desc->percpu_enabled);
 279}
 280
 281static inline void mask_ack_irq(struct irq_desc *desc)
 282{
 283	if (desc->irq_data.chip->irq_mask_ack)
 284		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
 285	else {
 286		desc->irq_data.chip->irq_mask(&desc->irq_data);
 287		if (desc->irq_data.chip->irq_ack)
 288			desc->irq_data.chip->irq_ack(&desc->irq_data);
 289	}
 290	irq_state_set_masked(desc);
 291}
 292
 293void mask_irq(struct irq_desc *desc)
 294{
 295	if (desc->irq_data.chip->irq_mask) {
 296		desc->irq_data.chip->irq_mask(&desc->irq_data);
 297		irq_state_set_masked(desc);
 298	}
 299}
 300
 301void unmask_irq(struct irq_desc *desc)
 302{
 303	if (desc->irq_data.chip->irq_unmask) {
 304		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 305		irq_state_clr_masked(desc);
 306	}
 307}
 308
 309void unmask_threaded_irq(struct irq_desc *desc)
 310{
 311	struct irq_chip *chip = desc->irq_data.chip;
 312
 313	if (chip->flags & IRQCHIP_EOI_THREADED)
 314		chip->irq_eoi(&desc->irq_data);
 315
 316	if (chip->irq_unmask) {
 317		chip->irq_unmask(&desc->irq_data);
 318		irq_state_clr_masked(desc);
 319	}
 320}
 321
 322/*
  323 *	handle_nested_irq - Handle a nested irq from an irq thread
 324 *	@irq:	the interrupt number
 325 *
 326 *	Handle interrupts which are nested into a threaded interrupt
 327 *	handler. The handler function is called inside the calling
 328 *	threads context.
 329 */
 330void handle_nested_irq(unsigned int irq)
 331{
 332	struct irq_desc *desc = irq_to_desc(irq);
 333	struct irqaction *action;
 334	irqreturn_t action_ret;
 335
 336	might_sleep();
 337
 338	raw_spin_lock_irq(&desc->lock);
 339
 340	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 341
 342	action = desc->action;
 343	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
 344		desc->istate |= IRQS_PENDING;
 345		goto out_unlock;
 346	}
 347
 348	kstat_incr_irqs_this_cpu(desc);
 349	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 350	raw_spin_unlock_irq(&desc->lock);
 351
 352	action_ret = action->thread_fn(action->irq, action->dev_id);
 353	if (!noirqdebug)
 354		note_interrupt(desc, action_ret);
 355
 356	raw_spin_lock_irq(&desc->lock);
 357	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 358
 359out_unlock:
 360	raw_spin_unlock_irq(&desc->lock);
 361}
 362EXPORT_SYMBOL_GPL(handle_nested_irq);
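/*
 * Example (sketch, not part of chip.c): a threaded primary handler for an
 * i2c-attached expander that demultiplexes into handle_nested_irq(). The
 * child interrupts are assumed to have been marked nested with
 * irq_set_nested_thread(child, true) when the irq domain was populated.
 * All foo_* names and the status-read helper are illustrative only.
 */

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>

struct foo_expander {
	struct irq_domain *domain;
	unsigned int nr_irqs;
};

static unsigned long foo_read_pending(struct foo_expander *chip)
{
	return 0;	/* slow bus (i2c) read of the status register, may sleep */
}

static irqreturn_t foo_expander_irq_thread(int irq, void *dev_id)
{
	struct foo_expander *chip = dev_id;
	unsigned long pending = foo_read_pending(chip);
	int bit;

	for_each_set_bit(bit, &pending, chip->nr_irqs)
		handle_nested_irq(irq_find_mapping(chip->domain, bit));

	return pending ? IRQ_HANDLED : IRQ_NONE;
}

/*
 * Registered with something like:
 *	request_threaded_irq(parent_irq, NULL, foo_expander_irq_thread,
 *			     IRQF_ONESHOT, "foo-expander", chip);
 */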
 363
 364static bool irq_check_poll(struct irq_desc *desc)
 365{
 366	if (!(desc->istate & IRQS_POLL_INPROGRESS))
 367		return false;
 368	return irq_wait_for_poll(desc);
 369}
 370
 371static bool irq_may_run(struct irq_desc *desc)
 372{
 373	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
 374
 375	/*
 376	 * If the interrupt is not in progress and is not an armed
 377	 * wakeup interrupt, proceed.
 378	 */
 379	if (!irqd_has_set(&desc->irq_data, mask))
 380		return true;
 381
 382	/*
 383	 * If the interrupt is an armed wakeup source, mark it pending
 384	 * and suspended, disable it and notify the pm core about the
 385	 * event.
 386	 */
 387	if (irq_pm_check_wakeup(desc))
 388		return false;
 389
 390	/*
 391	 * Handle a potential concurrent poll on a different core.
 392	 */
 393	return irq_check_poll(desc);
 394}
 395
 396/**
 397 *	handle_simple_irq - Simple and software-decoded IRQs.
 398 *	@desc:	the interrupt description structure for this irq
 399 *
 400 *	Simple interrupts are either sent from a demultiplexing interrupt
 401 *	handler or come from hardware, where no interrupt hardware control
 402 *	is necessary.
 403 *
 404 *	Note: The caller is expected to handle the ack, clear, mask and
 405 *	unmask issues if necessary.
 406 */
 407void handle_simple_irq(struct irq_desc *desc)
 408{
 409	raw_spin_lock(&desc->lock);
 410
 411	if (!irq_may_run(desc))
 412		goto out_unlock;
 413
 414	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 415
 416	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 417		desc->istate |= IRQS_PENDING;
 418		goto out_unlock;
 419	}
 420
 421	kstat_incr_irqs_this_cpu(desc);
 422	handle_irq_event(desc);
 423
 424out_unlock:
 425	raw_spin_unlock(&desc->lock);
 426}
 427EXPORT_SYMBOL_GPL(handle_simple_irq);
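/*
 * Example (sketch, not part of chip.c): children of a purely software
 * demultiplexer need no hardware control, so they are typically wired to
 * handle_simple_irq() with the dummy chip and raised by the demultiplexer
 * through generic_handle_irq():
 *
 *	irq_set_chip_and_handler(child_virq, &dummy_irq_chip,
 *				 handle_simple_irq);
 *	...
 *	generic_handle_irq(child_virq);
 */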
 428
 429/*
 430 * Called unconditionally from handle_level_irq() and only for oneshot
 431 * interrupts from handle_fasteoi_irq()
 432 */
 433static void cond_unmask_irq(struct irq_desc *desc)
 434{
 435	/*
 436	 * We need to unmask in the following cases:
 437	 * - Standard level irq (IRQF_ONESHOT is not set)
 438	 * - Oneshot irq which did not wake the thread (caused by a
 439	 *   spurious interrupt or a primary handler handling it
 440	 *   completely).
 441	 */
 442	if (!irqd_irq_disabled(&desc->irq_data) &&
 443	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
 444		unmask_irq(desc);
 445}
 446
 447/**
 448 *	handle_level_irq - Level type irq handler
 449 *	@desc:	the interrupt description structure for this irq
 450 *
 451 *	Level type interrupts are active as long as the hardware line has
  452 *	the active level. This may require masking the interrupt and unmasking
 453 *	it after the associated handler has acknowledged the device, so the
 454 *	interrupt line is back to inactive.
 455 */
 456void handle_level_irq(struct irq_desc *desc)
 457{
 458	raw_spin_lock(&desc->lock);
 459	mask_ack_irq(desc);
 460
 461	if (!irq_may_run(desc))
 462		goto out_unlock;
 463
 464	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 465
 466	/*
  467	 * If it's disabled or no action is available
 468	 * keep it masked and get out of here
 469	 */
 470	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 471		desc->istate |= IRQS_PENDING;
 472		goto out_unlock;
 473	}
 474
 475	kstat_incr_irqs_this_cpu(desc);
 476	handle_irq_event(desc);
 477
 478	cond_unmask_irq(desc);
 479
 480out_unlock:
 481	raw_spin_unlock(&desc->lock);
 482}
 483EXPORT_SYMBOL_GPL(handle_level_irq);
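/*
 * Example (sketch, not part of chip.c): a level-type line is masked by
 * mask_ack_irq() on entry above, so the chip must at least provide
 * irq_mask/irq_unmask (or irq_mask_ack). foo_* callbacks are illustrative.
 *
 *	static struct irq_chip foo_level_chip = {
 *		.name		= "foo-level",
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	irq_set_chip_and_handler_name(irq, &foo_level_chip,
 *				      handle_level_irq, "level");
 */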
 484
 485#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
 486static inline void preflow_handler(struct irq_desc *desc)
 487{
 488	if (desc->preflow_handler)
 489		desc->preflow_handler(&desc->irq_data);
 490}
 491#else
 492static inline void preflow_handler(struct irq_desc *desc) { }
 493#endif
 494
 495static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
 496{
 497	if (!(desc->istate & IRQS_ONESHOT)) {
 498		chip->irq_eoi(&desc->irq_data);
 499		return;
 500	}
 501	/*
 502	 * We need to unmask in the following cases:
 503	 * - Oneshot irq which did not wake the thread (caused by a
 504	 *   spurious interrupt or a primary handler handling it
 505	 *   completely).
 506	 */
 507	if (!irqd_irq_disabled(&desc->irq_data) &&
 508	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
 509		chip->irq_eoi(&desc->irq_data);
 510		unmask_irq(desc);
 511	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
 512		chip->irq_eoi(&desc->irq_data);
 513	}
 514}
 515
 516/**
 517 *	handle_fasteoi_irq - irq handler for transparent controllers
 518 *	@desc:	the interrupt description structure for this irq
 519 *
 520 *	Only a single callback will be issued to the chip: an ->eoi()
 521 *	call when the interrupt has been serviced. This enables support
 522 *	for modern forms of interrupt handlers, which handle the flow
 523 *	details in hardware, transparently.
 524 */
 525void handle_fasteoi_irq(struct irq_desc *desc)
 526{
 527	struct irq_chip *chip = desc->irq_data.chip;
 528
 529	raw_spin_lock(&desc->lock);
 530
 531	if (!irq_may_run(desc))
 532		goto out;
 533
 534	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 535
 536	/*
  537	 * If it's disabled or no action is available
 538	 * then mask it and get out of here:
 539	 */
 540	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 541		desc->istate |= IRQS_PENDING;
 542		mask_irq(desc);
 543		goto out;
 544	}
 545
 546	kstat_incr_irqs_this_cpu(desc);
 547	if (desc->istate & IRQS_ONESHOT)
 548		mask_irq(desc);
 549
 550	preflow_handler(desc);
 551	handle_irq_event(desc);
 552
 553	cond_unmask_eoi_irq(desc, chip);
 554
 555	raw_spin_unlock(&desc->lock);
 556	return;
 557out:
 558	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
 559		chip->irq_eoi(&desc->irq_data);
 560	raw_spin_unlock(&desc->lock);
 561}
 562EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
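/*
 * Example (sketch, not part of chip.c): a fasteoi chip only needs irq_eoi()
 * (plus irq_mask/irq_unmask for the oneshot and disabled cases handled
 * above). Typical wiring, with foo_* names being illustrative:
 *
 *	static struct irq_chip foo_fasteoi_chip = {
 *		.name		= "foo-eoi",
 *		.irq_eoi	= foo_eoi,
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_fasteoi_chip, handle_fasteoi_irq);
 */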
 563
 564/**
 565 *	handle_edge_irq - edge type IRQ handler
 566 *	@desc:	the interrupt description structure for this irq
 567 *
  568 *	Interrupt occurs on the falling and/or rising edge of a hardware
 569 *	signal. The occurrence is latched into the irq controller hardware
 570 *	and must be acked in order to be reenabled. After the ack another
 571 *	interrupt can happen on the same source even before the first one
 572 *	is handled by the associated event handler. If this happens it
 573 *	might be necessary to disable (mask) the interrupt depending on the
  574 *	controller hardware. This requires reenabling the interrupt inside
 575 *	of the loop which handles the interrupts which have arrived while
 576 *	the handler was running. If all pending interrupts are handled, the
 577 *	loop is left.
 578 */
 579void handle_edge_irq(struct irq_desc *desc)
 580{
 581	raw_spin_lock(&desc->lock);
 582
 583	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 584
 585	if (!irq_may_run(desc)) {
 586		desc->istate |= IRQS_PENDING;
 587		mask_ack_irq(desc);
 588		goto out_unlock;
 589	}
 590
 591	/*
  592	 * If it's disabled or no action is available then mask it and get
 593	 * out of here.
 594	 */
 595	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
 596		desc->istate |= IRQS_PENDING;
 597		mask_ack_irq(desc);
 598		goto out_unlock;
 599	}
 600
 601	kstat_incr_irqs_this_cpu(desc);
 602
 603	/* Start handling the irq */
 604	desc->irq_data.chip->irq_ack(&desc->irq_data);
 605
 606	do {
 607		if (unlikely(!desc->action)) {
 608			mask_irq(desc);
 609			goto out_unlock;
 610		}
 611
 612		/*
 613		 * When another irq arrived while we were handling
 614		 * one, we could have masked the irq.
  615		 * Reenable it, if it was not disabled in the meantime.
 616		 */
 617		if (unlikely(desc->istate & IRQS_PENDING)) {
 618			if (!irqd_irq_disabled(&desc->irq_data) &&
 619			    irqd_irq_masked(&desc->irq_data))
 620				unmask_irq(desc);
 621		}
 622
 623		handle_irq_event(desc);
 624
 625	} while ((desc->istate & IRQS_PENDING) &&
 626		 !irqd_irq_disabled(&desc->irq_data));
 627
 628out_unlock:
 629	raw_spin_unlock(&desc->lock);
 630}
 631EXPORT_SYMBOL(handle_edge_irq);
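/*
 * Example (sketch, not part of chip.c): handle_edge_irq() calls
 * chip->irq_ack() unconditionally once it decides to handle the interrupt,
 * so an edge chip must provide it in addition to mask/unmask. foo_*
 * callbacks are illustrative.
 *
 *	static struct irq_chip foo_edge_chip = {
 *		.name		= "foo-edge",
 *		.irq_ack	= foo_ack,
 *		.irq_mask	= foo_mask,
 *		.irq_unmask	= foo_unmask,
 *	};
 *
 *	irq_set_chip_and_handler(irq, &foo_edge_chip, handle_edge_irq);
 */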
 632
 633#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
 634/**
 635 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 636 *	@desc:	the interrupt description structure for this irq
 637 *
  638 * Similar to handle_edge_irq above, but using eoi and without the
 639 * mask/unmask logic.
 640 */
 641void handle_edge_eoi_irq(struct irq_desc *desc)
 642{
 643	struct irq_chip *chip = irq_desc_get_chip(desc);
 644
 645	raw_spin_lock(&desc->lock);
 646
 647	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 648
 649	if (!irq_may_run(desc)) {
 650		desc->istate |= IRQS_PENDING;
 651		goto out_eoi;
 652	}
 653
 654	/*
  655	 * If it's disabled or no action is available then mask it and get
 656	 * out of here.
 657	 */
 658	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
 659		desc->istate |= IRQS_PENDING;
 660		goto out_eoi;
 661	}
 662
 663	kstat_incr_irqs_this_cpu(desc);
 664
 665	do {
 666		if (unlikely(!desc->action))
 667			goto out_eoi;
 668
 669		handle_irq_event(desc);
 670
 671	} while ((desc->istate & IRQS_PENDING) &&
 672		 !irqd_irq_disabled(&desc->irq_data));
 673
 674out_eoi:
 675	chip->irq_eoi(&desc->irq_data);
 676	raw_spin_unlock(&desc->lock);
 677}
 678#endif
 679
 680/**
 681 *	handle_percpu_irq - Per CPU local irq handler
 682 *	@desc:	the interrupt description structure for this irq
 683 *
 684 *	Per CPU interrupts on SMP machines without locking requirements
 685 */
 686void handle_percpu_irq(struct irq_desc *desc)
 687{
 688	struct irq_chip *chip = irq_desc_get_chip(desc);
 689
 690	kstat_incr_irqs_this_cpu(desc);
 691
 692	if (chip->irq_ack)
 693		chip->irq_ack(&desc->irq_data);
 694
 695	handle_irq_event_percpu(desc);
 696
 697	if (chip->irq_eoi)
 698		chip->irq_eoi(&desc->irq_data);
 699}
 700
 701/**
 702 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 703 * @desc:	the interrupt description structure for this irq
 704 *
 705 * Per CPU interrupts on SMP machines without locking requirements. Same as
 706 * handle_percpu_irq() above but with the following extras:
 707 *
 708 * action->percpu_dev_id is a pointer to percpu variables which
 709 * contain the real device id for the cpu on which this handler is
 710 * called
 711 */
 712void handle_percpu_devid_irq(struct irq_desc *desc)
 713{
 714	struct irq_chip *chip = irq_desc_get_chip(desc);
 715	struct irqaction *action = desc->action;
 716	void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
 717	unsigned int irq = irq_desc_get_irq(desc);
 718	irqreturn_t res;
 719
 720	kstat_incr_irqs_this_cpu(desc);
 721
 722	if (chip->irq_ack)
 723		chip->irq_ack(&desc->irq_data);
 724
 725	trace_irq_handler_entry(irq, action);
 726	res = action->handler(irq, dev_id);
 727	trace_irq_handler_exit(irq, action, res);
 728
 729	if (chip->irq_eoi)
 730		chip->irq_eoi(&desc->irq_data);
 731}
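/*
 * Example (sketch, not part of chip.c): per-CPU device-id interrupts are
 * requested once with a percpu pointer and then enabled on each CPU
 * individually. Names are illustrative; request_percpu_irq() and
 * enable_percpu_irq() are the real APIs.
 */

#include <linux/interrupt.h>
#include <linux/percpu.h>

struct foo_cpu_state {
	u64 count;
};

static DEFINE_PER_CPU(struct foo_cpu_state, foo_cpu_state);

static irqreturn_t foo_local_timer_irq(int irq, void *dev_id)
{
	struct foo_cpu_state *s = dev_id;	/* already this CPU's instance */

	s->count++;
	return IRQ_HANDLED;
}

/*
 * Setup:
 *	request_percpu_irq(irq, foo_local_timer_irq, "foo-timer",
 *			   &foo_cpu_state);
 * and on each CPU (e.g. from a hotplug callback):
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */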
 732
 733void
 734__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 735		     int is_chained, const char *name)
 736{
 737	if (!handle) {
 738		handle = handle_bad_irq;
 739	} else {
 740		struct irq_data *irq_data = &desc->irq_data;
 741#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 742		/*
 743		 * With hierarchical domains we might run into a
 744		 * situation where the outermost chip is not yet set
 745		 * up, but the inner chips are there.  Instead of
 746		 * bailing we install the handler, but obviously we
 747		 * cannot enable/startup the interrupt at this point.
 748		 */
 749		while (irq_data) {
 750			if (irq_data->chip != &no_irq_chip)
 751				break;
 752			/*
 753			 * Bail out if the outer chip is not set up
  754			 * and the interrupt is supposed to be started
 755			 * right away.
 756			 */
 757			if (WARN_ON(is_chained))
 758				return;
 759			/* Try the parent */
 760			irq_data = irq_data->parent_data;
 761		}
 762#endif
 763		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
 764			return;
 765	}
 766
 767	/* Uninstall? */
 768	if (handle == handle_bad_irq) {
 769		if (desc->irq_data.chip != &no_irq_chip)
 770			mask_ack_irq(desc);
 771		irq_state_set_disabled(desc);
 772		if (is_chained)
 773			desc->action = NULL;
 774		desc->depth = 1;
 775	}
 776	desc->handle_irq = handle;
 777	desc->name = name;
 778
 779	if (handle != handle_bad_irq && is_chained) {
 780		irq_settings_set_noprobe(desc);
 781		irq_settings_set_norequest(desc);
 782		irq_settings_set_nothread(desc);
 783		desc->action = &chained_action;
 784		irq_startup(desc, true);
 785	}
 786}
 787
 788void
 789__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 790		  const char *name)
 791{
 792	unsigned long flags;
 793	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 794
 795	if (!desc)
 796		return;
 797
 798	__irq_do_set_handler(desc, handle, is_chained, name);
 799	irq_put_desc_busunlock(desc, flags);
 800}
 801EXPORT_SYMBOL_GPL(__irq_set_handler);
 802
 803void
 804irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
 805				 void *data)
 806{
 807	unsigned long flags;
 808	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
 809
 810	if (!desc)
 811		return;
 812
 813	__irq_do_set_handler(desc, handle, 1, NULL);
 814	desc->irq_common_data.handler_data = data;
 815
 816	irq_put_desc_busunlock(desc, flags);
 817}
 818EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
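/*
 * Example (sketch, not part of chip.c): a chained flow handler for a
 * hypothetical GPIO block, installed with irq_set_chained_handler_and_data()
 * above. It runs in the parent's hard interrupt context, so it brackets the
 * demux with the chained_irq_enter/exit helpers and raises the children via
 * generic_handle_irq(). foo_* names and the status register are illustrative.
 */

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>

#define FOO_IRQ_STATUS	0x10	/* assumed register offset */
#define FOO_NR_LINES	32

struct foo_gpio {
	void __iomem *base;
	struct irq_domain *domain;
};

static void foo_gpio_demux(struct irq_desc *desc)
{
	struct foo_gpio *g = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int bit;

	chained_irq_enter(chip, desc);

	pending = readl(g->base + FOO_IRQ_STATUS);
	for_each_set_bit(bit, &pending, FOO_NR_LINES)
		generic_handle_irq(irq_find_mapping(g->domain, bit));

	chained_irq_exit(chip, desc);
}

/* irq_set_chained_handler_and_data(parent_irq, foo_gpio_demux, g); */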
 819
 820void
 821irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 822			      irq_flow_handler_t handle, const char *name)
 823{
 824	irq_set_chip(irq, chip);
 825	__irq_set_handler(irq, handle, 0, name);
 826}
 827EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
 828
 829void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 830{
 831	unsigned long flags;
 832	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 833
 834	if (!desc)
 835		return;
 836	irq_settings_clr_and_set(desc, clr, set);
 837
 838	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
 839		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
 840	if (irq_settings_has_no_balance_set(desc))
 841		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 842	if (irq_settings_is_per_cpu(desc))
 843		irqd_set(&desc->irq_data, IRQD_PER_CPU);
 844	if (irq_settings_can_move_pcntxt(desc))
 845		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
 846	if (irq_settings_is_level(desc))
 847		irqd_set(&desc->irq_data, IRQD_LEVEL);
 848
 849	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
 850
 851	irq_put_desc_unlock(desc, flags);
 852}
 853EXPORT_SYMBOL_GPL(irq_modify_status);
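/*
 * Example (sketch, not part of chip.c): irq_modify_status() is typically
 * used by irqchip code right after creating a mapping, for instance to keep
 * generic drivers and autoprobing away from a line that is handled
 * internally. The flag choice here is purely illustrative:
 *
 *	irq_modify_status(irq, IRQ_NOAUTOEN,
 *			  IRQ_NOPROBE | IRQ_NOREQUEST | IRQ_NOTHREAD);
 *
 * The first mask clears flags, the second sets them; the irqd_* copies of
 * the per-cpu/level/trigger bits are resynchronized by the code above.
 */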
 854
 855/**
 856 *	irq_cpu_online - Invoke all irq_cpu_online functions.
 857 *
 858 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
 859 *	for each.
 860 */
 861void irq_cpu_online(void)
 862{
 863	struct irq_desc *desc;
 864	struct irq_chip *chip;
 865	unsigned long flags;
 866	unsigned int irq;
 867
 868	for_each_active_irq(irq) {
 869		desc = irq_to_desc(irq);
 870		if (!desc)
 871			continue;
 872
 873		raw_spin_lock_irqsave(&desc->lock, flags);
 874
 875		chip = irq_data_get_irq_chip(&desc->irq_data);
 876		if (chip && chip->irq_cpu_online &&
 877		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
 878		     !irqd_irq_disabled(&desc->irq_data)))
 879			chip->irq_cpu_online(&desc->irq_data);
 880
 881		raw_spin_unlock_irqrestore(&desc->lock, flags);
 882	}
 883}
 884
 885/**
 886 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
 887 *
 888 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
 889 *	for each.
 890 */
 891void irq_cpu_offline(void)
 892{
 893	struct irq_desc *desc;
 894	struct irq_chip *chip;
 895	unsigned long flags;
 896	unsigned int irq;
 897
 898	for_each_active_irq(irq) {
 899		desc = irq_to_desc(irq);
 900		if (!desc)
 901			continue;
 902
 903		raw_spin_lock_irqsave(&desc->lock, flags);
 904
 905		chip = irq_data_get_irq_chip(&desc->irq_data);
 906		if (chip && chip->irq_cpu_offline &&
 907		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
 908		     !irqd_irq_disabled(&desc->irq_data)))
 909			chip->irq_cpu_offline(&desc->irq_data);
 910
 911		raw_spin_unlock_irqrestore(&desc->lock, flags);
 912	}
 913}
 914
 915#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
 916/**
 917 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 918 * NULL)
 919 * @data:	Pointer to interrupt specific data
 920 */
 921void irq_chip_enable_parent(struct irq_data *data)
 922{
 923	data = data->parent_data;
 924	if (data->chip->irq_enable)
 925		data->chip->irq_enable(data);
 926	else
 927		data->chip->irq_unmask(data);
 928}
 929
 930/**
 931 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 932 * NULL)
 933 * @data:	Pointer to interrupt specific data
 934 */
 935void irq_chip_disable_parent(struct irq_data *data)
 936{
 937	data = data->parent_data;
 938	if (data->chip->irq_disable)
 939		data->chip->irq_disable(data);
 940	else
 941		data->chip->irq_mask(data);
 942}
 943
 944/**
 945 * irq_chip_ack_parent - Acknowledge the parent interrupt
 946 * @data:	Pointer to interrupt specific data
 947 */
 948void irq_chip_ack_parent(struct irq_data *data)
 949{
 950	data = data->parent_data;
 951	data->chip->irq_ack(data);
 952}
 953EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
 954
 955/**
 956 * irq_chip_mask_parent - Mask the parent interrupt
 957 * @data:	Pointer to interrupt specific data
 958 */
 959void irq_chip_mask_parent(struct irq_data *data)
 960{
 961	data = data->parent_data;
 962	data->chip->irq_mask(data);
 963}
 964EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
 965
 966/**
 967 * irq_chip_unmask_parent - Unmask the parent interrupt
 968 * @data:	Pointer to interrupt specific data
 969 */
 970void irq_chip_unmask_parent(struct irq_data *data)
 971{
 972	data = data->parent_data;
 973	data->chip->irq_unmask(data);
 974}
 975EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
 976
 977/**
 978 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 979 * @data:	Pointer to interrupt specific data
 980 */
 981void irq_chip_eoi_parent(struct irq_data *data)
 982{
 983	data = data->parent_data;
 984	data->chip->irq_eoi(data);
 985}
 986EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
 987
 988/**
 989 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 990 * @data:	Pointer to interrupt specific data
 991 * @dest:	The affinity mask to set
 992 * @force:	Flag to enforce setting (disable online checks)
 993 *
 994 * Conditional, as the underlying parent chip might not implement it.
 995 */
 996int irq_chip_set_affinity_parent(struct irq_data *data,
 997				 const struct cpumask *dest, bool force)
 998{
 999	data = data->parent_data;
1000	if (data->chip->irq_set_affinity)
1001		return data->chip->irq_set_affinity(data, dest, force);
1002
1003	return -ENOSYS;
1004}
1005
1006/**
1007 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1008 * @data:	Pointer to interrupt specific data
1009 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1010 *
1011 * Conditional, as the underlying parent chip might not implement it.
1012 */
1013int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1014{
1015	data = data->parent_data;
1016
1017	if (data->chip->irq_set_type)
1018		return data->chip->irq_set_type(data, type);
1019
1020	return -ENOSYS;
1021}
1022EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1023
1024/**
1025 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1026 * @data:	Pointer to interrupt specific data
1027 *
1028 * Iterate through the domain hierarchy of the interrupt and check
1029 * whether a hw retrigger function exists. If yes, invoke it.
1030 */
1031int irq_chip_retrigger_hierarchy(struct irq_data *data)
1032{
1033	for (data = data->parent_data; data; data = data->parent_data)
1034		if (data->chip && data->chip->irq_retrigger)
1035			return data->chip->irq_retrigger(data);
1036
1037	return 0;
1038}
1039
1040/**
1041 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1042 * @data:	Pointer to interrupt specific data
1043 * @vcpu_info:	The vcpu affinity information
1044 */
1045int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1046{
1047	data = data->parent_data;
1048	if (data->chip->irq_set_vcpu_affinity)
1049		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1050
1051	return -ENOSYS;
1052}
1053
1054/**
1055 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1056 * @data:	Pointer to interrupt specific data
1057 * @on:		Whether to set or reset the wake-up capability of this irq
1058 *
1059 * Conditional, as the underlying parent chip might not implement it.
1060 */
1061int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1062{
1063	data = data->parent_data;
1064	if (data->chip->irq_set_wake)
1065		return data->chip->irq_set_wake(data, on);
1066
1067	return -ENOSYS;
1068}
1069#endif
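/*
 * Example (sketch, not part of chip.c): a chip at the bottom of a
 * hierarchical domain (e.g. an MSI or GPIO child domain) can simply
 * delegate its flow callbacks to the parent with the helpers above.
 * "foo-msi" is an illustrative name.
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "foo-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_ack		= irq_chip_ack_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */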
1070
1071/**
 1072 * irq_chip_compose_msi_msg - Compose an MSI message for an irq chip
1073 * @data:	Pointer to interrupt specific data
1074 * @msg:	Pointer to the MSI message
1075 *
1076 * For hierarchical domains we find the first chip in the hierarchy
1077 * which implements the irq_compose_msi_msg callback. For non
1078 * hierarchical we use the top level chip.
1079 */
1080int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1081{
1082	struct irq_data *pos = NULL;
1083
1084#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1085	for (; data; data = data->parent_data)
1086#endif
1087		if (data->chip && data->chip->irq_compose_msi_msg)
1088			pos = data;
1089	if (!pos)
1090		return -ENOSYS;
1091
1092	pos->chip->irq_compose_msi_msg(pos, msg);
1093
1094	return 0;
1095}
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   4 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
   5 *
   6 * This file contains the core interrupt handling code, for irq-chip based
   7 * architectures. Detailed information is available in
   8 * Documentation/core-api/genericirq.rst
   9 */
  10
  11#include <linux/irq.h>
  12#include <linux/msi.h>
  13#include <linux/module.h>
  14#include <linux/interrupt.h>
  15#include <linux/kernel_stat.h>
  16#include <linux/irqdomain.h>
  17
  18#include <trace/events/irq.h>
  19
  20#include "internals.h"
  21
  22static irqreturn_t bad_chained_irq(int irq, void *dev_id)
  23{
  24	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
  25	return IRQ_NONE;
  26}
  27
  28/*
  29 * Chained handlers should never call action on their IRQ. This default
   30 * action will emit a warning if such a thing happens.
  31 */
  32struct irqaction chained_action = {
  33	.handler = bad_chained_irq,
  34};
  35
  36/**
  37 *	irq_set_chip - set the irq chip for an irq
  38 *	@irq:	irq number
  39 *	@chip:	pointer to irq chip description structure
  40 */
  41int irq_set_chip(unsigned int irq, const struct irq_chip *chip)
  42{
  43	unsigned long flags;
  44	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  45
  46	if (!desc)
  47		return -EINVAL;
  48
  49	desc->irq_data.chip = (struct irq_chip *)(chip ?: &no_irq_chip);
  50	irq_put_desc_unlock(desc, flags);
  51	/*
  52	 * For !CONFIG_SPARSE_IRQ make the irq show up in
  53	 * allocated_irqs.
  54	 */
  55	irq_mark_irq(irq);
  56	return 0;
  57}
  58EXPORT_SYMBOL(irq_set_chip);
  59
  60/**
  61 *	irq_set_irq_type - set the irq trigger type for an irq
  62 *	@irq:	irq number
  63 *	@type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
  64 */
  65int irq_set_irq_type(unsigned int irq, unsigned int type)
  66{
  67	unsigned long flags;
  68	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
  69	int ret = 0;
  70
  71	if (!desc)
  72		return -EINVAL;
  73
  74	ret = __irq_set_trigger(desc, type);
  75	irq_put_desc_busunlock(desc, flags);
  76	return ret;
  77}
  78EXPORT_SYMBOL(irq_set_irq_type);
  79
  80/**
  81 *	irq_set_handler_data - set irq handler data for an irq
  82 *	@irq:	Interrupt number
  83 *	@data:	Pointer to interrupt specific data
  84 *
  85 *	Set the hardware irq controller data for an irq
  86 */
  87int irq_set_handler_data(unsigned int irq, void *data)
  88{
  89	unsigned long flags;
  90	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
  91
  92	if (!desc)
  93		return -EINVAL;
  94	desc->irq_common_data.handler_data = data;
  95	irq_put_desc_unlock(desc, flags);
  96	return 0;
  97}
  98EXPORT_SYMBOL(irq_set_handler_data);
  99
 100/**
 101 *	irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 102 *	@irq_base:	Interrupt number base
 103 *	@irq_offset:	Interrupt number offset
 104 *	@entry:		Pointer to MSI descriptor data
 105 *
 106 *	Set the MSI descriptor entry for an irq at offset
 107 */
 108int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
 109			 struct msi_desc *entry)
 110{
 111	unsigned long flags;
 112	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 113
 114	if (!desc)
 115		return -EINVAL;
 116	desc->irq_common_data.msi_desc = entry;
 117	if (entry && !irq_offset)
 118		entry->irq = irq_base;
 119	irq_put_desc_unlock(desc, flags);
 120	return 0;
 121}
 122
 123/**
 124 *	irq_set_msi_desc - set MSI descriptor data for an irq
 125 *	@irq:	Interrupt number
 126 *	@entry:	Pointer to MSI descriptor data
 127 *
 128 *	Set the MSI descriptor entry for an irq
 129 */
 130int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
 131{
 132	return irq_set_msi_desc_off(irq, 0, entry);
 133}
 134
 135/**
 136 *	irq_set_chip_data - set irq chip data for an irq
 137 *	@irq:	Interrupt number
 138 *	@data:	Pointer to chip specific data
 139 *
 140 *	Set the hardware irq chip data for an irq
 141 */
 142int irq_set_chip_data(unsigned int irq, void *data)
 143{
 144	unsigned long flags;
 145	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 146
 147	if (!desc)
 148		return -EINVAL;
 149	desc->irq_data.chip_data = data;
 150	irq_put_desc_unlock(desc, flags);
 151	return 0;
 152}
 153EXPORT_SYMBOL(irq_set_chip_data);
 154
 155struct irq_data *irq_get_irq_data(unsigned int irq)
 156{
 157	struct irq_desc *desc = irq_to_desc(irq);
 158
 159	return desc ? &desc->irq_data : NULL;
 160}
 161EXPORT_SYMBOL_GPL(irq_get_irq_data);
 162
 163static void irq_state_clr_disabled(struct irq_desc *desc)
 164{
 165	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
 166}
 167
 168static void irq_state_clr_masked(struct irq_desc *desc)
 169{
 170	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
 171}
 172
 173static void irq_state_clr_started(struct irq_desc *desc)
 174{
 175	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
 176}
 177
 178static void irq_state_set_started(struct irq_desc *desc)
 179{
 180	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
 181}
 182
 183enum {
 184	IRQ_STARTUP_NORMAL,
 185	IRQ_STARTUP_MANAGED,
 186	IRQ_STARTUP_ABORT,
 187};
 188
 189#ifdef CONFIG_SMP
 190static int
 191__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
 192		      bool force)
 193{
 194	struct irq_data *d = irq_desc_get_irq_data(desc);
 195
 196	if (!irqd_affinity_is_managed(d))
 197		return IRQ_STARTUP_NORMAL;
 198
 199	irqd_clr_managed_shutdown(d);
 200
 201	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
 202		/*
 203		 * Catch code which fiddles with enable_irq() on a managed
 204		 * and potentially shutdown IRQ. Chained interrupt
 205		 * installment or irq auto probing should not happen on
 206		 * managed irqs either.
 207		 */
 208		if (WARN_ON_ONCE(force))
 209			return IRQ_STARTUP_ABORT;
 210		/*
 211		 * The interrupt was requested, but there is no online CPU
  212		 * in its affinity mask. Put it into managed shutdown
 213		 * state and let the cpu hotplug mechanism start it up once
 214		 * a CPU in the mask becomes available.
 215		 */
 216		return IRQ_STARTUP_ABORT;
 217	}
 218	/*
 219	 * Managed interrupts have reserved resources, so this should not
 220	 * happen.
 221	 */
 222	if (WARN_ON(irq_domain_activate_irq(d, false)))
 223		return IRQ_STARTUP_ABORT;
 224	return IRQ_STARTUP_MANAGED;
 225}
 226#else
 227static __always_inline int
 228__irq_startup_managed(struct irq_desc *desc, const struct cpumask *aff,
 229		      bool force)
 230{
 231	return IRQ_STARTUP_NORMAL;
 232}
 233#endif
 234
 235static int __irq_startup(struct irq_desc *desc)
 236{
 237	struct irq_data *d = irq_desc_get_irq_data(desc);
 238	int ret = 0;
 239
 240	/* Warn if this interrupt is not activated but try nevertheless */
 241	WARN_ON_ONCE(!irqd_is_activated(d));
 242
 243	if (d->chip->irq_startup) {
 244		ret = d->chip->irq_startup(d);
 245		irq_state_clr_disabled(desc);
 246		irq_state_clr_masked(desc);
 247	} else {
 248		irq_enable(desc);
 249	}
 250	irq_state_set_started(desc);
 251	return ret;
 252}
 253
 254int irq_startup(struct irq_desc *desc, bool resend, bool force)
 255{
 256	struct irq_data *d = irq_desc_get_irq_data(desc);
 257	const struct cpumask *aff = irq_data_get_affinity_mask(d);
 258	int ret = 0;
 259
 260	desc->depth = 0;
 261
 262	if (irqd_is_started(d)) {
 263		irq_enable(desc);
 264	} else {
 265		switch (__irq_startup_managed(desc, aff, force)) {
 266		case IRQ_STARTUP_NORMAL:
 267			if (d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP)
 268				irq_setup_affinity(desc);
 269			ret = __irq_startup(desc);
 270			if (!(d->chip->flags & IRQCHIP_AFFINITY_PRE_STARTUP))
 271				irq_setup_affinity(desc);
 272			break;
 273		case IRQ_STARTUP_MANAGED:
 274			irq_do_set_affinity(d, aff, false);
 275			ret = __irq_startup(desc);
 276			break;
 277		case IRQ_STARTUP_ABORT:
 278			irqd_set_managed_shutdown(d);
 279			return 0;
 280		}
 281	}
 282	if (resend)
 283		check_irq_resend(desc, false);
 284
 285	return ret;
 286}
 287
 288int irq_activate(struct irq_desc *desc)
 289{
 290	struct irq_data *d = irq_desc_get_irq_data(desc);
 291
 292	if (!irqd_affinity_is_managed(d))
 293		return irq_domain_activate_irq(d, false);
 294	return 0;
 295}
 296
 297int irq_activate_and_startup(struct irq_desc *desc, bool resend)
 298{
 299	if (WARN_ON(irq_activate(desc)))
 300		return 0;
 301	return irq_startup(desc, resend, IRQ_START_FORCE);
 302}
 303
 304static void __irq_disable(struct irq_desc *desc, bool mask);
 305
 306void irq_shutdown(struct irq_desc *desc)
 307{
 308	if (irqd_is_started(&desc->irq_data)) {
 309		desc->depth = 1;
 310		if (desc->irq_data.chip->irq_shutdown) {
 311			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
 312			irq_state_set_disabled(desc);
 313			irq_state_set_masked(desc);
 314		} else {
 315			__irq_disable(desc, true);
 316		}
 317		irq_state_clr_started(desc);
 318	}
 319}
 320
 321
 322void irq_shutdown_and_deactivate(struct irq_desc *desc)
 323{
 324	irq_shutdown(desc);
 325	/*
 326	 * This must be called even if the interrupt was never started up,
 327	 * because the activation can happen before the interrupt is
  328	 * available for request/startup. It has its own state tracking so
 329	 * it's safe to call it unconditionally.
 330	 */
 331	irq_domain_deactivate_irq(&desc->irq_data);
 332}
 333
 334void irq_enable(struct irq_desc *desc)
 335{
 336	if (!irqd_irq_disabled(&desc->irq_data)) {
 337		unmask_irq(desc);
 338	} else {
 339		irq_state_clr_disabled(desc);
 340		if (desc->irq_data.chip->irq_enable) {
 341			desc->irq_data.chip->irq_enable(&desc->irq_data);
 342			irq_state_clr_masked(desc);
 343		} else {
 344			unmask_irq(desc);
 345		}
 346	}
 347}
 348
 349static void __irq_disable(struct irq_desc *desc, bool mask)
 350{
 351	if (irqd_irq_disabled(&desc->irq_data)) {
 352		if (mask)
 353			mask_irq(desc);
 354	} else {
 355		irq_state_set_disabled(desc);
 356		if (desc->irq_data.chip->irq_disable) {
 357			desc->irq_data.chip->irq_disable(&desc->irq_data);
 358			irq_state_set_masked(desc);
 359		} else if (mask) {
 360			mask_irq(desc);
 361		}
 362	}
 363}
 364
 365/**
 366 * irq_disable - Mark interrupt disabled
 367 * @desc:	irq descriptor which should be disabled
 368 *
 369 * If the chip does not implement the irq_disable callback, we
 370 * use a lazy disable approach. That means we mark the interrupt
 371 * disabled, but leave the hardware unmasked. That's an
 372 * optimization because we avoid the hardware access for the
 373 * common case where no interrupt happens after we marked it
 374 * disabled. If an interrupt happens, then the interrupt flow
 375 * handler masks the line at the hardware level and marks it
 376 * pending.
 377 *
 378 * If the interrupt chip does not implement the irq_disable callback,
 379 * a driver can disable the lazy approach for a particular irq line by
 380 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 381 * be used for devices which cannot disable the interrupt at the
 382 * device level under certain circumstances and have to use
 383 * disable_irq[_nosync] instead.
 384 */
 385void irq_disable(struct irq_desc *desc)
 386{
 387	__irq_disable(desc, irq_settings_disable_unlazy(desc));
 388}
 389
 390void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
 391{
 392	if (desc->irq_data.chip->irq_enable)
 393		desc->irq_data.chip->irq_enable(&desc->irq_data);
 394	else
 395		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 396	cpumask_set_cpu(cpu, desc->percpu_enabled);
 397}
 398
 399void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
 400{
 401	if (desc->irq_data.chip->irq_disable)
 402		desc->irq_data.chip->irq_disable(&desc->irq_data);
 403	else
 404		desc->irq_data.chip->irq_mask(&desc->irq_data);
 405	cpumask_clear_cpu(cpu, desc->percpu_enabled);
 406}
 407
 408static inline void mask_ack_irq(struct irq_desc *desc)
 409{
 410	if (desc->irq_data.chip->irq_mask_ack) {
 411		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
 412		irq_state_set_masked(desc);
 413	} else {
 414		mask_irq(desc);
 415		if (desc->irq_data.chip->irq_ack)
 416			desc->irq_data.chip->irq_ack(&desc->irq_data);
 417	}
 418}
 419
 420void mask_irq(struct irq_desc *desc)
 421{
 422	if (irqd_irq_masked(&desc->irq_data))
 423		return;
 424
 425	if (desc->irq_data.chip->irq_mask) {
 426		desc->irq_data.chip->irq_mask(&desc->irq_data);
 427		irq_state_set_masked(desc);
 428	}
 429}
 430
 431void unmask_irq(struct irq_desc *desc)
 432{
 433	if (!irqd_irq_masked(&desc->irq_data))
 434		return;
 435
 436	if (desc->irq_data.chip->irq_unmask) {
 437		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 438		irq_state_clr_masked(desc);
 439	}
 440}
 441
 442void unmask_threaded_irq(struct irq_desc *desc)
 443{
 444	struct irq_chip *chip = desc->irq_data.chip;
 445
 446	if (chip->flags & IRQCHIP_EOI_THREADED)
 447		chip->irq_eoi(&desc->irq_data);
 448
 449	unmask_irq(desc);
 450}
 451
 452/*
  453 *	handle_nested_irq - Handle a nested irq from an irq thread
 454 *	@irq:	the interrupt number
 455 *
 456 *	Handle interrupts which are nested into a threaded interrupt
 457 *	handler. The handler function is called inside the calling
 458 *	threads context.
 459 */
 460void handle_nested_irq(unsigned int irq)
 461{
 462	struct irq_desc *desc = irq_to_desc(irq);
 463	struct irqaction *action;
 464	irqreturn_t action_ret;
 465
 466	might_sleep();
 467
 468	raw_spin_lock_irq(&desc->lock);
 469
 470	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 471
 472	action = desc->action;
 473	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
 474		desc->istate |= IRQS_PENDING;
 475		goto out_unlock;
 476	}
 477
 478	kstat_incr_irqs_this_cpu(desc);
 479	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 480	raw_spin_unlock_irq(&desc->lock);
 481
 482	action_ret = IRQ_NONE;
 483	for_each_action_of_desc(desc, action)
 484		action_ret |= action->thread_fn(action->irq, action->dev_id);
 485
 486	if (!irq_settings_no_debug(desc))
 487		note_interrupt(desc, action_ret);
 488
 489	raw_spin_lock_irq(&desc->lock);
 490	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 491
 492out_unlock:
 493	raw_spin_unlock_irq(&desc->lock);
 494}
 495EXPORT_SYMBOL_GPL(handle_nested_irq);
 496
 497static bool irq_check_poll(struct irq_desc *desc)
 498{
 499	if (!(desc->istate & IRQS_POLL_INPROGRESS))
 500		return false;
 501	return irq_wait_for_poll(desc);
 502}
 503
 504static bool irq_may_run(struct irq_desc *desc)
 505{
 506	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
 507
 508	/*
 509	 * If the interrupt is not in progress and is not an armed
 510	 * wakeup interrupt, proceed.
 511	 */
 512	if (!irqd_has_set(&desc->irq_data, mask))
 513		return true;
 514
 515	/*
 516	 * If the interrupt is an armed wakeup source, mark it pending
 517	 * and suspended, disable it and notify the pm core about the
 518	 * event.
 519	 */
 520	if (irq_pm_check_wakeup(desc))
 521		return false;
 522
 523	/*
 524	 * Handle a potential concurrent poll on a different core.
 525	 */
 526	return irq_check_poll(desc);
 527}
 528
 529/**
 530 *	handle_simple_irq - Simple and software-decoded IRQs.
 531 *	@desc:	the interrupt description structure for this irq
 532 *
 533 *	Simple interrupts are either sent from a demultiplexing interrupt
 534 *	handler or come from hardware, where no interrupt hardware control
 535 *	is necessary.
 536 *
 537 *	Note: The caller is expected to handle the ack, clear, mask and
 538 *	unmask issues if necessary.
 539 */
 540void handle_simple_irq(struct irq_desc *desc)
 541{
 542	raw_spin_lock(&desc->lock);
 543
 544	if (!irq_may_run(desc))
 545		goto out_unlock;
 546
 547	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 548
 549	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 550		desc->istate |= IRQS_PENDING;
 551		goto out_unlock;
 552	}
 553
 554	kstat_incr_irqs_this_cpu(desc);
 555	handle_irq_event(desc);
 556
 557out_unlock:
 558	raw_spin_unlock(&desc->lock);
 559}
 560EXPORT_SYMBOL_GPL(handle_simple_irq);
 561
 562/**
 563 *	handle_untracked_irq - Simple and software-decoded IRQs.
 564 *	@desc:	the interrupt description structure for this irq
 565 *
 566 *	Untracked interrupts are sent from a demultiplexing interrupt
  567 *	handler when the demultiplexer does not know which device in its
  568 *	multiplexed irq domain generated the interrupt. IRQs handled
 569 *	through here are not subjected to stats tracking, randomness, or
 570 *	spurious interrupt detection.
 571 *
 572 *	Note: Like handle_simple_irq, the caller is expected to handle
 573 *	the ack, clear, mask and unmask issues if necessary.
 574 */
 575void handle_untracked_irq(struct irq_desc *desc)
 576{
 577	raw_spin_lock(&desc->lock);
 578
 579	if (!irq_may_run(desc))
 580		goto out_unlock;
 581
 582	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 583
 584	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 585		desc->istate |= IRQS_PENDING;
 586		goto out_unlock;
 587	}
 588
 589	desc->istate &= ~IRQS_PENDING;
 590	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 591	raw_spin_unlock(&desc->lock);
 592
 593	__handle_irq_event_percpu(desc);
 594
 595	raw_spin_lock(&desc->lock);
 596	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
 597
 598out_unlock:
 599	raw_spin_unlock(&desc->lock);
 600}
 601EXPORT_SYMBOL_GPL(handle_untracked_irq);
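/*
 * Example (sketch, not part of chip.c): like handle_simple_irq(), untracked
 * children of a software demultiplexer can be wired up with the dummy chip;
 * the difference is that they stay out of the interrupt statistics and the
 * spurious-interrupt detection:
 *
 *	irq_set_chip_and_handler(child_virq, &dummy_irq_chip,
 *				 handle_untracked_irq);
 */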
 602
 603/*
 604 * Called unconditionally from handle_level_irq() and only for oneshot
 605 * interrupts from handle_fasteoi_irq()
 606 */
 607static void cond_unmask_irq(struct irq_desc *desc)
 608{
 609	/*
 610	 * We need to unmask in the following cases:
 611	 * - Standard level irq (IRQF_ONESHOT is not set)
 612	 * - Oneshot irq which did not wake the thread (caused by a
 613	 *   spurious interrupt or a primary handler handling it
 614	 *   completely).
 615	 */
 616	if (!irqd_irq_disabled(&desc->irq_data) &&
 617	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
 618		unmask_irq(desc);
 619}
 620
 621/**
 622 *	handle_level_irq - Level type irq handler
 623 *	@desc:	the interrupt description structure for this irq
 624 *
 625 *	Level type interrupts are active as long as the hardware line has
  626 *	the active level. This may require masking the interrupt and unmasking
 627 *	it after the associated handler has acknowledged the device, so the
 628 *	interrupt line is back to inactive.
 629 */
 630void handle_level_irq(struct irq_desc *desc)
 631{
 632	raw_spin_lock(&desc->lock);
 633	mask_ack_irq(desc);
 634
 635	if (!irq_may_run(desc))
 636		goto out_unlock;
 637
 638	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 639
 640	/*
  641	 * If it's disabled or no action is available
 642	 * keep it masked and get out of here
 643	 */
 644	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 645		desc->istate |= IRQS_PENDING;
 646		goto out_unlock;
 647	}
 648
 649	kstat_incr_irqs_this_cpu(desc);
 650	handle_irq_event(desc);
 651
 652	cond_unmask_irq(desc);
 653
 654out_unlock:
 655	raw_spin_unlock(&desc->lock);
 656}
 657EXPORT_SYMBOL_GPL(handle_level_irq);
 658
 659static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
 660{
 661	if (!(desc->istate & IRQS_ONESHOT)) {
 662		chip->irq_eoi(&desc->irq_data);
 663		return;
 664	}
 665	/*
 666	 * We need to unmask in the following cases:
 667	 * - Oneshot irq which did not wake the thread (caused by a
 668	 *   spurious interrupt or a primary handler handling it
 669	 *   completely).
 670	 */
 671	if (!irqd_irq_disabled(&desc->irq_data) &&
 672	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
 673		chip->irq_eoi(&desc->irq_data);
 674		unmask_irq(desc);
 675	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
 676		chip->irq_eoi(&desc->irq_data);
 677	}
 678}
 679
 680/**
 681 *	handle_fasteoi_irq - irq handler for transparent controllers
 682 *	@desc:	the interrupt description structure for this irq
 683 *
 684 *	Only a single callback will be issued to the chip: an ->eoi()
 685 *	call when the interrupt has been serviced. This enables support
 686 *	for modern forms of interrupt handlers, which handle the flow
 687 *	details in hardware, transparently.
 688 */
 689void handle_fasteoi_irq(struct irq_desc *desc)
 690{
 691	struct irq_chip *chip = desc->irq_data.chip;
 692
 693	raw_spin_lock(&desc->lock);
 694
 695	if (!irq_may_run(desc))
 696		goto out;
 697
 698	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 699
 700	/*
  701	 * If it's disabled or no action is available
 702	 * then mask it and get out of here:
 703	 */
 704	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
 705		desc->istate |= IRQS_PENDING;
 706		mask_irq(desc);
 707		goto out;
 708	}
 709
 710	kstat_incr_irqs_this_cpu(desc);
 711	if (desc->istate & IRQS_ONESHOT)
 712		mask_irq(desc);
 713
 714	handle_irq_event(desc);
 715
 716	cond_unmask_eoi_irq(desc, chip);
 717
 718	raw_spin_unlock(&desc->lock);
 719	return;
 720out:
 721	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
 722		chip->irq_eoi(&desc->irq_data);
 723	raw_spin_unlock(&desc->lock);
 724}
 725EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
 726
 727/**
 728 *	handle_fasteoi_nmi - irq handler for NMI interrupt lines
 729 *	@desc:	the interrupt description structure for this irq
 730 *
 731 *	A simple NMI-safe handler, considering the restrictions
 732 *	from request_nmi.
 733 *
 734 *	Only a single callback will be issued to the chip: an ->eoi()
 735 *	call when the interrupt has been serviced. This enables support
 736 *	for modern forms of interrupt handlers, which handle the flow
 737 *	details in hardware, transparently.
 738 */
 739void handle_fasteoi_nmi(struct irq_desc *desc)
 740{
 741	struct irq_chip *chip = irq_desc_get_chip(desc);
 742	struct irqaction *action = desc->action;
 743	unsigned int irq = irq_desc_get_irq(desc);
 744	irqreturn_t res;
 745
 746	__kstat_incr_irqs_this_cpu(desc);
 747
 748	trace_irq_handler_entry(irq, action);
 749	/*
 750	 * NMIs cannot be shared, there is only one action.
 751	 */
 752	res = action->handler(irq, action->dev_id);
 753	trace_irq_handler_exit(irq, action, res);
 754
 755	if (chip->irq_eoi)
 756		chip->irq_eoi(&desc->irq_data);
 757}
 758EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
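/*
 * Example (sketch, not part of chip.c): a line flowing through
 * handle_fasteoi_nmi() is requested with the NMI API rather than
 * request_irq(); the underlying irqchip has to advertise NMI support
 * (IRQCHIP_SUPPORTS_NMI) for the request to succeed. Names are illustrative.
 *
 *	err = request_nmi(irq, foo_nmi_handler, 0, "foo-nmi", foo);
 *	if (!err)
 *		enable_nmi(irq);
 */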
 759
 760/**
 761 *	handle_edge_irq - edge type IRQ handler
 762 *	@desc:	the interrupt description structure for this irq
 763 *
 764 *	Interrupt occurs on the falling and/or rising edge of a hardware
 765 *	signal. The occurrence is latched into the irq controller hardware
 766 *	and must be acked in order to be reenabled. After the ack another
 767 *	interrupt can happen on the same source even before the first one
 768 *	is handled by the associated event handler. If this happens it
 769 *	might be necessary to disable (mask) the interrupt depending on the
  770 *	controller hardware. This requires reenabling the interrupt inside
 771 *	of the loop which handles the interrupts which have arrived while
 772 *	the handler was running. If all pending interrupts are handled, the
 773 *	loop is left.
 774 */
 775void handle_edge_irq(struct irq_desc *desc)
 776{
 777	raw_spin_lock(&desc->lock);
 778
 779	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 780
 781	if (!irq_may_run(desc)) {
 782		desc->istate |= IRQS_PENDING;
 783		mask_ack_irq(desc);
 784		goto out_unlock;
 785	}
 786
 787	/*
  788	 * If it's disabled or no action is available then mask it and get
 789	 * out of here.
 790	 */
 791	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
 792		desc->istate |= IRQS_PENDING;
 793		mask_ack_irq(desc);
 794		goto out_unlock;
 795	}
 796
 797	kstat_incr_irqs_this_cpu(desc);
 798
 799	/* Start handling the irq */
 800	desc->irq_data.chip->irq_ack(&desc->irq_data);
 801
 802	do {
 803		if (unlikely(!desc->action)) {
 804			mask_irq(desc);
 805			goto out_unlock;
 806		}
 807
 808		/*
 809		 * When another irq arrived while we were handling
 810		 * one, we could have masked the irq.
  811		 * Reenable it, if it was not disabled in the meantime.
 812		 */
 813		if (unlikely(desc->istate & IRQS_PENDING)) {
 814			if (!irqd_irq_disabled(&desc->irq_data) &&
 815			    irqd_irq_masked(&desc->irq_data))
 816				unmask_irq(desc);
 817		}
 818
 819		handle_irq_event(desc);
 820
 821	} while ((desc->istate & IRQS_PENDING) &&
 822		 !irqd_irq_disabled(&desc->irq_data));
 823
 824out_unlock:
 825	raw_spin_unlock(&desc->lock);
 826}
 827EXPORT_SYMBOL(handle_edge_irq);
 828
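/*
 * Illustrative sketch (not part of this file): an irqchip driver typically
 * installs handle_edge_irq from its irq_domain ->map() callback (or from
 * ->irq_set_type() once the trigger becomes edge). "my_edge_chip" and
 * "my_domain_map" are hypothetical.
 */
static struct irq_chip my_edge_chip = {
	.name		= "my-edge",
	/* ->irq_ack, ->irq_mask and ->irq_unmask would point at real callbacks */
};

static int my_domain_map(struct irq_domain *d, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(virq, &my_edge_chip, handle_edge_irq);
	irq_set_chip_data(virq, d->host_data);
	return 0;
}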
 829#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
 830/**
 831 *	handle_edge_eoi_irq - edge eoi type IRQ handler
 832 *	@desc:	the interrupt description structure for this irq
 833 *
 834 * Similar to handle_edge_irq above, but using eoi and without the
 835 * mask/unmask logic.
 836 */
 837void handle_edge_eoi_irq(struct irq_desc *desc)
 838{
 839	struct irq_chip *chip = irq_desc_get_chip(desc);
 840
 841	raw_spin_lock(&desc->lock);
 842
 843	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 844
 845	if (!irq_may_run(desc)) {
 846		desc->istate |= IRQS_PENDING;
 847		goto out_eoi;
 848	}
 849
 850	/*
 851	 * If it's disabled or no action is available, then mark it
 852	 * pending and get out of here.
 853	 */
 854	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
 855		desc->istate |= IRQS_PENDING;
 856		goto out_eoi;
 857	}
 858
 859	kstat_incr_irqs_this_cpu(desc);
 860
 861	do {
 862		if (unlikely(!desc->action))
 863			goto out_eoi;
 864
 865		handle_irq_event(desc);
 866
 867	} while ((desc->istate & IRQS_PENDING) &&
 868		 !irqd_irq_disabled(&desc->irq_data));
 869
 870out_eoi:
 871	chip->irq_eoi(&desc->irq_data);
 872	raw_spin_unlock(&desc->lock);
 873}
 874#endif
 875
 876/**
 877 *	handle_percpu_irq - Per CPU local irq handler
 878 *	@desc:	the interrupt description structure for this irq
 879 *
 880 *	Per CPU interrupts on SMP machines without locking requirements
 881 */
 882void handle_percpu_irq(struct irq_desc *desc)
 883{
 884	struct irq_chip *chip = irq_desc_get_chip(desc);
 885
 886	/*
 887	 * PER CPU interrupts are not serialized. Do not touch
 888	 * desc->tot_count.
 889	 */
 890	__kstat_incr_irqs_this_cpu(desc);
 891
 892	if (chip->irq_ack)
 893		chip->irq_ack(&desc->irq_data);
 894
 895	handle_irq_event_percpu(desc);
 896
 897	if (chip->irq_eoi)
 898		chip->irq_eoi(&desc->irq_data);
 899}
 900
 901/**
 902 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 903 * @desc:	the interrupt description structure for this irq
 904 *
 905 * Per CPU interrupts on SMP machines without locking requirements. Same as
 906 * handle_percpu_irq() above but with the following extras:
 907 *
 908 * action->percpu_dev_id is a pointer to per-CPU variables which
 909 * contain the real device id for the CPU on which this handler is
 910 * called.
 911 */
 912void handle_percpu_devid_irq(struct irq_desc *desc)
 913{
 914	struct irq_chip *chip = irq_desc_get_chip(desc);
 915	struct irqaction *action = desc->action;
 916	unsigned int irq = irq_desc_get_irq(desc);
 917	irqreturn_t res;
 918
 919	/*
 920	 * PER CPU interrupts are not serialized. Do not touch
 921	 * desc->tot_count.
 922	 */
 923	__kstat_incr_irqs_this_cpu(desc);
 924
 925	if (chip->irq_ack)
 926		chip->irq_ack(&desc->irq_data);
 927
 928	if (likely(action)) {
 929		trace_irq_handler_entry(irq, action);
 930		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
 931		trace_irq_handler_exit(irq, action, res);
 932	} else {
 933		unsigned int cpu = smp_processor_id();
 934		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
 935
 936		if (enabled)
 937			irq_percpu_disable(desc, cpu);
 938
 939		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
 940			    enabled ? " and unmasked" : "", irq, cpu);
 941	}
 942
 943	if (chip->irq_eoi)
 944		chip->irq_eoi(&desc->irq_data);
 945}
 946
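/*
 * Illustrative sketch (not part of this file): the driver-side pairing for
 * handle_percpu_devid_irq. The irq must have been marked per-CPU-devid
 * (irq_set_percpu_devid()) by the irqchip code; the driver then passes a
 * per-CPU cookie to request_percpu_irq() and enables the line on each CPU
 * via enable_percpu_irq(). All "my_*" names are hypothetical.
 */
static DEFINE_PER_CPU(struct my_timer, my_timer_percpu);

static irqreturn_t my_timer_handler(int irq, void *dev_id)
{
	struct my_timer *t = dev_id;	/* this CPU's instance */

	return IRQ_HANDLED;
}

static int my_timer_init(unsigned int irq)
{
	return request_percpu_irq(irq, my_timer_handler, "my-timer",
				  &my_timer_percpu);
}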
 947/**
 948 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
 949 *				     dev ids
 950 * @desc:	the interrupt description structure for this irq
 951 *
 952 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
 953 * as a percpu pointer.
 954 */
 955void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
 956{
 957	struct irq_chip *chip = irq_desc_get_chip(desc);
 958	struct irqaction *action = desc->action;
 959	unsigned int irq = irq_desc_get_irq(desc);
 960	irqreturn_t res;
 961
 962	__kstat_incr_irqs_this_cpu(desc);
 963
 964	trace_irq_handler_entry(irq, action);
 965	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
 966	trace_irq_handler_exit(irq, action, res);
 967
 968	if (chip->irq_eoi)
 969		chip->irq_eoi(&desc->irq_data);
 970}
 971
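/*
 * Illustrative sketch (not part of this file): per-CPU NMIs (e.g. a PMU
 * overflow line) are requested with request_percpu_nmi(), which takes a
 * per-CPU dev_id just like request_percpu_irq(). All "my_*" names are
 * hypothetical.
 */
static DEFINE_PER_CPU(struct my_pmu, my_pmu_percpu);

static irqreturn_t my_pmu_nmi_handler(int irq, void *dev_id)
{
	struct my_pmu *p = dev_id;	/* this CPU's instance */

	return IRQ_HANDLED;
}

static int my_pmu_setup_nmi(unsigned int irq)
{
	return request_percpu_nmi(irq, my_pmu_nmi_handler, "my-pmu",
				  &my_pmu_percpu);
}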
 972static void
 973__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 974		     int is_chained, const char *name)
 975{
 976	if (!handle) {
 977		handle = handle_bad_irq;
 978	} else {
 979		struct irq_data *irq_data = &desc->irq_data;
 980#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 981		/*
 982		 * With hierarchical domains we might run into a
 983		 * situation where the outermost chip is not yet set
 984		 * up, but the inner chips are there.  Instead of
 985		 * bailing we install the handler, but obviously we
 986		 * cannot enable/startup the interrupt at this point.
 987		 */
 988		while (irq_data) {
 989			if (irq_data->chip != &no_irq_chip)
 990				break;
 991			/*
 992			 * Bail out if the outer chip is not set up
 993			 * and the interrupt is supposed to be started
 994			 * right away.
 995			 */
 996			if (WARN_ON(is_chained))
 997				return;
 998			/* Try the parent */
 999			irq_data = irq_data->parent_data;
1000		}
1001#endif
1002		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
1003			return;
1004	}
1005
1006	/* Uninstall? */
1007	if (handle == handle_bad_irq) {
1008		if (desc->irq_data.chip != &no_irq_chip)
1009			mask_ack_irq(desc);
1010		irq_state_set_disabled(desc);
1011		if (is_chained) {
1012			desc->action = NULL;
1013			WARN_ON(irq_chip_pm_put(irq_desc_get_irq_data(desc)));
1014		}
1015		desc->depth = 1;
1016	}
1017	desc->handle_irq = handle;
1018	desc->name = name;
1019
1020	if (handle != handle_bad_irq && is_chained) {
1021		unsigned int type = irqd_get_trigger_type(&desc->irq_data);
1022
1023		/*
1024		 * We're about to start this interrupt immediately,
1025		 * hence the need to set the trigger configuration.
1026		 * But the .set_type callback may have overridden the
1027		 * flow handler, ignoring that we're dealing with a
1028		 * chained interrupt. Reset it immediately because we
1029		 * do know better.
1030		 */
1031		if (type != IRQ_TYPE_NONE) {
1032			__irq_set_trigger(desc, type);
1033			desc->handle_irq = handle;
1034		}
1035
1036		irq_settings_set_noprobe(desc);
1037		irq_settings_set_norequest(desc);
1038		irq_settings_set_nothread(desc);
1039		desc->action = &chained_action;
1040		WARN_ON(irq_chip_pm_get(irq_desc_get_irq_data(desc)));
1041		irq_activate_and_startup(desc, IRQ_RESEND);
1042	}
1043}
1044
1045void
1046__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
1047		  const char *name)
1048{
1049	unsigned long flags;
1050	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1051
1052	if (!desc)
1053		return;
1054
1055	__irq_do_set_handler(desc, handle, is_chained, name);
1056	irq_put_desc_busunlock(desc, flags);
1057}
1058EXPORT_SYMBOL_GPL(__irq_set_handler);
1059
1060void
1061irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
1062				 void *data)
1063{
1064	unsigned long flags;
1065	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
1066
1067	if (!desc)
1068		return;
1069
1070	desc->irq_common_data.handler_data = data;
1071	__irq_do_set_handler(desc, handle, 1, NULL);
1072
1073	irq_put_desc_busunlock(desc, flags);
1074}
1075EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);
1076
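/*
 * Illustrative sketch (not part of this file): a typical user of
 * irq_set_chained_handler_and_data() is a GPIO/pin controller driver that
 * demultiplexes one parent line into per-pin interrupts. All "my_*" names
 * and the pending register are hypothetical; chained_irq_enter()/exit()
 * come from <linux/irqchip/chained_irq.h>.
 */
static void my_gpio_chained_handler(struct irq_desc *desc)
{
	struct my_gpio *g = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int bit;

	chained_irq_enter(chip, desc);
	pending = readl(g->base + MY_GPIO_PENDING);	/* hypothetical register */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_domain_irq(g->domain, bit);
	chained_irq_exit(chip, desc);
}

/* At probe time the driver would then do something like:
 *	irq_set_chained_handler_and_data(parent_irq, my_gpio_chained_handler, g);
 */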
1077void
1078irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip,
1079			      irq_flow_handler_t handle, const char *name)
1080{
1081	irq_set_chip(irq, chip);
1082	__irq_set_handler(irq, handle, 0, name);
1083}
1084EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);
1085
1086void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
1087{
1088	unsigned long flags, trigger, tmp;
1089	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1090
1091	if (!desc)
1092		return;
1093
1094	/*
1095	 * Warn when a driver sets the no autoenable flag on an already
1096	 * active interrupt.
1097	 */
1098	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));
1099
1100	irq_settings_clr_and_set(desc, clr, set);
1101
1102	trigger = irqd_get_trigger_type(&desc->irq_data);
1103
1104	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
1105		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
1106	if (irq_settings_has_no_balance_set(desc))
1107		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1108	if (irq_settings_is_per_cpu(desc))
1109		irqd_set(&desc->irq_data, IRQD_PER_CPU);
1110	if (irq_settings_can_move_pcntxt(desc))
1111		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
1112	if (irq_settings_is_level(desc))
1113		irqd_set(&desc->irq_data, IRQD_LEVEL);
1114
1115	tmp = irq_settings_get_trigger_mask(desc);
1116	if (tmp != IRQ_TYPE_NONE)
1117		trigger = tmp;
1118
1119	irqd_set(&desc->irq_data, trigger);
1120
1121	irq_put_desc_unlock(desc, flags);
1122}
1123EXPORT_SYMBOL_GPL(irq_modify_status);
1124
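/*
 * Illustrative sketch (not part of this file): irq_modify_status() clears
 * and sets _IRQ_* settings in one locked operation. "my_init_wakeirq" and
 * "virq" are hypothetical; the flags come from <linux/irq.h>.
 */
static void my_init_wakeirq(unsigned int virq)
{
	/* Keep the line disabled until enable_irq(), and skip autoprobing. */
	irq_modify_status(virq, 0, IRQ_NOAUTOEN | IRQ_NOPROBE);
}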
1125#ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE
1126/**
1127 *	irq_cpu_online - Invoke all irq_cpu_online functions.
1128 *
1129 *	Iterate through all irqs and invoke the chip.irq_cpu_online()
1130 *	for each.
1131 */
1132void irq_cpu_online(void)
1133{
1134	struct irq_desc *desc;
1135	struct irq_chip *chip;
1136	unsigned long flags;
1137	unsigned int irq;
1138
1139	for_each_active_irq(irq) {
1140		desc = irq_to_desc(irq);
1141		if (!desc)
1142			continue;
1143
1144		raw_spin_lock_irqsave(&desc->lock, flags);
1145
1146		chip = irq_data_get_irq_chip(&desc->irq_data);
1147		if (chip && chip->irq_cpu_online &&
1148		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1149		     !irqd_irq_disabled(&desc->irq_data)))
1150			chip->irq_cpu_online(&desc->irq_data);
1151
1152		raw_spin_unlock_irqrestore(&desc->lock, flags);
1153	}
1154}
1155
1156/**
1157 *	irq_cpu_offline - Invoke all irq_cpu_offline functions.
1158 *
1159 *	Iterate through all irqs and invoke the chip.irq_cpu_offline()
1160 *	for each.
1161 */
1162void irq_cpu_offline(void)
1163{
1164	struct irq_desc *desc;
1165	struct irq_chip *chip;
1166	unsigned long flags;
1167	unsigned int irq;
1168
1169	for_each_active_irq(irq) {
1170		desc = irq_to_desc(irq);
1171		if (!desc)
1172			continue;
1173
1174		raw_spin_lock_irqsave(&desc->lock, flags);
1175
1176		chip = irq_data_get_irq_chip(&desc->irq_data);
1177		if (chip && chip->irq_cpu_offline &&
1178		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
1179		     !irqd_irq_disabled(&desc->irq_data)))
1180			chip->irq_cpu_offline(&desc->irq_data);
1181
1182		raw_spin_unlock_irqrestore(&desc->lock, flags);
1183	}
1184}
1185#endif
1186
1187#ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
1188
1189#ifdef CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS
1190/**
1191 *	handle_fasteoi_ack_irq - irq handler for edge hierarchy
1192 *	stacked on transparent controllers
1193 *
1194 *	@desc:	the interrupt description structure for this irq
1195 *
1196 *	Like handle_fasteoi_irq(), but for use with hierarchy where
1197 *	the irq_chip also needs to have its ->irq_ack() function
1198 *	called.
1199 */
1200void handle_fasteoi_ack_irq(struct irq_desc *desc)
1201{
1202	struct irq_chip *chip = desc->irq_data.chip;
1203
1204	raw_spin_lock(&desc->lock);
1205
1206	if (!irq_may_run(desc))
1207		goto out;
1208
1209	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1210
1211	/*
1212	 * If it's disabled or no action is available
1213	 * then mask it and get out of here:
1214	 */
1215	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1216		desc->istate |= IRQS_PENDING;
1217		mask_irq(desc);
1218		goto out;
1219	}
1220
1221	kstat_incr_irqs_this_cpu(desc);
1222	if (desc->istate & IRQS_ONESHOT)
1223		mask_irq(desc);
1224
1225	/* Start handling the irq */
1226	desc->irq_data.chip->irq_ack(&desc->irq_data);
1227
1228	handle_irq_event(desc);
1229
1230	cond_unmask_eoi_irq(desc, chip);
1231
1232	raw_spin_unlock(&desc->lock);
1233	return;
1234out:
1235	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1236		chip->irq_eoi(&desc->irq_data);
1237	raw_spin_unlock(&desc->lock);
1238}
1239EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
1240
1241/**
1242 *	handle_fasteoi_mask_irq - irq handler for level hierarchy
1243 *	stacked on transparent controllers
1244 *
1245 *	@desc:	the interrupt description structure for this irq
1246 *
1247 *	Like handle_fasteoi_irq(), but for use with hierarchy where
1248 *	the irq_chip also needs to have its ->irq_mask_ack() function
1249 *	called.
1250 */
1251void handle_fasteoi_mask_irq(struct irq_desc *desc)
1252{
1253	struct irq_chip *chip = desc->irq_data.chip;
1254
1255	raw_spin_lock(&desc->lock);
1256	mask_ack_irq(desc);
1257
1258	if (!irq_may_run(desc))
1259		goto out;
1260
1261	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
1262
1263	/*
1264	 * If it's disabled or no action is available
1265	 * then mask it and get out of here:
1266	 */
1267	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
1268		desc->istate |= IRQS_PENDING;
1269		mask_irq(desc);
1270		goto out;
1271	}
1272
1273	kstat_incr_irqs_this_cpu(desc);
1274	if (desc->istate & IRQS_ONESHOT)
1275		mask_irq(desc);
1276
1277	handle_irq_event(desc);
1278
1279	cond_unmask_eoi_irq(desc, chip);
1280
1281	raw_spin_unlock(&desc->lock);
1282	return;
1283out:
1284	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
1285		chip->irq_eoi(&desc->irq_data);
1286	raw_spin_unlock(&desc->lock);
1287}
1288EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
1289
1290#endif /* CONFIG_IRQ_FASTEOI_HIERARCHY_HANDLERS */
1291
1292/**
1293 * irq_chip_set_parent_state - set the state of a parent interrupt.
1294 *
1295 * @data: Pointer to interrupt specific data
1296 * @which: State to be restored (one of IRQCHIP_STATE_*)
1297 * @val: Value corresponding to @which
1298 *
1299 * Conditionally successful: returns 0 if the underlying irqchip does not implement it.
1300 */
1301int irq_chip_set_parent_state(struct irq_data *data,
1302			      enum irqchip_irq_state which,
1303			      bool val)
1304{
1305	data = data->parent_data;
1306
1307	if (!data || !data->chip->irq_set_irqchip_state)
1308		return 0;
1309
1310	return data->chip->irq_set_irqchip_state(data, which, val);
1311}
1312EXPORT_SYMBOL_GPL(irq_chip_set_parent_state);
1313
1314/**
1315 * irq_chip_get_parent_state - get the state of a parent interrupt.
1316 *
1317 * @data: Pointer to interrupt specific data
1318 * @which: one of IRQCHIP_STATE_* the caller wants to know
1319 * @state: a pointer to a boolean where the state is to be stored
1320 *
1321 * Conditionally successful: returns 0 if the underlying irqchip does not implement it.
1322 */
1323int irq_chip_get_parent_state(struct irq_data *data,
1324			      enum irqchip_irq_state which,
1325			      bool *state)
1326{
1327	data = data->parent_data;
1328
1329	if (!data || !data->chip->irq_get_irqchip_state)
1330		return 0;
1331
1332	return data->chip->irq_get_irqchip_state(data, which, state);
1333}
1334EXPORT_SYMBOL_GPL(irq_chip_get_parent_state);
1335
1336/**
1337 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask
1338 * if the parent's ->irq_enable is NULL)
1339 * @data:	Pointer to interrupt specific data
1340 */
1341void irq_chip_enable_parent(struct irq_data *data)
1342{
1343	data = data->parent_data;
1344	if (data->chip->irq_enable)
1345		data->chip->irq_enable(data);
1346	else
1347		data->chip->irq_unmask(data);
1348}
1349EXPORT_SYMBOL_GPL(irq_chip_enable_parent);
1350
1351/**
1352 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask
1353 * if the parent's ->irq_disable is NULL)
1354 * @data:	Pointer to interrupt specific data
1355 */
1356void irq_chip_disable_parent(struct irq_data *data)
1357{
1358	data = data->parent_data;
1359	if (data->chip->irq_disable)
1360		data->chip->irq_disable(data);
1361	else
1362		data->chip->irq_mask(data);
1363}
1364EXPORT_SYMBOL_GPL(irq_chip_disable_parent);
1365
1366/**
1367 * irq_chip_ack_parent - Acknowledge the parent interrupt
1368 * @data:	Pointer to interrupt specific data
1369 */
1370void irq_chip_ack_parent(struct irq_data *data)
1371{
1372	data = data->parent_data;
1373	data->chip->irq_ack(data);
1374}
1375EXPORT_SYMBOL_GPL(irq_chip_ack_parent);
1376
1377/**
1378 * irq_chip_mask_parent - Mask the parent interrupt
1379 * @data:	Pointer to interrupt specific data
1380 */
1381void irq_chip_mask_parent(struct irq_data *data)
1382{
1383	data = data->parent_data;
1384	data->chip->irq_mask(data);
1385}
1386EXPORT_SYMBOL_GPL(irq_chip_mask_parent);
1387
1388/**
1389 * irq_chip_mask_ack_parent - Mask and acknowledge the parent interrupt
1390 * @data:	Pointer to interrupt specific data
1391 */
1392void irq_chip_mask_ack_parent(struct irq_data *data)
1393{
1394	data = data->parent_data;
1395	data->chip->irq_mask_ack(data);
1396}
1397EXPORT_SYMBOL_GPL(irq_chip_mask_ack_parent);
1398
1399/**
1400 * irq_chip_unmask_parent - Unmask the parent interrupt
1401 * @data:	Pointer to interrupt specific data
1402 */
1403void irq_chip_unmask_parent(struct irq_data *data)
1404{
1405	data = data->parent_data;
1406	data->chip->irq_unmask(data);
1407}
1408EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);
1409
1410/**
1411 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
1412 * @data:	Pointer to interrupt specific data
1413 */
1414void irq_chip_eoi_parent(struct irq_data *data)
1415{
1416	data = data->parent_data;
1417	data->chip->irq_eoi(data);
1418}
1419EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);
1420
1421/**
1422 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
1423 * @data:	Pointer to interrupt specific data
1424 * @dest:	The affinity mask to set
1425 * @force:	Flag to enforce setting (disable online checks)
1426 *
1427 * Conditional, as the underlying parent chip might not implement it.
1428 */
1429int irq_chip_set_affinity_parent(struct irq_data *data,
1430				 const struct cpumask *dest, bool force)
1431{
1432	data = data->parent_data;
1433	if (data->chip->irq_set_affinity)
1434		return data->chip->irq_set_affinity(data, dest, force);
1435
1436	return -ENOSYS;
1437}
1438EXPORT_SYMBOL_GPL(irq_chip_set_affinity_parent);
1439
1440/**
1441 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
1442 * @data:	Pointer to interrupt specific data
1443 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
1444 *
1445 * Conditional, as the underlying parent chip might not implement it.
1446 */
1447int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
1448{
1449	data = data->parent_data;
1450
1451	if (data->chip->irq_set_type)
1452		return data->chip->irq_set_type(data, type);
1453
1454	return -ENOSYS;
1455}
1456EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);
1457
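/*
 * Illustrative sketch (not part of this file): a stacked irqchip in a
 * hierarchical domain frequently has nothing to do at its own level and
 * simply forwards each operation to its parent with the helpers above.
 * "my_stacked_chip" is hypothetical.
 */
static struct irq_chip my_stacked_chip = {
	.name			= "my-stacked",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};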
1458/**
1459 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
1460 * @data:	Pointer to interrupt specific data
1461 *
1462 * Iterate through the domain hierarchy of the interrupt and check
1463 * whether a hw retrigger function exists. If yes, invoke it.
1464 */
1465int irq_chip_retrigger_hierarchy(struct irq_data *data)
1466{
1467	for (data = data->parent_data; data; data = data->parent_data)
1468		if (data->chip && data->chip->irq_retrigger)
1469			return data->chip->irq_retrigger(data);
1470
1471	return 0;
1472}
1473EXPORT_SYMBOL_GPL(irq_chip_retrigger_hierarchy);
1474
1475/**
1476 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
1477 * @data:	Pointer to interrupt specific data
1478 * @vcpu_info:	The vcpu affinity information
1479 */
1480int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
1481{
1482	data = data->parent_data;
1483	if (data->chip->irq_set_vcpu_affinity)
1484		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
1485
1486	return -ENOSYS;
1487}
1488EXPORT_SYMBOL_GPL(irq_chip_set_vcpu_affinity_parent);
1489/**
1490 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
1491 * @data:	Pointer to interrupt specific data
1492 * @on:		Whether to set or reset the wake-up capability of this irq
1493 *
1494 * Conditional, as the underlying parent chip might not implement it.
1495 */
1496int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
1497{
1498	data = data->parent_data;
1499
1500	if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
1501		return 0;
1502
1503	if (data->chip->irq_set_wake)
1504		return data->chip->irq_set_wake(data, on);
1505
1506	return -ENOSYS;
1507}
1508EXPORT_SYMBOL_GPL(irq_chip_set_wake_parent);
1509
1510/**
1511 * irq_chip_request_resources_parent - Request resources on the parent interrupt
1512 * @data:	Pointer to interrupt specific data
1513 */
1514int irq_chip_request_resources_parent(struct irq_data *data)
1515{
1516	data = data->parent_data;
1517
1518	if (data->chip->irq_request_resources)
1519		return data->chip->irq_request_resources(data);
1520
1521	/* no error on missing optional irq_chip::irq_request_resources */
1522	return 0;
1523}
1524EXPORT_SYMBOL_GPL(irq_chip_request_resources_parent);
1525
1526/**
1527 * irq_chip_release_resources_parent - Release resources on the parent interrupt
1528 * @data:	Pointer to interrupt specific data
1529 */
1530void irq_chip_release_resources_parent(struct irq_data *data)
1531{
1532	data = data->parent_data;
1533	if (data->chip->irq_release_resources)
1534		data->chip->irq_release_resources(data);
1535}
1536EXPORT_SYMBOL_GPL(irq_chip_release_resources_parent);
1537#endif
1538
1539/**
1540 * irq_chip_compose_msi_msg - Compose msi message for an irq chip
1541 * @data:	Pointer to interrupt specific data
1542 * @msg:	Pointer to the MSI message
1543 *
1544 * For hierarchical domains we find the first chip in the hierarchy
1545 * which implements the irq_compose_msi_msg callback. For
1546 * non-hierarchical domains we use the top level chip.
1547 */
1548int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
1549{
1550	struct irq_data *pos;
1551
1552	for (pos = NULL; !pos && data; data = irqd_get_parent_data(data)) {
1553		if (data->chip && data->chip->irq_compose_msi_msg)
1554			pos = data;
1555	}
1556
1557	if (!pos)
1558		return -ENOSYS;
1559
1560	pos->chip->irq_compose_msi_msg(pos, msg);
1561	return 0;
1562}
1563
1564static struct device *irq_get_pm_device(struct irq_data *data)
1565{
1566	if (data->domain)
1567		return data->domain->pm_dev;
1568
1569	return NULL;
1570}
1571
1572/**
1573 * irq_chip_pm_get - Enable power for an IRQ chip
1574 * @data:	Pointer to interrupt specific data
1575 *
1576 * Enable the power to the IRQ chip referenced by the interrupt data
1577 * structure.
1578 */
1579int irq_chip_pm_get(struct irq_data *data)
1580{
1581	struct device *dev = irq_get_pm_device(data);
1582	int retval = 0;
1583
1584	if (IS_ENABLED(CONFIG_PM) && dev)
1585		retval = pm_runtime_resume_and_get(dev);
1586
1587	return retval;
1588}
1589
1590/**
1591 * irq_chip_pm_put - Disable power for an IRQ chip
1592 * @data:	Pointer to interrupt specific data
1593 *
1594 * Disable the power to the IRQ chip referenced by the interrupt data
1595 * structure. Note that power will only be disabled once this
1596 * function has been called for all IRQs that have called irq_chip_pm_get().
1597 */
1598int irq_chip_pm_put(struct irq_data *data)
1599{
1600	struct device *dev = irq_get_pm_device(data);
1601	int retval = 0;
1602
1603	if (IS_ENABLED(CONFIG_PM) && dev)
1604		retval = pm_runtime_put(dev);
1605
1606	return (retval < 0) ? retval : 0;
1607}