   1/*
   2 * linux/kernel/irq/manage.c
   3 *
   4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   5 * Copyright (C) 2005-2006 Thomas Gleixner
   6 *
   7 * This file contains driver APIs to the irq subsystem.
   8 */
   9
  10#define pr_fmt(fmt) "genirq: " fmt
  11
  12#include <linux/irq.h>
  13#include <linux/kthread.h>
  14#include <linux/module.h>
  15#include <linux/random.h>
  16#include <linux/interrupt.h>
  17#include <linux/slab.h>
  18#include <linux/sched.h>
  19#include <linux/sched/rt.h>
  20#include <linux/task_work.h>
  21
  22#include "internals.h"
  23
  24#ifdef CONFIG_IRQ_FORCED_THREADING
  25__read_mostly bool force_irqthreads;
  26
  27static int __init setup_forced_irqthreads(char *arg)
  28{
  29	force_irqthreads = true;
  30	return 0;
  31}
  32early_param("threadirqs", setup_forced_irqthreads);
  33#endif
  34
  35static void __synchronize_hardirq(struct irq_desc *desc)
  36{
  37	bool inprogress;
  38
  39	do {
  40		unsigned long flags;
  41
  42		/*
  43		 * Wait until we're out of the critical section.  This might
  44		 * give the wrong answer due to the lack of memory barriers.
  45		 */
  46		while (irqd_irq_inprogress(&desc->irq_data))
  47			cpu_relax();
  48
  49		/* Ok, that indicated we're done: double-check carefully. */
  50		raw_spin_lock_irqsave(&desc->lock, flags);
  51		inprogress = irqd_irq_inprogress(&desc->irq_data);
  52		raw_spin_unlock_irqrestore(&desc->lock, flags);
  53
  54		/* Oops, that failed? */
  55	} while (inprogress);
  56}
  57
  58/**
  59 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
  60 *	@irq: interrupt number to wait for
  61 *
  62 *	This function waits for any pending hard IRQ handlers for this
  63 *	interrupt to complete before returning. If you use this
  64 *	function while holding a resource the IRQ handler may need you
  65 *	will deadlock. It does not take associated threaded handlers
  66 *	into account.
  67 *
  68 *	Do not use this for shutdown scenarios where you must be sure
  69 *	that all parts (hardirq and threaded handler) have completed.
  70 *
  71 *	Returns: false if a threaded handler is active.
  72 *
  73 *	This function may be called - with care - from IRQ context.
  74 */
  75bool synchronize_hardirq(unsigned int irq)
  76{
  77	struct irq_desc *desc = irq_to_desc(irq);
  78
  79	if (desc) {
  80		__synchronize_hardirq(desc);
  81		return !atomic_read(&desc->threads_active);
  82	}
  83
  84	return true;
  85}
  86EXPORT_SYMBOL(synchronize_hardirq);
  87
  88/**
  89 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  90 *	@irq: interrupt number to wait for
  91 *
  92 *	This function waits for any pending IRQ handlers for this interrupt
  93 *	to complete before returning. If you use this function while
  94 *	holding a resource the IRQ handler may need you will deadlock.
  95 *
  96 *	This function may be called - with care - from IRQ context.
  97 */
  98void synchronize_irq(unsigned int irq)
  99{
 100	struct irq_desc *desc = irq_to_desc(irq);
 101
 102	if (desc) {
 103		__synchronize_hardirq(desc);
 104		/*
 105		 * We made sure that no hardirq handler is
 106		 * running. Now verify that no threaded handlers are
 107		 * active.
 108		 */
 109		wait_event(desc->wait_for_threads,
 110			   !atomic_read(&desc->threads_active));
 111	}
 112}
 113EXPORT_SYMBOL(synchronize_irq);
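/*
 * Editor's example (not part of manage.c): a minimal sketch of a driver
 * teardown path built on synchronize_irq(). struct mydev and
 * mydev_mask_hw() are hypothetical.
 */
struct mydev { unsigned int irq; };
void mydev_mask_hw(struct mydev *dev);	/* hypothetical register write */

static void mydev_quiesce(struct mydev *dev)
{
	/* stop the device from raising new interrupts first ... */
	mydev_mask_hw(dev);
	/*
	 * ... then wait for any handler still running on another CPU,
	 * hardirq and threaded part alike. Must not hold any resource
	 * the handler might take, or this deadlocks (see above).
	 */
	synchronize_irq(dev->irq);
}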
 114
 115#ifdef CONFIG_SMP
 116cpumask_var_t irq_default_affinity;
 117
 118static int __irq_can_set_affinity(struct irq_desc *desc)
 119{
 120	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 121	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 122		return 0;
 123	return 1;
 124}
 125
 126/**
 127 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 128 *	@irq:		Interrupt to check
 129 *
 130 */
 131int irq_can_set_affinity(unsigned int irq)
 132{
 133	return __irq_can_set_affinity(irq_to_desc(irq));
 134}
 135
 136/**
 137 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 138 *	@desc:		irq descriptor which has affinity changed
 139 *
 140 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 141 *	to the interrupt thread itself. We can not call
 142 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 143 *	code can be called from hard interrupt context.
 144 */
 145void irq_set_thread_affinity(struct irq_desc *desc)
 146{
 147	struct irqaction *action;
 148
 149	for_each_action_of_desc(desc, action)
 150		if (action->thread)
 151			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 152}
 153
 154#ifdef CONFIG_GENERIC_PENDING_IRQ
 155static inline bool irq_can_move_pcntxt(struct irq_data *data)
 156{
 157	return irqd_can_move_in_process_context(data);
 158}
 159static inline bool irq_move_pending(struct irq_data *data)
 160{
 161	return irqd_is_setaffinity_pending(data);
 162}
 163static inline void
 164irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
 165{
 166	cpumask_copy(desc->pending_mask, mask);
 167}
 168static inline void
 169irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
 170{
 171	cpumask_copy(mask, desc->pending_mask);
 172}
 173#else
 174static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
 175static inline bool irq_move_pending(struct irq_data *data) { return false; }
 176static inline void
 177irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 178static inline void
 179irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 180#endif
 181
 182int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 183			bool force)
 184{
 185	struct irq_desc *desc = irq_data_to_desc(data);
 186	struct irq_chip *chip = irq_data_get_irq_chip(data);
 187	int ret;
 188
 189	ret = chip->irq_set_affinity(data, mask, force);
 190	switch (ret) {
 191	case IRQ_SET_MASK_OK:
 192	case IRQ_SET_MASK_OK_DONE:
 193		cpumask_copy(desc->irq_common_data.affinity, mask);
 194	case IRQ_SET_MASK_OK_NOCOPY:
 195		irq_set_thread_affinity(desc);
 196		ret = 0;
 197	}
 198
 199	return ret;
 200}
 201
 202int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 203			    bool force)
 204{
 205	struct irq_chip *chip = irq_data_get_irq_chip(data);
 206	struct irq_desc *desc = irq_data_to_desc(data);
 207	int ret = 0;
 208
 209	if (!chip || !chip->irq_set_affinity)
 210		return -EINVAL;
 211
 212	if (irq_can_move_pcntxt(data)) {
 213		ret = irq_do_set_affinity(data, mask, force);
 214	} else {
 215		irqd_set_move_pending(data);
 216		irq_copy_pending(desc, mask);
 217	}
 218
 219	if (desc->affinity_notify) {
 220		kref_get(&desc->affinity_notify->kref);
 221		schedule_work(&desc->affinity_notify->work);
 222	}
 223	irqd_set(data, IRQD_AFFINITY_SET);
 224
 225	return ret;
 226}
 227
 228int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 229{
 230	struct irq_desc *desc = irq_to_desc(irq);
 231	unsigned long flags;
 232	int ret;
 233
 234	if (!desc)
 235		return -EINVAL;
 236
 237	raw_spin_lock_irqsave(&desc->lock, flags);
 238	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 239	raw_spin_unlock_irqrestore(&desc->lock, flags);
 240	return ret;
 241}
 242
 243int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 244{
 245	unsigned long flags;
 246	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 247
 248	if (!desc)
 249		return -EINVAL;
 250	desc->affinity_hint = m;
 251	irq_put_desc_unlock(desc, flags);
 252	/* set the initial affinity to prevent every interrupt being on CPU0 */
 253	if (m)
 254		__irq_set_affinity(irq, m, false);
 255	return 0;
 256}
 257EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
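/*
 * Editor's example (not part of manage.c): a multiqueue driver might
 * publish one affinity hint per vector so tools like irqbalance can
 * spread the queues, and must clear the hints again before freeing the
 * irqs (see the WARN in __free_irq() below). base_irq and nvec are
 * hypothetical.
 */
static void mydev_set_hints(unsigned int base_irq, unsigned int nvec)
{
	unsigned int i;

	for (i = 0; i < nvec; i++)
		irq_set_affinity_hint(base_irq + i,
				      cpumask_of(i % num_online_cpus()));
}

static void mydev_clear_hints(unsigned int base_irq, unsigned int nvec)
{
	unsigned int i;

	for (i = 0; i < nvec; i++)	/* mandatory before free_irq() */
		irq_set_affinity_hint(base_irq + i, NULL);
}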
 258
 259static void irq_affinity_notify(struct work_struct *work)
 260{
 261	struct irq_affinity_notify *notify =
 262		container_of(work, struct irq_affinity_notify, work);
 263	struct irq_desc *desc = irq_to_desc(notify->irq);
 264	cpumask_var_t cpumask;
 265	unsigned long flags;
 266
 267	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 268		goto out;
 269
 270	raw_spin_lock_irqsave(&desc->lock, flags);
 271	if (irq_move_pending(&desc->irq_data))
 272		irq_get_pending(cpumask, desc);
 273	else
 274		cpumask_copy(cpumask, desc->irq_common_data.affinity);
 275	raw_spin_unlock_irqrestore(&desc->lock, flags);
 276
 277	notify->notify(notify, cpumask);
 278
 279	free_cpumask_var(cpumask);
 280out:
 281	kref_put(&notify->kref, notify->release);
 282}
 283
 284/**
 285 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 286 *	@irq:		Interrupt for which to enable/disable notification
 287 *	@notify:	Context for notification, or %NULL to disable
 288 *			notification.  Function pointers must be initialised;
 289 *			the other fields will be initialised by this function.
 290 *
 291 *	Must be called in process context.  Notification may only be enabled
 292 *	after the IRQ is allocated and must be disabled before the IRQ is
 293 *	freed using free_irq().
 294 */
 295int
 296irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 297{
 298	struct irq_desc *desc = irq_to_desc(irq);
 299	struct irq_affinity_notify *old_notify;
 300	unsigned long flags;
 301
 302	/* The release function is promised process context */
 303	might_sleep();
 304
 305	if (!desc)
 306		return -EINVAL;
 307
 308	/* Complete initialisation of *notify */
 309	if (notify) {
 310		notify->irq = irq;
 311		kref_init(&notify->kref);
 312		INIT_WORK(&notify->work, irq_affinity_notify);
 313	}
 314
 315	raw_spin_lock_irqsave(&desc->lock, flags);
 316	old_notify = desc->affinity_notify;
 317	desc->affinity_notify = notify;
 318	raw_spin_unlock_irqrestore(&desc->lock, flags);
 319
 320	if (old_notify)
 321		kref_put(&old_notify->kref, old_notify->release);
 322
 323	return 0;
 324}
 325EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
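/*
 * Editor's example (not part of manage.c): registering an affinity
 * notifier. Only the two function pointers must be set by the caller;
 * irq, kref and work are filled in above. Names are hypothetical.
 */
static void mydev_affinity_changed(struct irq_affinity_notify *notify,
				   const cpumask_t *mask)
{
	/* e.g. re-steer per-queue DMA toward the new CPU mask */
}

static void mydev_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);

	/* runs in process context once the last reference is dropped */
	kfree(notify);
}

static int mydev_watch_affinity(unsigned int irq)
{
	struct irq_affinity_notify *notify;
	int ret;

	notify = kzalloc(sizeof(*notify), GFP_KERNEL);
	if (!notify)
		return -ENOMEM;
	notify->notify = mydev_affinity_changed;
	notify->release = mydev_affinity_release;
	ret = irq_set_affinity_notifier(irq, notify);
	if (ret)
		kfree(notify);
	return ret;
}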
 326
 327#ifndef CONFIG_AUTO_IRQ_AFFINITY
 328/*
 329 * Generic version of the affinity autoselector.
 330 */
 331static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 332{
 333	struct cpumask *set = irq_default_affinity;
 334	int node = irq_desc_get_node(desc);
 335
 336	/* Excludes PER_CPU and NO_BALANCE interrupts */
 337	if (!__irq_can_set_affinity(desc))
 338		return 0;
 339
 340	/*
 341	 * Preserve a userspace affinity setup, but make sure that
 342	 * one of the targets is online.
 343	 */
 344	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 345		if (cpumask_intersects(desc->irq_common_data.affinity,
 346				       cpu_online_mask))
 347			set = desc->irq_common_data.affinity;
 348		else
 349			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 350	}
 351
 352	cpumask_and(mask, cpu_online_mask, set);
 353	if (node != NUMA_NO_NODE) {
 354		const struct cpumask *nodemask = cpumask_of_node(node);
 355
 356		/* make sure at least one of the cpus in nodemask is online */
 357		if (cpumask_intersects(mask, nodemask))
 358			cpumask_and(mask, mask, nodemask);
 359	}
 360	irq_do_set_affinity(&desc->irq_data, mask, false);
 361	return 0;
 362}
 363#else
 364/* Wrapper for ALPHA specific affinity selector magic */
 365static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
 366{
 367	return irq_select_affinity(irq_desc_get_irq(d));
 368}
 369#endif
 370
 371/*
 372 * Called when affinity is set via /proc/irq
 373 */
 374int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 375{
 376	struct irq_desc *desc = irq_to_desc(irq);
 377	unsigned long flags;
 378	int ret;
 379
 380	raw_spin_lock_irqsave(&desc->lock, flags);
 381	ret = setup_affinity(desc, mask);
 382	raw_spin_unlock_irqrestore(&desc->lock, flags);
 383	return ret;
 384}
 385
 386#else
 387static inline int
 388setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 389{
 390	return 0;
 391}
 392#endif
 393
 394/**
 395 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 396 *	@irq: interrupt number to set affinity
 397 *	@vcpu_info: vCPU specific data
 398 *
 399 *	This function uses the vCPU specific data to set the vCPU
 400 *	affinity for an irq. The vCPU specific data is passed from
 401 *	outside, such as KVM. One example code path is as below:
 402 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 403 */
 404int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 405{
 406	unsigned long flags;
 407	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 408	struct irq_data *data;
 409	struct irq_chip *chip;
 410	int ret = -ENOSYS;
 411
 412	if (!desc)
 413		return -EINVAL;
 414
 415	data = irq_desc_get_irq_data(desc);
 416	chip = irq_data_get_irq_chip(data);
 417	if (chip && chip->irq_set_vcpu_affinity)
 418		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
 419	irq_put_desc_unlock(desc, flags);
 420
 421	return ret;
 422}
 423EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
 424
 425void __disable_irq(struct irq_desc *desc)
 426{
 427	if (!desc->depth++)
 428		irq_disable(desc);
 429}
 430
 431static int __disable_irq_nosync(unsigned int irq)
 432{
 433	unsigned long flags;
 434	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 435
 436	if (!desc)
 437		return -EINVAL;
 438	__disable_irq(desc);
 439	irq_put_desc_busunlock(desc, flags);
 440	return 0;
 441}
 442
 443/**
 444 *	disable_irq_nosync - disable an irq without waiting
 445 *	@irq: Interrupt to disable
 446 *
 447 *	Disable the selected interrupt line.  Disables and Enables are
 448 *	nested.
 449 *	Unlike disable_irq(), this function does not ensure existing
 450 *	instances of the IRQ handler have completed before returning.
 451 *
 452 *	This function may be called from IRQ context.
 453 */
 454void disable_irq_nosync(unsigned int irq)
 455{
 456	__disable_irq_nosync(irq);
 457}
 458EXPORT_SYMBOL(disable_irq_nosync);
 459
 460/**
 461 *	disable_irq - disable an irq and wait for completion
 462 *	@irq: Interrupt to disable
 463 *
 464 *	Disable the selected interrupt line.  Enables and Disables are
 465 *	nested.
 466 *	This function waits for any pending IRQ handlers for this interrupt
 467 *	to complete before returning. If you use this function while
 468 *	holding a resource the IRQ handler may need you will deadlock.
 469 *
 470 *	This function may be called - with care - from IRQ context.
 471 */
 472void disable_irq(unsigned int irq)
 473{
 474	if (!__disable_irq_nosync(irq))
 475		synchronize_irq(irq);
 476}
 477EXPORT_SYMBOL(disable_irq);
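/*
 * Editor's example (not part of manage.c): choosing between the two
 * disable flavours. From the handler itself only the nosync variant is
 * usable; the waiting variant belongs in process context. Reuses the
 * hypothetical struct mydev from the earlier sketch.
 */
static irqreturn_t mydev_isr(int irq, void *data)
{
	/* hardirq context: disable_irq() here would wait on ourselves */
	disable_irq_nosync(irq);
	return IRQ_HANDLED;
}

static void mydev_stop(struct mydev *dev)
{
	/* process context: returns only after running handlers finished */
	disable_irq(dev->irq);
}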
 478
 479/**
 480 *	disable_hardirq - disables an irq and waits for hardirq completion
 481 *	@irq: Interrupt to disable
 482 *
 483 *	Disable the selected interrupt line.  Enables and Disables are
 484 *	nested.
 485 *	This function waits for any pending hard IRQ handlers for this
 486 *	interrupt to complete before returning. If you use this function while
 487 *	holding a resource the hard IRQ handler may need you will deadlock.
 488 *
 489 *	When used to optimistically disable an interrupt from atomic context
 490 *	the return value must be checked.
 491 *
 492 *	Returns: false if a threaded handler is active.
 493 *
 494 *	This function may be called - with care - from IRQ context.
 495 */
 496bool disable_hardirq(unsigned int irq)
 497{
 498	if (!__disable_irq_nosync(irq))
 499		return synchronize_hardirq(irq);
 500
 501	return false;
 502}
 503EXPORT_SYMBOL_GPL(disable_hardirq);
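/*
 * Editor's example (not part of manage.c): optimistically disabling
 * from atomic context and honouring the return value, as the kerneldoc
 * above demands. Names are hypothetical.
 */
static bool mydev_try_quiesce_atomic(struct mydev *dev)
{
	if (disable_hardirq(dev->irq))
		return true;	/* hardirqs done, no threaded handler active */
	/* threaded handler still running: back out, retry in process ctx */
	enable_irq(dev->irq);
	return false;
}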
 504
 505void __enable_irq(struct irq_desc *desc)
 506{
 507	switch (desc->depth) {
 508	case 0:
 509 err_out:
 510		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
 511		     irq_desc_get_irq(desc));
 512		break;
 513	case 1: {
 514		if (desc->istate & IRQS_SUSPENDED)
 515			goto err_out;
 516		/* Prevent probing on this irq: */
 517		irq_settings_set_noprobe(desc);
 518		irq_enable(desc);
 519		check_irq_resend(desc);
 520		/* fall-through */
 521	}
 522	default:
 523		desc->depth--;
 524	}
 525}
 526
 527/**
 528 *	enable_irq - enable handling of an irq
 529 *	@irq: Interrupt to enable
 530 *
 531 *	Undoes the effect of one call to disable_irq().  If this
 532 *	matches the last disable, processing of interrupts on this
 533 *	IRQ line is re-enabled.
 534 *
 535 *	This function may be called from IRQ context only when
 536 *	desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL !
 537 */
 538void enable_irq(unsigned int irq)
 539{
 540	unsigned long flags;
 541	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 542
 543	if (!desc)
 544		return;
 545	if (WARN(!desc->irq_data.chip,
 546		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 547		goto out;
 548
 549	__enable_irq(desc);
 550out:
 551	irq_put_desc_busunlock(desc, flags);
 552}
 553EXPORT_SYMBOL(enable_irq);
 554
 555static int set_irq_wake_real(unsigned int irq, unsigned int on)
 556{
 557	struct irq_desc *desc = irq_to_desc(irq);
 558	int ret = -ENXIO;
 559
 560	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
 561		return 0;
 562
 563	if (desc->irq_data.chip->irq_set_wake)
 564		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 565
 566	return ret;
 567}
 568
 569/**
 570 *	irq_set_irq_wake - control irq power management wakeup
 571 *	@irq:	interrupt to control
 572 *	@on:	enable/disable power management wakeup
 573 *
 574 *	Enable/disable power management wakeup mode, which is
 575 *	disabled by default.  Enables and disables must match,
 576 *	just as they match for non-wakeup mode support.
 577 *
 578 *	Wakeup mode lets this IRQ wake the system from sleep
 579 *	states like "suspend to RAM".
 580 */
 581int irq_set_irq_wake(unsigned int irq, unsigned int on)
 582{
 583	unsigned long flags;
 584	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 585	int ret = 0;
 586
 587	if (!desc)
 588		return -EINVAL;
 589
 590	/* wakeup-capable irqs can be shared between drivers that
 591	 * don't need to have the same sleep mode behaviors.
 592	 */
 593	if (on) {
 594		if (desc->wake_depth++ == 0) {
 595			ret = set_irq_wake_real(irq, on);
 596			if (ret)
 597				desc->wake_depth = 0;
 598			else
 599				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 600		}
 601	} else {
 602		if (desc->wake_depth == 0) {
 603			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 604		} else if (--desc->wake_depth == 0) {
 605			ret = set_irq_wake_real(irq, on);
 606			if (ret)
 607				desc->wake_depth = 1;
 608			else
 609				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 610		}
 611	}
 612	irq_put_desc_busunlock(desc, flags);
 613	return ret;
 614}
 615EXPORT_SYMBOL(irq_set_irq_wake);
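/*
 * Editor's example (not part of manage.c): balanced wake enable/disable
 * from suspend/resume callbacks. enable_irq_wake()/disable_irq_wake()
 * are the usual wrappers around irq_set_irq_wake(); struct mydev is the
 * hypothetical driver state from the earlier sketches.
 */
static int mydev_suspend(struct device *d)
{
	struct mydev *dev = dev_get_drvdata(d);

	if (device_may_wakeup(d))
		enable_irq_wake(dev->irq);	/* irq_set_irq_wake(irq, 1) */
	return 0;
}

static int mydev_resume(struct device *d)
{
	struct mydev *dev = dev_get_drvdata(d);

	if (device_may_wakeup(d))
		disable_irq_wake(dev->irq);	/* irq_set_irq_wake(irq, 0) */
	return 0;
}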
 616
 617/*
 618 * Internal function that tells the architecture code whether a
 619 * particular irq has been exclusively allocated or is available
 620 * for driver use.
 621 */
 622int can_request_irq(unsigned int irq, unsigned long irqflags)
 623{
 624	unsigned long flags;
 625	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 626	int canrequest = 0;
 627
 628	if (!desc)
 629		return 0;
 630
 631	if (irq_settings_can_request(desc)) {
 632		if (!desc->action ||
 633		    irqflags & desc->action->flags & IRQF_SHARED)
 634			canrequest = 1;
 635	}
 636	irq_put_desc_unlock(desc, flags);
 637	return canrequest;
 638}
 639
 640int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 641{
 642	struct irq_chip *chip = desc->irq_data.chip;
 643	int ret, unmask = 0;
 644
 645	if (!chip || !chip->irq_set_type) {
 646		/*
 647		 * IRQF_TRIGGER_* but the PIC does not support multiple
 648		 * flow-types?
 649		 */
 650		pr_debug("No set_type function for IRQ %d (%s)\n",
 651			 irq_desc_get_irq(desc),
 652			 chip ? (chip->name ? : "unknown") : "unknown");
 653		return 0;
 654	}
 655
 656	flags &= IRQ_TYPE_SENSE_MASK;
 657
 658	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 659		if (!irqd_irq_masked(&desc->irq_data))
 660			mask_irq(desc);
 661		if (!irqd_irq_disabled(&desc->irq_data))
 662			unmask = 1;
 663	}
 664
 665	/* caller masked out all except trigger mode flags */
 666	ret = chip->irq_set_type(&desc->irq_data, flags);
 667
 668	switch (ret) {
 669	case IRQ_SET_MASK_OK:
 670	case IRQ_SET_MASK_OK_DONE:
 671		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 672		irqd_set(&desc->irq_data, flags);
 673
 674	case IRQ_SET_MASK_OK_NOCOPY:
 675		flags = irqd_get_trigger_type(&desc->irq_data);
 676		irq_settings_set_trigger_mask(desc, flags);
 677		irqd_clear(&desc->irq_data, IRQD_LEVEL);
 678		irq_settings_clr_level(desc);
 679		if (flags & IRQ_TYPE_LEVEL_MASK) {
 680			irq_settings_set_level(desc);
 681			irqd_set(&desc->irq_data, IRQD_LEVEL);
 682		}
 683
 684		ret = 0;
 685		break;
 686	default:
 687		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
 688		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 689	}
 690	if (unmask)
 691		unmask_irq(desc);
 692	return ret;
 693}
 694
 695#ifdef CONFIG_HARDIRQS_SW_RESEND
 696int irq_set_parent(int irq, int parent_irq)
 697{
 698	unsigned long flags;
 699	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 700
 701	if (!desc)
 702		return -EINVAL;
 703
 704	desc->parent_irq = parent_irq;
 705
 706	irq_put_desc_unlock(desc, flags);
 707	return 0;
 708}
 709#endif
 710
 711/*
 712 * Default primary interrupt handler for threaded interrupts. Is
 713 * assigned as primary handler when request_threaded_irq is called
 714 * with handler == NULL. Useful for oneshot interrupts.
 715 */
 716static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 717{
 718	return IRQ_WAKE_THREAD;
 719}
 720
 721/*
 722 * Primary handler for nested threaded interrupts. Should never be
 723 * called.
 724 */
 725static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 726{
 727	WARN(1, "Primary handler called for nested irq %d\n", irq);
 728	return IRQ_NONE;
 729}
 730
 731static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 732{
 733	WARN(1, "Secondary action handler called for irq %d\n", irq);
 734	return IRQ_NONE;
 735}
 736
 737static int irq_wait_for_interrupt(struct irqaction *action)
 738{
 739	set_current_state(TASK_INTERRUPTIBLE);
 740
 741	while (!kthread_should_stop()) {
 742
 743		if (test_and_clear_bit(IRQTF_RUNTHREAD,
 744				       &action->thread_flags)) {
 745			__set_current_state(TASK_RUNNING);
 746			return 0;
 747		}
 748		schedule();
 749		set_current_state(TASK_INTERRUPTIBLE);
 750	}
 751	__set_current_state(TASK_RUNNING);
 752	return -1;
 753}
 754
 755/*
 756 * Oneshot interrupts keep the irq line masked until the threaded
 757 * handler has finished. Unmask if the interrupt has not been disabled and
 758 * is marked MASKED.
 759 */
 760static void irq_finalize_oneshot(struct irq_desc *desc,
 761				 struct irqaction *action)
 762{
 763	if (!(desc->istate & IRQS_ONESHOT) ||
 764	    action->handler == irq_forced_secondary_handler)
 765		return;
 766again:
 767	chip_bus_lock(desc);
 768	raw_spin_lock_irq(&desc->lock);
 769
 770	/*
 771	 * Implausible though it may be we need to protect us against
 772	 * the following scenario:
 773	 *
 774	 * The thread is faster done than the hard interrupt handler
 775	 * on the other CPU. If we unmask the irq line then the
 776	 * interrupt can come in again and mask the line, then leave due
 777	 * to IRQS_INPROGRESS, and the irq line stays masked forever.
 778	 *
 779	 * This also serializes the state of shared oneshot handlers
 780	 * versus "desc->threads_oneshot |= action->thread_mask;" in
 781	 * irq_wake_thread(). See the comment there which explains the
 782	 * serialization.
 783	 */
 784	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 785		raw_spin_unlock_irq(&desc->lock);
 786		chip_bus_sync_unlock(desc);
 787		cpu_relax();
 788		goto again;
 789	}
 790
 791	/*
 792	 * Now check again, whether the thread should run. Otherwise
 793	 * we would clear the threads_oneshot bit of this thread which
 794	 * was just set.
 795	 */
 796	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 797		goto out_unlock;
 798
 799	desc->threads_oneshot &= ~action->thread_mask;
 800
 801	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
 802	    irqd_irq_masked(&desc->irq_data))
 803		unmask_threaded_irq(desc);
 804
 805out_unlock:
 806	raw_spin_unlock_irq(&desc->lock);
 807	chip_bus_sync_unlock(desc);
 808}
 809
 810#ifdef CONFIG_SMP
 811/*
 812 * Check whether we need to change the affinity of the interrupt thread.
 813 */
 814static void
 815irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 816{
 817	cpumask_var_t mask;
 818	bool valid = true;
 819
 820	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 821		return;
 822
 823	/*
 824	 * In case we are out of memory we set IRQTF_AFFINITY again and
 825	 * try again next time
 826	 */
 827	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 828		set_bit(IRQTF_AFFINITY, &action->thread_flags);
 829		return;
 830	}
 831
 832	raw_spin_lock_irq(&desc->lock);
 833	/*
 834	 * This code is triggered unconditionally. Check the affinity
 835	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
 836	 */
 837	if (desc->irq_common_data.affinity)
 838		cpumask_copy(mask, desc->irq_common_data.affinity);
 839	else
 840		valid = false;
 841	raw_spin_unlock_irq(&desc->lock);
 842
 843	if (valid)
 844		set_cpus_allowed_ptr(current, mask);
 845	free_cpumask_var(mask);
 846}
 847#else
 848static inline void
 849irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 850#endif
 851
 852/*
 853 * Interrupts which are not explicitly requested as threaded
 854 * interrupts rely on the implicit bh/preempt disable of the hard irq
 855 * context. So we need to disable bh here to avoid deadlocks and other
 856 * side effects.
 857 */
 858static irqreturn_t
 859irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 860{
 861	irqreturn_t ret;
 862
 863	local_bh_disable();
 864	ret = action->thread_fn(action->irq, action->dev_id);
 865	irq_finalize_oneshot(desc, action);
 866	local_bh_enable();
 867	return ret;
 868}
 869
 870/*
 871 * Interrupts explicitly requested as threaded interrupts want to be
 872 * preemptible - many of them need to sleep and wait for slow buses to
 873 * complete.
 874 */
 875static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 876		struct irqaction *action)
 877{
 878	irqreturn_t ret;
 879
 880	ret = action->thread_fn(action->irq, action->dev_id);
 881	irq_finalize_oneshot(desc, action);
 882	return ret;
 883}
 884
 885static void wake_threads_waitq(struct irq_desc *desc)
 886{
 887	if (atomic_dec_and_test(&desc->threads_active))
 888		wake_up(&desc->wait_for_threads);
 889}
 890
 891static void irq_thread_dtor(struct callback_head *unused)
 892{
 893	struct task_struct *tsk = current;
 894	struct irq_desc *desc;
 895	struct irqaction *action;
 896
 897	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
 898		return;
 899
 900	action = kthread_data(tsk);
 901
 902	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 903	       tsk->comm, tsk->pid, action->irq);
 904
 905
 906	desc = irq_to_desc(action->irq);
 907	/*
 908	 * If IRQTF_RUNTHREAD is set, we need to decrement
 909	 * desc->threads_active and wake possible waiters.
 910	 */
 911	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 912		wake_threads_waitq(desc);
 913
 914	/* Prevent a stale desc->threads_oneshot */
 915	irq_finalize_oneshot(desc, action);
 916}
 917
 918static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
 919{
 920	struct irqaction *secondary = action->secondary;
 921
 922	if (WARN_ON_ONCE(!secondary))
 923		return;
 924
 925	raw_spin_lock_irq(&desc->lock);
 926	__irq_wake_thread(desc, secondary);
 927	raw_spin_unlock_irq(&desc->lock);
 928}
 929
 930/*
 931 * Interrupt handler thread
 932 */
 933static int irq_thread(void *data)
 934{
 935	struct callback_head on_exit_work;
 936	struct irqaction *action = data;
 937	struct irq_desc *desc = irq_to_desc(action->irq);
 938	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 939			struct irqaction *action);
 940
 941	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 942					&action->thread_flags))
 943		handler_fn = irq_forced_thread_fn;
 944	else
 945		handler_fn = irq_thread_fn;
 946
 947	init_task_work(&on_exit_work, irq_thread_dtor);
 948	task_work_add(current, &on_exit_work, false);
 949
 950	irq_thread_check_affinity(desc, action);
 951
 952	while (!irq_wait_for_interrupt(action)) {
 953		irqreturn_t action_ret;
 954
 955		irq_thread_check_affinity(desc, action);
 956
 957		action_ret = handler_fn(desc, action);
 958		if (action_ret == IRQ_HANDLED)
 959			atomic_inc(&desc->threads_handled);
 960		if (action_ret == IRQ_WAKE_THREAD)
 961			irq_wake_secondary(desc, action);
 962
 963		wake_threads_waitq(desc);
 964	}
 965
 966	/*
 967	 * This is the regular exit path. __free_irq() is stopping the
 968	 * thread via kthread_stop() after calling
 969	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
 970	 * oneshot mask bit can be set. We cannot verify that as we
 971	 * cannot touch the oneshot mask at this point anymore as
 972 * __setup_irq() might have given out current's thread_mask
 973	 * again.
 974	 */
 975	task_work_cancel(current, irq_thread_dtor);
 976	return 0;
 977}
 978
 979/**
 980 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
 981 *	@irq:		Interrupt line
 982 *	@dev_id:	Device identity for which the thread should be woken
 983 *
 984 */
 985void irq_wake_thread(unsigned int irq, void *dev_id)
 986{
 987	struct irq_desc *desc = irq_to_desc(irq);
 988	struct irqaction *action;
 989	unsigned long flags;
 990
 991	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
 992		return;
 993
 994	raw_spin_lock_irqsave(&desc->lock, flags);
 995	for_each_action_of_desc(desc, action) {
 996		if (action->dev_id == dev_id) {
 997			if (action->thread)
 998				__irq_wake_thread(desc, action);
 999			break;
1000		}
1001	}
1002	raw_spin_unlock_irqrestore(&desc->lock, flags);
1003}
1004EXPORT_SYMBOL_GPL(irq_wake_thread);
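/*
 * Editor's example (not part of manage.c): a primary handler wakes its
 * own thread by returning IRQ_WAKE_THREAD; irq_wake_thread() lets other
 * code kick the thread for a given dev_id, e.g. a recovery timer that
 * suspects a lost edge. Timer setup is omitted; names are hypothetical.
 */
static void mydev_recovery_timer(unsigned long data)
{
	struct mydev *dev = (struct mydev *)data;

	/* behave as if the hardirq had fired: run the threaded handler */
	irq_wake_thread(dev->irq, dev);
}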
1005
1006static int irq_setup_forced_threading(struct irqaction *new)
1007{
1008	if (!force_irqthreads)
1009		return 0;
1010	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1011		return 0;
1012
1013	new->flags |= IRQF_ONESHOT;
1014
1015	/*
1016	 * Handle the case where we have a real primary handler and a
1017	 * thread handler. We force thread them as well by creating a
1018	 * secondary action.
1019	 */
1020	if (new->handler != irq_default_primary_handler && new->thread_fn) {
1021		/* Allocate the secondary action */
1022		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1023		if (!new->secondary)
1024			return -ENOMEM;
1025		new->secondary->handler = irq_forced_secondary_handler;
1026		new->secondary->thread_fn = new->thread_fn;
1027		new->secondary->dev_id = new->dev_id;
1028		new->secondary->irq = new->irq;
1029		new->secondary->name = new->name;
1030	}
1031	/* Deal with the primary handler */
1032	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1033	new->thread_fn = new->handler;
1034	new->handler = irq_default_primary_handler;
1035	return 0;
1036}
1037
1038static int irq_request_resources(struct irq_desc *desc)
1039{
1040	struct irq_data *d = &desc->irq_data;
1041	struct irq_chip *c = d->chip;
1042
1043	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1044}
1045
1046static void irq_release_resources(struct irq_desc *desc)
1047{
1048	struct irq_data *d = &desc->irq_data;
1049	struct irq_chip *c = d->chip;
1050
1051	if (c->irq_release_resources)
1052		c->irq_release_resources(d);
1053}
1054
1055static int
1056setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1057{
1058	struct task_struct *t;
1059	struct sched_param param = {
1060		.sched_priority = MAX_USER_RT_PRIO/2,
1061	};
1062
1063	if (!secondary) {
1064		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1065				   new->name);
1066	} else {
1067		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1068				   new->name);
1069		param.sched_priority -= 1;
1070	}
1071
1072	if (IS_ERR(t))
1073		return PTR_ERR(t);
1074
1075	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1076
1077	/*
1078	 * We keep the reference to the task struct even if
1079	 * the thread dies to avoid that the interrupt code
1080	 * references an already freed task_struct.
1081	 */
1082	get_task_struct(t);
1083	new->thread = t;
1084	/*
1085	 * Tell the thread to set its affinity. This is
1086	 * important for shared interrupt handlers as we do
1087	 * not invoke setup_affinity() for the secondary
1088	 * handlers as everything is already set up. Even for
1089	 * interrupts marked with IRQF_NO_BALANCE this is
1090	 * correct as we want the thread to move to the cpu(s)
1091	 * on which the requesting code placed the interrupt.
1092	 */
1093	set_bit(IRQTF_AFFINITY, &new->thread_flags);
1094	return 0;
1095}
1096
1097/*
1098 * Internal function to register an irqaction - typically used to
1099 * allocate special interrupts that are part of the architecture.
1100 */
1101static int
1102__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1103{
1104	struct irqaction *old, **old_ptr;
1105	unsigned long flags, thread_mask = 0;
1106	int ret, nested, shared = 0;
1107	cpumask_var_t mask;
1108
1109	if (!desc)
1110		return -EINVAL;
1111
1112	if (desc->irq_data.chip == &no_irq_chip)
1113		return -ENOSYS;
1114	if (!try_module_get(desc->owner))
1115		return -ENODEV;
1116
1117	new->irq = irq;
1118
1119	/*
1120	 * Check whether the interrupt nests into another interrupt
1121	 * thread.
1122	 */
1123	nested = irq_settings_is_nested_thread(desc);
1124	if (nested) {
1125		if (!new->thread_fn) {
1126			ret = -EINVAL;
1127			goto out_mput;
1128		}
1129		/*
1130		 * Replace the primary handler which was provided from
1131		 * the driver for non nested interrupt handling by the
1132		 * dummy function which warns when called.
1133		 */
1134		new->handler = irq_nested_primary_handler;
1135	} else {
1136		if (irq_settings_can_thread(desc)) {
1137			ret = irq_setup_forced_threading(new);
1138			if (ret)
1139				goto out_mput;
1140		}
1141	}
1142
1143	/*
1144	 * Create a handler thread when a thread function is supplied
1145	 * and the interrupt does not nest into another interrupt
1146	 * thread.
1147	 */
1148	if (new->thread_fn && !nested) {
1149		ret = setup_irq_thread(new, irq, false);
1150		if (ret)
1151			goto out_mput;
1152		if (new->secondary) {
1153			ret = setup_irq_thread(new->secondary, irq, true);
1154			if (ret)
1155				goto out_thread;
1156		}
1157	}
1158
1159	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1160		ret = -ENOMEM;
1161		goto out_thread;
1162	}
1163
1164	/*
1165	 * Drivers are often written to work w/o knowledge about the
1166	 * underlying irq chip implementation, so a request for a
1167	 * threaded irq without a primary hard irq context handler
1168	 * requires the ONESHOT flag to be set. Some irq chips like
1169	 * MSI based interrupts are per se one shot safe. Check the
1170	 * chip flags, so we can avoid the unmask dance at the end of
1171	 * the threaded handler for those.
1172	 */
1173	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1174		new->flags &= ~IRQF_ONESHOT;
1175
1176	/*
1177	 * The following block of code has to be executed atomically
1178	 */
1179	raw_spin_lock_irqsave(&desc->lock, flags);
1180	old_ptr = &desc->action;
1181	old = *old_ptr;
1182	if (old) {
1183		/*
1184		 * Can't share interrupts unless both agree to and are
1185		 * the same type (level, edge, polarity). So both flag
1186		 * fields must have IRQF_SHARED set and the bits which
1187		 * set the trigger type must match. Also all must
1188		 * agree on ONESHOT.
1189		 */
1190		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1191		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1192		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1193			goto mismatch;
1194
1195		/* All handlers must agree on per-cpuness */
1196		if ((old->flags & IRQF_PERCPU) !=
1197		    (new->flags & IRQF_PERCPU))
1198			goto mismatch;
1199
1200		/* add new interrupt at end of irq queue */
1201		do {
1202			/*
1203			 * Or all existing action->thread_mask bits,
1204			 * so we can find the next zero bit for this
1205			 * new action.
1206			 */
1207			thread_mask |= old->thread_mask;
1208			old_ptr = &old->next;
1209			old = *old_ptr;
1210		} while (old);
1211		shared = 1;
1212	}
1213
1214	/*
1215	 * Setup the thread mask for this irqaction for ONESHOT. For
1216	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1217	 * conditional in irq_wake_thread().
1218	 */
1219	if (new->flags & IRQF_ONESHOT) {
1220		/*
1221		 * Unlikely to have 32 resp 64 irqs sharing one line,
1222		 * but who knows.
1223		 */
1224		if (thread_mask == ~0UL) {
1225			ret = -EBUSY;
1226			goto out_mask;
1227		}
1228		/*
1229		 * The thread_mask for the action is or'ed to
1230		 * desc->thread_active to indicate that the
1231		 * IRQF_ONESHOT thread handler has been woken, but not
1232		 * yet finished. The bit is cleared when a thread
1233		 * completes. When all threads of a shared interrupt
1234		 * line have completed desc->threads_active becomes
1235		 * zero and the interrupt line is unmasked. See
1236		 * handle.c:irq_wake_thread() for further information.
1237		 *
1238		 * If no thread is woken by primary (hard irq context)
1239		 * interrupt handlers, then desc->threads_active is
1240		 * also checked for zero to unmask the irq line in the
1241		 * affected hard irq flow handlers
1242		 * (handle_[fasteoi|level]_irq).
1243		 *
1244		 * The new action gets the first zero bit of
1245		 * thread_mask assigned. See the loop above which or's
1246		 * all existing action->thread_mask bits.
1247		 */
1248		new->thread_mask = 1 << ffz(thread_mask);
1249
1250	} else if (new->handler == irq_default_primary_handler &&
1251		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1252		/*
1253		 * The interrupt was requested with handler = NULL, so
1254		 * we use the default primary handler for it. But it
1255		 * does not have the oneshot flag set. In combination
1256		 * with level interrupts this is deadly, because the
1257		 * default primary handler just wakes the thread, then
1258		 * the irq line is reenabled, but the device still
1259		 * has the level irq asserted. Rinse and repeat....
1260		 *
1261		 * While this works for edge type interrupts, we play
1262		 * it safe and reject unconditionally because we can't
1263		 * say for sure which type this interrupt really
1264		 * has. The type flags are unreliable as the
1265		 * underlying chip implementation can override them.
1266		 */
1267		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1268		       irq);
1269		ret = -EINVAL;
1270		goto out_mask;
1271	}
1272
1273	if (!shared) {
1274		ret = irq_request_resources(desc);
1275		if (ret) {
1276			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1277			       new->name, irq, desc->irq_data.chip->name);
1278			goto out_mask;
1279		}
1280
1281		init_waitqueue_head(&desc->wait_for_threads);
1282
1283		/* Setup the type (level, edge polarity) if configured: */
1284		if (new->flags & IRQF_TRIGGER_MASK) {
1285			ret = __irq_set_trigger(desc,
1286						new->flags & IRQF_TRIGGER_MASK);
1287
1288			if (ret)
1289				goto out_mask;
1290		}
1291
1292		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1293				  IRQS_ONESHOT | IRQS_WAITING);
1294		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1295
1296		if (new->flags & IRQF_PERCPU) {
1297			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1298			irq_settings_set_per_cpu(desc);
1299		}
1300
1301		if (new->flags & IRQF_ONESHOT)
1302			desc->istate |= IRQS_ONESHOT;
1303
1304		if (irq_settings_can_autoenable(desc))
1305			irq_startup(desc, true);
1306		else
1307			/* Undo nested disables: */
1308			desc->depth = 1;
1309
1310		/* Exclude IRQ from balancing if requested */
1311		if (new->flags & IRQF_NOBALANCING) {
1312			irq_settings_set_no_balancing(desc);
1313			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1314		}
1315
1316		/* Set default affinity mask once everything is setup */
1317		setup_affinity(desc, mask);
1318
1319	} else if (new->flags & IRQF_TRIGGER_MASK) {
1320		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1321		unsigned int omsk = irq_settings_get_trigger_mask(desc);
1322
1323		if (nmsk != omsk)
1324			/* hope the handler works with the current trigger mode */
1325			pr_warn("irq %d uses trigger mode %u; requested %u\n",
1326				irq, nmsk, omsk);
1327	}
1328
1329	*old_ptr = new;
1330
1331	irq_pm_install_action(desc, new);
1332
1333	/* Reset broken irq detection when installing new handler */
1334	desc->irq_count = 0;
1335	desc->irqs_unhandled = 0;
1336
1337	/*
1338	 * Check whether we disabled the irq via the spurious handler
1339	 * before. Reenable it and give it another chance.
1340	 */
1341	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1342		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1343		__enable_irq(desc);
1344	}
1345
1346	raw_spin_unlock_irqrestore(&desc->lock, flags);
1347
1348	/*
1349	 * Strictly no need to wake it up, but hung_task complains
1350	 * when no hard interrupt wakes the thread up.
1351	 */
1352	if (new->thread)
1353		wake_up_process(new->thread);
1354	if (new->secondary)
1355		wake_up_process(new->secondary->thread);
1356
1357	register_irq_proc(irq, desc);
1358	new->dir = NULL;
1359	register_handler_proc(irq, new);
1360	free_cpumask_var(mask);
1361
1362	return 0;
1363
1364mismatch:
1365	if (!(new->flags & IRQF_PROBE_SHARED)) {
1366		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1367		       irq, new->flags, new->name, old->flags, old->name);
1368#ifdef CONFIG_DEBUG_SHIRQ
1369		dump_stack();
1370#endif
1371	}
1372	ret = -EBUSY;
1373
1374out_mask:
1375	raw_spin_unlock_irqrestore(&desc->lock, flags);
1376	free_cpumask_var(mask);
1377
1378out_thread:
1379	if (new->thread) {
1380		struct task_struct *t = new->thread;
1381
1382		new->thread = NULL;
1383		kthread_stop(t);
1384		put_task_struct(t);
1385	}
1386	if (new->secondary && new->secondary->thread) {
1387		struct task_struct *t = new->secondary->thread;
1388
1389		new->secondary->thread = NULL;
1390		kthread_stop(t);
1391		put_task_struct(t);
1392	}
1393out_mput:
1394	module_put(desc->owner);
1395	return ret;
1396}
1397
1398/**
1399 *	setup_irq - setup an interrupt
1400 *	@irq: Interrupt line to setup
1401 *	@act: irqaction for the interrupt
1402 *
1403 * Used to statically setup interrupts in the early boot process.
1404 */
1405int setup_irq(unsigned int irq, struct irqaction *act)
1406{
1407	int retval;
1408	struct irq_desc *desc = irq_to_desc(irq);
1409
1410	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1411		return -EINVAL;
1412	chip_bus_lock(desc);
1413	retval = __setup_irq(irq, desc, act);
1414	chip_bus_sync_unlock(desc);
1415
1416	return retval;
1417}
1418EXPORT_SYMBOL_GPL(setup_irq);
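/*
 * Editor's example (not part of manage.c): setup_irq() with a statically
 * allocated irqaction, the way architecture code wires up the timer tick
 * before the allocators are up. TIMER_IRQ is platform specific and
 * hypothetical here.
 */
static irqreturn_t timer_tick_handler(int irq, void *dev_id)
{
	/* architecture tick processing would go here */
	return IRQ_HANDLED;
}

static struct irqaction timer_irqaction = {
	.handler = timer_tick_handler,
	.flags	 = IRQF_TIMER,
	.name	 = "timer",
};

static void __init mach_setup_timer_irq(void)
{
	setup_irq(TIMER_IRQ, &timer_irqaction);
}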
1419
1420/*
1421 * Internal function to unregister an irqaction - used to free
1422 * regular and special interrupts that are part of the architecture.
1423 */
1424static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1425{
1426	struct irq_desc *desc = irq_to_desc(irq);
1427	struct irqaction *action, **action_ptr;
1428	unsigned long flags;
1429
1430	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1431
1432	if (!desc)
1433		return NULL;
1434
1435	chip_bus_lock(desc);
1436	raw_spin_lock_irqsave(&desc->lock, flags);
1437
1438	/*
1439	 * There can be multiple actions per IRQ descriptor, find the right
1440	 * one based on the dev_id:
1441	 */
1442	action_ptr = &desc->action;
1443	for (;;) {
1444		action = *action_ptr;
1445
1446		if (!action) {
1447			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1448			raw_spin_unlock_irqrestore(&desc->lock, flags);
1449			chip_bus_sync_unlock(desc);
1450			return NULL;
1451		}
1452
1453		if (action->dev_id == dev_id)
1454			break;
1455		action_ptr = &action->next;
1456	}
1457
1458	/* Found it - now remove it from the list of entries: */
1459	*action_ptr = action->next;
1460
1461	irq_pm_remove_action(desc, action);
1462
1463	/* If this was the last handler, shut down the IRQ line: */
1464	if (!desc->action) {
1465		irq_settings_clr_disable_unlazy(desc);
1466		irq_shutdown(desc);
1467		irq_release_resources(desc);
1468	}
1469
1470#ifdef CONFIG_SMP
1471	/* make sure affinity_hint is cleaned up */
1472	if (WARN_ON_ONCE(desc->affinity_hint))
1473		desc->affinity_hint = NULL;
1474#endif
1475
1476	raw_spin_unlock_irqrestore(&desc->lock, flags);
1477	chip_bus_sync_unlock(desc);
1478
1479	unregister_handler_proc(irq, action);
1480
1481	/* Make sure it's not being used on another CPU: */
1482	synchronize_irq(irq);
1483
1484#ifdef CONFIG_DEBUG_SHIRQ
1485	/*
1486	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1487	 * event to happen even now it's being freed, so let's make sure that
1488	 * is so by doing an extra call to the handler ....
1489	 *
1490	 * ( We do this after actually deregistering it, to make sure that a
1491 *   'real' IRQ doesn't run in parallel with our fake. )
1492	 */
1493	if (action->flags & IRQF_SHARED) {
1494		local_irq_save(flags);
1495		action->handler(irq, dev_id);
1496		local_irq_restore(flags);
1497	}
1498#endif
1499
1500	if (action->thread) {
1501		kthread_stop(action->thread);
1502		put_task_struct(action->thread);
1503		if (action->secondary && action->secondary->thread) {
1504			kthread_stop(action->secondary->thread);
1505			put_task_struct(action->secondary->thread);
1506		}
1507	}
1508
1509	module_put(desc->owner);
1510	kfree(action->secondary);
1511	return action;
1512}
1513
1514/**
1515 *	remove_irq - free an interrupt
1516 *	@irq: Interrupt line to free
1517 *	@act: irqaction for the interrupt
1518 *
1519 * Used to remove interrupts statically setup by the early boot process.
1520 */
1521void remove_irq(unsigned int irq, struct irqaction *act)
1522{
1523	struct irq_desc *desc = irq_to_desc(irq);
1524
1525	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1526	    __free_irq(irq, act->dev_id);
1527}
1528EXPORT_SYMBOL_GPL(remove_irq);
1529
1530/**
1531 *	free_irq - free an interrupt allocated with request_irq
1532 *	@irq: Interrupt line to free
1533 *	@dev_id: Device identity to free
1534 *
1535 *	Remove an interrupt handler. The handler is removed and if the
1536 *	interrupt line is no longer in use by any driver it is disabled.
1537 *	On a shared IRQ the caller must ensure the interrupt is disabled
1538 *	on the card it drives before calling this function. The function
1539 *	does not return until any executing interrupts for this IRQ
1540 *	have completed.
1541 *
1542 *	This function must not be called from interrupt context.
1543 */
1544void free_irq(unsigned int irq, void *dev_id)
1545{
1546	struct irq_desc *desc = irq_to_desc(irq);
1547
1548	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1549		return;
1550
1551#ifdef CONFIG_SMP
1552	if (WARN_ON(desc->affinity_notify))
1553		desc->affinity_notify = NULL;
1554#endif
1555
1556	kfree(__free_irq(irq, dev_id));
1557}
1558EXPORT_SYMBOL(free_irq);
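/*
 * Editor's example (not part of manage.c): teardown order for a shared
 * line. Quiesce the device first (the line may keep firing for other
 * devices sharing it), then free_irq() removes the action, waits for
 * running handlers and stops the irq thread. dev must be the dev_id
 * that was passed to request_irq(); helpers reuse the hypothetical
 * struct mydev from the earlier sketches.
 */
static void mydev_remove(struct mydev *dev)
{
	mydev_mask_hw(dev);		/* device no longer asserts the line */
	free_irq(dev->irq, dev);	/* may sleep: process context only */
}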
1559
1560/**
1561 *	request_threaded_irq - allocate an interrupt line
1562 *	@irq: Interrupt line to allocate
1563 *	@handler: Function to be called when the IRQ occurs.
1564 *		  Primary handler for threaded interrupts
1565 *		  If NULL and thread_fn != NULL the default
1566 *		  primary handler is installed
1567 *	@thread_fn: Function called from the irq handler thread
1568 *		    If NULL, no irq thread is created
1569 *	@irqflags: Interrupt type flags
1570 *	@devname: An ascii name for the claiming device
1571 *	@dev_id: A cookie passed back to the handler function
1572 *
1573 *	This call allocates interrupt resources and enables the
1574 *	interrupt line and IRQ handling. From the point this
1575 *	call is made your handler function may be invoked. Since
1576 *	your handler function must clear any interrupt the board
1577 *	raises, you must take care both to initialise your hardware
1578 *	and to set up the interrupt handler in the right order.
1579 *
1580 *	If you want to set up a threaded irq handler for your device
1581 *	then you need to supply @handler and @thread_fn. @handler is
1582 *	still called in hard interrupt context and has to check
1583 *	whether the interrupt originates from the device. If yes it
1584 *	needs to disable the interrupt on the device and return
1585 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
1586 *	@thread_fn. This split handler design is necessary to support
1587 *	shared interrupts.
1588 *
1589 *	Dev_id must be globally unique. Normally the address of the
1590 *	device data structure is used as the cookie. Since the handler
1591 *	receives this value it makes sense to use it.
1592 *
1593 *	If your interrupt is shared you must pass a non NULL dev_id
1594 *	as this is required when freeing the interrupt.
1595 *
1596 *	Flags:
1597 *
1598 *	IRQF_SHARED		Interrupt is shared
1599 *	IRQF_TRIGGER_*		Specify active edge(s) or level
1600 *
1601 */
1602int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1603			 irq_handler_t thread_fn, unsigned long irqflags,
1604			 const char *devname, void *dev_id)
1605{
1606	struct irqaction *action;
1607	struct irq_desc *desc;
1608	int retval;
1609
1610	if (irq == IRQ_NOTCONNECTED)
1611		return -ENOTCONN;
1612
1613	/*
1614	 * Sanity-check: shared interrupts must pass in a real dev-ID,
1615	 * otherwise we'll have trouble later trying to figure out
1616	 * which interrupt is which (messes up the interrupt freeing
1617	 * logic etc).
1618	 *
1619	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1620	 * it cannot be set along with IRQF_NO_SUSPEND.
1621	 */
1622	if (((irqflags & IRQF_SHARED) && !dev_id) ||
1623	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1624	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1625		return -EINVAL;
1626
1627	desc = irq_to_desc(irq);
1628	if (!desc)
1629		return -EINVAL;
1630
1631	if (!irq_settings_can_request(desc) ||
1632	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1633		return -EINVAL;
1634
1635	if (!handler) {
1636		if (!thread_fn)
1637			return -EINVAL;
1638		handler = irq_default_primary_handler;
1639	}
1640
1641	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1642	if (!action)
1643		return -ENOMEM;
1644
1645	action->handler = handler;
1646	action->thread_fn = thread_fn;
1647	action->flags = irqflags;
1648	action->name = devname;
1649	action->dev_id = dev_id;
1650
1651	chip_bus_lock(desc);
1652	retval = __setup_irq(irq, desc, action);
1653	chip_bus_sync_unlock(desc);
1654
1655	if (retval) {
1656		kfree(action->secondary);
1657		kfree(action);
1658	}
1659
1660#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1661	if (!retval && (irqflags & IRQF_SHARED)) {
1662		/*
1663		 * It's a shared IRQ -- the driver ought to be prepared for it
1664		 * to happen immediately, so let's make sure....
1665		 * We disable the irq to make sure that a 'real' IRQ doesn't
1666		 * run in parallel with our fake.
1667		 */
1668		unsigned long flags;
1669
1670		disable_irq(irq);
1671		local_irq_save(flags);
1672
1673		handler(irq, dev_id);
1674
1675		local_irq_restore(flags);
1676		enable_irq(irq);
1677	}
1678#endif
1679	return retval;
1680}
1681EXPORT_SYMBOL(request_threaded_irq);
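/*
 * Editor's example (not part of manage.c): the split primary/threaded
 * pattern described in the kerneldoc above. The primary handler checks
 * and quiets the device from hardirq context, the slow work runs in the
 * thread. All mydev_* helpers are hypothetical.
 */
bool mydev_irq_pending(struct mydev *dev);
void mydev_unmask_hw(struct mydev *dev);
void mydev_process_events(struct mydev *dev);	/* may sleep */

static irqreturn_t mydev_hardirq(int irq, void *data)
{
	struct mydev *dev = data;

	if (!mydev_irq_pending(dev))
		return IRQ_NONE;	/* shared line, not our device */
	mydev_mask_hw(dev);		/* quiet the device ... */
	return IRQ_WAKE_THREAD;		/* ... and defer to the thread */
}

static irqreturn_t mydev_thread(int irq, void *data)
{
	struct mydev *dev = data;

	mydev_process_events(dev);	/* may sleep, talk to slow buses */
	mydev_unmask_hw(dev);
	return IRQ_HANDLED;
}

static int mydev_request(struct mydev *dev)
{
	return request_threaded_irq(dev->irq, mydev_hardirq, mydev_thread,
				    IRQF_SHARED, "mydev", dev);
}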
1682
1683/**
1684 *	request_any_context_irq - allocate an interrupt line
1685 *	@irq: Interrupt line to allocate
1686 *	@handler: Function to be called when the IRQ occurs.
1687 *		  Threaded handler for threaded interrupts.
1688 *	@flags: Interrupt type flags
1689 *	@name: An ascii name for the claiming device
1690 *	@dev_id: A cookie passed back to the handler function
1691 *
1692 *	This call allocates interrupt resources and enables the
1693 *	interrupt line and IRQ handling. It selects either a
1694 *	hardirq or threaded handling method depending on the
1695 *	context.
1696 *
1697 *	On failure, it returns a negative value. On success,
1698 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1699 */
1700int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1701			    unsigned long flags, const char *name, void *dev_id)
1702{
1703	struct irq_desc *desc;
1704	int ret;
1705
1706	if (irq == IRQ_NOTCONNECTED)
1707		return -ENOTCONN;
1708
1709	desc = irq_to_desc(irq);
1710	if (!desc)
1711		return -EINVAL;
1712
1713	if (irq_settings_is_nested_thread(desc)) {
1714		ret = request_threaded_irq(irq, NULL, handler,
1715					   flags, name, dev_id);
1716		return !ret ? IRQC_IS_NESTED : ret;
1717	}
1718
1719	ret = request_irq(irq, handler, flags, name, dev_id);
1720	return !ret ? IRQC_IS_HARDIRQ : ret;
1721}
1722EXPORT_SYMBOL_GPL(request_any_context_irq);
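/*
 * Editor's example (not part of manage.c): request_any_context_irq()
 * returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success, so the result
 * needs a "< 0" check rather than "!= 0". Names are hypothetical.
 */
static irqreturn_t mydev_any_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int mydev_request_any(struct mydev *dev)
{
	int ret;

	ret = request_any_context_irq(dev->irq, mydev_any_handler,
				      IRQF_TRIGGER_LOW, "mydev", dev);
	if (ret < 0)
		return ret;
	/* ret tells us whether we got a hardirq or a nested thread */
	return 0;
}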
1723
1724void enable_percpu_irq(unsigned int irq, unsigned int type)
1725{
1726	unsigned int cpu = smp_processor_id();
1727	unsigned long flags;
1728	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1729
1730	if (!desc)
1731		return;
1732
1733	type &= IRQ_TYPE_SENSE_MASK;
1734	if (type != IRQ_TYPE_NONE) {
1735		int ret;
1736
1737		ret = __irq_set_trigger(desc, type);
1738
1739		if (ret) {
1740			WARN(1, "failed to set type for IRQ%d\n", irq);
1741			goto out;
1742		}
1743	}
1744
1745	irq_percpu_enable(desc, cpu);
1746out:
1747	irq_put_desc_unlock(desc, flags);
1748}
1749EXPORT_SYMBOL_GPL(enable_percpu_irq);
1750
1751/**
1752 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1753 * @irq:	Linux irq number to check for
1754 *
1755 * Must be called from a non migratable context. Returns the enable
1756 * state of a per cpu interrupt on the current cpu.
1757 */
1758bool irq_percpu_is_enabled(unsigned int irq)
1759{
1760	unsigned int cpu = smp_processor_id();
1761	struct irq_desc *desc;
1762	unsigned long flags;
1763	bool is_enabled;
1764
1765	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1766	if (!desc)
1767		return false;
1768
1769	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1770	irq_put_desc_unlock(desc, flags);
1771
1772	return is_enabled;
1773}
1774EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1775
1776void disable_percpu_irq(unsigned int irq)
1777{
1778	unsigned int cpu = smp_processor_id();
1779	unsigned long flags;
1780	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1781
1782	if (!desc)
1783		return;
1784
1785	irq_percpu_disable(desc, cpu);
1786	irq_put_desc_unlock(desc, flags);
1787}
1788EXPORT_SYMBOL_GPL(disable_percpu_irq);
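/*
 * Editor's example (not part of manage.c): the per-cpu irq lifecycle.
 * request_percpu_irq() (further down) takes a __percpu dev_id, and the
 * line must then be enabled on each CPU individually via
 * enable_percpu_irq() above, typically from CPU bring-up code. Names
 * are hypothetical.
 */
struct mydev_pcpu { unsigned long count; };
static DEFINE_PER_CPU(struct mydev_pcpu, mydev_pcpu_state);

static irqreturn_t mydev_pcpu_handler(int irq, void *data)
{
	struct mydev_pcpu *st = data;	/* this CPU's instance */

	st->count++;
	return IRQ_HANDLED;
}

static int mydev_pcpu_setup(unsigned int irq)
{
	int ret;

	ret = request_percpu_irq(irq, mydev_pcpu_handler, "mydev-pcpu",
				 &mydev_pcpu_state);
	if (ret)
		return ret;
	/* then, on each CPU (e.g. from a hotplug callback): */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}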
1789
1790/*
1791 * Internal function to unregister a percpu irqaction.
1792 */
1793static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1794{
1795	struct irq_desc *desc = irq_to_desc(irq);
1796	struct irqaction *action;
1797	unsigned long flags;
1798
1799	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1800
1801	if (!desc)
1802		return NULL;
1803
1804	raw_spin_lock_irqsave(&desc->lock, flags);
1805
1806	action = desc->action;
1807	if (!action || action->percpu_dev_id != dev_id) {
1808		WARN(1, "Trying to free already-free IRQ %d\n", irq);
1809		goto bad;
1810	}
1811
1812	if (!cpumask_empty(desc->percpu_enabled)) {
1813		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1814		     irq, cpumask_first(desc->percpu_enabled));
1815		goto bad;
1816	}
1817
1818	/* Found it - now remove it from the list of entries: */
1819	desc->action = NULL;
1820
1821	raw_spin_unlock_irqrestore(&desc->lock, flags);
1822
1823	unregister_handler_proc(irq, action);
1824
1825	module_put(desc->owner);
1826	return action;
1827
1828bad:
1829	raw_spin_unlock_irqrestore(&desc->lock, flags);
1830	return NULL;
1831}
1832
1833/**
1834 *	remove_percpu_irq - free a per-cpu interrupt
1835 *	@irq: Interrupt line to free
1836 *	@act: irqaction for the interrupt
1837 *
1838 * Used to remove interrupts statically set up by the early boot process.
1839 */
1840void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1841{
1842	struct irq_desc *desc = irq_to_desc(irq);
1843
1844	if (desc && irq_settings_is_per_cpu_devid(desc))
1845	    __free_percpu_irq(irq, act->percpu_dev_id);
1846}
1847
1848/**
1849 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
1850 *	@irq: Interrupt line to free
1851 *	@dev_id: Device identity to free
1852 *
1853 *	Remove a percpu interrupt handler. The handler is removed, but
1854 *	the interrupt line is not disabled. This must be done on each
1855 *	CPU before calling this function. The function does not return
1856 *	until any executing interrupts for this IRQ have completed.
1857 *
1858 *	This function must not be called from interrupt context.
1859 */
1860void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1861{
1862	struct irq_desc *desc = irq_to_desc(irq);
1863
1864	if (!desc || !irq_settings_is_per_cpu_devid(desc))
1865		return;
1866
1867	chip_bus_lock(desc);
1868	kfree(__free_percpu_irq(irq, dev_id));
1869	chip_bus_sync_unlock(desc);
1870}
1871EXPORT_SYMBOL_GPL(free_percpu_irq);
1872
1873/**
1874 *	setup_percpu_irq - setup a per-cpu interrupt
1875 *	@irq: Interrupt line to setup
1876 *	@act: irqaction for the interrupt
1877 *
1878 * Used to statically set up per-cpu interrupts in the early boot process.
1879 */
1880int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1881{
1882	struct irq_desc *desc = irq_to_desc(irq);
1883	int retval;
1884
1885	if (!desc || !irq_settings_is_per_cpu_devid(desc))
1886		return -EINVAL;
1887	chip_bus_lock(desc);
1888	retval = __setup_irq(irq, desc, act);
1889	chip_bus_sync_unlock(desc);
1890
1891	return retval;
1892}
1893
1894/**
1895 *	request_percpu_irq - allocate a percpu interrupt line
1896 *	@irq: Interrupt line to allocate
1897 *	@handler: Function to be called when the IRQ occurs.
1898 *	@devname: An ascii name for the claiming device
1899 *	@dev_id: A percpu cookie passed back to the handler function
1900 *
1901 *	This call allocates interrupt resources and enables the
1902 *	interrupt on the local CPU. If the interrupt is supposed to be
1903 *	enabled on other CPUs, it has to be done on each CPU using
1904 *	enable_percpu_irq().
1905 *
1906 *	Dev_id must be globally unique. It is a per-cpu variable, and
1907 *	the handler gets called with the interrupted CPU's instance of
1908 *	that variable.
1909 */
1910int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1911		       const char *devname, void __percpu *dev_id)
1912{
1913	struct irqaction *action;
1914	struct irq_desc *desc;
1915	int retval;
1916
1917	if (!dev_id)
1918		return -EINVAL;
1919
1920	desc = irq_to_desc(irq);
1921	if (!desc || !irq_settings_can_request(desc) ||
1922	    !irq_settings_is_per_cpu_devid(desc))
1923		return -EINVAL;
1924
1925	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1926	if (!action)
1927		return -ENOMEM;
1928
1929	action->handler = handler;
1930	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1931	action->name = devname;
1932	action->percpu_dev_id = dev_id;
1933
1934	chip_bus_lock(desc);
1935	retval = __setup_irq(irq, desc, action);
1936	chip_bus_sync_unlock(desc);
1937
1938	if (retval)
1939		kfree(action);
1940
1941	return retval;
1942}
1943EXPORT_SYMBOL_GPL(request_percpu_irq);
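Tying the per-CPU API together, here is a minimal, hedged sketch of a driver lifecycle. The foo_* names and the choice of IRQ_TYPE_NONE are placeholders; real users typically pair the enable/disable calls with CPU hotplug callbacks so that every CPU is covered:

#include <linux/interrupt.h>
#include <linux/percpu.h>

struct foo_pcpu { unsigned long count; };
static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_data);

static irqreturn_t foo_percpu_handler(int irq, void *dev_id)
{
	/* dev_id is the interrupted CPU's instance of foo_pcpu_data */
	struct foo_pcpu *p = dev_id;

	p->count++;
	return IRQ_HANDLED;
}

static int foo_init(unsigned int irq)
{
	int ret;

	ret = request_percpu_irq(irq, foo_percpu_handler, "foo",
				 &foo_pcpu_data);
	if (ret)
		return ret;

	/* other CPUs need their own enable_percpu_irq() call */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
	return 0;
}

static void foo_exit(unsigned int irq)
{
	/* the line must already be disabled on every CPU */
	disable_percpu_irq(irq);
	free_percpu_irq(irq, &foo_pcpu_data);
}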
1944
1945/**
1946 *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
1947 *	@irq: Interrupt line that is forwarded to a VM
1948 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
1949 *	@state: a pointer to a boolean where the state is to be stored
1950 *
1951 *	This call snapshots the internal irqchip state of an
1952 *	interrupt, returning into @state the bit corresponding to
1953 *	state @which
1954 *
1955 *	This function should be called with preemption disabled if the
1956 *	interrupt controller has per-cpu registers.
1957 */
1958int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
1959			  bool *state)
1960{
1961	struct irq_desc *desc;
1962	struct irq_data *data;
1963	struct irq_chip *chip;
1964	unsigned long flags;
1965	int err = -EINVAL;
1966
1967	desc = irq_get_desc_buslock(irq, &flags, 0);
1968	if (!desc)
1969		return err;
1970
1971	data = irq_desc_get_irq_data(desc);
1972
1973	do {
1974		chip = irq_data_get_irq_chip(data);
1975		if (chip->irq_get_irqchip_state)
1976			break;
1977#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1978		data = data->parent_data;
1979#else
1980		data = NULL;
1981#endif
1982	} while (data);
1983
1984	if (data)
1985		err = chip->irq_get_irqchip_state(data, which, state);
1986
1987	irq_put_desc_busunlock(desc, flags);
1988	return err;
1989}
1990EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
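A minimal sketch of a caller snapshotting the hardware pending bit; the irq number is a placeholder and IRQCHIP_STATE_PENDING is one of the enum irqchip_irq_state values:

static bool foo_irq_pending(unsigned int irq)
{
	bool pending = false;

	/* fails with -EINVAL if no chip in the hierarchy supports it */
	if (irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
		return false;
	return pending;
}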
1991
1992/**
1993 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
1994 *	@irq: Interrupt line that is forwarded to a VM
1995 *	@which: State to be restored (one of IRQCHIP_STATE_*)
1996 *	@val: Value corresponding to @which
1997 *
1998 *	This call sets the internal irqchip state of an interrupt,
1999 *	depending on the value of @which.
2000 *
2001 *	This function should be called with preemption disabled if the
2002 *	interrupt controller has per-cpu registers.
2003 */
2004int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2005			  bool val)
2006{
2007	struct irq_desc *desc;
2008	struct irq_data *data;
2009	struct irq_chip *chip;
2010	unsigned long flags;
2011	int err = -EINVAL;
2012
2013	desc = irq_get_desc_buslock(irq, &flags, 0);
2014	if (!desc)
2015		return err;
2016
2017	data = irq_desc_get_irq_data(desc);
2018
2019	do {
2020		chip = irq_data_get_irq_chip(data);
2021		if (chip->irq_set_irqchip_state)
2022			break;
2023#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2024		data = data->parent_data;
2025#else
2026		data = NULL;
2027#endif
2028	} while (data);
2029
2030	if (data)
2031		err = chip->irq_set_irqchip_state(data, which, val);
2032
2033	irq_put_desc_busunlock(desc, flags);
2034	return err;
2035}
2036EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
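The setter mirrors the getter. A hedged sketch of a save/restore pair such as a hypervisor might use around forwarding an interrupt; the foo_* helpers are hypothetical:

static bool foo_saved_pending;

static void foo_irq_state_save(unsigned int irq)
{
	/* snapshot and clear the pending bit at the irqchip */
	WARN_ON(irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING,
				      &foo_saved_pending));
	WARN_ON(irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false));
}

static void foo_irq_state_restore(unsigned int irq)
{
	WARN_ON(irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING,
				      foo_saved_pending));
}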
v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   4 * Copyright (C) 2005-2006 Thomas Gleixner
   5 *
   6 * This file contains driver APIs to the irq subsystem.
   7 */
   8
   9#define pr_fmt(fmt) "genirq: " fmt
  10
  11#include <linux/irq.h>
  12#include <linux/kthread.h>
  13#include <linux/module.h>
  14#include <linux/random.h>
  15#include <linux/interrupt.h>
  16#include <linux/irqdomain.h>
  17#include <linux/slab.h>
  18#include <linux/sched.h>
  19#include <linux/sched/rt.h>
  20#include <linux/sched/task.h>
  21#include <linux/sched/isolation.h>
  22#include <uapi/linux/sched/types.h>
  23#include <linux/task_work.h>
  24
  25#include "internals.h"
  26
  27#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
  28__read_mostly bool force_irqthreads;
  29EXPORT_SYMBOL_GPL(force_irqthreads);
  30
  31static int __init setup_forced_irqthreads(char *arg)
  32{
  33	force_irqthreads = true;
  34	return 0;
  35}
  36early_param("threadirqs", setup_forced_irqthreads);
  37#endif
  38
  39static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
  40{
  41	struct irq_data *irqd = irq_desc_get_irq_data(desc);
  42	bool inprogress;
  43
  44	do {
  45		unsigned long flags;
  46
  47		/*
  48		 * Wait until we're out of the critical section.  This might
  49		 * give the wrong answer due to the lack of memory barriers.
  50		 */
  51		while (irqd_irq_inprogress(&desc->irq_data))
  52			cpu_relax();
  53
  54		/* Ok, that indicated we're done: double-check carefully. */
  55		raw_spin_lock_irqsave(&desc->lock, flags);
  56		inprogress = irqd_irq_inprogress(&desc->irq_data);
  57
  58		/*
  59		 * If requested and supported, check at the chip whether it
  60		 * is in flight at the hardware level, i.e. already pending
  61		 * in a CPU and waiting for service and acknowledge.
  62		 */
  63		if (!inprogress && sync_chip) {
  64			/*
  65			 * Ignore the return code. inprogress is only updated
  66			 * when the chip supports it.
  67			 */
  68			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
  69						&inprogress);
  70		}
  71		raw_spin_unlock_irqrestore(&desc->lock, flags);
  72
  73		/* Oops, that failed? */
  74	} while (inprogress);
  75}
  76
  77/**
  78 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
  79 *	@irq: interrupt number to wait for
  80 *
  81 *	This function waits for any pending hard IRQ handlers for this
  82 *	interrupt to complete before returning. If you use this
  83 *	function while holding a resource the IRQ handler may need you
  84 *	will deadlock. It does not take associated threaded handlers
  85 *	into account.
  86 *
  87 *	Do not use this for shutdown scenarios where you must be sure
  88 *	that all parts (hardirq and threaded handler) have completed.
  89 *
  90 *	Returns: false if a threaded handler is active.
  91 *
  92 *	This function may be called - with care - from IRQ context.
  93 *
  94 *	It does not check whether there is an interrupt in flight at the
  95 *	hardware level, but not serviced yet, as this might deadlock when
  96 *	called with interrupts disabled and the target CPU of the interrupt
  97 *	is the current CPU.
  98 */
  99bool synchronize_hardirq(unsigned int irq)
 100{
 101	struct irq_desc *desc = irq_to_desc(irq);
 102
 103	if (desc) {
 104		__synchronize_hardirq(desc, false);
 105		return !atomic_read(&desc->threads_active);
 106	}
 107
 108	return true;
 109}
 110EXPORT_SYMBOL(synchronize_hardirq);
 111
 112/**
 113 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 114 *	@irq: interrupt number to wait for
 115 *
 116 *	This function waits for any pending IRQ handlers for this interrupt
 117 *	to complete before returning. If you use this function while
 118 *	holding a resource the IRQ handler may need you will deadlock.
 119 *
 120 *	Can only be called from preemptible code as it might sleep when
 121 *	an interrupt thread is associated to @irq.
 122 *
 123 *	It optionally makes sure (when the irq chip supports that method)
 124 *	that the interrupt is not pending in any CPU and waiting for
 125 *	service.
 126 */
 127void synchronize_irq(unsigned int irq)
 128{
 129	struct irq_desc *desc = irq_to_desc(irq);
 130
 131	if (desc) {
 132		__synchronize_hardirq(desc, true);
 133		/*
 134		 * We made sure that no hardirq handler is
 135		 * running. Now verify that no threaded handlers are
 136		 * active.
 137		 */
 138		wait_event(desc->wait_for_threads,
 139			   !atomic_read(&desc->threads_active));
 140	}
 141}
 142EXPORT_SYMBOL(synchronize_irq);
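A common teardown pattern, sketched under assumptions (the foo_dev layout and the FOO_IRQ_ENABLE register are hypothetical): quiesce the device first, then synchronize before freeing anything the handler dereferences:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>

#define FOO_IRQ_ENABLE	0x04	/* hypothetical register offset */

struct foo_dev {
	void __iomem	*regs;
	unsigned int	irq;
	void		*rx_buf;
};

static void foo_shutdown(struct foo_dev *foo)
{
	/* stop the device from raising new interrupts */
	writel(0, foo->regs + FOO_IRQ_ENABLE);

	/* wait for hardirq and threaded handlers already in flight */
	synchronize_irq(foo->irq);

	/* the handler can no longer observe rx_buf */
	kfree(foo->rx_buf);
	foo->rx_buf = NULL;
}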
 143
 144#ifdef CONFIG_SMP
 145cpumask_var_t irq_default_affinity;
 146
 147static bool __irq_can_set_affinity(struct irq_desc *desc)
 148{
 149	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 150	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 151		return false;
 152	return true;
 153}
 154
 155/**
 156 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 157 *	@irq:		Interrupt to check
 158 *
 159 */
 160int irq_can_set_affinity(unsigned int irq)
 161{
 162	return __irq_can_set_affinity(irq_to_desc(irq));
 163}
 164
 165/**
 166 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
 167 * @irq:	Interrupt to check
 168 *
 169 * Like irq_can_set_affinity() above, but additionally checks for the
 170 * AFFINITY_MANAGED flag.
 171 */
 172bool irq_can_set_affinity_usr(unsigned int irq)
 173{
 174	struct irq_desc *desc = irq_to_desc(irq);
 175
 176	return __irq_can_set_affinity(desc) &&
 177		!irqd_affinity_is_managed(&desc->irq_data);
 178}
 179
 180/**
 181 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 182 *	@desc:		irq descriptor which has affinity changed
 183 *
 184 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 185 *	to the interrupt thread itself. We can not call
 186 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 187 *	code can be called from hard interrupt context.
 188 */
 189void irq_set_thread_affinity(struct irq_desc *desc)
 190{
 191	struct irqaction *action;
 192
 193	for_each_action_of_desc(desc, action)
 194		if (action->thread)
 195			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 196}
 197
 198#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 199static void irq_validate_effective_affinity(struct irq_data *data)
 200{
 201	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
 202	struct irq_chip *chip = irq_data_get_irq_chip(data);
 203
 204	if (!cpumask_empty(m))
 205		return;
 206	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
 207		     chip->name, data->irq);
 208}
 209
 210static inline void irq_init_effective_affinity(struct irq_data *data,
 211					       const struct cpumask *mask)
 212{
 213	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
 214}
 215#else
 216static inline void irq_validate_effective_affinity(struct irq_data *data) { }
 217static inline void irq_init_effective_affinity(struct irq_data *data,
 218					       const struct cpumask *mask) { }
 219#endif
 220
 221int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 222			bool force)
 223{
 224	struct irq_desc *desc = irq_data_to_desc(data);
 225	struct irq_chip *chip = irq_data_get_irq_chip(data);
 226	int ret;
 227
 228	if (!chip || !chip->irq_set_affinity)
 229		return -EINVAL;
 230
 231	/*
 232	 * If this is a managed interrupt and housekeeping is enabled on
 233	 * it check whether the requested affinity mask intersects with
 234	 * a housekeeping CPU. If so, then remove the isolated CPUs from
 235	 * the mask and just keep the housekeeping CPU(s). This prevents
 236	 * the affinity setter from routing the interrupt to an isolated
 237	 * CPU, so that I/O submitted from a housekeeping CPU does not
 238	 * cause interrupts on an isolated one.
 239	 *
 240	 * If the masks do not intersect or include online CPU(s) then
 241	 * keep the requested mask. The isolated target CPUs are only
 242	 * receiving interrupts when the I/O operation was submitted
 243	 * directly from them.
 244	 *
 245	 * If all housekeeping CPUs in the affinity mask are offline, the
 246	 * interrupt will be migrated by the CPU hotplug code once a
 247	 * housekeeping CPU which belongs to the affinity mask comes
 248	 * online.
 249	 */
 250	if (irqd_affinity_is_managed(data) &&
 251	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
 252		const struct cpumask *hk_mask, *prog_mask;
 253
 254		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
 255		static struct cpumask tmp_mask;
 256
 257		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
 258
 259		raw_spin_lock(&tmp_mask_lock);
 260		cpumask_and(&tmp_mask, mask, hk_mask);
 261		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
 262			prog_mask = mask;
 263		else
 264			prog_mask = &tmp_mask;
 265		ret = chip->irq_set_affinity(data, prog_mask, force);
 266		raw_spin_unlock(&tmp_mask_lock);
 267	} else {
 268		ret = chip->irq_set_affinity(data, mask, force);
 269	}
 270	switch (ret) {
 271	case IRQ_SET_MASK_OK:
 272	case IRQ_SET_MASK_OK_DONE:
 273		cpumask_copy(desc->irq_common_data.affinity, mask);
 274		fallthrough;
 275	case IRQ_SET_MASK_OK_NOCOPY:
 276		irq_validate_effective_affinity(data);
 277		irq_set_thread_affinity(desc);
 278		ret = 0;
 279	}
 280
 281	return ret;
 282}
 283
 284#ifdef CONFIG_GENERIC_PENDING_IRQ
 285static inline int irq_set_affinity_pending(struct irq_data *data,
 286					   const struct cpumask *dest)
 287{
 288	struct irq_desc *desc = irq_data_to_desc(data);
 289
 290	irqd_set_move_pending(data);
 291	irq_copy_pending(desc, dest);
 292	return 0;
 293}
 294#else
 295static inline int irq_set_affinity_pending(struct irq_data *data,
 296					   const struct cpumask *dest)
 297{
 298	return -EBUSY;
 299}
 300#endif
 301
 302static int irq_try_set_affinity(struct irq_data *data,
 303				const struct cpumask *dest, bool force)
 304{
 305	int ret = irq_do_set_affinity(data, dest, force);
 306
 307	/*
 308	 * In case that the underlying vector management is busy and the
 309	 * architecture supports the generic pending mechanism then utilize
 310	 * this to avoid returning an error to user space.
 311	 */
 312	if (ret == -EBUSY && !force)
 313		ret = irq_set_affinity_pending(data, dest);
 314	return ret;
 315}
 316
 317static bool irq_set_affinity_deactivated(struct irq_data *data,
 318					 const struct cpumask *mask, bool force)
 319{
 320	struct irq_desc *desc = irq_data_to_desc(data);
 321
 322	/*
 323	 * Handle irq chips which can handle affinity only in activated
 324	 * state correctly
 325	 *
 326	 * If the interrupt is not yet activated, just store the affinity
 327	 * mask and do not call the chip driver at all. On activation the
 328	 * driver has to make sure anyway that the interrupt is in a
 329	 * usable state so startup works.
 330	 */
 331	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
 332	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
 333		return false;
 334
 335	cpumask_copy(desc->irq_common_data.affinity, mask);
 336	irq_init_effective_affinity(data, mask);
 337	irqd_set(data, IRQD_AFFINITY_SET);
 338	return true;
 339}
 340
 341int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 342			    bool force)
 343{
 344	struct irq_chip *chip = irq_data_get_irq_chip(data);
 345	struct irq_desc *desc = irq_data_to_desc(data);
 346	int ret = 0;
 347
 348	if (!chip || !chip->irq_set_affinity)
 349		return -EINVAL;
 350
 351	if (irq_set_affinity_deactivated(data, mask, force))
 352		return 0;
 353
 354	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
 355		ret = irq_try_set_affinity(data, mask, force);
 356	} else {
 357		irqd_set_move_pending(data);
 358		irq_copy_pending(desc, mask);
 359	}
 360
 361	if (desc->affinity_notify) {
 362		kref_get(&desc->affinity_notify->kref);
 363		if (!schedule_work(&desc->affinity_notify->work)) {
 364			/* Work was already scheduled, drop our extra ref */
 365			kref_put(&desc->affinity_notify->kref,
 366				 desc->affinity_notify->release);
 367		}
 368	}
 369	irqd_set(data, IRQD_AFFINITY_SET);
 370
 371	return ret;
 372}
 373
 374int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 375{
 376	struct irq_desc *desc = irq_to_desc(irq);
 377	unsigned long flags;
 378	int ret;
 379
 380	if (!desc)
 381		return -EINVAL;
 382
 383	raw_spin_lock_irqsave(&desc->lock, flags);
 384	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 385	raw_spin_unlock_irqrestore(&desc->lock, flags);
 386	return ret;
 387}
 388
 389int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 390{
 391	unsigned long flags;
 392	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 393
 394	if (!desc)
 395		return -EINVAL;
 396	desc->affinity_hint = m;
 397	irq_put_desc_unlock(desc, flags);
 398	/* set the initial affinity to prevent every interrupt being on CPU0 */
 399	if (m)
 400		__irq_set_affinity(irq, m, false);
 401	return 0;
 402}
 403EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
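Multi-queue drivers typically spread their vectors with hints. A sketch under assumptions (the queue_irq[] array and nr_queues are hypothetical); note that the hint must be dropped before the line is freed:

struct foo_dev {
	unsigned int	nr_queues;
	unsigned int	queue_irq[8];	/* hypothetical per-queue vectors */
};

static void foo_set_hints(struct foo_dev *foo)
{
	unsigned int i;

	for (i = 0; i < foo->nr_queues; i++)
		irq_set_affinity_hint(foo->queue_irq[i],
				      cpumask_of(i % num_online_cpus()));
}

static void foo_clear_hints(struct foo_dev *foo)
{
	unsigned int i;

	/* clear the hint before free_irq() */
	for (i = 0; i < foo->nr_queues; i++)
		irq_set_affinity_hint(foo->queue_irq[i], NULL);
}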
 404
 405static void irq_affinity_notify(struct work_struct *work)
 406{
 407	struct irq_affinity_notify *notify =
 408		container_of(work, struct irq_affinity_notify, work);
 409	struct irq_desc *desc = irq_to_desc(notify->irq);
 410	cpumask_var_t cpumask;
 411	unsigned long flags;
 412
 413	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 414		goto out;
 415
 416	raw_spin_lock_irqsave(&desc->lock, flags);
 417	if (irq_move_pending(&desc->irq_data))
 418		irq_get_pending(cpumask, desc);
 419	else
 420		cpumask_copy(cpumask, desc->irq_common_data.affinity);
 421	raw_spin_unlock_irqrestore(&desc->lock, flags);
 422
 423	notify->notify(notify, cpumask);
 424
 425	free_cpumask_var(cpumask);
 426out:
 427	kref_put(&notify->kref, notify->release);
 428}
 429
 430/**
 431 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 432 *	@irq:		Interrupt for which to enable/disable notification
 433 *	@notify:	Context for notification, or %NULL to disable
 434 *			notification.  Function pointers must be initialised;
 435 *			the other fields will be initialised by this function.
 436 *
 437 *	Must be called in process context.  Notification may only be enabled
 438 *	after the IRQ is allocated and must be disabled before the IRQ is
 439 *	freed using free_irq().
 440 */
 441int
 442irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 443{
 444	struct irq_desc *desc = irq_to_desc(irq);
 445	struct irq_affinity_notify *old_notify;
 446	unsigned long flags;
 447
 448	/* The release function is promised process context */
 449	might_sleep();
 450
 451	if (!desc || desc->istate & IRQS_NMI)
 452		return -EINVAL;
 453
 454	/* Complete initialisation of *notify */
 455	if (notify) {
 456		notify->irq = irq;
 457		kref_init(&notify->kref);
 458		INIT_WORK(&notify->work, irq_affinity_notify);
 459	}
 460
 461	raw_spin_lock_irqsave(&desc->lock, flags);
 462	old_notify = desc->affinity_notify;
 463	desc->affinity_notify = notify;
 464	raw_spin_unlock_irqrestore(&desc->lock, flags);
 465
 466	if (old_notify) {
 467		if (cancel_work_sync(&old_notify->work)) {
 468			/* Pending work had a ref, put that one too */
 469			kref_put(&old_notify->kref, old_notify->release);
 470		}
 471		kref_put(&old_notify->kref, old_notify->release);
 472	}
 473
 474	return 0;
 475}
 476EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
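A sketch of a statically allocated notifier; the function pointers must be set before registration, and release runs in process context as promised above. The foo_* names are placeholders:

static void foo_affinity_notify(struct irq_affinity_notify *notify,
				const cpumask_t *mask)
{
	/* re-steer queue/DMA mappings to follow the new affinity */
}

static void foo_affinity_release(struct kref *ref)
{
	/* nothing to free for a statically allocated notifier */
}

static struct irq_affinity_notify foo_notify = {
	.notify  = foo_affinity_notify,
	.release = foo_affinity_release,
};

static int foo_enable_notify(unsigned int irq)
{
	/* must be undone with a NULL notify before free_irq() */
	return irq_set_affinity_notifier(irq, &foo_notify);
}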
 477
 478#ifndef CONFIG_AUTO_IRQ_AFFINITY
 479/*
 480 * Generic version of the affinity autoselector.
 481 */
 482int irq_setup_affinity(struct irq_desc *desc)
 483{
 484	struct cpumask *set = irq_default_affinity;
 485	int ret, node = irq_desc_get_node(desc);
 486	static DEFINE_RAW_SPINLOCK(mask_lock);
 487	static struct cpumask mask;
 488
 489	/* Excludes PER_CPU and NO_BALANCE interrupts */
 490	if (!__irq_can_set_affinity(desc))
 491		return 0;
 492
 493	raw_spin_lock(&mask_lock);
 494	/*
 495	 * Preserve the managed affinity setting and a userspace affinity
 496	 * setup, but make sure that one of the targets is online.
 497	 */
 498	if (irqd_affinity_is_managed(&desc->irq_data) ||
 499	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 500		if (cpumask_intersects(desc->irq_common_data.affinity,
 501				       cpu_online_mask))
 502			set = desc->irq_common_data.affinity;
 503		else
 504			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 505	}
 506
 507	cpumask_and(&mask, cpu_online_mask, set);
 508	if (cpumask_empty(&mask))
 509		cpumask_copy(&mask, cpu_online_mask);
 510
 511	if (node != NUMA_NO_NODE) {
 512		const struct cpumask *nodemask = cpumask_of_node(node);
 513
 514		/* make sure at least one of the cpus in nodemask is online */
 515		if (cpumask_intersects(&mask, nodemask))
 516			cpumask_and(&mask, &mask, nodemask);
 517	}
 518	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
 519	raw_spin_unlock(&mask_lock);
 520	return ret;
 521}
 522#else
 523/* Wrapper for ALPHA specific affinity selector magic */
 524int irq_setup_affinity(struct irq_desc *desc)
 525{
 526	return irq_select_affinity(irq_desc_get_irq(desc));
 527}
 528#endif /* CONFIG_AUTO_IRQ_AFFINITY */
 529#endif /* CONFIG_SMP */
 530
 531
 532/**
 533 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 534 *	@irq: interrupt number to set affinity
 535 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 536 *	            specific data for percpu_devid interrupts
 537 *
 538 *	This function uses the vCPU specific data to set the vCPU
 539 *	affinity for an irq. The vCPU specific data is passed from
 540 *	outside, such as KVM. One example code path is as below:
 541 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 542 */
 543int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 544{
 545	unsigned long flags;
 546	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 547	struct irq_data *data;
 548	struct irq_chip *chip;
 549	int ret = -ENOSYS;
 550
 551	if (!desc)
 552		return -EINVAL;
 553
 554	data = irq_desc_get_irq_data(desc);
 555	do {
 556		chip = irq_data_get_irq_chip(data);
 557		if (chip && chip->irq_set_vcpu_affinity)
 558			break;
 559#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 560		data = data->parent_data;
 561#else
 562		data = NULL;
 563#endif
 564	} while (data);
 565
 566	if (data)
 567		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
 568	irq_put_desc_unlock(desc, flags);
 569
 570	return ret;
 571}
 572EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
 573
 574void __disable_irq(struct irq_desc *desc)
 575{
 576	if (!desc->depth++)
 577		irq_disable(desc);
 578}
 579
 580static int __disable_irq_nosync(unsigned int irq)
 581{
 582	unsigned long flags;
 583	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 584
 585	if (!desc)
 586		return -EINVAL;
 587	__disable_irq(desc);
 588	irq_put_desc_busunlock(desc, flags);
 589	return 0;
 590}
 591
 592/**
 593 *	disable_irq_nosync - disable an irq without waiting
 594 *	@irq: Interrupt to disable
 595 *
 596 *	Disable the selected interrupt line.  Disables and Enables are
 597 *	nested.
 598 *	Unlike disable_irq(), this function does not ensure existing
 599 *	instances of the IRQ handler have completed before returning.
 600 *
 601 *	This function may be called from IRQ context.
 602 */
 603void disable_irq_nosync(unsigned int irq)
 604{
 605	__disable_irq_nosync(irq);
 606}
 607EXPORT_SYMBOL(disable_irq_nosync);
 608
 609/**
 610 *	disable_irq - disable an irq and wait for completion
 611 *	@irq: Interrupt to disable
 612 *
 613 *	Disable the selected interrupt line.  Enables and Disables are
 614 *	nested.
 615 *	This function waits for any pending IRQ handlers for this interrupt
 616 *	to complete before returning. If you use this function while
 617 *	holding a resource the IRQ handler may need you will deadlock.
 618 *
 619 *	This function may be called - with care - from IRQ context.
 620 */
 621void disable_irq(unsigned int irq)
 622{
 623	if (!__disable_irq_nosync(irq))
 624		synchronize_irq(irq);
 625}
 626EXPORT_SYMBOL(disable_irq);
 627
 628/**
 629 *	disable_hardirq - disables an irq and waits for hardirq completion
 630 *	@irq: Interrupt to disable
 631 *
 632 *	Disable the selected interrupt line.  Enables and Disables are
 633 *	nested.
 634 *	This function waits for any pending hard IRQ handlers for this
 635 *	interrupt to complete before returning. If you use this function while
 636 *	holding a resource the hard IRQ handler may need you will deadlock.
 637 *
 638 *	When used to optimistically disable an interrupt from atomic context
 639 *	the return value must be checked.
 640 *
 641 *	Returns: false if a threaded handler is active.
 642 *
 643 *	This function may be called - with care - from IRQ context.
 644 */
 645bool disable_hardirq(unsigned int irq)
 646{
 647	if (!__disable_irq_nosync(irq))
 648		return synchronize_hardirq(irq);
 649
 650	return false;
 651}
 652EXPORT_SYMBOL_GPL(disable_hardirq);
 653
 654/**
 655 *	disable_nmi_nosync - disable an nmi without waiting
 656 *	@irq: Interrupt to disable
 657 *
 658 *	Disable the selected interrupt line. Disables and enables are
 659 *	nested.
 660 *	The interrupt to disable must have been requested through request_nmi.
 661 *	Unlike disable_nmi(), this function does not ensure existing
 662 *	instances of the IRQ handler have completed before returning.
 663 */
 664void disable_nmi_nosync(unsigned int irq)
 665{
 666	disable_irq_nosync(irq);
 667}
 668
 669void __enable_irq(struct irq_desc *desc)
 670{
 671	switch (desc->depth) {
 672	case 0:
 673 err_out:
 674		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
 675		     irq_desc_get_irq(desc));
 676		break;
 677	case 1: {
 678		if (desc->istate & IRQS_SUSPENDED)
 679			goto err_out;
 680		/* Prevent probing on this irq: */
 681		irq_settings_set_noprobe(desc);
 682		/*
 683		 * Call irq_startup() not irq_enable() here because the
 684		 * interrupt might be marked NOAUTOEN. So irq_startup()
 685		 * needs to be invoked when it gets enabled the first
 686		 * time. If it was already started up, then irq_startup()
 687		 * will invoke irq_enable() under the hood.
 688		 */
 689		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
 690		break;
 691	}
 692	default:
 693		desc->depth--;
 694	}
 695}
 696
 697/**
 698 *	enable_irq - enable handling of an irq
 699 *	@irq: Interrupt to enable
 700 *
 701 *	Undoes the effect of one call to disable_irq().  If this
 702 *	matches the last disable, processing of interrupts on this
 703 *	IRQ line is re-enabled.
 704 *
 705 *	This function may be called from IRQ context only when
 706 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 707 */
 708void enable_irq(unsigned int irq)
 709{
 710	unsigned long flags;
 711	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 712
 713	if (!desc)
 714		return;
 715	if (WARN(!desc->irq_data.chip,
 716		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 717		goto out;
 718
 719	__enable_irq(desc);
 720out:
 721	irq_put_desc_busunlock(desc, flags);
 722}
 723EXPORT_SYMBOL(enable_irq);
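Typical pairing with disable_irq() above: a sketch guarding reconfiguration of hypothetical driver state that the handler also touches. The ring indices and foo_dev layout are assumptions:

struct foo_dev {
	unsigned int	irq;
	unsigned int	ring_head, ring_tail;
};

static void foo_reconfigure(struct foo_dev *foo)
{
	/* waits for running handlers; don't hold locks they take */
	disable_irq(foo->irq);

	/* the handler is guaranteed not to run concurrently here */
	foo->ring_head = foo->ring_tail = 0;

	/* matches the disable above; depth drops back to zero */
	enable_irq(foo->irq);
}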
 724
 725/**
 726 *	enable_nmi - enable handling of an nmi
 727 *	@irq: Interrupt to enable
 728 *
 729 *	The interrupt to enable must have been requested through request_nmi.
 730 *	Undoes the effect of one call to disable_nmi(). If this
 731 *	matches the last disable, processing of interrupts on this
 732 *	IRQ line is re-enabled.
 733 */
 734void enable_nmi(unsigned int irq)
 735{
 736	enable_irq(irq);
 737}
 738
 739static int set_irq_wake_real(unsigned int irq, unsigned int on)
 740{
 741	struct irq_desc *desc = irq_to_desc(irq);
 742	int ret = -ENXIO;
 743
 744	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
 745		return 0;
 746
 747	if (desc->irq_data.chip->irq_set_wake)
 748		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 749
 750	return ret;
 751}
 752
 753/**
 754 *	irq_set_irq_wake - control irq power management wakeup
 755 *	@irq:	interrupt to control
 756 *	@on:	enable/disable power management wakeup
 757 *
 758 *	Enable/disable power management wakeup mode, which is
 759 *	disabled by default.  Enables and disables must match,
 760 *	just as they match for non-wakeup mode support.
 761 *
 762 *	Wakeup mode lets this IRQ wake the system from sleep
 763 *	states like "suspend to RAM".
 764 *
 765 *	Note: irq enable/disable state is completely orthogonal
 766 *	to the enable/disable state of irq wake. An irq can be
 767 *	disabled with disable_irq() and still wake the system as
 768 *	long as the irq has wake enabled. If this does not hold,
 769 *	then the underlying irq chip and the related driver need
 770 *	to be investigated.
 771 */
 772int irq_set_irq_wake(unsigned int irq, unsigned int on)
 773{
 774	unsigned long flags;
 775	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 776	int ret = 0;
 777
 778	if (!desc)
 779		return -EINVAL;
 780
 781	/* Don't use NMIs as wake up interrupts please */
 782	if (desc->istate & IRQS_NMI) {
 783		ret = -EINVAL;
 784		goto out_unlock;
 785	}
 786
 787	/* wakeup-capable irqs can be shared between drivers that
 788	 * don't need to have the same sleep mode behaviors.
 789	 */
 790	if (on) {
 791		if (desc->wake_depth++ == 0) {
 792			ret = set_irq_wake_real(irq, on);
 793			if (ret)
 794				desc->wake_depth = 0;
 795			else
 796				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 797		}
 798	} else {
 799		if (desc->wake_depth == 0) {
 800			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 801		} else if (--desc->wake_depth == 0) {
 802			ret = set_irq_wake_real(irq, on);
 803			if (ret)
 804				desc->wake_depth = 1;
 805			else
 806				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 807		}
 808	}
 809
 810out_unlock:
 811	irq_put_desc_busunlock(desc, flags);
 812	return ret;
 813}
 814EXPORT_SYMBOL(irq_set_irq_wake);
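A sketch of the usual suspend/resume pairing, assuming a hypothetical foo_dev with an irq field stored as driver data; the enable and disable must balance:

struct foo_dev { unsigned int irq; };

static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 1);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	/* balances the enable done in foo_suspend() */
	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 0);
	return 0;
}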
 815
 816/*
 817 * Internal function that tells the architecture code whether a
 818 * particular irq has been exclusively allocated or is available
 819 * for driver use.
 820 */
 821int can_request_irq(unsigned int irq, unsigned long irqflags)
 822{
 823	unsigned long flags;
 824	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 825	int canrequest = 0;
 826
 827	if (!desc)
 828		return 0;
 829
 830	if (irq_settings_can_request(desc)) {
 831		if (!desc->action ||
 832		    irqflags & desc->action->flags & IRQF_SHARED)
 833			canrequest = 1;
 834	}
 835	irq_put_desc_unlock(desc, flags);
 836	return canrequest;
 837}
 838
 839int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 840{
 841	struct irq_chip *chip = desc->irq_data.chip;
 842	int ret, unmask = 0;
 843
 844	if (!chip || !chip->irq_set_type) {
 845		/*
 846		 * IRQF_TRIGGER_* but the PIC does not support multiple
 847		 * flow-types?
 848		 */
 849		pr_debug("No set_type function for IRQ %d (%s)\n",
 850			 irq_desc_get_irq(desc),
 851			 chip ? (chip->name ? : "unknown") : "unknown");
 852		return 0;
 853	}
 854
 855	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 856		if (!irqd_irq_masked(&desc->irq_data))
 857			mask_irq(desc);
 858		if (!irqd_irq_disabled(&desc->irq_data))
 859			unmask = 1;
 860	}
 861
 862	/* Mask all flags except trigger mode */
 863	flags &= IRQ_TYPE_SENSE_MASK;
 864	ret = chip->irq_set_type(&desc->irq_data, flags);
 865
 866	switch (ret) {
 867	case IRQ_SET_MASK_OK:
 868	case IRQ_SET_MASK_OK_DONE:
 869		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 870		irqd_set(&desc->irq_data, flags);
 871		fallthrough;
 872
 873	case IRQ_SET_MASK_OK_NOCOPY:
 874		flags = irqd_get_trigger_type(&desc->irq_data);
 875		irq_settings_set_trigger_mask(desc, flags);
 876		irqd_clear(&desc->irq_data, IRQD_LEVEL);
 877		irq_settings_clr_level(desc);
 878		if (flags & IRQ_TYPE_LEVEL_MASK) {
 879			irq_settings_set_level(desc);
 880			irqd_set(&desc->irq_data, IRQD_LEVEL);
 881		}
 882
 883		ret = 0;
 884		break;
 885	default:
 886		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
 887		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 888	}
 889	if (unmask)
 890		unmask_irq(desc);
 891	return ret;
 892}
 893
 894#ifdef CONFIG_HARDIRQS_SW_RESEND
 895int irq_set_parent(int irq, int parent_irq)
 896{
 897	unsigned long flags;
 898	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 899
 900	if (!desc)
 901		return -EINVAL;
 902
 903	desc->parent_irq = parent_irq;
 904
 905	irq_put_desc_unlock(desc, flags);
 906	return 0;
 907}
 908EXPORT_SYMBOL_GPL(irq_set_parent);
 909#endif
 910
 911/*
 912 * Default primary interrupt handler for threaded interrupts. Is
 913 * assigned as primary handler when request_threaded_irq is called
 914 * with handler == NULL. Useful for oneshot interrupts.
 915 */
 916static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 917{
 918	return IRQ_WAKE_THREAD;
 919}
 920
 921/*
 922 * Primary handler for nested threaded interrupts. Should never be
 923 * called.
 924 */
 925static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 926{
 927	WARN(1, "Primary handler called for nested irq %d\n", irq);
 928	return IRQ_NONE;
 929}
 930
 931static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 932{
 933	WARN(1, "Secondary action handler called for irq %d\n", irq);
 934	return IRQ_NONE;
 935}
 936
 937static int irq_wait_for_interrupt(struct irqaction *action)
 938{
 939	for (;;) {
 940		set_current_state(TASK_INTERRUPTIBLE);
 941
 942		if (kthread_should_stop()) {
 943			/* may need to run one last time */
 944			if (test_and_clear_bit(IRQTF_RUNTHREAD,
 945					       &action->thread_flags)) {
 946				__set_current_state(TASK_RUNNING);
 947				return 0;
 948			}
 949			__set_current_state(TASK_RUNNING);
 950			return -1;
 951		}
 952
 953		if (test_and_clear_bit(IRQTF_RUNTHREAD,
 954				       &action->thread_flags)) {
 955			__set_current_state(TASK_RUNNING);
 956			return 0;
 957		}
 958		schedule();
 959	}
 960}
 961
 962/*
 963 * Oneshot interrupts keep the irq line masked until the threaded
 964 * handler has finished. Unmask if the interrupt has not been disabled and
 965 * is marked MASKED.
 966 */
 967static void irq_finalize_oneshot(struct irq_desc *desc,
 968				 struct irqaction *action)
 969{
 970	if (!(desc->istate & IRQS_ONESHOT) ||
 971	    action->handler == irq_forced_secondary_handler)
 972		return;
 973again:
 974	chip_bus_lock(desc);
 975	raw_spin_lock_irq(&desc->lock);
 976
 977	/*
 978	 * Implausible though it may be, we need to protect ourselves against
 979	 * the following scenario:
 980	 *
 981	 * The thread can finish before the hard interrupt handler
 982	 * on the other CPU. If we unmask the irq line then the
 983	 * interrupt can come in again, mask the line and leave due
 984	 * to IRQS_INPROGRESS, and the irq line stays masked forever.
 985	 *
 986	 * This also serializes the state of shared oneshot handlers
 987	 * versus "desc->threads_oneshot |= action->thread_mask;" in
 988	 * irq_wake_thread(). See the comment there which explains the
 989	 * serialization.
 990	 */
 991	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 992		raw_spin_unlock_irq(&desc->lock);
 993		chip_bus_sync_unlock(desc);
 994		cpu_relax();
 995		goto again;
 996	}
 997
 998	/*
 999	 * Now check again, whether the thread should run. Otherwise
1000	 * we would clear the threads_oneshot bit of this thread which
1001	 * was just set.
1002	 */
1003	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1004		goto out_unlock;
1005
1006	desc->threads_oneshot &= ~action->thread_mask;
1007
1008	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1009	    irqd_irq_masked(&desc->irq_data))
1010		unmask_threaded_irq(desc);
1011
1012out_unlock:
1013	raw_spin_unlock_irq(&desc->lock);
1014	chip_bus_sync_unlock(desc);
1015}
1016
1017#ifdef CONFIG_SMP
1018/*
1019 * Check whether we need to change the affinity of the interrupt thread.
1020 */
1021static void
1022irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1023{
1024	cpumask_var_t mask;
1025	bool valid = true;
1026
1027	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1028		return;
1029
1030	/*
1031	 * In case we are out of memory we set IRQTF_AFFINITY again and
1032	 * try again next time
1033	 */
1034	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1035		set_bit(IRQTF_AFFINITY, &action->thread_flags);
1036		return;
1037	}
1038
1039	raw_spin_lock_irq(&desc->lock);
1040	/*
1041	 * This code is triggered unconditionally. Check the affinity
1042	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1043	 */
1044	if (cpumask_available(desc->irq_common_data.affinity)) {
1045		const struct cpumask *m;
1046
1047		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1048		cpumask_copy(mask, m);
1049	} else {
1050		valid = false;
1051	}
1052	raw_spin_unlock_irq(&desc->lock);
1053
1054	if (valid)
1055		set_cpus_allowed_ptr(current, mask);
1056	free_cpumask_var(mask);
1057}
1058#else
1059static inline void
1060irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1061#endif
1062
1063/*
1064 * Interrupts which are not explicitly requested as threaded
1065 * interrupts rely on the implicit bh/preempt disable of the hard irq
1066 * context. So we need to disable bh here to avoid deadlocks and other
1067 * side effects.
1068 */
1069static irqreturn_t
1070irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1071{
1072	irqreturn_t ret;
1073
1074	local_bh_disable();
1075	ret = action->thread_fn(action->irq, action->dev_id);
1076	if (ret == IRQ_HANDLED)
1077		atomic_inc(&desc->threads_handled);
1078
1079	irq_finalize_oneshot(desc, action);
1080	local_bh_enable();
1081	return ret;
1082}
1083
1084/*
1085 * Interrupts explicitly requested as threaded interrupts want to be
1086 * preemptible - many of them need to sleep and wait for slow buses to
1087 * complete.
1088 */
1089static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1090		struct irqaction *action)
1091{
1092	irqreturn_t ret;
1093
1094	ret = action->thread_fn(action->irq, action->dev_id);
1095	if (ret == IRQ_HANDLED)
1096		atomic_inc(&desc->threads_handled);
1097
1098	irq_finalize_oneshot(desc, action);
1099	return ret;
1100}
1101
1102static void wake_threads_waitq(struct irq_desc *desc)
1103{
1104	if (atomic_dec_and_test(&desc->threads_active))
1105		wake_up(&desc->wait_for_threads);
1106}
1107
1108static void irq_thread_dtor(struct callback_head *unused)
1109{
1110	struct task_struct *tsk = current;
1111	struct irq_desc *desc;
1112	struct irqaction *action;
1113
1114	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1115		return;
1116
1117	action = kthread_data(tsk);
1118
1119	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1120	       tsk->comm, tsk->pid, action->irq);
1121
1122
1123	desc = irq_to_desc(action->irq);
1124	/*
1125	 * If IRQTF_RUNTHREAD is set, we need to decrement
1126	 * desc->threads_active and wake possible waiters.
1127	 */
1128	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1129		wake_threads_waitq(desc);
1130
1131	/* Prevent a stale desc->threads_oneshot */
1132	irq_finalize_oneshot(desc, action);
1133}
1134
1135static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1136{
1137	struct irqaction *secondary = action->secondary;
1138
1139	if (WARN_ON_ONCE(!secondary))
1140		return;
1141
1142	raw_spin_lock_irq(&desc->lock);
1143	__irq_wake_thread(desc, secondary);
1144	raw_spin_unlock_irq(&desc->lock);
1145}
1146
1147/*
1148 * Interrupt handler thread
1149 */
1150static int irq_thread(void *data)
1151{
1152	struct callback_head on_exit_work;
1153	struct irqaction *action = data;
1154	struct irq_desc *desc = irq_to_desc(action->irq);
1155	irqreturn_t (*handler_fn)(struct irq_desc *desc,
1156			struct irqaction *action);
1157
1158	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1159					&action->thread_flags))
1160		handler_fn = irq_forced_thread_fn;
1161	else
1162		handler_fn = irq_thread_fn;
1163
1164	init_task_work(&on_exit_work, irq_thread_dtor);
1165	task_work_add(current, &on_exit_work, false);
1166
1167	irq_thread_check_affinity(desc, action);
1168
1169	while (!irq_wait_for_interrupt(action)) {
1170		irqreturn_t action_ret;
1171
1172		irq_thread_check_affinity(desc, action);
1173
1174		action_ret = handler_fn(desc, action);
1175		if (action_ret == IRQ_WAKE_THREAD)
1176			irq_wake_secondary(desc, action);
1177
1178		wake_threads_waitq(desc);
1179	}
1180
1181	/*
1182	 * This is the regular exit path. __free_irq() is stopping the
1183	 * thread via kthread_stop() after calling
1184	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1185	 * oneshot mask bit can be set.
1186	 */
1187	task_work_cancel(current, irq_thread_dtor);
1188	return 0;
1189}
1190
1191/**
1192 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1193 *	@irq:		Interrupt line
1194 *	@dev_id:	Device identity for which the thread should be woken
1195 *
1196 */
1197void irq_wake_thread(unsigned int irq, void *dev_id)
1198{
1199	struct irq_desc *desc = irq_to_desc(irq);
1200	struct irqaction *action;
1201	unsigned long flags;
1202
1203	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1204		return;
1205
1206	raw_spin_lock_irqsave(&desc->lock, flags);
1207	for_each_action_of_desc(desc, action) {
1208		if (action->dev_id == dev_id) {
1209			if (action->thread)
1210				__irq_wake_thread(desc, action);
1211			break;
1212		}
1213	}
1214	raw_spin_unlock_irqrestore(&desc->lock, flags);
1215}
1216EXPORT_SYMBOL_GPL(irq_wake_thread);
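A short sketch of a caller; the recovery path and foo_dev are hypothetical, and dev_id must match what was passed when the threaded handler was requested:

/*
 * The device reported completion through a side channel, so kick the
 * threaded handler without waiting for a hard interrupt.
 */
struct foo_dev { unsigned int irq; };

static void foo_kick_thread(struct foo_dev *foo)
{
	irq_wake_thread(foo->irq, foo);
}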
1217
1218static int irq_setup_forced_threading(struct irqaction *new)
1219{
1220	if (!force_irqthreads)
1221		return 0;
1222	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1223		return 0;
1224
1225	/*
1226	 * No further action required for interrupts which are requested as
1227	 * threaded interrupts already
1228	 */
1229	if (new->handler == irq_default_primary_handler)
1230		return 0;
1231
1232	new->flags |= IRQF_ONESHOT;
1233
1234	/*
1235	 * Handle the case where we have a real primary handler and a
1236	 * thread handler. We force-thread them as well by creating a
1237	 * secondary action.
1238	 */
1239	if (new->handler && new->thread_fn) {
1240		/* Allocate the secondary action */
1241		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1242		if (!new->secondary)
1243			return -ENOMEM;
1244		new->secondary->handler = irq_forced_secondary_handler;
1245		new->secondary->thread_fn = new->thread_fn;
1246		new->secondary->dev_id = new->dev_id;
1247		new->secondary->irq = new->irq;
1248		new->secondary->name = new->name;
1249	}
1250	/* Deal with the primary handler */
1251	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1252	new->thread_fn = new->handler;
1253	new->handler = irq_default_primary_handler;
1254	return 0;
1255}
1256
1257static int irq_request_resources(struct irq_desc *desc)
1258{
1259	struct irq_data *d = &desc->irq_data;
1260	struct irq_chip *c = d->chip;
1261
1262	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1263}
1264
1265static void irq_release_resources(struct irq_desc *desc)
1266{
1267	struct irq_data *d = &desc->irq_data;
1268	struct irq_chip *c = d->chip;
1269
1270	if (c->irq_release_resources)
1271		c->irq_release_resources(d);
1272}
1273
1274static bool irq_supports_nmi(struct irq_desc *desc)
1275{
1276	struct irq_data *d = irq_desc_get_irq_data(desc);
1277
1278#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1279	/* Only IRQs directly managed by the root irqchip can be set as NMI */
1280	if (d->parent_data)
1281		return false;
1282#endif
1283	/* Don't support NMIs for chips behind a slow bus */
1284	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1285		return false;
1286
1287	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1288}
1289
1290static int irq_nmi_setup(struct irq_desc *desc)
1291{
1292	struct irq_data *d = irq_desc_get_irq_data(desc);
1293	struct irq_chip *c = d->chip;
1294
1295	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1296}
1297
1298static void irq_nmi_teardown(struct irq_desc *desc)
1299{
1300	struct irq_data *d = irq_desc_get_irq_data(desc);
1301	struct irq_chip *c = d->chip;
1302
1303	if (c->irq_nmi_teardown)
1304		c->irq_nmi_teardown(d);
1305}
1306
1307static int
1308setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1309{
1310	struct task_struct *t;
1311
1312	if (!secondary) {
1313		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1314				   new->name);
1315	} else {
1316		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1317				   new->name);
1318	}
1319
1320	if (IS_ERR(t))
1321		return PTR_ERR(t);
1322
1323	sched_set_fifo(t);
1324
1325	/*
1326	 * We keep the reference to the task struct even if
1327	 * the thread dies to avoid that the interrupt code
1328	 * references an already freed task_struct.
1329	 */
1330	new->thread = get_task_struct(t);
1331	/*
1332	 * Tell the thread to set its affinity. This is
1333	 * important for shared interrupt handlers as we do
1334	 * not invoke setup_affinity() for the secondary
1335	 * handlers as everything is already set up. Even for
1336	 * interrupts marked with IRQF_NO_BALANCE this is
1337	 * correct as we want the thread to move to the cpu(s)
1338	 * on which the requesting code placed the interrupt.
1339	 */
1340	set_bit(IRQTF_AFFINITY, &new->thread_flags);
1341	return 0;
1342}
1343
1344/*
1345 * Internal function to register an irqaction - typically used to
1346 * allocate special interrupts that are part of the architecture.
1347 *
1348 * Locking rules:
1349 *
1350 * desc->request_mutex	Provides serialization against a concurrent free_irq()
1351 *   chip_bus_lock	Provides serialization for slow bus operations
1352 *     desc->lock	Provides serialization against hard interrupts
1353 *
1354 * chip_bus_lock and desc->lock are sufficient for all other management and
1355 * interrupt related functions. desc->request_mutex solely serializes
1356 * request/free_irq().
1357 */
1358static int
1359__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1360{
1361	struct irqaction *old, **old_ptr;
1362	unsigned long flags, thread_mask = 0;
1363	int ret, nested, shared = 0;
1364
1365	if (!desc)
1366		return -EINVAL;
1367
1368	if (desc->irq_data.chip == &no_irq_chip)
1369		return -ENOSYS;
1370	if (!try_module_get(desc->owner))
1371		return -ENODEV;
1372
1373	new->irq = irq;
1374
1375	/*
1376	 * If the trigger type is not specified by the caller,
1377	 * then use the default for this interrupt.
1378	 */
1379	if (!(new->flags & IRQF_TRIGGER_MASK))
1380		new->flags |= irqd_get_trigger_type(&desc->irq_data);
1381
1382	/*
1383	 * Check whether the interrupt nests into another interrupt
1384	 * thread.
1385	 */
1386	nested = irq_settings_is_nested_thread(desc);
1387	if (nested) {
1388		if (!new->thread_fn) {
1389			ret = -EINVAL;
1390			goto out_mput;
1391		}
1392		/*
1393		 * Replace the primary handler which was provided from
1394		 * the driver for non nested interrupt handling by the
1395		 * dummy function which warns when called.
1396		 */
1397		new->handler = irq_nested_primary_handler;
1398	} else {
1399		if (irq_settings_can_thread(desc)) {
1400			ret = irq_setup_forced_threading(new);
1401			if (ret)
1402				goto out_mput;
1403		}
1404	}
1405
1406	/*
1407	 * Create a handler thread when a thread function is supplied
1408	 * and the interrupt does not nest into another interrupt
1409	 * thread.
1410	 */
1411	if (new->thread_fn && !nested) {
1412		ret = setup_irq_thread(new, irq, false);
1413		if (ret)
1414			goto out_mput;
1415		if (new->secondary) {
1416			ret = setup_irq_thread(new->secondary, irq, true);
1417			if (ret)
1418				goto out_thread;
1419		}
1420	}
1421
1422	/*
1423	 * Drivers are often written to work w/o knowledge about the
1424	 * underlying irq chip implementation, so a request for a
1425	 * threaded irq without a primary hard irq context handler
1426	 * requires the ONESHOT flag to be set. Some irq chips like
1427	 * MSI based interrupts are per se one shot safe. Check the
1428	 * chip flags, so we can avoid the unmask dance at the end of
1429	 * the threaded handler for those.
1430	 */
1431	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1432		new->flags &= ~IRQF_ONESHOT;
1433
1434	/*
1435	 * Protects against a concurrent __free_irq() call which might wait
1436	 * for synchronize_hardirq() to complete without holding the optional
1437	 * chip bus lock and desc->lock. Also protects against handing out
1438	 * a recycled oneshot thread_mask bit while it's still in use by
1439	 * its previous owner.
1440	 */
1441	mutex_lock(&desc->request_mutex);
1442
1443	/*
1444	 * Acquire bus lock as the irq_request_resources() callback below
1445	 * might rely on the serialization or the magic power management
1446	 * functions which are abusing the irq_bus_lock() callback,
1447	 */
1448	chip_bus_lock(desc);
1449
1450	/* First installed action requests resources. */
1451	if (!desc->action) {
1452		ret = irq_request_resources(desc);
1453		if (ret) {
1454			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1455			       new->name, irq, desc->irq_data.chip->name);
1456			goto out_bus_unlock;
1457		}
1458	}
1459
1460	/*
1461	 * The following block of code has to be executed atomically
1462	 * protected against a concurrent interrupt and any of the other
1463	 * management calls which are not serialized via
1464	 * desc->request_mutex or the optional bus lock.
1465	 */
1466	raw_spin_lock_irqsave(&desc->lock, flags);
1467	old_ptr = &desc->action;
1468	old = *old_ptr;
1469	if (old) {
1470		/*
1471		 * Can't share interrupts unless both agree to and are
1472		 * the same type (level, edge, polarity). So both flag
1473		 * fields must have IRQF_SHARED set and the bits which
1474		 * set the trigger type must match. Also all must
1475		 * agree on ONESHOT.
1476		 * Interrupt lines used for NMIs cannot be shared.
1477		 */
1478		unsigned int oldtype;
1479
1480		if (desc->istate & IRQS_NMI) {
1481			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1482				new->name, irq, desc->irq_data.chip->name);
1483			ret = -EINVAL;
1484			goto out_unlock;
1485		}
1486
1487		/*
1488		 * If nobody set the configuration before, inherit
1489		 * the one provided by the requester.
1490		 */
1491		if (irqd_trigger_type_was_set(&desc->irq_data)) {
1492			oldtype = irqd_get_trigger_type(&desc->irq_data);
1493		} else {
1494			oldtype = new->flags & IRQF_TRIGGER_MASK;
1495			irqd_set_trigger_type(&desc->irq_data, oldtype);
1496		}
1497
1498		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1499		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1500		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1501			goto mismatch;
1502
1503		/* All handlers must agree on per-cpuness */
1504		if ((old->flags & IRQF_PERCPU) !=
1505		    (new->flags & IRQF_PERCPU))
1506			goto mismatch;
1507
1508		/* add new interrupt at end of irq queue */
1509		do {
1510			/*
1511			 * Or all existing action->thread_mask bits,
1512			 * so we can find the next zero bit for this
1513			 * new action.
1514			 */
1515			thread_mask |= old->thread_mask;
1516			old_ptr = &old->next;
1517			old = *old_ptr;
1518		} while (old);
1519		shared = 1;
1520	}
1521
1522	/*
1523	 * Setup the thread mask for this irqaction for ONESHOT. For
1524	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1525	 * conditional in irq_wake_thread().
1526	 */
1527	if (new->flags & IRQF_ONESHOT) {
1528		/*
1529		 * Unlikely to have 32 or 64 irqs sharing one line,
1530		 * but who knows.
1531		 */
1532		if (thread_mask == ~0UL) {
1533			ret = -EBUSY;
1534			goto out_unlock;
1535		}
1536		/*
1537		 * The thread_mask for the action is or'ed to
1538		 * desc->thread_active to indicate that the
1539		 * IRQF_ONESHOT thread handler has been woken, but not
1540		 * yet finished. The bit is cleared when a thread
1541		 * completes. When all threads of a shared interrupt
1542		 * line have completed desc->threads_active becomes
1543		 * zero and the interrupt line is unmasked. See
1544		 * handle.c:irq_wake_thread() for further information.
1545		 *
1546		 * If no thread is woken by primary (hard irq context)
1547		 * interrupt handlers, then desc->threads_active is
1548		 * also checked for zero to unmask the irq line in the
1549		 * affected hard irq flow handlers
1550		 * (handle_[fasteoi|level]_irq).
1551		 *
1552		 * The new action gets the first zero bit of
1553		 * thread_mask assigned. See the loop above which or's
1554		 * all existing action->thread_mask bits.
1555		 */
1556		new->thread_mask = 1UL << ffz(thread_mask);
1557
1558	} else if (new->handler == irq_default_primary_handler &&
1559		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1560		/*
1561		 * The interrupt was requested with handler = NULL, so
1562		 * we use the default primary handler for it. But it
1563		 * does not have the oneshot flag set. In combination
1564		 * with level interrupts this is deadly, because the
1565		 * default primary handler just wakes the thread, then
1566		 * the irq line is re-enabled, but the device still
1567		 * has the level irq asserted. Rinse and repeat....
1568		 *
1569		 * While this works for edge type interrupts, we play
1570		 * it safe and reject unconditionally because we can't
1571		 * say for sure which type this interrupt really
1572		 * has. The type flags are unreliable as the
1573		 * underlying chip implementation can override them.
1574		 */
1575		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
1576		       new->name, irq);
1577		ret = -EINVAL;
1578		goto out_unlock;
1579	}
1580
1581	if (!shared) {
1582		init_waitqueue_head(&desc->wait_for_threads);
1583
1584		/* Setup the type (level, edge polarity) if configured: */
1585		if (new->flags & IRQF_TRIGGER_MASK) {
1586			ret = __irq_set_trigger(desc,
1587						new->flags & IRQF_TRIGGER_MASK);
1588
1589			if (ret)
1590				goto out_unlock;
1591		}
1592
1593		/*
1594		 * Activate the interrupt. That activation must happen
1595		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1596		 * and the callers are supposed to handle
1597		 * that. enable_irq() of an interrupt requested with
1598		 * IRQ_NOAUTOEN is not supposed to fail. The activation
1599		 * keeps it in shutdown mode, it merely associates
1600		 * resources if necessary and if that's not possible it
1601		 * fails. Interrupts which are in managed shutdown mode
1602		 * will simply ignore that activation request.
1603		 */
1604		ret = irq_activate(desc);
1605		if (ret)
1606			goto out_unlock;
1607
1608		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1609				  IRQS_ONESHOT | IRQS_WAITING);
1610		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1611
1612		if (new->flags & IRQF_PERCPU) {
1613			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1614			irq_settings_set_per_cpu(desc);
1615		}
1616
1617		if (new->flags & IRQF_ONESHOT)
1618			desc->istate |= IRQS_ONESHOT;
1619
1620		/* Exclude IRQ from balancing if requested */
1621		if (new->flags & IRQF_NOBALANCING) {
1622			irq_settings_set_no_balancing(desc);
1623			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1624		}
1625
1626		if (irq_settings_can_autoenable(desc)) {
1627			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1628		} else {
1629			/*
1630			 * Shared interrupts do not go well with disabling
1631			 * auto enable. A second sharing request might come in
1632			 * while the line is still disabled and then wait for
1633			 * interrupts forever.
1634			 */
1635			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1636			/* Undo nested disables: */
1637			desc->depth = 1;
1638		}
1639
1640	} else if (new->flags & IRQF_TRIGGER_MASK) {
1641		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1642		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1643
1644		if (nmsk != omsk)
1645			/* hope the handler works with the current trigger mode */
1646			pr_warn("irq %d uses trigger mode %u; requested %u\n",
1647				irq, omsk, nmsk);
1648	}
1649
1650	*old_ptr = new;
1651
1652	irq_pm_install_action(desc, new);
1653
1654	/* Reset broken irq detection when installing new handler */
1655	desc->irq_count = 0;
1656	desc->irqs_unhandled = 0;
1657
1658	/*
1659	 * Check whether we disabled the irq via the spurious handler
1660	 * before. Reenable it and give it another chance.
1661	 */
1662	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1663		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1664		__enable_irq(desc);
1665	}
1666
1667	raw_spin_unlock_irqrestore(&desc->lock, flags);
1668	chip_bus_sync_unlock(desc);
1669	mutex_unlock(&desc->request_mutex);
1670
1671	irq_setup_timings(desc, new);
1672
1673	/*
1674	 * Strictly no need to wake it up, but hung_task complains
1675	 * when no hard interrupt wakes the thread up.
1676	 */
1677	if (new->thread)
1678		wake_up_process(new->thread);
1679	if (new->secondary)
1680		wake_up_process(new->secondary->thread);
1681
1682	register_irq_proc(irq, desc);
1683	new->dir = NULL;
1684	register_handler_proc(irq, new);
1685	return 0;
1686
1687mismatch:
1688	if (!(new->flags & IRQF_PROBE_SHARED)) {
1689		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1690		       irq, new->flags, new->name, old->flags, old->name);
1691#ifdef CONFIG_DEBUG_SHIRQ
1692		dump_stack();
1693#endif
1694	}
1695	ret = -EBUSY;
1696
1697out_unlock:
1698	raw_spin_unlock_irqrestore(&desc->lock, flags);
1699
1700	if (!desc->action)
1701		irq_release_resources(desc);
1702out_bus_unlock:
1703	chip_bus_sync_unlock(desc);
1704	mutex_unlock(&desc->request_mutex);
1705
1706out_thread:
1707	if (new->thread) {
1708		struct task_struct *t = new->thread;
1709
1710		new->thread = NULL;
1711		kthread_stop(t);
1712		put_task_struct(t);
1713	}
1714	if (new->secondary && new->secondary->thread) {
1715		struct task_struct *t = new->secondary->thread;
1716
1717		new->secondary->thread = NULL;
1718		kthread_stop(t);
1719		put_task_struct(t);
1720	}
1721out_mput:
1722	module_put(desc->owner);
1723	return ret;
1724}
1725
1726/*
1727 * Internal function to unregister an irqaction - used to free
1728 * regular and special interrupts that are part of the architecture.
1729 */
1730static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1731{
1732	unsigned int irq = desc->irq_data.irq;
1733	struct irqaction *action, **action_ptr;
1734	unsigned long flags;
1735
1736	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1737
1738	mutex_lock(&desc->request_mutex);
1739	chip_bus_lock(desc);
1740	raw_spin_lock_irqsave(&desc->lock, flags);
1741
1742	/*
1743	 * There can be multiple actions per IRQ descriptor, find the right
1744	 * one based on the dev_id:
1745	 */
1746	action_ptr = &desc->action;
1747	for (;;) {
1748		action = *action_ptr;
1749
1750		if (!action) {
1751			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1752			raw_spin_unlock_irqrestore(&desc->lock, flags);
1753			chip_bus_sync_unlock(desc);
1754			mutex_unlock(&desc->request_mutex);
1755			return NULL;
1756		}
1757
1758		if (action->dev_id == dev_id)
1759			break;
1760		action_ptr = &action->next;
1761	}
1762
1763	/* Found it - now remove it from the list of entries: */
1764	*action_ptr = action->next;
1765
1766	irq_pm_remove_action(desc, action);
1767
1768	/* If this was the last handler, shut down the IRQ line: */
1769	if (!desc->action) {
1770		irq_settings_clr_disable_unlazy(desc);
1771		/* Only shutdown. Deactivate after synchronize_hardirq() */
1772		irq_shutdown(desc);
1773	}
1774
1775#ifdef CONFIG_SMP
1776	/* make sure affinity_hint is cleaned up */
1777	if (WARN_ON_ONCE(desc->affinity_hint))
1778		desc->affinity_hint = NULL;
1779#endif
1780
1781	raw_spin_unlock_irqrestore(&desc->lock, flags);
1782	/*
1783	 * Drop bus_lock here so the changes which were done in the chip
1784	 * callbacks above are synced out to the irq chips which hang
1785	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
1786	 *
1787	 * Aside from that, the bus_lock can also be taken from the threaded
1788	 * handler in irq_finalize_oneshot() which results in a deadlock
1789	 * because kthread_stop() would wait forever for the thread to
1790	 * complete, which is blocked on the bus lock.
1791	 *
1792	 * The still-held desc->request_mutex protects against a
1793	 * concurrent request_irq() of this irq so the release of resources
1794	 * and timing data is properly serialized.
1795	 */
1796	chip_bus_sync_unlock(desc);
1797
1798	unregister_handler_proc(irq, action);
1799
1800	/*
1801	 * Make sure it's not being used on another CPU and if the chip
1802	 * supports it also make sure that there is no (not yet serviced)
1803	 * interrupt in flight at the hardware level.
1804	 */
1805	__synchronize_hardirq(desc, true);
1806
1807#ifdef CONFIG_DEBUG_SHIRQ
1808	/*
1809	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1810	 * event to happen even now that it's being freed, so let's make sure that
1811	 * is so by doing an extra call to the handler ....
1812	 *
1813	 * ( We do this after actually deregistering it, to make sure that a
1814	 *   'real' IRQ doesn't run in parallel with our fake. )
1815	 */
1816	if (action->flags & IRQF_SHARED) {
1817		local_irq_save(flags);
1818		action->handler(irq, dev_id);
1819		local_irq_restore(flags);
1820	}
1821#endif
1822
1823	/*
1824	 * The action has already been removed above, but the thread writes
1825	 * its oneshot mask bit when it completes. However, request_mutex is
1826	 * held across this, which prevents __setup_irq() from handing out
1827	 * the same bit to a newly requested action.
1828	 */
1829	if (action->thread) {
1830		kthread_stop(action->thread);
1831		put_task_struct(action->thread);
1832		if (action->secondary && action->secondary->thread) {
1833			kthread_stop(action->secondary->thread);
1834			put_task_struct(action->secondary->thread);
1835		}
1836	}
1837
1838	/* Last action releases resources */
1839	if (!desc->action) {
1840		/*
1841		 * Reacquire bus lock as irq_release_resources() might
1842		 * require it to deallocate resources over the slow bus.
1843		 */
1844		chip_bus_lock(desc);
1845		/*
1846		 * There is no interrupt in flight anymore. Deactivate it
1847		 * completely.
1848		 */
1849		raw_spin_lock_irqsave(&desc->lock, flags);
1850		irq_domain_deactivate_irq(&desc->irq_data);
1851		raw_spin_unlock_irqrestore(&desc->lock, flags);
1852
1853		irq_release_resources(desc);
1854		chip_bus_sync_unlock(desc);
1855		irq_remove_timings(desc);
1856	}
1857
1858	mutex_unlock(&desc->request_mutex);
1859
1860	irq_chip_pm_put(&desc->irq_data);
1861	module_put(desc->owner);
1862	kfree(action->secondary);
1863	return action;
1864}
1865
1866/**
1867 *	free_irq - free an interrupt allocated with request_irq
1868 *	@irq: Interrupt line to free
1869 *	@dev_id: Device identity to free
1870 *
1871 *	Remove an interrupt handler. The handler is removed and if the
1872 *	interrupt line is no longer in use by any driver it is disabled.
1873 *	On a shared IRQ the caller must ensure the interrupt is disabled
1874 *	on the card it drives before calling this function. The function
1875 *	does not return until any executing interrupts for this IRQ
1876 *	have completed.
1877 *
1878 *	This function must not be called from interrupt context.
1879 *
1880 *	Returns the devname argument passed to request_irq.
1881 */
1882const void *free_irq(unsigned int irq, void *dev_id)
1883{
1884	struct irq_desc *desc = irq_to_desc(irq);
1885	struct irqaction *action;
1886	const char *devname;
1887
1888	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1889		return NULL;
1890
1891#ifdef CONFIG_SMP
1892	if (WARN_ON(desc->affinity_notify))
1893		desc->affinity_notify = NULL;
1894#endif
1895
1896	action = __free_irq(desc, dev_id);
1897
1898	if (!action)
1899		return NULL;
1900
1901	devname = action->name;
1902	kfree(action);
1903	return devname;
1904}
1905EXPORT_SYMBOL(free_irq);
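
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * driver teardown path. "my_dev" is a hypothetical dev_id cookie that
 * was previously passed to request_irq():
 *
 *	const char *name = free_irq(irq, my_dev);
 *
 *	pr_debug("released IRQ %d (%s)\n", irq, name);
 */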
1906
1907/* This function must be called with desc->lock held */
1908static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1909{
1910	const char *devname = NULL;
1911
1912	desc->istate &= ~IRQS_NMI;
1913
1914	if (!WARN_ON(desc->action == NULL)) {
1915		irq_pm_remove_action(desc, desc->action);
1916		devname = desc->action->name;
1917		unregister_handler_proc(irq, desc->action);
1918
1919		kfree(desc->action);
1920		desc->action = NULL;
1921	}
1922
1923	irq_settings_clr_disable_unlazy(desc);
1924	irq_shutdown_and_deactivate(desc);
1925
1926	irq_release_resources(desc);
1927
1928	irq_chip_pm_put(&desc->irq_data);
1929	module_put(desc->owner);
1930
1931	return devname;
1932}
1933
1934const void *free_nmi(unsigned int irq, void *dev_id)
1935{
1936	struct irq_desc *desc = irq_to_desc(irq);
1937	unsigned long flags;
1938	const void *devname;
1939
1940	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1941		return NULL;
1942
1943	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1944		return NULL;
1945
1946	/* NMI still enabled */
1947	if (WARN_ON(desc->depth == 0))
1948		disable_nmi_nosync(irq);
1949
1950	raw_spin_lock_irqsave(&desc->lock, flags);
1951
1952	irq_nmi_teardown(desc);
1953	devname = __cleanup_nmi(irq, desc);
1954
1955	raw_spin_unlock_irqrestore(&desc->lock, flags);
1956
1957	return devname;
1958}
1959
1960/**
1961 *	request_threaded_irq - allocate an interrupt line
1962 *	@irq: Interrupt line to allocate
1963 *	@handler: Function to be called when the IRQ occurs.
1964 *		  Primary handler for threaded interrupts
1965 *		  If NULL and thread_fn != NULL the default
1966 *		  primary handler is installed
1967 *	@thread_fn: Function called from the irq handler thread
1968 *		    If NULL, no irq thread is created
1969 *	@irqflags: Interrupt type flags
1970 *	@devname: An ascii name for the claiming device
1971 *	@dev_id: A cookie passed back to the handler function
1972 *
1973 *	This call allocates interrupt resources and enables the
1974 *	interrupt line and IRQ handling. From the point this
1975 *	call is made your handler function may be invoked. Since
1976 *	your handler function must clear any interrupt the board
1977 *	raises, you must take care both to initialise your hardware
1978 *	and to set up the interrupt handler in the right order.
1979 *
1980 *	If you want to set up a threaded irq handler for your device
1981 *	then you need to supply @handler and @thread_fn. @handler is
1982 *	still called in hard interrupt context and has to check
1983 *	whether the interrupt originates from the device. If yes it
1984 *	needs to disable the interrupt on the device and return
1985 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
1986 *	@thread_fn. This split handler design is necessary to support
1987 *	shared interrupts.
1988 *
1989 *	Dev_id must be globally unique. Normally the address of the
1990 *	device data structure is used as the cookie. Since the handler
1991 *	receives this value it makes sense to use it.
1992 *
1993 *	If your interrupt is shared you must pass a non NULL dev_id
1994 *	as this is required when freeing the interrupt.
1995 *
1996 *	Flags:
1997 *
1998 *	IRQF_SHARED		Interrupt is shared
1999 *	IRQF_TRIGGER_*		Specify active edge(s) or level
2000 *
2001 */
2002int request_threaded_irq(unsigned int irq, irq_handler_t handler,
2003			 irq_handler_t thread_fn, unsigned long irqflags,
2004			 const char *devname, void *dev_id)
2005{
2006	struct irqaction *action;
2007	struct irq_desc *desc;
2008	int retval;
2009
2010	if (irq == IRQ_NOTCONNECTED)
2011		return -ENOTCONN;
2012
2013	/*
2014	 * Sanity-check: shared interrupts must pass in a real dev-ID,
2015	 * otherwise we'll have trouble later trying to figure out
2016	 * which interrupt is which (messes up the interrupt freeing
2017	 * logic etc).
2018	 *
2019	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
2020	 * it cannot be set along with IRQF_NO_SUSPEND.
2021	 */
2022	if (((irqflags & IRQF_SHARED) && !dev_id) ||
2023	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
2024	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
2025		return -EINVAL;
2026
2027	desc = irq_to_desc(irq);
2028	if (!desc)
2029		return -EINVAL;
2030
2031	if (!irq_settings_can_request(desc) ||
2032	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
2033		return -EINVAL;
2034
2035	if (!handler) {
2036		if (!thread_fn)
2037			return -EINVAL;
2038		handler = irq_default_primary_handler;
2039	}
2040
2041	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2042	if (!action)
2043		return -ENOMEM;
2044
2045	action->handler = handler;
2046	action->thread_fn = thread_fn;
2047	action->flags = irqflags;
2048	action->name = devname;
2049	action->dev_id = dev_id;
2050
2051	retval = irq_chip_pm_get(&desc->irq_data);
2052	if (retval < 0) {
2053		kfree(action);
2054		return retval;
2055	}
2056
2057	retval = __setup_irq(irq, desc, action);
2058
2059	if (retval) {
2060		irq_chip_pm_put(&desc->irq_data);
2061		kfree(action->secondary);
2062		kfree(action);
2063	}
2064
2065#ifdef CONFIG_DEBUG_SHIRQ_FIXME
2066	if (!retval && (irqflags & IRQF_SHARED)) {
2067		/*
2068		 * It's a shared IRQ -- the driver ought to be prepared for it
2069		 * to happen immediately, so let's make sure....
2070		 * We disable the irq to make sure that a 'real' IRQ doesn't
2071		 * run in parallel with our fake.
2072		 */
2073		unsigned long flags;
2074
2075		disable_irq(irq);
2076		local_irq_save(flags);
2077
2078		handler(irq, dev_id);
2079
2080		local_irq_restore(flags);
2081		enable_irq(irq);
2082	}
2083#endif
2084	return retval;
2085}
2086EXPORT_SYMBOL(request_threaded_irq);
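
/*
 * Illustrative usage sketch (hypothetical driver, not part of the
 * original file). The my_dev_* helpers are assumptions standing in for
 * device-specific code; the primary handler only checks and masks the
 * device, the threaded handler does the real work:
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;
 *		my_dev_mask_irq(dev);
 *		return IRQ_WAKE_THREAD;
 *	}
 *
 *	static irqreturn_t my_thread_fn(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		my_dev_handle_work(dev);
 *		my_dev_unmask_irq(dev);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_thread_fn,
 *				   IRQF_SHARED, "my-dev", dev);
 */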
2087
2088/**
2089 *	request_any_context_irq - allocate an interrupt line
2090 *	@irq: Interrupt line to allocate
2091 *	@handler: Function to be called when the IRQ occurs.
2092 *		  Threaded handler for threaded interrupts.
2093 *	@flags: Interrupt type flags
2094 *	@name: An ascii name for the claiming device
2095 *	@dev_id: A cookie passed back to the handler function
2096 *
2097 *	This call allocates interrupt resources and enables the
2098 *	interrupt line and IRQ handling. It selects either a
2099 *	hardirq or threaded handling method depending on the
2100 *	context.
2101 *
2102 *	On failure, it returns a negative value. On success,
2103 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
2104 */
2105int request_any_context_irq(unsigned int irq, irq_handler_t handler,
2106			    unsigned long flags, const char *name, void *dev_id)
2107{
2108	struct irq_desc *desc;
2109	int ret;
2110
2111	if (irq == IRQ_NOTCONNECTED)
2112		return -ENOTCONN;
2113
2114	desc = irq_to_desc(irq);
2115	if (!desc)
2116		return -EINVAL;
2117
2118	if (irq_settings_is_nested_thread(desc)) {
2119		ret = request_threaded_irq(irq, NULL, handler,
2120					   flags, name, dev_id);
2121		return !ret ? IRQC_IS_NESTED : ret;
2122	}
2123
2124	ret = request_irq(irq, handler, flags, name, dev_id);
2125	return !ret ? IRQC_IS_HARDIRQ : ret;
2126}
2127EXPORT_SYMBOL_GPL(request_any_context_irq);
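
/*
 * Illustrative sketch (hypothetical caller): a non-negative return
 * value encodes the chosen context, so most callers only test for
 * errors.
 *
 *	ret = request_any_context_irq(irq, my_handler, 0, "my-dev", dev);
 *	if (ret < 0)
 *		return ret;
 *	threaded = (ret == IRQC_IS_NESTED);
 */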
2128
2129/**
2130 *	request_nmi - allocate an interrupt line for NMI delivery
2131 *	@irq: Interrupt line to allocate
2132 *	@handler: Function to be called when the IRQ occurs.
2133 *		  Threaded handler for threaded interrupts.
2134 *	@irqflags: Interrupt type flags
2135 *	@name: An ascii name for the claiming device
2136 *	@dev_id: A cookie passed back to the handler function
2137 *
2138 *	This call allocates interrupt resources and enables the
2139 *	interrupt line and IRQ handling. It sets up the IRQ line
2140 *	to be handled as an NMI.
2141 *
2142 *	An interrupt line delivering NMIs cannot be shared and IRQ handling
2143 *	cannot be threaded.
2144 *
2145 *	Interrupt lines requested for NMI delivery must produce per-CPU
2146 *	interrupts and must have auto-enabling disabled.
2147 *
2148 *	Dev_id must be globally unique. Normally the address of the
2149 *	device data structure is used as the cookie. Since the handler
2150 *	receives this value it makes sense to use it.
2151 *
2152 *	If the interrupt line cannot be used to deliver NMIs, the
2153 *	function will fail and return a negative value.
2154 */
2155int request_nmi(unsigned int irq, irq_handler_t handler,
2156		unsigned long irqflags, const char *name, void *dev_id)
2157{
2158	struct irqaction *action;
2159	struct irq_desc *desc;
2160	unsigned long flags;
2161	int retval;
2162
2163	if (irq == IRQ_NOTCONNECTED)
2164		return -ENOTCONN;
2165
2166	/* NMIs cannot be shared or used for polling */
2167	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2168		return -EINVAL;
2169
2170	if (!(irqflags & IRQF_PERCPU))
2171		return -EINVAL;
2172
2173	if (!handler)
2174		return -EINVAL;
2175
2176	desc = irq_to_desc(irq);
2177
2178	if (!desc || irq_settings_can_autoenable(desc) ||
2179	    !irq_settings_can_request(desc) ||
2180	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2181	    !irq_supports_nmi(desc))
2182		return -EINVAL;
2183
2184	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2185	if (!action)
2186		return -ENOMEM;
2187
2188	action->handler = handler;
2189	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2190	action->name = name;
2191	action->dev_id = dev_id;
2192
2193	retval = irq_chip_pm_get(&desc->irq_data);
2194	if (retval < 0)
2195		goto err_out;
2196
2197	retval = __setup_irq(irq, desc, action);
2198	if (retval)
2199		goto err_irq_setup;
2200
2201	raw_spin_lock_irqsave(&desc->lock, flags);
2202
2203	/* Setup NMI state */
2204	desc->istate |= IRQS_NMI;
2205	retval = irq_nmi_setup(desc);
2206	if (retval) {
2207		__cleanup_nmi(irq, desc);
2208		raw_spin_unlock_irqrestore(&desc->lock, flags);
2209		return -EINVAL;
2210	}
2211
2212	raw_spin_unlock_irqrestore(&desc->lock, flags);
2213
2214	return 0;
2215
2216err_irq_setup:
2217	irq_chip_pm_put(&desc->irq_data);
2218err_out:
2219	kfree(action);
2220
2221	return retval;
2222}
2223
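/*
 * Illustrative sketch (hypothetical caller). NMI lines must be
 * requested with auto-enable disabled, so they are enabled explicitly
 * afterwards; enable_nmi() is assumed here as the counterpart of the
 * disable_nmi_nosync() used above:
 *
 *	ret = request_nmi(irq, my_nmi_handler, IRQF_PERCPU, "my-nmi", dev);
 *	if (!ret)
 *		enable_nmi(irq);
 */
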
2224void enable_percpu_irq(unsigned int irq, unsigned int type)
2225{
2226	unsigned int cpu = smp_processor_id();
2227	unsigned long flags;
2228	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2229
2230	if (!desc)
2231		return;
2232
2233	/*
2234	 * If the trigger type is not specified by the caller, then
2235	 * use the default for this interrupt.
2236	 */
2237	type &= IRQ_TYPE_SENSE_MASK;
2238	if (type == IRQ_TYPE_NONE)
2239		type = irqd_get_trigger_type(&desc->irq_data);
2240
2241	if (type != IRQ_TYPE_NONE) {
2242		int ret;
2243
2244		ret = __irq_set_trigger(desc, type);
2245
2246		if (ret) {
2247			WARN(1, "failed to set type for IRQ%d\n", irq);
2248			goto out;
2249		}
2250	}
2251
2252	irq_percpu_enable(desc, cpu);
2253out:
2254	irq_put_desc_unlock(desc, flags);
2255}
2256EXPORT_SYMBOL_GPL(enable_percpu_irq);
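
/*
 * Illustrative sketch: IRQ_TYPE_NONE keeps whatever trigger type is
 * already configured for the line; run this on every CPU that should
 * receive the interrupt:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */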
2257
2258void enable_percpu_nmi(unsigned int irq, unsigned int type)
2259{
2260	enable_percpu_irq(irq, type);
2261}
2262
2263/**
2264 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
2265 * @irq:	Linux irq number to check for
2266 *
2267 * Must be called from a non-migratable context. Returns the enable
2268 * state of a per-CPU interrupt on the current CPU.
2269 */
2270bool irq_percpu_is_enabled(unsigned int irq)
2271{
2272	unsigned int cpu = smp_processor_id();
2273	struct irq_desc *desc;
2274	unsigned long flags;
2275	bool is_enabled;
2276
2277	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2278	if (!desc)
2279		return false;
2280
2281	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
2282	irq_put_desc_unlock(desc, flags);
2283
2284	return is_enabled;
2285}
2286EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
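
/*
 * Illustrative sketch: the result is only meaningful for the current
 * CPU, so a caller pins itself first:
 *
 *	bool enabled;
 *
 *	preempt_disable();
 *	enabled = irq_percpu_is_enabled(irq);
 *	preempt_enable();
 */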
2287
2288void disable_percpu_irq(unsigned int irq)
2289{
2290	unsigned int cpu = smp_processor_id();
2291	unsigned long flags;
2292	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
2293
2294	if (!desc)
2295		return;
2296
2297	irq_percpu_disable(desc, cpu);
2298	irq_put_desc_unlock(desc, flags);
2299}
2300EXPORT_SYMBOL_GPL(disable_percpu_irq);
2301
2302void disable_percpu_nmi(unsigned int irq)
2303{
2304	disable_percpu_irq(irq);
2305}
2306
2307/*
2308 * Internal function to unregister a percpu irqaction.
2309 */
2310static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2311{
2312	struct irq_desc *desc = irq_to_desc(irq);
2313	struct irqaction *action;
2314	unsigned long flags;
2315
2316	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
2317
2318	if (!desc)
2319		return NULL;
2320
2321	raw_spin_lock_irqsave(&desc->lock, flags);
2322
2323	action = desc->action;
2324	if (!action || action->percpu_dev_id != dev_id) {
2325		WARN(1, "Trying to free already-free IRQ %d\n", irq);
2326		goto bad;
2327	}
2328
2329	if (!cpumask_empty(desc->percpu_enabled)) {
2330		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
2331		     irq, cpumask_first(desc->percpu_enabled));
2332		goto bad;
2333	}
2334
2335	/* Found it - now remove it from the list of entries: */
2336	desc->action = NULL;
2337
2338	desc->istate &= ~IRQS_NMI;
2339
2340	raw_spin_unlock_irqrestore(&desc->lock, flags);
2341
2342	unregister_handler_proc(irq, action);
2343
2344	irq_chip_pm_put(&desc->irq_data);
2345	module_put(desc->owner);
2346	return action;
2347
2348bad:
2349	raw_spin_unlock_irqrestore(&desc->lock, flags);
2350	return NULL;
2351}
2352
2353/**
2354 *	remove_percpu_irq - free a per-cpu interrupt
2355 *	@irq: Interrupt line to free
2356 *	@act: irqaction for the interrupt
2357 *
2358 * Used to remove interrupts statically set up by the early boot process.
2359 */
2360void remove_percpu_irq(unsigned int irq, struct irqaction *act)
2361{
2362	struct irq_desc *desc = irq_to_desc(irq);
2363
2364	if (desc && irq_settings_is_per_cpu_devid(desc))
2365		__free_percpu_irq(irq, act->percpu_dev_id);
2366}
2367
2368/**
2369 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
2370 *	@irq: Interrupt line to free
2371 *	@dev_id: Device identity to free
2372 *
2373 *	Remove a percpu interrupt handler. The handler is removed, but
2374 *	the interrupt line is not disabled. This must be done on each
2375 *	CPU before calling this function. The function does not return
2376 *	until any executing interrupts for this IRQ have completed.
2377 *
2378 *	This function must not be called from interrupt context.
2379 */
2380void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2381{
2382	struct irq_desc *desc = irq_to_desc(irq);
2383
2384	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2385		return;
2386
2387	chip_bus_lock(desc);
2388	kfree(__free_percpu_irq(irq, dev_id));
2389	chip_bus_sync_unlock(desc);
2390}
2391EXPORT_SYMBOL_GPL(free_percpu_irq);
2392
2393void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2394{
2395	struct irq_desc *desc = irq_to_desc(irq);
2396
2397	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2398		return;
2399
2400	if (WARN_ON(!(desc->istate & IRQS_NMI)))
2401		return;
2402
2403	kfree(__free_percpu_irq(irq, dev_id));
2404}
2405
2406/**
2407 *	setup_percpu_irq - setup a per-cpu interrupt
2408 *	@irq: Interrupt line to setup
2409 *	@act: irqaction for the interrupt
2410 *
2411 * Used to statically set up per-cpu interrupts in the early boot process.
2412 */
2413int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2414{
2415	struct irq_desc *desc = irq_to_desc(irq);
2416	int retval;
2417
2418	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2419		return -EINVAL;
2420
2421	retval = irq_chip_pm_get(&desc->irq_data);
2422	if (retval < 0)
2423		return retval;
2424
2425	retval = __setup_irq(irq, desc, act);
2426
2427	if (retval)
2428		irq_chip_pm_put(&desc->irq_data);
2429
2430	return retval;
2431}
2432
2433/**
2434 *	__request_percpu_irq - allocate a percpu interrupt line
2435 *	@irq: Interrupt line to allocate
2436 *	@handler: Function to be called when the IRQ occurs.
2437 *	@flags: Interrupt type flags (IRQF_TIMER only)
2438 *	@devname: An ascii name for the claiming device
2439 *	@dev_id: A percpu cookie passed back to the handler function
2440 *
2441 *	This call allocates interrupt resources and enables the
2442 *	interrupt on the local CPU. If the interrupt is supposed to be
2443 *	enabled on other CPUs, it has to be done on each CPU using
2444 *	enable_percpu_irq().
2445 *
2446 *	Dev_id must be globally unique. It is a per-cpu variable, and
2447 *	the handler gets called with the interrupted CPU's instance of
2448 *	that variable.
2449 */
2450int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2451			 unsigned long flags, const char *devname,
2452			 void __percpu *dev_id)
2453{
2454	struct irqaction *action;
2455	struct irq_desc *desc;
2456	int retval;
2457
2458	if (!dev_id)
2459		return -EINVAL;
2460
2461	desc = irq_to_desc(irq);
2462	if (!desc || !irq_settings_can_request(desc) ||
2463	    !irq_settings_is_per_cpu_devid(desc))
2464		return -EINVAL;
2465
2466	if (flags && flags != IRQF_TIMER)
2467		return -EINVAL;
2468
2469	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2470	if (!action)
2471		return -ENOMEM;
2472
2473	action->handler = handler;
2474	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2475	action->name = devname;
2476	action->percpu_dev_id = dev_id;
2477
2478	retval = irq_chip_pm_get(&desc->irq_data);
2479	if (retval < 0) {
2480		kfree(action);
2481		return retval;
2482	}
2483
2484	retval = __setup_irq(irq, desc, action);
2485
2486	if (retval) {
2487		irq_chip_pm_put(&desc->irq_data);
2488		kfree(action);
2489	}
2490
2491	return retval;
2492}
2493EXPORT_SYMBOL_GPL(__request_percpu_irq);
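
/*
 * Illustrative sketch ("my_percpu_dev" is a hypothetical percpu
 * variable): the line is requested once on the local CPU,
 *
 *	ret = __request_percpu_irq(irq, my_handler, 0, "my-dev",
 *				   my_percpu_dev);
 *
 * and then, on each further CPU that should receive it:
 *
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */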
2494
2495/**
2496 *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2497 *	@irq: Interrupt line to allocate
2498 *	@handler: Function to be called when the IRQ occurs.
2499 *	@name: An ascii name for the claiming device
2500 *	@dev_id: A percpu cookie passed back to the handler function
2501 *
2502 *	This call allocates interrupt resources for a per-CPU NMI. Per-CPU
2503 *	NMIs have to be set up on each CPU by calling prepare_percpu_nmi()
2504 *	before being enabled on the same CPU by using enable_percpu_nmi().
2505 *
2506 *	Dev_id must be globally unique. It is a per-cpu variable, and
2507 *	the handler gets called with the interrupted CPU's instance of
2508 *	that variable.
2509 *
2510 *	Interrupt lines requested for NMI delivery must have auto-enabling
2511 *	disabled.
2512 *
2513 *	If the interrupt line cannot be used to deliver NMIs, the function
2514 *	will fail, returning a negative value.
2515 */
2516int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2517		       const char *name, void __percpu *dev_id)
2518{
2519	struct irqaction *action;
2520	struct irq_desc *desc;
2521	unsigned long flags;
2522	int retval;
2523
2524	if (!handler)
2525		return -EINVAL;
2526
2527	desc = irq_to_desc(irq);
2528
2529	if (!desc || !irq_settings_can_request(desc) ||
2530	    !irq_settings_is_per_cpu_devid(desc) ||
2531	    irq_settings_can_autoenable(desc) ||
2532	    !irq_supports_nmi(desc))
2533		return -EINVAL;
2534
2535	/* The line cannot already be NMI */
2536	if (desc->istate & IRQS_NMI)
2537		return -EINVAL;
2538
2539	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2540	if (!action)
2541		return -ENOMEM;
2542
2543	action->handler = handler;
2544	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2545		| IRQF_NOBALANCING;
2546	action->name = name;
2547	action->percpu_dev_id = dev_id;
2548
2549	retval = irq_chip_pm_get(&desc->irq_data);
2550	if (retval < 0)
2551		goto err_out;
2552
2553	retval = __setup_irq(irq, desc, action);
2554	if (retval)
2555		goto err_irq_setup;
2556
2557	raw_spin_lock_irqsave(&desc->lock, flags);
2558	desc->istate |= IRQS_NMI;
2559	raw_spin_unlock_irqrestore(&desc->lock, flags);
2560
2561	return 0;
2562
2563err_irq_setup:
2564	irq_chip_pm_put(&desc->irq_data);
2565err_out:
2566	kfree(action);
2567
2568	return retval;
2569}
2570
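/*
 * Illustrative per-CPU NMI lifecycle sketch (hypothetical caller;
 * "my_percpu_dev" is an assumed percpu variable). Request once:
 *
 *	request_percpu_nmi(irq, my_nmi_handler, "my-nmi", my_percpu_dev);
 *
 * then, on each CPU with preemption disabled:
 *
 *	prepare_percpu_nmi(irq);
 *	enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 *	...
 *	disable_percpu_nmi(irq);
 *	teardown_percpu_nmi(irq);
 */
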
2571/**
2572 *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
2573 *	@irq: Interrupt line to prepare for NMI delivery
2574 *
2575 *	This call prepares an interrupt line to deliver NMI on the current CPU,
2576 *	before that interrupt line gets enabled with enable_percpu_nmi().
2577 *
2578 *	As a CPU local operation, this should be called from non-preemptible
2579 *	context.
2580 *
2581 *	If the interrupt line cannot be used to deliver NMIs, the function
2582 *	will fail, returning a negative value.
2583 */
2584int prepare_percpu_nmi(unsigned int irq)
2585{
2586	unsigned long flags;
2587	struct irq_desc *desc;
2588	int ret = 0;
2589
2590	WARN_ON(preemptible());
2591
2592	desc = irq_get_desc_lock(irq, &flags,
2593				 IRQ_GET_DESC_CHECK_PERCPU);
2594	if (!desc)
2595		return -EINVAL;
2596
2597	if (WARN(!(desc->istate & IRQS_NMI),
2598		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2599		 irq)) {
2600		ret = -EINVAL;
2601		goto out;
2602	}
2603
2604	ret = irq_nmi_setup(desc);
2605	if (ret) {
2606		pr_err("Failed to set up NMI delivery: irq %u\n", irq);
2607		goto out;
2608	}
2609
2610out:
2611	irq_put_desc_unlock(desc, flags);
2612	return ret;
2613}
2614
2615/**
2616 *	teardown_percpu_nmi - undoes NMI setup of IRQ line
2617 *	@irq: Interrupt line from which CPU local NMI configuration should be
2618 *	      removed
2619 *
2620 *	This call undoes the setup done by prepare_percpu_nmi().
2621 *
2622 *	IRQ line should not be enabled for the current CPU.
2623 *
2624 *	As a CPU local operation, this should be called from non-preemptible
2625 *	context.
2626 */
2627void teardown_percpu_nmi(unsigned int irq)
2628{
2629	unsigned long flags;
2630	struct irq_desc *desc;
2631
2632	WARN_ON(preemptible());
2633
2634	desc = irq_get_desc_lock(irq, &flags,
2635				 IRQ_GET_DESC_CHECK_PERCPU);
2636	if (!desc)
2637		return;
2638
2639	if (WARN_ON(!(desc->istate & IRQS_NMI)))
2640		goto out;
2641
2642	irq_nmi_teardown(desc);
2643out:
2644	irq_put_desc_unlock(desc, flags);
2645}
2646
2647int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
2648			    bool *state)
2649{
2650	struct irq_chip *chip;
2651	int err = -EINVAL;
2652
2653	do {
2654		chip = irq_data_get_irq_chip(data);
2655		if (WARN_ON_ONCE(!chip))
2656			return -ENODEV;
2657		if (chip->irq_get_irqchip_state)
2658			break;
2659#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2660		data = data->parent_data;
2661#else
2662		data = NULL;
2663#endif
2664	} while (data);
2665
2666	if (data)
2667		err = chip->irq_get_irqchip_state(data, which, state);
2668	return err;
2669}
2670
2671/**
2672 *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
2673 *	@irq: Interrupt line that is forwarded to a VM
2674 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
2675 *	@state: a pointer to a boolean where the state is to be stored
2676 *
2677 *	This call snapshots the internal irqchip state of an
2678 *	interrupt, returning into @state the bit corresponding to
2679 *	state @which.
2680 *
2681 *	This function should be called with preemption disabled if the
2682 *	interrupt controller has per-cpu registers.
2683 */
2684int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2685			  bool *state)
2686{
2687	struct irq_desc *desc;
2688	struct irq_data *data;
2689	unsigned long flags;
2690	int err = -EINVAL;
2691
2692	desc = irq_get_desc_buslock(irq, &flags, 0);
2693	if (!desc)
2694		return err;
2695
2696	data = irq_desc_get_irq_data(desc);
2697
2698	err = __irq_get_irqchip_state(data, which, state);
2699
2700	irq_put_desc_busunlock(desc, flags);
2701	return err;
2702}
2703EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
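
/*
 * Illustrative sketch: snapshot the pending bit of a forwarded
 * interrupt, e.g. before saving VM interrupt state:
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
 *		... "pending" now reflects the hardware state ...
 */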
2704
2705/**
2706 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
2707 *	@irq: Interrupt line that is forwarded to a VM
2708 *	@which: State to be restored (one of IRQCHIP_STATE_*)
2709 *	@val: Value corresponding to @which
2710 *
2711 *	This call sets the internal irqchip state of an interrupt,
2712 *	depending on the value of @which.
2713 *
2714 *	This function should be called with preemption disabled if the
2715 *	interrupt controller has per-cpu registers.
2716 */
2717int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2718			  bool val)
2719{
2720	struct irq_desc *desc;
2721	struct irq_data *data;
2722	struct irq_chip *chip;
2723	unsigned long flags;
2724	int err = -EINVAL;
2725
2726	desc = irq_get_desc_buslock(irq, &flags, 0);
2727	if (!desc)
2728		return err;
2729
2730	data = irq_desc_get_irq_data(desc);
2731
2732	do {
2733		chip = irq_data_get_irq_chip(data);
2734		if (WARN_ON_ONCE(!chip)) {
2735			err = -ENODEV;
2736			goto out_unlock;
2737		}
2738		if (chip->irq_set_irqchip_state)
2739			break;
2740#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2741		data = data->parent_data;
2742#else
2743		data = NULL;
2744#endif
2745	} while (data);
2746
2747	if (data)
2748		err = chip->irq_set_irqchip_state(data, which, val);
2749
2750out_unlock:
2751	irq_put_desc_busunlock(desc, flags);
2752	return err;
2753}
2754EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
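
/*
 * Illustrative sketch: restore a previously snapshotted pending bit,
 * e.g. when resuming a VM:
 *
 *	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 */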