Linux v4.17: kernel/irq/manage.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   4 * Copyright (C) 2005-2006 Thomas Gleixner
   5 *
   6 * This file contains driver APIs to the irq subsystem.
   7 */
   8
   9#define pr_fmt(fmt) "genirq: " fmt
  10
  11#include <linux/irq.h>
  12#include <linux/kthread.h>
  13#include <linux/module.h>
  14#include <linux/random.h>
  15#include <linux/interrupt.h>
  16#include <linux/slab.h>
  17#include <linux/sched.h>
  18#include <linux/sched/rt.h>
  19#include <linux/sched/task.h>
  20#include <uapi/linux/sched/types.h>
  21#include <linux/task_work.h>
  22
  23#include "internals.h"
  24
  25#ifdef CONFIG_IRQ_FORCED_THREADING
  26__read_mostly bool force_irqthreads;
  27
  28static int __init setup_forced_irqthreads(char *arg)
  29{
  30	force_irqthreads = true;
  31	return 0;
  32}
  33early_param("threadirqs", setup_forced_irqthreads);
  34#endif
  35
  36static void __synchronize_hardirq(struct irq_desc *desc)
  37{
  38	bool inprogress;
  39
  40	do {
  41		unsigned long flags;
  42
  43		/*
  44		 * Wait until we're out of the critical section.  This might
  45		 * give the wrong answer due to the lack of memory barriers.
  46		 */
  47		while (irqd_irq_inprogress(&desc->irq_data))
  48			cpu_relax();
  49
  50		/* Ok, that indicated we're done: double-check carefully. */
  51		raw_spin_lock_irqsave(&desc->lock, flags);
  52		inprogress = irqd_irq_inprogress(&desc->irq_data);
  53		raw_spin_unlock_irqrestore(&desc->lock, flags);
  54
  55		/* Oops, that failed? */
  56	} while (inprogress);
  57}
  58
  59/**
  60 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
  61 *	@irq: interrupt number to wait for
  62 *
  63 *	This function waits for any pending hard IRQ handlers for this
  64 *	interrupt to complete before returning. If you use this
  65 *	function while holding a resource the IRQ handler may need you
  66 *	will deadlock. It does not take associated threaded handlers
  67 *	into account.
  68 *
  69 *	Do not use this for shutdown scenarios where you must be sure
  70 *	that all parts (hardirq and threaded handler) have completed.
  71 *
  72 *	Returns: false if a threaded handler is active.
  73 *
  74 *	This function may be called - with care - from IRQ context.
  75 */
  76bool synchronize_hardirq(unsigned int irq)
  77{
  78	struct irq_desc *desc = irq_to_desc(irq);
  79
  80	if (desc) {
  81		__synchronize_hardirq(desc);
  82		return !atomic_read(&desc->threads_active);
  83	}
  84
  85	return true;
  86}
  87EXPORT_SYMBOL(synchronize_hardirq);
  88
  89/**
  90 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  91 *	@irq: interrupt number to wait for
  92 *
  93 *	This function waits for any pending IRQ handlers for this interrupt
  94 *	to complete before returning. If you use this function while
  95 *	holding a resource the IRQ handler may need you will deadlock.
  96 *
  97 *	This function may be called - with care - from IRQ context.
  98 */
  99void synchronize_irq(unsigned int irq)
 100{
 101	struct irq_desc *desc = irq_to_desc(irq);
 102
 103	if (desc) {
 104		__synchronize_hardirq(desc);
 105		/*
 106		 * We made sure that no hardirq handler is
 107		 * running. Now verify that no threaded handlers are
 108		 * active.
 109		 */
 110		wait_event(desc->wait_for_threads,
 111			   !atomic_read(&desc->threads_active));
 112	}
 113}
 114EXPORT_SYMBOL(synchronize_irq);
 115
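A typical use, sketched below with hypothetical foo_* names (not from this file): the device is quiesced first, because synchronize_irq() only waits out handlers that are already running and cannot stop new interrupts from arriving.

static void foo_stop_rx(struct foo_dev *foo)
{
	foo_hw_mask_irqs(foo);		/* device stops asserting the line */
	synchronize_irq(foo->irq);	/* waits for hardirq and threaded handlers */
	kfree(foo->rx_buf);		/* now safe: no handler can touch it */
	foo->rx_buf = NULL;
}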
 116#ifdef CONFIG_SMP
 117cpumask_var_t irq_default_affinity;
 118
 119static bool __irq_can_set_affinity(struct irq_desc *desc)
 120{
 121	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 122	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 123		return false;
 124	return true;
 125}
 126
 127/**
 128 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 129 *	@irq:		Interrupt to check
 130 *
 131 */
 132int irq_can_set_affinity(unsigned int irq)
 133{
 134	return __irq_can_set_affinity(irq_to_desc(irq));
 135}
 136
 137/**
 138 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
 139 * @irq:	Interrupt to check
 140 *
 141 * Like irq_can_set_affinity() above, but additionally checks for the
 142 * AFFINITY_MANAGED flag.
 143 */
 144bool irq_can_set_affinity_usr(unsigned int irq)
 145{
 146	struct irq_desc *desc = irq_to_desc(irq);
 147
 148	return __irq_can_set_affinity(desc) &&
 149		!irqd_affinity_is_managed(&desc->irq_data);
 150}
 151
 152/**
 153 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
  154 *	@desc:		irq descriptor which has affinity changed
 155 *
 156 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 157 *	to the interrupt thread itself. We can not call
 158 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 159 *	code can be called from hard interrupt context.
 160 */
 161void irq_set_thread_affinity(struct irq_desc *desc)
 162{
 163	struct irqaction *action;
 164
 165	for_each_action_of_desc(desc, action)
 166		if (action->thread)
 167			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 168}
 169
 170static void irq_validate_effective_affinity(struct irq_data *data)
 171{
 172#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 173	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
 174	struct irq_chip *chip = irq_data_get_irq_chip(data);
 175
 176	if (!cpumask_empty(m))
 177		return;
 178	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
 179		     chip->name, data->irq);
 180#endif
 181}
 182
 183int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 184			bool force)
 185{
 186	struct irq_desc *desc = irq_data_to_desc(data);
 187	struct irq_chip *chip = irq_data_get_irq_chip(data);
 188	int ret;
 189
 190	if (!chip || !chip->irq_set_affinity)
 191		return -EINVAL;
 192
 193	ret = chip->irq_set_affinity(data, mask, force);
 194	switch (ret) {
 195	case IRQ_SET_MASK_OK:
 196	case IRQ_SET_MASK_OK_DONE:
 197		cpumask_copy(desc->irq_common_data.affinity, mask);
 198	case IRQ_SET_MASK_OK_NOCOPY:
 199		irq_validate_effective_affinity(data);
 200		irq_set_thread_affinity(desc);
 201		ret = 0;
 202	}
 203
 204	return ret;
 205}
 206
 207int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 208			    bool force)
 209{
 210	struct irq_chip *chip = irq_data_get_irq_chip(data);
 211	struct irq_desc *desc = irq_data_to_desc(data);
 212	int ret = 0;
 213
 214	if (!chip || !chip->irq_set_affinity)
 215		return -EINVAL;
 216
 217	if (irq_can_move_pcntxt(data)) {
 218		ret = irq_do_set_affinity(data, mask, force);
 219	} else {
 220		irqd_set_move_pending(data);
 221		irq_copy_pending(desc, mask);
 222	}
 223
 224	if (desc->affinity_notify) {
 225		kref_get(&desc->affinity_notify->kref);
 226		schedule_work(&desc->affinity_notify->work);
 227	}
 228	irqd_set(data, IRQD_AFFINITY_SET);
 229
 230	return ret;
 231}
 232
 233int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 234{
 235	struct irq_desc *desc = irq_to_desc(irq);
 236	unsigned long flags;
 237	int ret;
 238
 239	if (!desc)
 240		return -EINVAL;
 241
 242	raw_spin_lock_irqsave(&desc->lock, flags);
 243	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 244	raw_spin_unlock_irqrestore(&desc->lock, flags);
 245	return ret;
 246}
 247
 248int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 249{
 250	unsigned long flags;
 251	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 252
 253	if (!desc)
 254		return -EINVAL;
 255	desc->affinity_hint = m;
 256	irq_put_desc_unlock(desc, flags);
 257	/* set the initial affinity to prevent every interrupt being on CPU0 */
 258	if (m)
 259		__irq_set_affinity(irq, m, false);
 260	return 0;
 261}
 262EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 263
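A usage sketch, with hypothetical nic/vec/num_queues names: a multiqueue driver publishes one hint per vector so tools like irqbalance can read /proc/irq/<n>/affinity_hint, and resets the hint to NULL before free_irq(), since __free_irq() below warns about a stale affinity_hint.

static void nic_spread_irqs(struct nic *nic)
{
	int i, cpu = cpumask_first(cpu_online_mask);

	for (i = 0; i < nic->num_queues; i++) {
		irq_set_affinity_hint(nic->vec[i].irq, cpumask_of(cpu));
		cpu = cpumask_next(cpu, cpu_online_mask);
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(cpu_online_mask);
	}
}

static void nic_teardown_irqs(struct nic *nic)
{
	int i;

	for (i = 0; i < nic->num_queues; i++) {
		irq_set_affinity_hint(nic->vec[i].irq, NULL);	/* clear before free */
		free_irq(nic->vec[i].irq, &nic->vec[i]);
	}
}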
 264static void irq_affinity_notify(struct work_struct *work)
 265{
 266	struct irq_affinity_notify *notify =
 267		container_of(work, struct irq_affinity_notify, work);
 268	struct irq_desc *desc = irq_to_desc(notify->irq);
 269	cpumask_var_t cpumask;
 270	unsigned long flags;
 271
 272	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 273		goto out;
 274
 275	raw_spin_lock_irqsave(&desc->lock, flags);
 276	if (irq_move_pending(&desc->irq_data))
 277		irq_get_pending(cpumask, desc);
 278	else
 279		cpumask_copy(cpumask, desc->irq_common_data.affinity);
 280	raw_spin_unlock_irqrestore(&desc->lock, flags);
 281
 282	notify->notify(notify, cpumask);
 283
 284	free_cpumask_var(cpumask);
 285out:
 286	kref_put(&notify->kref, notify->release);
 287}
 288
 289/**
 290 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 291 *	@irq:		Interrupt for which to enable/disable notification
 292 *	@notify:	Context for notification, or %NULL to disable
 293 *			notification.  Function pointers must be initialised;
 294 *			the other fields will be initialised by this function.
 295 *
 296 *	Must be called in process context.  Notification may only be enabled
 297 *	after the IRQ is allocated and must be disabled before the IRQ is
 298 *	freed using free_irq().
 299 */
 300int
 301irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 302{
 303	struct irq_desc *desc = irq_to_desc(irq);
 304	struct irq_affinity_notify *old_notify;
 305	unsigned long flags;
 306
 307	/* The release function is promised process context */
 308	might_sleep();
 309
 310	if (!desc)
 311		return -EINVAL;
 312
 313	/* Complete initialisation of *notify */
 314	if (notify) {
 315		notify->irq = irq;
 316		kref_init(&notify->kref);
 317		INIT_WORK(&notify->work, irq_affinity_notify);
 318	}
 319
 320	raw_spin_lock_irqsave(&desc->lock, flags);
 321	old_notify = desc->affinity_notify;
 322	desc->affinity_notify = notify;
 323	raw_spin_unlock_irqrestore(&desc->lock, flags);
 324
 325	if (old_notify)
 326		kref_put(&old_notify->kref, old_notify->release);
 327
 328	return 0;
 329}
 330EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 331
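A sketch of a notifier client, using a hypothetical my_ctx wrapper: notify() is invoked from the workqueue by irq_affinity_notify() above, and release() frees the object that embeds the kref.

struct my_ctx {
	struct irq_affinity_notify notify;
	/* ... per-irq state that depends on CPU placement ... */
};

static void my_affinity_changed(struct irq_affinity_notify *n,
				const cpumask_t *mask)
{
	struct my_ctx *ctx = container_of(n, struct my_ctx, notify);

	/* e.g. re-home ctx's per-CPU buffers according to *mask */
}

static void my_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *n =
		container_of(ref, struct irq_affinity_notify, kref);

	kfree(container_of(n, struct my_ctx, notify));
}

static int my_register(unsigned int irq, struct my_ctx *ctx)
{
	/* function pointers must be set before registration */
	ctx->notify.notify = my_affinity_changed;
	ctx->notify.release = my_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}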
 332#ifndef CONFIG_AUTO_IRQ_AFFINITY
 333/*
 334 * Generic version of the affinity autoselector.
 335 */
 336int irq_setup_affinity(struct irq_desc *desc)
 337{
 338	struct cpumask *set = irq_default_affinity;
 339	int ret, node = irq_desc_get_node(desc);
 340	static DEFINE_RAW_SPINLOCK(mask_lock);
 341	static struct cpumask mask;
 342
 343	/* Excludes PER_CPU and NO_BALANCE interrupts */
 344	if (!__irq_can_set_affinity(desc))
 345		return 0;
 346
 347	raw_spin_lock(&mask_lock);
 348	/*
 349	 * Preserve the managed affinity setting and a userspace affinity
 350	 * setup, but make sure that one of the targets is online.
 351	 */
 352	if (irqd_affinity_is_managed(&desc->irq_data) ||
 353	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 354		if (cpumask_intersects(desc->irq_common_data.affinity,
 355				       cpu_online_mask))
 356			set = desc->irq_common_data.affinity;
 357		else
 358			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 359	}
 360
 361	cpumask_and(&mask, cpu_online_mask, set);
 362	if (node != NUMA_NO_NODE) {
 363		const struct cpumask *nodemask = cpumask_of_node(node);
 364
 365		/* make sure at least one of the cpus in nodemask is online */
 366		if (cpumask_intersects(&mask, nodemask))
 367			cpumask_and(&mask, &mask, nodemask);
 368	}
 369	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
 370	raw_spin_unlock(&mask_lock);
 371	return ret;
 372}
 373#else
 374/* Wrapper for ALPHA specific affinity selector magic */
 375int irq_setup_affinity(struct irq_desc *desc)
 376{
 377	return irq_select_affinity(irq_desc_get_irq(desc));
 378}
 379#endif
 380
 381/*
 382 * Called when a bogus affinity is set via /proc/irq
 383 */
 384int irq_select_affinity_usr(unsigned int irq)
 385{
 386	struct irq_desc *desc = irq_to_desc(irq);
 387	unsigned long flags;
 388	int ret;
 389
 390	raw_spin_lock_irqsave(&desc->lock, flags);
 391	ret = irq_setup_affinity(desc);
 392	raw_spin_unlock_irqrestore(&desc->lock, flags);
 393	return ret;
 394}
 395#endif
 396
 397/**
 398 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 399 *	@irq: interrupt number to set affinity
 400 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 401 *	            specific data for percpu_devid interrupts
 402 *
 403 *	This function uses the vCPU specific data to set the vCPU
 404 *	affinity for an irq. The vCPU specific data is passed from
 405 *	outside, such as KVM. One example code path is as below:
 406 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 407 */
 408int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 409{
 410	unsigned long flags;
 411	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 412	struct irq_data *data;
 413	struct irq_chip *chip;
 414	int ret = -ENOSYS;
 415
 416	if (!desc)
 417		return -EINVAL;
 418
 419	data = irq_desc_get_irq_data(desc);
 420	do {
 421		chip = irq_data_get_irq_chip(data);
 422		if (chip && chip->irq_set_vcpu_affinity)
 423			break;
 424#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 425		data = data->parent_data;
 426#else
 427		data = NULL;
 428#endif
 429	} while (data);
 430
 431	if (data)
 432		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
 433	irq_put_desc_unlock(desc, flags);
 434
 435	return ret;
 436}
 437EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
 438
 439void __disable_irq(struct irq_desc *desc)
 440{
 441	if (!desc->depth++)
 442		irq_disable(desc);
 443}
 444
 445static int __disable_irq_nosync(unsigned int irq)
 446{
 447	unsigned long flags;
 448	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 449
 450	if (!desc)
 451		return -EINVAL;
 452	__disable_irq(desc);
 453	irq_put_desc_busunlock(desc, flags);
 454	return 0;
 455}
 456
 457/**
 458 *	disable_irq_nosync - disable an irq without waiting
 459 *	@irq: Interrupt to disable
 460 *
 461 *	Disable the selected interrupt line.  Disables and Enables are
 462 *	nested.
 463 *	Unlike disable_irq(), this function does not ensure existing
 464 *	instances of the IRQ handler have completed before returning.
 465 *
 466 *	This function may be called from IRQ context.
 467 */
 468void disable_irq_nosync(unsigned int irq)
 469{
 470	__disable_irq_nosync(irq);
 471}
 472EXPORT_SYMBOL(disable_irq_nosync);
 473
 474/**
 475 *	disable_irq - disable an irq and wait for completion
 476 *	@irq: Interrupt to disable
 477 *
 478 *	Disable the selected interrupt line.  Enables and Disables are
 479 *	nested.
 480 *	This function waits for any pending IRQ handlers for this interrupt
 481 *	to complete before returning. If you use this function while
 482 *	holding a resource the IRQ handler may need you will deadlock.
 483 *
 484 *	This function may be called - with care - from IRQ context.
 485 */
 486void disable_irq(unsigned int irq)
 487{
 488	if (!__disable_irq_nosync(irq))
 489		synchronize_irq(irq);
 490}
 491EXPORT_SYMBOL(disable_irq);
 492
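Because disables nest, a sequence like the following sketch (hypothetical foo_* names) is safe even if another code path has the interrupt disabled at the same time. Note that disable_irq() may sleep while waiting for handlers, so it must not be called from atomic context.

static void foo_reconfigure(struct foo_dev *foo)
{
	disable_irq(foo->irq);		/* also waits for running handlers */
	foo_hw_rewrite_config(foo);	/* handler cannot run concurrently */
	enable_irq(foo->irq);		/* line is unmasked on the last enable */
}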
 493/**
 494 *	disable_hardirq - disables an irq and waits for hardirq completion
 495 *	@irq: Interrupt to disable
 496 *
 497 *	Disable the selected interrupt line.  Enables and Disables are
 498 *	nested.
 499 *	This function waits for any pending hard IRQ handlers for this
 500 *	interrupt to complete before returning. If you use this function while
 501 *	holding a resource the hard IRQ handler may need you will deadlock.
 502 *
 503 *	When used to optimistically disable an interrupt from atomic context
 504 *	the return value must be checked.
 505 *
 506 *	Returns: false if a threaded handler is active.
 507 *
 508 *	This function may be called - with care - from IRQ context.
 509 */
 510bool disable_hardirq(unsigned int irq)
 511{
 512	if (!__disable_irq_nosync(irq))
 513		return synchronize_hardirq(irq);
 514
 515	return false;
 516}
 517EXPORT_SYMBOL_GPL(disable_hardirq);
 518
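The optimistic use from atomic context looks roughly like this sketch, patterned after the netpoll callers of this interface (foo_poll_hw() is illustrative): the work may only proceed when no threaded handler is active.

static void foo_poll(struct foo_dev *foo)
{
	/* only the hardirq part can be waited for in atomic context */
	if (disable_hardirq(foo->irq))
		foo_poll_hw(foo);	/* no handler, hard or threaded, is running */
	enable_irq(foo->irq);
}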
 519void __enable_irq(struct irq_desc *desc)
 520{
 521	switch (desc->depth) {
 522	case 0:
 523 err_out:
 524		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
 525		     irq_desc_get_irq(desc));
 526		break;
 527	case 1: {
 528		if (desc->istate & IRQS_SUSPENDED)
 529			goto err_out;
 530		/* Prevent probing on this irq: */
 531		irq_settings_set_noprobe(desc);
 532		/*
 533		 * Call irq_startup() not irq_enable() here because the
 534		 * interrupt might be marked NOAUTOEN. So irq_startup()
 535		 * needs to be invoked when it gets enabled the first
 536		 * time. If it was already started up, then irq_startup()
 537		 * will invoke irq_enable() under the hood.
 538		 */
 539		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
 540		break;
 541	}
 542	default:
 543		desc->depth--;
 544	}
 545}
 546
 547/**
 548 *	enable_irq - enable handling of an irq
 549 *	@irq: Interrupt to enable
 550 *
 551 *	Undoes the effect of one call to disable_irq().  If this
 552 *	matches the last disable, processing of interrupts on this
 553 *	IRQ line is re-enabled.
 554 *
 555 *	This function may be called from IRQ context only when
  556 *	desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL !
 557 */
 558void enable_irq(unsigned int irq)
 559{
 560	unsigned long flags;
 561	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 562
 563	if (!desc)
 564		return;
 565	if (WARN(!desc->irq_data.chip,
 566		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 567		goto out;
 568
 569	__enable_irq(desc);
 570out:
 571	irq_put_desc_busunlock(desc, flags);
 572}
 573EXPORT_SYMBOL(enable_irq);
 574
 575static int set_irq_wake_real(unsigned int irq, unsigned int on)
 576{
 577	struct irq_desc *desc = irq_to_desc(irq);
 578	int ret = -ENXIO;
 579
 580	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
 581		return 0;
 582
 583	if (desc->irq_data.chip->irq_set_wake)
 584		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 585
 586	return ret;
 587}
 588
 589/**
 590 *	irq_set_irq_wake - control irq power management wakeup
 591 *	@irq:	interrupt to control
 592 *	@on:	enable/disable power management wakeup
 593 *
 594 *	Enable/disable power management wakeup mode, which is
 595 *	disabled by default.  Enables and disables must match,
 596 *	just as they match for non-wakeup mode support.
 597 *
 598 *	Wakeup mode lets this IRQ wake the system from sleep
 599 *	states like "suspend to RAM".
 600 */
 601int irq_set_irq_wake(unsigned int irq, unsigned int on)
 602{
 603	unsigned long flags;
 604	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 605	int ret = 0;
 606
 607	if (!desc)
 608		return -EINVAL;
 609
 610	/* wakeup-capable irqs can be shared between drivers that
 611	 * don't need to have the same sleep mode behaviors.
 612	 */
 613	if (on) {
 614		if (desc->wake_depth++ == 0) {
 615			ret = set_irq_wake_real(irq, on);
 616			if (ret)
 617				desc->wake_depth = 0;
 618			else
 619				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 620		}
 621	} else {
 622		if (desc->wake_depth == 0) {
 623			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 624		} else if (--desc->wake_depth == 0) {
 625			ret = set_irq_wake_real(irq, on);
 626			if (ret)
 627				desc->wake_depth = 1;
 628			else
 629				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 630		}
 631	}
 632	irq_put_desc_busunlock(desc, flags);
 633	return ret;
 634}
 635EXPORT_SYMBOL(irq_set_irq_wake);
 636
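Drivers usually reach this function through the enable_irq_wake()/disable_irq_wake() wrappers. A balanced suspend/resume pair might look like this sketch (struct foo_dev is hypothetical):

static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(foo->irq);	/* wake_depth 0 -> 1 */
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(foo->irq);	/* wake_depth 1 -> 0 */
	return 0;
}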
 637/*
 638 * Internal function that tells the architecture code whether a
 639 * particular irq has been exclusively allocated or is available
 640 * for driver use.
 641 */
 642int can_request_irq(unsigned int irq, unsigned long irqflags)
 643{
 644	unsigned long flags;
 645	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 646	int canrequest = 0;
 647
 648	if (!desc)
 649		return 0;
 650
 651	if (irq_settings_can_request(desc)) {
 652		if (!desc->action ||
 653		    irqflags & desc->action->flags & IRQF_SHARED)
 654			canrequest = 1;
 655	}
 656	irq_put_desc_unlock(desc, flags);
 657	return canrequest;
 658}
 659
 660int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 661{
 662	struct irq_chip *chip = desc->irq_data.chip;
 663	int ret, unmask = 0;
 664
 665	if (!chip || !chip->irq_set_type) {
 666		/*
 667		 * IRQF_TRIGGER_* but the PIC does not support multiple
 668		 * flow-types?
 669		 */
 670		pr_debug("No set_type function for IRQ %d (%s)\n",
 671			 irq_desc_get_irq(desc),
 672			 chip ? (chip->name ? : "unknown") : "unknown");
 673		return 0;
 674	}
 675
 676	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 677		if (!irqd_irq_masked(&desc->irq_data))
 678			mask_irq(desc);
 679		if (!irqd_irq_disabled(&desc->irq_data))
 680			unmask = 1;
 681	}
 682
 683	/* Mask all flags except trigger mode */
 684	flags &= IRQ_TYPE_SENSE_MASK;
 685	ret = chip->irq_set_type(&desc->irq_data, flags);
 686
 687	switch (ret) {
 688	case IRQ_SET_MASK_OK:
 689	case IRQ_SET_MASK_OK_DONE:
 690		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 691		irqd_set(&desc->irq_data, flags);
 692
 693	case IRQ_SET_MASK_OK_NOCOPY:
 694		flags = irqd_get_trigger_type(&desc->irq_data);
 695		irq_settings_set_trigger_mask(desc, flags);
 696		irqd_clear(&desc->irq_data, IRQD_LEVEL);
 697		irq_settings_clr_level(desc);
 698		if (flags & IRQ_TYPE_LEVEL_MASK) {
 699			irq_settings_set_level(desc);
 700			irqd_set(&desc->irq_data, IRQD_LEVEL);
 701		}
 702
 703		ret = 0;
 704		break;
 705	default:
 706		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
 707		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 708	}
 709	if (unmask)
 710		unmask_irq(desc);
 711	return ret;
 712}
 713
 714#ifdef CONFIG_HARDIRQS_SW_RESEND
 715int irq_set_parent(int irq, int parent_irq)
 716{
 717	unsigned long flags;
 718	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 719
 720	if (!desc)
 721		return -EINVAL;
 722
 723	desc->parent_irq = parent_irq;
 724
 725	irq_put_desc_unlock(desc, flags);
 726	return 0;
 727}
 728EXPORT_SYMBOL_GPL(irq_set_parent);
 729#endif
 730
 731/*
 732 * Default primary interrupt handler for threaded interrupts. Is
 733 * assigned as primary handler when request_threaded_irq is called
 734 * with handler == NULL. Useful for oneshot interrupts.
 735 */
 736static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 737{
 738	return IRQ_WAKE_THREAD;
 739}
 740
 741/*
 742 * Primary handler for nested threaded interrupts. Should never be
 743 * called.
 744 */
 745static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 746{
 747	WARN(1, "Primary handler called for nested irq %d\n", irq);
 748	return IRQ_NONE;
 749}
 750
 751static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 752{
 753	WARN(1, "Secondary action handler called for irq %d\n", irq);
 754	return IRQ_NONE;
 755}
 756
 757static int irq_wait_for_interrupt(struct irqaction *action)
 758{
 759	set_current_state(TASK_INTERRUPTIBLE);
 760
 761	while (!kthread_should_stop()) {
 762
 763		if (test_and_clear_bit(IRQTF_RUNTHREAD,
 764				       &action->thread_flags)) {
 765			__set_current_state(TASK_RUNNING);
 766			return 0;
 767		}
 768		schedule();
 769		set_current_state(TASK_INTERRUPTIBLE);
 770	}
 771	__set_current_state(TASK_RUNNING);
 772	return -1;
 773}
 774
 775/*
 776 * Oneshot interrupts keep the irq line masked until the threaded
  777 * handler has finished. Unmask if the interrupt has not been disabled and
 778 * is marked MASKED.
 779 */
 780static void irq_finalize_oneshot(struct irq_desc *desc,
 781				 struct irqaction *action)
 782{
 783	if (!(desc->istate & IRQS_ONESHOT) ||
 784	    action->handler == irq_forced_secondary_handler)
 785		return;
 786again:
 787	chip_bus_lock(desc);
 788	raw_spin_lock_irq(&desc->lock);
 789
 790	/*
  791	 * Implausible though it may be, we need to protect ourselves against
 792	 * the following scenario:
 793	 *
 794	 * The thread is faster done than the hard interrupt handler
 795	 * on the other CPU. If we unmask the irq line then the
  796	 * interrupt can come in again and mask the line, then leave due
 797	 * to IRQS_INPROGRESS and the irq line is masked forever.
 798	 *
 799	 * This also serializes the state of shared oneshot handlers
  800	 * versus "desc->threads_oneshot |= action->thread_mask;" in
 801	 * irq_wake_thread(). See the comment there which explains the
 802	 * serialization.
 803	 */
 804	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 805		raw_spin_unlock_irq(&desc->lock);
 806		chip_bus_sync_unlock(desc);
 807		cpu_relax();
 808		goto again;
 809	}
 810
 811	/*
  812	 * Now check again whether the thread should run. Otherwise
 813	 * we would clear the threads_oneshot bit of this thread which
 814	 * was just set.
 815	 */
 816	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 817		goto out_unlock;
 818
 819	desc->threads_oneshot &= ~action->thread_mask;
 820
 821	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
 822	    irqd_irq_masked(&desc->irq_data))
 823		unmask_threaded_irq(desc);
 824
 825out_unlock:
 826	raw_spin_unlock_irq(&desc->lock);
 827	chip_bus_sync_unlock(desc);
 828}
 829
 830#ifdef CONFIG_SMP
 831/*
 832 * Check whether we need to change the affinity of the interrupt thread.
 833 */
 834static void
 835irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 836{
 837	cpumask_var_t mask;
 838	bool valid = true;
 839
 840	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 841		return;
 842
 843	/*
 844	 * In case we are out of memory we set IRQTF_AFFINITY again and
 845	 * try again next time
 846	 */
 847	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 848		set_bit(IRQTF_AFFINITY, &action->thread_flags);
 849		return;
 850	}
 851
 852	raw_spin_lock_irq(&desc->lock);
 853	/*
 854	 * This code is triggered unconditionally. Check the affinity
  855	 * mask pointer. For CONFIG_CPUMASK_OFFSTACK=n this is optimized out.
 856	 */
 857	if (cpumask_available(desc->irq_common_data.affinity)) {
 858		const struct cpumask *m;
 859
 860		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
 861		cpumask_copy(mask, m);
 862	} else {
 863		valid = false;
 864	}
 865	raw_spin_unlock_irq(&desc->lock);
 866
 867	if (valid)
 868		set_cpus_allowed_ptr(current, mask);
 869	free_cpumask_var(mask);
 870}
 871#else
 872static inline void
 873irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 874#endif
 875
 876/*
  877 * Interrupts which are not explicitly requested as threaded
 878 * interrupts rely on the implicit bh/preempt disable of the hard irq
 879 * context. So we need to disable bh here to avoid deadlocks and other
 880 * side effects.
 881 */
 882static irqreturn_t
 883irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 884{
 885	irqreturn_t ret;
 886
 887	local_bh_disable();
 888	ret = action->thread_fn(action->irq, action->dev_id);
 889	irq_finalize_oneshot(desc, action);
 890	local_bh_enable();
 891	return ret;
 892}
 893
 894/*
 895 * Interrupts explicitly requested as threaded interrupts want to be
  896 * preemptible - many of them need to sleep and wait for slow buses to
 897 * complete.
 898 */
 899static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 900		struct irqaction *action)
 901{
 902	irqreturn_t ret;
 903
 904	ret = action->thread_fn(action->irq, action->dev_id);
 905	irq_finalize_oneshot(desc, action);
 906	return ret;
 907}
 908
 909static void wake_threads_waitq(struct irq_desc *desc)
 910{
 911	if (atomic_dec_and_test(&desc->threads_active))
 912		wake_up(&desc->wait_for_threads);
 913}
 914
 915static void irq_thread_dtor(struct callback_head *unused)
 916{
 917	struct task_struct *tsk = current;
 918	struct irq_desc *desc;
 919	struct irqaction *action;
 920
 921	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
 922		return;
 923
 924	action = kthread_data(tsk);
 925
 926	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 927	       tsk->comm, tsk->pid, action->irq);
 928
 929
 930	desc = irq_to_desc(action->irq);
 931	/*
 932	 * If IRQTF_RUNTHREAD is set, we need to decrement
 933	 * desc->threads_active and wake possible waiters.
 934	 */
 935	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 936		wake_threads_waitq(desc);
 937
 938	/* Prevent a stale desc->threads_oneshot */
 939	irq_finalize_oneshot(desc, action);
 940}
 941
 942static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
 943{
 944	struct irqaction *secondary = action->secondary;
 945
 946	if (WARN_ON_ONCE(!secondary))
 947		return;
 948
 949	raw_spin_lock_irq(&desc->lock);
 950	__irq_wake_thread(desc, secondary);
 951	raw_spin_unlock_irq(&desc->lock);
 952}
 953
 954/*
 955 * Interrupt handler thread
 956 */
 957static int irq_thread(void *data)
 958{
 959	struct callback_head on_exit_work;
 960	struct irqaction *action = data;
 961	struct irq_desc *desc = irq_to_desc(action->irq);
 962	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 963			struct irqaction *action);
 964
 965	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 966					&action->thread_flags))
 967		handler_fn = irq_forced_thread_fn;
 968	else
 969		handler_fn = irq_thread_fn;
 970
 971	init_task_work(&on_exit_work, irq_thread_dtor);
 972	task_work_add(current, &on_exit_work, false);
 973
 974	irq_thread_check_affinity(desc, action);
 975
 976	while (!irq_wait_for_interrupt(action)) {
 977		irqreturn_t action_ret;
 978
 979		irq_thread_check_affinity(desc, action);
 980
 981		action_ret = handler_fn(desc, action);
 982		if (action_ret == IRQ_HANDLED)
 983			atomic_inc(&desc->threads_handled);
 984		if (action_ret == IRQ_WAKE_THREAD)
 985			irq_wake_secondary(desc, action);
 986
 987		wake_threads_waitq(desc);
 988	}
 989
 990	/*
 991	 * This is the regular exit path. __free_irq() is stopping the
 992	 * thread via kthread_stop() after calling
 993	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
 994	 * oneshot mask bit can be set. We cannot verify that as we
 995	 * cannot touch the oneshot mask at this point anymore as
  996 * __setup_irq() might have given out current's thread_mask
 997	 * again.
 998	 */
 999	task_work_cancel(current, irq_thread_dtor);
1000	return 0;
1001}
1002
1003/**
1004 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1005 *	@irq:		Interrupt line
1006 *	@dev_id:	Device identity for which the thread should be woken
1007 *
1008 */
1009void irq_wake_thread(unsigned int irq, void *dev_id)
1010{
1011	struct irq_desc *desc = irq_to_desc(irq);
1012	struct irqaction *action;
1013	unsigned long flags;
1014
1015	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1016		return;
1017
1018	raw_spin_lock_irqsave(&desc->lock, flags);
1019	for_each_action_of_desc(desc, action) {
1020		if (action->dev_id == dev_id) {
1021			if (action->thread)
1022				__irq_wake_thread(desc, action);
1023			break;
1024		}
1025	}
1026	raw_spin_unlock_irqrestore(&desc->lock, flags);
1027}
1028EXPORT_SYMBOL_GPL(irq_wake_thread);
1029
1030static int irq_setup_forced_threading(struct irqaction *new)
1031{
1032	if (!force_irqthreads)
1033		return 0;
1034	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1035		return 0;
1036
1037	new->flags |= IRQF_ONESHOT;
1038
1039	/*
1040	 * Handle the case where we have a real primary handler and a
1041	 * thread handler. We force thread them as well by creating a
1042	 * secondary action.
1043	 */
1044	if (new->handler != irq_default_primary_handler && new->thread_fn) {
1045		/* Allocate the secondary action */
1046		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1047		if (!new->secondary)
1048			return -ENOMEM;
1049		new->secondary->handler = irq_forced_secondary_handler;
1050		new->secondary->thread_fn = new->thread_fn;
1051		new->secondary->dev_id = new->dev_id;
1052		new->secondary->irq = new->irq;
1053		new->secondary->name = new->name;
1054	}
1055	/* Deal with the primary handler */
1056	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1057	new->thread_fn = new->handler;
1058	new->handler = irq_default_primary_handler;
1059	return 0;
1060}
1061
1062static int irq_request_resources(struct irq_desc *desc)
1063{
1064	struct irq_data *d = &desc->irq_data;
1065	struct irq_chip *c = d->chip;
1066
1067	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1068}
1069
1070static void irq_release_resources(struct irq_desc *desc)
1071{
1072	struct irq_data *d = &desc->irq_data;
1073	struct irq_chip *c = d->chip;
1074
1075	if (c->irq_release_resources)
1076		c->irq_release_resources(d);
1077}
1078
1079static int
1080setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1081{
1082	struct task_struct *t;
1083	struct sched_param param = {
1084		.sched_priority = MAX_USER_RT_PRIO/2,
1085	};
1086
1087	if (!secondary) {
1088		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1089				   new->name);
1090	} else {
1091		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1092				   new->name);
1093		param.sched_priority -= 1;
1094	}
1095
1096	if (IS_ERR(t))
1097		return PTR_ERR(t);
1098
1099	sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
1100
1101	/*
1102	 * We keep the reference to the task struct even if
1103	 * the thread dies to avoid that the interrupt code
1104	 * references an already freed task_struct.
1105	 */
1106	get_task_struct(t);
1107	new->thread = t;
1108	/*
1109	 * Tell the thread to set its affinity. This is
1110	 * important for shared interrupt handlers as we do
1111	 * not invoke setup_affinity() for the secondary
1112	 * handlers as everything is already set up. Even for
1113	 * interrupts marked with IRQF_NO_BALANCE this is
1114	 * correct as we want the thread to move to the cpu(s)
1115	 * on which the requesting code placed the interrupt.
1116	 */
1117	set_bit(IRQTF_AFFINITY, &new->thread_flags);
1118	return 0;
1119}
1120
1121/*
1122 * Internal function to register an irqaction - typically used to
1123 * allocate special interrupts that are part of the architecture.
1124 *
1125 * Locking rules:
1126 *
1127 * desc->request_mutex	Provides serialization against a concurrent free_irq()
1128 *   chip_bus_lock	Provides serialization for slow bus operations
1129 *     desc->lock	Provides serialization against hard interrupts
1130 *
1131 * chip_bus_lock and desc->lock are sufficient for all other management and
1132 * interrupt related functions. desc->request_mutex solely serializes
1133 * request/free_irq().
1134 */
1135static int
1136__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1137{
1138	struct irqaction *old, **old_ptr;
1139	unsigned long flags, thread_mask = 0;
1140	int ret, nested, shared = 0;
1141
1142	if (!desc)
1143		return -EINVAL;
1144
1145	if (desc->irq_data.chip == &no_irq_chip)
1146		return -ENOSYS;
1147	if (!try_module_get(desc->owner))
1148		return -ENODEV;
1149
1150	new->irq = irq;
1151
1152	/*
1153	 * If the trigger type is not specified by the caller,
1154	 * then use the default for this interrupt.
1155	 */
1156	if (!(new->flags & IRQF_TRIGGER_MASK))
1157		new->flags |= irqd_get_trigger_type(&desc->irq_data);
1158
1159	/*
1160	 * Check whether the interrupt nests into another interrupt
1161	 * thread.
1162	 */
1163	nested = irq_settings_is_nested_thread(desc);
1164	if (nested) {
1165		if (!new->thread_fn) {
1166			ret = -EINVAL;
1167			goto out_mput;
1168		}
1169		/*
1170		 * Replace the primary handler which was provided from
1171		 * the driver for non nested interrupt handling by the
1172		 * dummy function which warns when called.
1173		 */
1174		new->handler = irq_nested_primary_handler;
1175	} else {
1176		if (irq_settings_can_thread(desc)) {
1177			ret = irq_setup_forced_threading(new);
1178			if (ret)
1179				goto out_mput;
1180		}
1181	}
1182
1183	/*
1184	 * Create a handler thread when a thread function is supplied
1185	 * and the interrupt does not nest into another interrupt
1186	 * thread.
1187	 */
1188	if (new->thread_fn && !nested) {
1189		ret = setup_irq_thread(new, irq, false);
1190		if (ret)
1191			goto out_mput;
1192		if (new->secondary) {
1193			ret = setup_irq_thread(new->secondary, irq, true);
1194			if (ret)
1195				goto out_thread;
1196		}
1197	}
1198
1199	/*
1200	 * Drivers are often written to work w/o knowledge about the
1201	 * underlying irq chip implementation, so a request for a
1202	 * threaded irq without a primary hard irq context handler
1203	 * requires the ONESHOT flag to be set. Some irq chips like
1204	 * MSI based interrupts are per se one shot safe. Check the
1205	 * chip flags, so we can avoid the unmask dance at the end of
1206	 * the threaded handler for those.
1207	 */
1208	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
1209		new->flags &= ~IRQF_ONESHOT;
1210
1211	/*
1212	 * Protects against a concurrent __free_irq() call which might wait
1213	 * for synchronize_irq() to complete without holding the optional
1214	 * chip bus lock and desc->lock.
1215	 */
1216	mutex_lock(&desc->request_mutex);
1217
1218	/*
1219	 * Acquire bus lock as the irq_request_resources() callback below
1220	 * might rely on the serialization or the magic power management
1221	 * functions which are abusing the irq_bus_lock() callback,
 1222	 * functions which are abusing the irq_bus_lock() callback.
1223	chip_bus_lock(desc);
1224
1225	/* First installed action requests resources. */
1226	if (!desc->action) {
1227		ret = irq_request_resources(desc);
1228		if (ret) {
1229			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
1230			       new->name, irq, desc->irq_data.chip->name);
1231			goto out_bus_unlock;
1232		}
1233	}
1234
1235	/*
1236	 * The following block of code has to be executed atomically
1237	 * protected against a concurrent interrupt and any of the other
1238	 * management calls which are not serialized via
1239	 * desc->request_mutex or the optional bus lock.
1240	 */
1241	raw_spin_lock_irqsave(&desc->lock, flags);
1242	old_ptr = &desc->action;
1243	old = *old_ptr;
1244	if (old) {
1245		/*
1246		 * Can't share interrupts unless both agree to and are
1247		 * the same type (level, edge, polarity). So both flag
1248		 * fields must have IRQF_SHARED set and the bits which
1249		 * set the trigger type must match. Also all must
1250		 * agree on ONESHOT.
1251		 */
1252		unsigned int oldtype;
1253
1254		/*
1255		 * If nobody did set the configuration before, inherit
1256		 * the one provided by the requester.
1257		 */
1258		if (irqd_trigger_type_was_set(&desc->irq_data)) {
1259			oldtype = irqd_get_trigger_type(&desc->irq_data);
1260		} else {
1261			oldtype = new->flags & IRQF_TRIGGER_MASK;
1262			irqd_set_trigger_type(&desc->irq_data, oldtype);
1263		}
1264
1265		if (!((old->flags & new->flags) & IRQF_SHARED) ||
1266		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
1267		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
1268			goto mismatch;
1269
1270		/* All handlers must agree on per-cpuness */
1271		if ((old->flags & IRQF_PERCPU) !=
1272		    (new->flags & IRQF_PERCPU))
1273			goto mismatch;
1274
1275		/* add new interrupt at end of irq queue */
1276		do {
1277			/*
1278			 * Or all existing action->thread_mask bits,
1279			 * so we can find the next zero bit for this
1280			 * new action.
1281			 */
1282			thread_mask |= old->thread_mask;
1283			old_ptr = &old->next;
1284			old = *old_ptr;
1285		} while (old);
1286		shared = 1;
1287	}
1288
1289	/*
1290	 * Setup the thread mask for this irqaction for ONESHOT. For
1291	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
1292	 * conditional in irq_wake_thread().
1293	 */
1294	if (new->flags & IRQF_ONESHOT) {
1295		/*
1296		 * Unlikely to have 32 resp 64 irqs sharing one line,
1297		 * but who knows.
1298		 */
1299		if (thread_mask == ~0UL) {
1300			ret = -EBUSY;
1301			goto out_unlock;
1302		}
1303		/*
1304		 * The thread_mask for the action is or'ed to
1305		 * desc->thread_active to indicate that the
1306		 * IRQF_ONESHOT thread handler has been woken, but not
1307		 * yet finished. The bit is cleared when a thread
1308		 * completes. When all threads of a shared interrupt
1309		 * line have completed desc->threads_active becomes
1310		 * zero and the interrupt line is unmasked. See
1311		 * handle.c:irq_wake_thread() for further information.
1312		 *
1313		 * If no thread is woken by primary (hard irq context)
1314		 * interrupt handlers, then desc->threads_active is
1315		 * also checked for zero to unmask the irq line in the
1316		 * affected hard irq flow handlers
1317		 * (handle_[fasteoi|level]_irq).
1318		 *
1319		 * The new action gets the first zero bit of
1320		 * thread_mask assigned. See the loop above which or's
1321		 * all existing action->thread_mask bits.
1322		 */
1323		new->thread_mask = 1UL << ffz(thread_mask);
1324
1325	} else if (new->handler == irq_default_primary_handler &&
1326		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1327		/*
1328		 * The interrupt was requested with handler = NULL, so
1329		 * we use the default primary handler for it. But it
1330		 * does not have the oneshot flag set. In combination
1331		 * with level interrupts this is deadly, because the
1332		 * default primary handler just wakes the thread, then
 1333	 * the irq line is reenabled, but the device still
1334		 * has the level irq asserted. Rinse and repeat....
1335		 *
1336		 * While this works for edge type interrupts, we play
1337		 * it safe and reject unconditionally because we can't
1338		 * say for sure which type this interrupt really
1339		 * has. The type flags are unreliable as the
1340		 * underlying chip implementation can override them.
1341		 */
1342		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1343		       irq);
1344		ret = -EINVAL;
1345		goto out_unlock;
1346	}
1347
1348	if (!shared) {
1349		init_waitqueue_head(&desc->wait_for_threads);
1350
 1351		/* Set up the type (level, edge, polarity) if configured: */
1352		if (new->flags & IRQF_TRIGGER_MASK) {
1353			ret = __irq_set_trigger(desc,
1354						new->flags & IRQF_TRIGGER_MASK);
1355
1356			if (ret)
1357				goto out_unlock;
1358		}
1359
1360		/*
1361		 * Activate the interrupt. That activation must happen
1362		 * independently of IRQ_NOAUTOEN. request_irq() can fail
1363		 * and the callers are supposed to handle
1364		 * that. enable_irq() of an interrupt requested with
1365		 * IRQ_NOAUTOEN is not supposed to fail. The activation
 1366		 * keeps it in shutdown mode, it merely associates
1367		 * resources if necessary and if that's not possible it
1368		 * fails. Interrupts which are in managed shutdown mode
1369		 * will simply ignore that activation request.
1370		 */
1371		ret = irq_activate(desc);
1372		if (ret)
1373			goto out_unlock;
1374
1375		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1376				  IRQS_ONESHOT | IRQS_WAITING);
1377		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1378
1379		if (new->flags & IRQF_PERCPU) {
1380			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1381			irq_settings_set_per_cpu(desc);
1382		}
1383
1384		if (new->flags & IRQF_ONESHOT)
1385			desc->istate |= IRQS_ONESHOT;
1386
1387		/* Exclude IRQ from balancing if requested */
1388		if (new->flags & IRQF_NOBALANCING) {
1389			irq_settings_set_no_balancing(desc);
1390			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1391		}
1392
1393		if (irq_settings_can_autoenable(desc)) {
1394			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
1395		} else {
1396			/*
1397			 * Shared interrupts do not go well with disabling
1398			 * auto enable. The sharing interrupt might request
1399			 * it while it's still disabled and then wait for
1400			 * interrupts forever.
1401			 */
1402			WARN_ON_ONCE(new->flags & IRQF_SHARED);
1403			/* Undo nested disables: */
1404			desc->depth = 1;
1405		}
1406
1407	} else if (new->flags & IRQF_TRIGGER_MASK) {
1408		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1409		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);
1410
1411		if (nmsk != omsk)
 1412			/* hope the handler works with the current trigger mode */
1413			pr_warn("irq %d uses trigger mode %u; requested %u\n",
1414				irq, omsk, nmsk);
1415	}
1416
1417	*old_ptr = new;
1418
1419	irq_pm_install_action(desc, new);
1420
1421	/* Reset broken irq detection when installing new handler */
1422	desc->irq_count = 0;
1423	desc->irqs_unhandled = 0;
1424
1425	/*
1426	 * Check whether we disabled the irq via the spurious handler
1427	 * before. Reenable it and give it another chance.
1428	 */
1429	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1430		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1431		__enable_irq(desc);
1432	}
1433
1434	raw_spin_unlock_irqrestore(&desc->lock, flags);
1435	chip_bus_sync_unlock(desc);
1436	mutex_unlock(&desc->request_mutex);
1437
1438	irq_setup_timings(desc, new);
1439
1440	/*
1441	 * Strictly no need to wake it up, but hung_task complains
1442	 * when no hard interrupt wakes the thread up.
1443	 */
1444	if (new->thread)
1445		wake_up_process(new->thread);
1446	if (new->secondary)
1447		wake_up_process(new->secondary->thread);
1448
1449	register_irq_proc(irq, desc);
1450	new->dir = NULL;
1451	register_handler_proc(irq, new);
1452	return 0;
1453
1454mismatch:
1455	if (!(new->flags & IRQF_PROBE_SHARED)) {
1456		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1457		       irq, new->flags, new->name, old->flags, old->name);
1458#ifdef CONFIG_DEBUG_SHIRQ
1459		dump_stack();
1460#endif
1461	}
1462	ret = -EBUSY;
1463
1464out_unlock:
1465	raw_spin_unlock_irqrestore(&desc->lock, flags);
1466
1467	if (!desc->action)
1468		irq_release_resources(desc);
1469out_bus_unlock:
1470	chip_bus_sync_unlock(desc);
1471	mutex_unlock(&desc->request_mutex);
1472
1473out_thread:
1474	if (new->thread) {
1475		struct task_struct *t = new->thread;
1476
1477		new->thread = NULL;
1478		kthread_stop(t);
1479		put_task_struct(t);
1480	}
1481	if (new->secondary && new->secondary->thread) {
1482		struct task_struct *t = new->secondary->thread;
1483
1484		new->secondary->thread = NULL;
1485		kthread_stop(t);
1486		put_task_struct(t);
1487	}
1488out_mput:
1489	module_put(desc->owner);
1490	return ret;
1491}
1492
1493/**
1494 *	setup_irq - setup an interrupt
1495 *	@irq: Interrupt line to setup
1496 *	@act: irqaction for the interrupt
1497 *
1498 * Used to statically setup interrupts in the early boot process.
1499 */
1500int setup_irq(unsigned int irq, struct irqaction *act)
1501{
1502	int retval;
1503	struct irq_desc *desc = irq_to_desc(irq);
1504
1505	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1506		return -EINVAL;
1507
1508	retval = irq_chip_pm_get(&desc->irq_data);
1509	if (retval < 0)
1510		return retval;
1511
1512	retval = __setup_irq(irq, desc, act);
1513
1514	if (retval)
1515		irq_chip_pm_put(&desc->irq_data);
1516
1517	return retval;
1518}
1519EXPORT_SYMBOL_GPL(setup_irq);
1520
1521/*
1522 * Internal function to unregister an irqaction - used to free
1523 * regular and special interrupts that are part of the architecture.
1524 */
1525static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
1526{
1527	unsigned irq = desc->irq_data.irq;
1528	struct irqaction *action, **action_ptr;
1529	unsigned long flags;
1530
1531	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1532
1533	if (!desc)
1534		return NULL;
1535
1536	mutex_lock(&desc->request_mutex);
1537	chip_bus_lock(desc);
1538	raw_spin_lock_irqsave(&desc->lock, flags);
1539
1540	/*
1541	 * There can be multiple actions per IRQ descriptor, find the right
1542	 * one based on the dev_id:
1543	 */
1544	action_ptr = &desc->action;
1545	for (;;) {
1546		action = *action_ptr;
1547
1548		if (!action) {
1549			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1550			raw_spin_unlock_irqrestore(&desc->lock, flags);
1551			chip_bus_sync_unlock(desc);
1552			mutex_unlock(&desc->request_mutex);
1553			return NULL;
1554		}
1555
1556		if (action->dev_id == dev_id)
1557			break;
1558		action_ptr = &action->next;
1559	}
1560
1561	/* Found it - now remove it from the list of entries: */
1562	*action_ptr = action->next;
1563
1564	irq_pm_remove_action(desc, action);
1565
1566	/* If this was the last handler, shut down the IRQ line: */
1567	if (!desc->action) {
1568		irq_settings_clr_disable_unlazy(desc);
1569		irq_shutdown(desc);
1570	}
1571
1572#ifdef CONFIG_SMP
1573	/* make sure affinity_hint is cleaned up */
1574	if (WARN_ON_ONCE(desc->affinity_hint))
1575		desc->affinity_hint = NULL;
1576#endif
1577
1578	raw_spin_unlock_irqrestore(&desc->lock, flags);
1579	/*
1580	 * Drop bus_lock here so the changes which were done in the chip
1581	 * callbacks above are synced out to the irq chips which hang
1582	 * behind a slow bus (I2C, SPI) before calling synchronize_irq().
1583	 *
1584	 * Aside of that the bus_lock can also be taken from the threaded
1585	 * handler in irq_finalize_oneshot() which results in a deadlock
1586	 * because synchronize_irq() would wait forever for the thread to
1587	 * complete, which is blocked on the bus lock.
1588	 *
1589	 * The still held desc->request_mutex() protects against a
1590	 * concurrent request_irq() of this irq so the release of resources
1591	 * and timing data is properly serialized.
1592	 */
1593	chip_bus_sync_unlock(desc);
1594
1595	unregister_handler_proc(irq, action);
1596
1597	/* Make sure it's not being used on another CPU: */
1598	synchronize_irq(irq);
1599
1600#ifdef CONFIG_DEBUG_SHIRQ
1601	/*
1602	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1603	 * event to happen even now it's being freed, so let's make sure that
1604	 * is so by doing an extra call to the handler ....
1605	 *
1606	 * ( We do this after actually deregistering it, to make sure that a
 1607	 *   'real' IRQ doesn't run in parallel with our fake. )
1608	 */
1609	if (action->flags & IRQF_SHARED) {
1610		local_irq_save(flags);
1611		action->handler(irq, dev_id);
1612		local_irq_restore(flags);
1613	}
1614#endif
1615
1616	if (action->thread) {
1617		kthread_stop(action->thread);
1618		put_task_struct(action->thread);
1619		if (action->secondary && action->secondary->thread) {
1620			kthread_stop(action->secondary->thread);
1621			put_task_struct(action->secondary->thread);
1622		}
1623	}
1624
1625	/* Last action releases resources */
1626	if (!desc->action) {
1627		/*
 1628		 * Reacquire bus lock as irq_release_resources() might
1629		 * require it to deallocate resources over the slow bus.
1630		 */
1631		chip_bus_lock(desc);
1632		irq_release_resources(desc);
1633		chip_bus_sync_unlock(desc);
1634		irq_remove_timings(desc);
1635	}
1636
1637	mutex_unlock(&desc->request_mutex);
1638
1639	irq_chip_pm_put(&desc->irq_data);
1640	module_put(desc->owner);
1641	kfree(action->secondary);
1642	return action;
1643}
1644
1645/**
1646 *	remove_irq - free an interrupt
1647 *	@irq: Interrupt line to free
1648 *	@act: irqaction for the interrupt
1649 *
1650 * Used to remove interrupts statically setup by the early boot process.
1651 */
1652void remove_irq(unsigned int irq, struct irqaction *act)
1653{
1654	struct irq_desc *desc = irq_to_desc(irq);
1655
1656	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1657		__free_irq(desc, act->dev_id);
1658}
1659EXPORT_SYMBOL_GPL(remove_irq);
1660
1661/**
1662 *	free_irq - free an interrupt allocated with request_irq
1663 *	@irq: Interrupt line to free
1664 *	@dev_id: Device identity to free
1665 *
1666 *	Remove an interrupt handler. The handler is removed and if the
1667 *	interrupt line is no longer in use by any driver it is disabled.
1668 *	On a shared IRQ the caller must ensure the interrupt is disabled
1669 *	on the card it drives before calling this function. The function
1670 *	does not return until any executing interrupts for this IRQ
1671 *	have completed.
1672 *
1673 *	This function must not be called from interrupt context.
1674 *
1675 *	Returns the devname argument passed to request_irq.
1676 */
1677const void *free_irq(unsigned int irq, void *dev_id)
1678{
1679	struct irq_desc *desc = irq_to_desc(irq);
1680	struct irqaction *action;
1681	const char *devname;
1682
1683	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1684		return NULL;
1685
1686#ifdef CONFIG_SMP
1687	if (WARN_ON(desc->affinity_notify))
1688		desc->affinity_notify = NULL;
1689#endif
1690
1691	action = __free_irq(desc, dev_id);
1692
1693	if (!action)
1694		return NULL;
1695
1696	devname = action->name;
1697	kfree(action);
1698	return devname;
1699}
1700EXPORT_SYMBOL(free_irq);
1701
1702/**
1703 *	request_threaded_irq - allocate an interrupt line
1704 *	@irq: Interrupt line to allocate
1705 *	@handler: Function to be called when the IRQ occurs.
1706 *		  Primary handler for threaded interrupts
1707 *		  If NULL and thread_fn != NULL the default
1708 *		  primary handler is installed
1709 *	@thread_fn: Function called from the irq handler thread
1710 *		    If NULL, no irq thread is created
1711 *	@irqflags: Interrupt type flags
1712 *	@devname: An ascii name for the claiming device
1713 *	@dev_id: A cookie passed back to the handler function
1714 *
1715 *	This call allocates interrupt resources and enables the
1716 *	interrupt line and IRQ handling. From the point this
1717 *	call is made your handler function may be invoked. Since
1718 *	your handler function must clear any interrupt the board
1719 *	raises, you must take care both to initialise your hardware
1720 *	and to set up the interrupt handler in the right order.
1721 *
1722 *	If you want to set up a threaded irq handler for your device
1723 *	then you need to supply @handler and @thread_fn. @handler is
1724 *	still called in hard interrupt context and has to check
1725 *	whether the interrupt originates from the device. If yes it
1726 *	needs to disable the interrupt on the device and return
1727 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
1728 *	@thread_fn. This split handler design is necessary to support
1729 *	shared interrupts.
1730 *
1731 *	Dev_id must be globally unique. Normally the address of the
1732 *	device data structure is used as the cookie. Since the handler
1733 *	receives this value it makes sense to use it.
1734 *
1735 *	If your interrupt is shared you must pass a non NULL dev_id
1736 *	as this is required when freeing the interrupt.
1737 *
1738 *	Flags:
1739 *
1740 *	IRQF_SHARED		Interrupt is shared
1741 *	IRQF_TRIGGER_*		Specify active edge(s) or level
1742 *
1743 */
1744int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1745			 irq_handler_t thread_fn, unsigned long irqflags,
1746			 const char *devname, void *dev_id)
1747{
1748	struct irqaction *action;
1749	struct irq_desc *desc;
1750	int retval;
1751
1752	if (irq == IRQ_NOTCONNECTED)
1753		return -ENOTCONN;
1754
1755	/*
1756	 * Sanity-check: shared interrupts must pass in a real dev-ID,
1757	 * otherwise we'll have trouble later trying to figure out
1758	 * which interrupt is which (messes up the interrupt freeing
1759	 * logic etc).
1760	 *
1761	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
1762	 * it cannot be set along with IRQF_NO_SUSPEND.
1763	 */
1764	if (((irqflags & IRQF_SHARED) && !dev_id) ||
1765	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
1766	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
1767		return -EINVAL;
1768
1769	desc = irq_to_desc(irq);
1770	if (!desc)
1771		return -EINVAL;
1772
1773	if (!irq_settings_can_request(desc) ||
1774	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1775		return -EINVAL;
1776
1777	if (!handler) {
1778		if (!thread_fn)
1779			return -EINVAL;
1780		handler = irq_default_primary_handler;
1781	}
1782
1783	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1784	if (!action)
1785		return -ENOMEM;
1786
1787	action->handler = handler;
1788	action->thread_fn = thread_fn;
1789	action->flags = irqflags;
1790	action->name = devname;
1791	action->dev_id = dev_id;
1792
1793	retval = irq_chip_pm_get(&desc->irq_data);
1794	if (retval < 0) {
1795		kfree(action);
1796		return retval;
1797	}
1798
1799	retval = __setup_irq(irq, desc, action);
1800
1801	if (retval) {
1802		irq_chip_pm_put(&desc->irq_data);
1803		kfree(action->secondary);
1804		kfree(action);
1805	}
1806
1807#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1808	if (!retval && (irqflags & IRQF_SHARED)) {
1809		/*
1810		 * It's a shared IRQ -- the driver ought to be prepared for it
1811		 * to happen immediately, so let's make sure....
1812		 * We disable the irq to make sure that a 'real' IRQ doesn't
1813		 * run in parallel with our fake.
1814		 */
1815		unsigned long flags;
1816
1817		disable_irq(irq);
1818		local_irq_save(flags);
1819
1820		handler(irq, dev_id);
1821
1822		local_irq_restore(flags);
1823		enable_irq(irq);
1824	}
1825#endif
1826	return retval;
1827}
1828EXPORT_SYMBOL(request_threaded_irq);
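
/*
 * Illustrative usage sketch, not part of the original file: a typical
 * driver-side call sequence for request_threaded_irq(). struct my_dev
 * and the my_dev_*() helpers are hypothetical stand-ins for real
 * device accessors; guarded by #if 0 so it is documentation only.
 */
#if 0
static irqreturn_t my_hardirq(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* Shared line: bail out if our device did not raise the IRQ. */
	if (!my_dev_irq_pending(dev))
		return IRQ_NONE;

	my_dev_mask_irq(dev);		/* quiesce the device */
	return IRQ_WAKE_THREAD;		/* run my_thread_fn() in a thread */
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	my_dev_handle_event(dev);	/* may sleep, e.g. slow bus access */
	my_dev_unmask_irq(dev);
	return IRQ_HANDLED;
}

	/* In probe(); dev also serves as the unique dev_id cookie: */
	ret = request_threaded_irq(dev->irq, my_hardirq, my_thread_fn,
				   IRQF_SHARED, "my_dev", dev);
	if (ret)
		goto err_free;
#endif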
1829
1830/**
1831 *	request_any_context_irq - allocate an interrupt line
1832 *	@irq: Interrupt line to allocate
1833 *	@handler: Function to be called when the IRQ occurs.
1834 *		  Threaded handler for threaded interrupts.
1835 *	@flags: Interrupt type flags
1836 *	@name: An ascii name for the claiming device
1837 *	@dev_id: A cookie passed back to the handler function
1838 *
1839 *	This call allocates interrupt resources and enables the
1840 *	interrupt line and IRQ handling. It selects either a
1841 *	hardirq or threaded handling method depending on the
1842 *	context.
1843 *
1844 *	On failure, it returns a negative value. On success,
1845 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1846 */
1847int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1848			    unsigned long flags, const char *name, void *dev_id)
1849{
1850	struct irq_desc *desc;
1851	int ret;
1852
1853	if (irq == IRQ_NOTCONNECTED)
1854		return -ENOTCONN;
1855
1856	desc = irq_to_desc(irq);
1857	if (!desc)
1858		return -EINVAL;
1859
1860	if (irq_settings_is_nested_thread(desc)) {
1861		ret = request_threaded_irq(irq, NULL, handler,
1862					   flags, name, dev_id);
1863		return !ret ? IRQC_IS_NESTED : ret;
1864	}
1865
1866	ret = request_irq(irq, handler, flags, name, dev_id);
1867	return !ret ? IRQC_IS_HARDIRQ : ret;
1868}
1869EXPORT_SYMBOL_GPL(request_any_context_irq);
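
/*
 * Illustrative sketch, not part of the original file: because
 * request_any_context_irq() returns IRQC_IS_HARDIRQ or IRQC_IS_NESTED
 * (both positive) on success, a plain "if (ret)" error check is wrong.
 * my_handler and dev are hypothetical.
 */
#if 0
	ret = request_any_context_irq(dev->irq, my_handler, 0,
				      "my_dev", dev);
	if (ret < 0)
		return ret;		/* only ret < 0 indicates failure */
	dev->nested_irq = (ret == IRQC_IS_NESTED);
#endif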
1870
1871void enable_percpu_irq(unsigned int irq, unsigned int type)
1872{
1873	unsigned int cpu = smp_processor_id();
1874	unsigned long flags;
1875	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1876
1877	if (!desc)
1878		return;
1879
1880	/*
1881	 * If the trigger type is not specified by the caller, then
1882	 * use the default for this interrupt.
1883	 */
1884	type &= IRQ_TYPE_SENSE_MASK;
1885	if (type == IRQ_TYPE_NONE)
1886		type = irqd_get_trigger_type(&desc->irq_data);
1887
1888	if (type != IRQ_TYPE_NONE) {
1889		int ret;
1890
1891		ret = __irq_set_trigger(desc, type);
1892
1893		if (ret) {
1894			WARN(1, "failed to set type for IRQ%d\n", irq);
1895			goto out;
1896		}
1897	}
1898
1899	irq_percpu_enable(desc, cpu);
1900out:
1901	irq_put_desc_unlock(desc, flags);
1902}
1903EXPORT_SYMBOL_GPL(enable_percpu_irq);
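
/*
 * Illustrative sketch, not part of the original file: enable_percpu_irq()
 * only acts on the calling CPU, so enabling a per-CPU interrupt on all
 * CPUs is commonly done via on_each_cpu(). my_irq is a hypothetical
 * per-CPU interrupt number.
 */
#if 0
static void my_enable_percpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	/* IRQ_TYPE_NONE keeps the trigger type already configured. */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
}

	on_each_cpu(my_enable_percpu, &my_irq, 1);
#endif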
1904
1905/**
1906 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1907 * @irq:	Linux irq number to check for
1908 *
1909 * Must be called from a non-migratable context. Returns the enable
1910 * state of a per cpu interrupt on the current cpu.
1911 */
1912bool irq_percpu_is_enabled(unsigned int irq)
1913{
1914	unsigned int cpu = smp_processor_id();
1915	struct irq_desc *desc;
1916	unsigned long flags;
1917	bool is_enabled;
1918
1919	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1920	if (!desc)
1921		return false;
1922
1923	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
1924	irq_put_desc_unlock(desc, flags);
1925
1926	return is_enabled;
1927}
1928EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);
1929
1930void disable_percpu_irq(unsigned int irq)
1931{
1932	unsigned int cpu = smp_processor_id();
1933	unsigned long flags;
1934	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1935
1936	if (!desc)
1937		return;
1938
1939	irq_percpu_disable(desc, cpu);
1940	irq_put_desc_unlock(desc, flags);
1941}
1942EXPORT_SYMBOL_GPL(disable_percpu_irq);
1943
1944/*
1945 * Internal function to unregister a percpu irqaction.
1946 */
1947static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1948{
1949	struct irq_desc *desc = irq_to_desc(irq);
1950	struct irqaction *action;
1951	unsigned long flags;
1952
1953	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1954
1955	if (!desc)
1956		return NULL;
1957
1958	raw_spin_lock_irqsave(&desc->lock, flags);
1959
1960	action = desc->action;
1961	if (!action || action->percpu_dev_id != dev_id) {
1962		WARN(1, "Trying to free already-free IRQ %d\n", irq);
1963		goto bad;
1964	}
1965
1966	if (!cpumask_empty(desc->percpu_enabled)) {
1967		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1968		     irq, cpumask_first(desc->percpu_enabled));
1969		goto bad;
1970	}
1971
1972	/* Found it - now remove it from the list of entries: */
1973	desc->action = NULL;
1974
1975	raw_spin_unlock_irqrestore(&desc->lock, flags);
1976
1977	unregister_handler_proc(irq, action);
1978
1979	irq_chip_pm_put(&desc->irq_data);
1980	module_put(desc->owner);
1981	return action;
1982
1983bad:
1984	raw_spin_unlock_irqrestore(&desc->lock, flags);
1985	return NULL;
1986}
1987
1988/**
1989 *	remove_percpu_irq - free a per-cpu interrupt
1990 *	@irq: Interrupt line to free
1991 *	@act: irqaction for the interrupt
1992 *
1993 * Used to remove interrupts statically set up by the early boot process.
1994 */
1995void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1996{
1997	struct irq_desc *desc = irq_to_desc(irq);
1998
1999	if (desc && irq_settings_is_per_cpu_devid(desc))
2000	    __free_percpu_irq(irq, act->percpu_dev_id);
2001}
2002
2003/**
2004 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
2005 *	@irq: Interrupt line to free
2006 *	@dev_id: Device identity to free
2007 *
2008 *	Remove a percpu interrupt handler. The handler is removed, but
2009 *	the interrupt line is not disabled. This must be done on each
2010 *	CPU before calling this function. The function does not return
2011 *	until any executing interrupts for this IRQ have completed.
2012 *
2013 *	This function must not be called from interrupt context.
2014 */
2015void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2016{
2017	struct irq_desc *desc = irq_to_desc(irq);
2018
2019	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2020		return;
2021
2022	chip_bus_lock(desc);
2023	kfree(__free_percpu_irq(irq, dev_id));
2024	chip_bus_sync_unlock(desc);
2025}
2026EXPORT_SYMBOL_GPL(free_percpu_irq);
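
/*
 * Illustrative sketch, not part of the original file: the per-CPU line
 * must be disabled on every CPU before free_percpu_irq() is called.
 * my_irq and the percpu cookie my_pcpu_dev are hypothetical.
 */
#if 0
static void my_disable_percpu(void *info)
{
	disable_percpu_irq(*(unsigned int *)info);
}

	on_each_cpu(my_disable_percpu, &my_irq, 1);
	free_percpu_irq(my_irq, my_pcpu_dev);
#endif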
2027
2028/**
2029 *	setup_percpu_irq - setup a per-cpu interrupt
2030 *	@irq: Interrupt line to setup
2031 *	@act: irqaction for the interrupt
2032 *
2033 * Used to statically set up per-cpu interrupts in the early boot process.
2034 */
2035int setup_percpu_irq(unsigned int irq, struct irqaction *act)
2036{
2037	struct irq_desc *desc = irq_to_desc(irq);
2038	int retval;
2039
2040	if (!desc || !irq_settings_is_per_cpu_devid(desc))
2041		return -EINVAL;
2042
2043	retval = irq_chip_pm_get(&desc->irq_data);
2044	if (retval < 0)
2045		return retval;
2046
2047	retval = __setup_irq(irq, desc, act);
2048
2049	if (retval)
2050		irq_chip_pm_put(&desc->irq_data);
2051
2052	return retval;
2053}
2054
2055/**
2056 *	__request_percpu_irq - allocate a percpu interrupt line
2057 *	@irq: Interrupt line to allocate
2058 *	@handler: Function to be called when the IRQ occurs.
2059 *	@flags: Interrupt type flags (IRQF_TIMER only)
2060 *	@devname: An ascii name for the claiming device
2061 *	@dev_id: A percpu cookie passed back to the handler function
2062 *
2063 *	This call allocates interrupt resources and enables the
2064 *	interrupt on the local CPU. If the interrupt is supposed to be
2065 *	enabled on other CPUs, it has to be done on each CPU using
2066 *	enable_percpu_irq().
2067 *
2068 *	Dev_id must be globally unique. It is a per-cpu variable, and
2069 *	the handler gets called with the interrupted CPU's instance of
2070 *	that variable.
2071 */
2072int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2073			 unsigned long flags, const char *devname,
2074			 void __percpu *dev_id)
2075{
2076	struct irqaction *action;
2077	struct irq_desc *desc;
2078	int retval;
2079
2080	if (!dev_id)
2081		return -EINVAL;
2082
2083	desc = irq_to_desc(irq);
2084	if (!desc || !irq_settings_can_request(desc) ||
2085	    !irq_settings_is_per_cpu_devid(desc))
2086		return -EINVAL;
2087
2088	if (flags && flags != IRQF_TIMER)
2089		return -EINVAL;
2090
2091	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2092	if (!action)
2093		return -ENOMEM;
2094
2095	action->handler = handler;
2096	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
2097	action->name = devname;
2098	action->percpu_dev_id = dev_id;
2099
2100	retval = irq_chip_pm_get(&desc->irq_data);
2101	if (retval < 0) {
2102		kfree(action);
2103		return retval;
2104	}
2105
2106	retval = __setup_irq(irq, desc, action);
2107
2108	if (retval) {
2109		irq_chip_pm_put(&desc->irq_data);
2110		kfree(action);
2111	}
2112
2113	return retval;
2114}
2115EXPORT_SYMBOL_GPL(__request_percpu_irq);
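
/*
 * Illustrative sketch, not part of the original file: drivers normally
 * use the request_percpu_irq() wrapper (flags == 0) rather than calling
 * __request_percpu_irq() directly. The per-CPU state below is a
 * hypothetical example; the handler receives the calling CPU's instance.
 */
#if 0
static DEFINE_PER_CPU(struct my_pcpu_state, my_state);

static irqreturn_t my_percpu_handler(int irq, void *dev_id)
{
	/* dev_id is this CPU's instance of the percpu variable. */
	struct my_pcpu_state *st = dev_id;

	st->count++;
	return IRQ_HANDLED;
}

	ret = request_percpu_irq(my_irq, my_percpu_handler,
				 "my_percpu_dev", &my_state);
#endif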
2116
2117/**
2118 *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
2119 *	@irq: Interrupt line that is forwarded to a VM
2120 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
2121 *	@state: a pointer to a boolean where the state is to be stored
2122 *
2123 *	This call snapshots the internal irqchip state of an
2124 *	interrupt, returning into @state the bit corresponding to
2125 *	state @which.
2126 *
2127 *	This function should be called with preemption disabled if the
2128 *	interrupt controller has per-cpu registers.
2129 */
2130int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2131			  bool *state)
2132{
2133	struct irq_desc *desc;
2134	struct irq_data *data;
2135	struct irq_chip *chip;
2136	unsigned long flags;
2137	int err = -EINVAL;
2138
2139	desc = irq_get_desc_buslock(irq, &flags, 0);
2140	if (!desc)
2141		return err;
2142
2143	data = irq_desc_get_irq_data(desc);
2144
2145	do {
2146		chip = irq_data_get_irq_chip(data);
2147		if (chip->irq_get_irqchip_state)
2148			break;
2149#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2150		data = data->parent_data;
2151#else
2152		data = NULL;
2153#endif
2154	} while (data);
2155
2156	if (data)
2157		err = chip->irq_get_irqchip_state(data, which, state);
2158
2159	irq_put_desc_busunlock(desc, flags);
2160	return err;
2161}
2162EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
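
/*
 * Illustrative sketch, not part of the original file: snapshotting
 * whether a line is pending at the irqchip, e.g. when saving the state
 * of an interrupt forwarded to a VM. my_irq is hypothetical; the call
 * fails if no chip in the hierarchy implements irq_get_irqchip_state.
 */
#if 0
	bool pending;

	if (!irq_get_irqchip_state(my_irq, IRQCHIP_STATE_PENDING, &pending))
		pr_debug("irq %u pending: %d\n", my_irq, pending);
#endif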
2163
2164/**
2165 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
2166 *	@irq: Interrupt line that is forwarded to a VM
2167 *	@which: State to be restored (one of IRQCHIP_STATE_*)
2168 *	@val: Value corresponding to @which
2169 *
2170 *	This call sets the internal irqchip state of an interrupt,
2171 *	depending on the value of @which.
2172 *
2173 *	This function should be called with preemption disabled if the
2174 *	interrupt controller has per-cpu registers.
2175 */
2176int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
2177			  bool val)
2178{
2179	struct irq_desc *desc;
2180	struct irq_data *data;
2181	struct irq_chip *chip;
2182	unsigned long flags;
2183	int err = -EINVAL;
2184
2185	desc = irq_get_desc_buslock(irq, &flags, 0);
2186	if (!desc)
2187		return err;
2188
2189	data = irq_desc_get_irq_data(desc);
2190
2191	do {
2192		chip = irq_data_get_irq_chip(data);
2193		if (chip->irq_set_irqchip_state)
2194			break;
2195#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
2196		data = data->parent_data;
2197#else
2198		data = NULL;
2199#endif
2200	} while (data);
2201
2202	if (data)
2203		err = chip->irq_set_irqchip_state(data, which, val);
2204
2205	irq_put_desc_busunlock(desc, flags);
2206	return err;
2207}
2208EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
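
/*
 * Illustrative sketch, not part of the original file: the counterpart
 * restore path, making a forwarded interrupt pending again at the chip.
 * my_irq is hypothetical.
 */
#if 0
	int err = irq_set_irqchip_state(my_irq, IRQCHIP_STATE_PENDING, true);

	if (err)
		pr_warn("cannot restore pending state of irq %u\n", my_irq);
#endif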
v5.14.15
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   4 * Copyright (C) 2005-2006 Thomas Gleixner
   5 *
   6 * This file contains driver APIs to the irq subsystem.
   7 */
   8
   9#define pr_fmt(fmt) "genirq: " fmt
  10
  11#include <linux/irq.h>
  12#include <linux/kthread.h>
  13#include <linux/module.h>
  14#include <linux/random.h>
  15#include <linux/interrupt.h>
  16#include <linux/irqdomain.h>
  17#include <linux/slab.h>
  18#include <linux/sched.h>
  19#include <linux/sched/rt.h>
  20#include <linux/sched/task.h>
  21#include <linux/sched/isolation.h>
  22#include <uapi/linux/sched/types.h>
  23#include <linux/task_work.h>
  24
  25#include "internals.h"
  26
  27#if defined(CONFIG_IRQ_FORCED_THREADING) && !defined(CONFIG_PREEMPT_RT)
  28__read_mostly bool force_irqthreads;
  29EXPORT_SYMBOL_GPL(force_irqthreads);
  30
  31static int __init setup_forced_irqthreads(char *arg)
  32{
  33	force_irqthreads = true;
  34	return 0;
  35}
  36early_param("threadirqs", setup_forced_irqthreads);
  37#endif
  38
  39static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip)
  40{
  41	struct irq_data *irqd = irq_desc_get_irq_data(desc);
  42	bool inprogress;
  43
  44	do {
  45		unsigned long flags;
  46
  47		/*
  48		 * Wait until we're out of the critical section.  This might
  49		 * give the wrong answer due to the lack of memory barriers.
  50		 */
  51		while (irqd_irq_inprogress(&desc->irq_data))
  52			cpu_relax();
  53
  54		/* Ok, that indicated we're done: double-check carefully. */
  55		raw_spin_lock_irqsave(&desc->lock, flags);
  56		inprogress = irqd_irq_inprogress(&desc->irq_data);
  57
  58		/*
  59		 * If requested and supported, check at the chip whether it
  60		 * is in flight at the hardware level, i.e. already pending
  61		 * in a CPU and waiting for service and acknowledge.
  62		 */
  63		if (!inprogress && sync_chip) {
  64			/*
  65			 * Ignore the return code. inprogress is only updated
  66			 * when the chip supports it.
  67			 */
  68			__irq_get_irqchip_state(irqd, IRQCHIP_STATE_ACTIVE,
  69						&inprogress);
  70		}
  71		raw_spin_unlock_irqrestore(&desc->lock, flags);
  72
  73		/* Oops, that failed? */
  74	} while (inprogress);
  75}
  76
  77/**
  78 *	synchronize_hardirq - wait for pending hard IRQ handlers (on other CPUs)
  79 *	@irq: interrupt number to wait for
  80 *
  81 *	This function waits for any pending hard IRQ handlers for this
  82 *	interrupt to complete before returning. If you use this
  83 *	function while holding a resource the IRQ handler may need you
  84 *	will deadlock. It does not take associated threaded handlers
  85 *	into account.
  86 *
  87 *	Do not use this for shutdown scenarios where you must be sure
  88 *	that all parts (hardirq and threaded handler) have completed.
  89 *
  90 *	Returns: false if a threaded handler is active.
  91 *
  92 *	This function may be called - with care - from IRQ context.
  93 *
  94 *	It does not check whether there is an interrupt in flight at the
  95 *	hardware level, but not serviced yet, as this might deadlock when
  96 *	called with interrupts disabled and the target CPU of the interrupt
  97 *	is the current CPU.
  98 */
  99bool synchronize_hardirq(unsigned int irq)
 100{
 101	struct irq_desc *desc = irq_to_desc(irq);
 102
 103	if (desc) {
 104		__synchronize_hardirq(desc, false);
 105		return !atomic_read(&desc->threads_active);
 106	}
 107
 108	return true;
 109}
 110EXPORT_SYMBOL(synchronize_hardirq);
 111
 112/**
 113 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 114 *	@irq: interrupt number to wait for
 115 *
 116 *	This function waits for any pending IRQ handlers for this interrupt
 117 *	to complete before returning. If you use this function while
 118 *	holding a resource the IRQ handler may need you will deadlock.
 119 *
 120 *	Can only be called from preemptible code as it might sleep when
 121 *	an interrupt thread is associated to @irq.
 122 *
 123 *	It optionally makes sure (when the irq chip supports that method)
 124 *	that the interrupt is not pending in any CPU and waiting for
 125 *	service.
 126 */
 127void synchronize_irq(unsigned int irq)
 128{
 129	struct irq_desc *desc = irq_to_desc(irq);
 130
 131	if (desc) {
 132		__synchronize_hardirq(desc, true);
 133		/*
 134		 * We made sure that no hardirq handler is
 135		 * running. Now verify that no threaded handlers are
 136		 * active.
 137		 */
 138		wait_event(desc->wait_for_threads,
 139			   !atomic_read(&desc->threads_active));
 140	}
 141}
 142EXPORT_SYMBOL(synchronize_irq);
 143
 144#ifdef CONFIG_SMP
 145cpumask_var_t irq_default_affinity;
 146
 147static bool __irq_can_set_affinity(struct irq_desc *desc)
 148{
 149	if (!desc || !irqd_can_balance(&desc->irq_data) ||
 150	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 151		return false;
 152	return true;
 153}
 154
 155/**
 156 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 157 *	@irq:		Interrupt to check
 158 *
 159 */
 160int irq_can_set_affinity(unsigned int irq)
 161{
 162	return __irq_can_set_affinity(irq_to_desc(irq));
 163}
 164
 165/**
 166 * irq_can_set_affinity_usr - Check if affinity of a irq can be set from user space
 167 * @irq:	Interrupt to check
 168 *
 169 * Like irq_can_set_affinity() above, but additionally checks for the
 170 * AFFINITY_MANAGED flag.
 171 */
 172bool irq_can_set_affinity_usr(unsigned int irq)
 173{
 174	struct irq_desc *desc = irq_to_desc(irq);
 175
 176	return __irq_can_set_affinity(desc) &&
 177		!irqd_affinity_is_managed(&desc->irq_data);
 178}
 179
 180/**
 181 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 182 *	@desc:		irq descriptor which has affinity changed
 183 *
 184 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 185 *	to the interrupt thread itself. We can not call
 186 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 187 *	code can be called from hard interrupt context.
 188 */
 189void irq_set_thread_affinity(struct irq_desc *desc)
 190{
 191	struct irqaction *action;
 192
 193	for_each_action_of_desc(desc, action)
 194		if (action->thread)
 195			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 196}
 197
 198#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
 199static void irq_validate_effective_affinity(struct irq_data *data)
 200{
 201	const struct cpumask *m = irq_data_get_effective_affinity_mask(data);
 202	struct irq_chip *chip = irq_data_get_irq_chip(data);
 203
 204	if (!cpumask_empty(m))
 205		return;
 206	pr_warn_once("irq_chip %s did not update eff. affinity mask of irq %u\n",
 207		     chip->name, data->irq);
 208}
 209
 210static inline void irq_init_effective_affinity(struct irq_data *data,
 211					       const struct cpumask *mask)
 212{
 213	cpumask_copy(irq_data_get_effective_affinity_mask(data), mask);
 214}
 215#else
 216static inline void irq_validate_effective_affinity(struct irq_data *data) { }
 217static inline void irq_init_effective_affinity(struct irq_data *data,
 218					       const struct cpumask *mask) { }
 219#endif
 220
 221int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 222			bool force)
 223{
 224	struct irq_desc *desc = irq_data_to_desc(data);
 225	struct irq_chip *chip = irq_data_get_irq_chip(data);
 226	int ret;
 227
 228	if (!chip || !chip->irq_set_affinity)
 229		return -EINVAL;
 230
 231	/*
 232	 * If this is a managed interrupt and housekeeping is enabled on
 233	 * it check whether the requested affinity mask intersects with
 234	 * a housekeeping CPU. If so, then remove the isolated CPUs from
 235	 * the mask and just keep the housekeeping CPU(s). This prevents
 236	 * the affinity setter from routing the interrupt to an isolated
 237	 * CPU to avoid that I/O submitted from a housekeeping CPU causes
 238	 * interrupts on an isolated one.
 239	 *
 240	 * If the masks do not intersect or include online CPU(s) then
 241	 * keep the requested mask. The isolated target CPUs are only
 242	 * receiving interrupts when the I/O operation was submitted
 243	 * directly from them.
 244	 *
 245	 * If all housekeeping CPUs in the affinity mask are offline, the
 246	 * interrupt will be migrated by the CPU hotplug code once a
 247	 * housekeeping CPU which belongs to the affinity mask comes
 248	 * online.
 249	 */
 250	if (irqd_affinity_is_managed(data) &&
 251	    housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
 252		const struct cpumask *hk_mask, *prog_mask;
 253
 254		static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
 255		static struct cpumask tmp_mask;
 256
 257		hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
 258
 259		raw_spin_lock(&tmp_mask_lock);
 260		cpumask_and(&tmp_mask, mask, hk_mask);
 261		if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
 262			prog_mask = mask;
 263		else
 264			prog_mask = &tmp_mask;
 265		ret = chip->irq_set_affinity(data, prog_mask, force);
 266		raw_spin_unlock(&tmp_mask_lock);
 267	} else {
 268		ret = chip->irq_set_affinity(data, mask, force);
 269	}
 270	switch (ret) {
 271	case IRQ_SET_MASK_OK:
 272	case IRQ_SET_MASK_OK_DONE:
 273		cpumask_copy(desc->irq_common_data.affinity, mask);
 274		fallthrough;
 275	case IRQ_SET_MASK_OK_NOCOPY:
 276		irq_validate_effective_affinity(data);
 277		irq_set_thread_affinity(desc);
 278		ret = 0;
 279	}
 280
 281	return ret;
 282}
 283
 284#ifdef CONFIG_GENERIC_PENDING_IRQ
 285static inline int irq_set_affinity_pending(struct irq_data *data,
 286					   const struct cpumask *dest)
 287{
 288	struct irq_desc *desc = irq_data_to_desc(data);
 289
 290	irqd_set_move_pending(data);
 291	irq_copy_pending(desc, dest);
 292	return 0;
 293}
 294#else
 295static inline int irq_set_affinity_pending(struct irq_data *data,
 296					   const struct cpumask *dest)
 297{
 298	return -EBUSY;
 299}
 300#endif
 301
 302static int irq_try_set_affinity(struct irq_data *data,
 303				const struct cpumask *dest, bool force)
 304{
 305	int ret = irq_do_set_affinity(data, dest, force);
 306
 307	/*
 308	 * In case that the underlying vector management is busy and the
 309	 * architecture supports the generic pending mechanism then utilize
 310	 * this to avoid returning an error to user space.
 311	 */
 312	if (ret == -EBUSY && !force)
 313		ret = irq_set_affinity_pending(data, dest);
 314	return ret;
 315}
 316
 317static bool irq_set_affinity_deactivated(struct irq_data *data,
 318					 const struct cpumask *mask, bool force)
 319{
 320	struct irq_desc *desc = irq_data_to_desc(data);
 321
 322	/*
 323	 * Handle irq chips which can handle affinity only in activated
 324	 * state correctly
 325	 *
 326	 * If the interrupt is not yet activated, just store the affinity
 327	 * mask and do not call the chip driver at all. On activation the
 328	 * driver has to make sure anyway that the interrupt is in a
 329	 * usable state so startup works.
 330	 */
 331	if (!IS_ENABLED(CONFIG_IRQ_DOMAIN_HIERARCHY) ||
 332	    irqd_is_activated(data) || !irqd_affinity_on_activate(data))
 333		return false;
 334
 335	cpumask_copy(desc->irq_common_data.affinity, mask);
 336	irq_init_effective_affinity(data, mask);
 337	irqd_set(data, IRQD_AFFINITY_SET);
 338	return true;
 339}
 340
 341int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
 342			    bool force)
 343{
 344	struct irq_chip *chip = irq_data_get_irq_chip(data);
 345	struct irq_desc *desc = irq_data_to_desc(data);
 346	int ret = 0;
 347
 348	if (!chip || !chip->irq_set_affinity)
 349		return -EINVAL;
 350
 351	if (irq_set_affinity_deactivated(data, mask, force))
 352		return 0;
 353
 354	if (irq_can_move_pcntxt(data) && !irqd_is_setaffinity_pending(data)) {
 355		ret = irq_try_set_affinity(data, mask, force);
 356	} else {
 357		irqd_set_move_pending(data);
 358		irq_copy_pending(desc, mask);
 359	}
 360
 361	if (desc->affinity_notify) {
 362		kref_get(&desc->affinity_notify->kref);
 363		if (!schedule_work(&desc->affinity_notify->work)) {
 364			/* Work was already scheduled, drop our extra ref */
 365			kref_put(&desc->affinity_notify->kref,
 366				 desc->affinity_notify->release);
 367		}
 368	}
 369	irqd_set(data, IRQD_AFFINITY_SET);
 370
 371	return ret;
 372}
 373
 374/**
 375 * irq_update_affinity_desc - Update affinity management for an interrupt
 376 * @irq:	The interrupt number to update
 377 * @affinity:	Pointer to the affinity descriptor
 378 *
 379 * This interface can be used to configure the affinity management of
 380 * interrupts which have been allocated already.
 381 *
 382 * There are certain limitations on when it may be used - attempts to use it
 383 * when the kernel is configured for generic IRQ reservation mode (in
 384 * config GENERIC_IRQ_RESERVATION_MODE) will fail, as it may conflict with
 385 * managed/non-managed interrupt accounting. In addition, attempts to use it on
 386 * an interrupt which is already started or which has already been configured
 387 * as managed will also fail, as these mean invalid init state or double init.
 388 */
 389int irq_update_affinity_desc(unsigned int irq,
 390			     struct irq_affinity_desc *affinity)
 391{
 392	struct irq_desc *desc;
 393	unsigned long flags;
 394	bool activated;
 395	int ret = 0;
 396
 397	/*
 398	 * Supporting this with the reservation scheme used by x86 needs
 399	 * some more thought. Fail it for now.
 400	 */
 401	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
 402		return -EOPNOTSUPP;
 403
 404	desc = irq_get_desc_buslock(irq, &flags, 0);
 405	if (!desc)
 406		return -EINVAL;
 407
 408	/* Requires the interrupt to be shut down */
 409	if (irqd_is_started(&desc->irq_data)) {
 410		ret = -EBUSY;
 411		goto out_unlock;
 412	}
 413
 414	/* Interrupts which are already managed cannot be modified */
 415	if (irqd_affinity_is_managed(&desc->irq_data)) {
 416		ret = -EBUSY;
 417		goto out_unlock;
 418	}
 419
 420	/*
 421	 * Deactivate the interrupt. That's required to undo
 422	 * anything an earlier activation has established.
 423	 */
 424	activated = irqd_is_activated(&desc->irq_data);
 425	if (activated)
 426		irq_domain_deactivate_irq(&desc->irq_data);
 427
 428	if (affinity->is_managed) {
 429		irqd_set(&desc->irq_data, IRQD_AFFINITY_MANAGED);
 430		irqd_set(&desc->irq_data, IRQD_MANAGED_SHUTDOWN);
 431	}
 432
 433	cpumask_copy(desc->irq_common_data.affinity, &affinity->mask);
 434
 435	/* Restore the activation state */
 436	if (activated)
 437		irq_domain_activate_irq(&desc->irq_data, false);
 438
 439out_unlock:
 440	irq_put_desc_busunlock(desc, flags);
 441	return ret;
 442}
 443
 444static int __irq_set_affinity(unsigned int irq, const struct cpumask *mask,
 445			      bool force)
 446{
 447	struct irq_desc *desc = irq_to_desc(irq);
 448	unsigned long flags;
 449	int ret;
 450
 451	if (!desc)
 452		return -EINVAL;
 453
 454	raw_spin_lock_irqsave(&desc->lock, flags);
 455	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 456	raw_spin_unlock_irqrestore(&desc->lock, flags);
 457	return ret;
 458}
 459
 460/**
 461 * irq_set_affinity - Set the irq affinity of a given irq
 462 * @irq:	Interrupt to set affinity
 463 * @cpumask:	cpumask
 464 *
 465 * Fails if cpumask does not contain an online CPU
 466 */
 467int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 468{
 469	return __irq_set_affinity(irq, cpumask, false);
 470}
 471EXPORT_SYMBOL_GPL(irq_set_affinity);
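
/*
 * Illustrative sketch, not part of the original file: pinning an
 * interrupt to a single CPU with irq_set_affinity(). my_irq and
 * target_cpu are hypothetical.
 */
#if 0
	int err = irq_set_affinity(my_irq, cpumask_of(target_cpu));

	if (err)
		pr_warn("cannot move irq %u to CPU%u\n", my_irq, target_cpu);
#endif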
 472
 473/**
 474 * irq_force_affinity - Force the irq affinity of a given irq
 475 * @irq:	Interrupt to set affinity
 476 * @cpumask:	cpumask
 477 *
 478 * Same as irq_set_affinity, but without checking the mask against
 479 * online cpus.
 480 *
 481 * Solely for low level cpu hotplug code, where we need to make per
 482 * cpu interrupts affine before the cpu becomes online.
 483 */
 484int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
 485{
 486	return __irq_set_affinity(irq, cpumask, true);
 487}
 488EXPORT_SYMBOL_GPL(irq_force_affinity);
 489
 490int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 491{
 492	unsigned long flags;
 493	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 494
 495	if (!desc)
 496		return -EINVAL;
 497	desc->affinity_hint = m;
 498	irq_put_desc_unlock(desc, flags);
 499	/* set the initial affinity to prevent every interrupt being on CPU0 */
 500	if (m)
 501		__irq_set_affinity(irq, m, false);
 502	return 0;
 503}
 504EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 505
 506static void irq_affinity_notify(struct work_struct *work)
 507{
 508	struct irq_affinity_notify *notify =
 509		container_of(work, struct irq_affinity_notify, work);
 510	struct irq_desc *desc = irq_to_desc(notify->irq);
 511	cpumask_var_t cpumask;
 512	unsigned long flags;
 513
 514	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 515		goto out;
 516
 517	raw_spin_lock_irqsave(&desc->lock, flags);
 518	if (irq_move_pending(&desc->irq_data))
 519		irq_get_pending(cpumask, desc);
 520	else
 521		cpumask_copy(cpumask, desc->irq_common_data.affinity);
 522	raw_spin_unlock_irqrestore(&desc->lock, flags);
 523
 524	notify->notify(notify, cpumask);
 525
 526	free_cpumask_var(cpumask);
 527out:
 528	kref_put(&notify->kref, notify->release);
 529}
 530
 531/**
 532 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 533 *	@irq:		Interrupt for which to enable/disable notification
 534 *	@notify:	Context for notification, or %NULL to disable
 535 *			notification.  Function pointers must be initialised;
 536 *			the other fields will be initialised by this function.
 537 *
 538 *	Must be called in process context.  Notification may only be enabled
 539 *	after the IRQ is allocated and must be disabled before the IRQ is
 540 *	freed using free_irq().
 541 */
 542int
 543irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 544{
 545	struct irq_desc *desc = irq_to_desc(irq);
 546	struct irq_affinity_notify *old_notify;
 547	unsigned long flags;
 548
 549	/* The release function is promised process context */
 550	might_sleep();
 551
 552	if (!desc || desc->istate & IRQS_NMI)
 553		return -EINVAL;
 554
 555	/* Complete initialisation of *notify */
 556	if (notify) {
 557		notify->irq = irq;
 558		kref_init(&notify->kref);
 559		INIT_WORK(&notify->work, irq_affinity_notify);
 560	}
 561
 562	raw_spin_lock_irqsave(&desc->lock, flags);
 563	old_notify = desc->affinity_notify;
 564	desc->affinity_notify = notify;
 565	raw_spin_unlock_irqrestore(&desc->lock, flags);
 566
 567	if (old_notify) {
 568		if (cancel_work_sync(&old_notify->work)) {
 569			/* Pending work had a ref, put that one too */
 570			kref_put(&old_notify->kref, old_notify->release);
 571		}
 572		kref_put(&old_notify->kref, old_notify->release);
 573	}
 574
 575	return 0;
 576}
 577EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
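
/*
 * Illustrative sketch, not part of the original file: registering for
 * affinity change notifications. The embedding struct and callbacks are
 * hypothetical; as documented above, only the notify/release function
 * pointers must be initialised by the caller.
 */
#if 0
static void my_affinity_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	/* e.g. re-point per-queue resources at the new CPU set */
}

static void my_affinity_release(struct kref *ref)
{
	/* drop the reference held by the notifier infrastructure */
}

	dev->affinity_notify.notify = my_affinity_notify;
	dev->affinity_notify.release = my_affinity_release;
	ret = irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
#endif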
 578
 579#ifndef CONFIG_AUTO_IRQ_AFFINITY
 580/*
 581 * Generic version of the affinity autoselector.
 582 */
 583int irq_setup_affinity(struct irq_desc *desc)
 584{
 585	struct cpumask *set = irq_default_affinity;
 586	int ret, node = irq_desc_get_node(desc);
 587	static DEFINE_RAW_SPINLOCK(mask_lock);
 588	static struct cpumask mask;
 589
 590	/* Excludes PER_CPU and NO_BALANCE interrupts */
 591	if (!__irq_can_set_affinity(desc))
 592		return 0;
 593
 594	raw_spin_lock(&mask_lock);
 595	/*
 596	 * Preserve the managed affinity setting and a userspace affinity
 597	 * setup, but make sure that one of the targets is online.
 598	 */
 599	if (irqd_affinity_is_managed(&desc->irq_data) ||
 600	    irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 601		if (cpumask_intersects(desc->irq_common_data.affinity,
 602				       cpu_online_mask))
 603			set = desc->irq_common_data.affinity;
 604		else
 605			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 606	}
 607
 608	cpumask_and(&mask, cpu_online_mask, set);
 609	if (cpumask_empty(&mask))
 610		cpumask_copy(&mask, cpu_online_mask);
 611
 612	if (node != NUMA_NO_NODE) {
 613		const struct cpumask *nodemask = cpumask_of_node(node);
 614
 615		/* make sure at least one of the cpus in nodemask is online */
 616		if (cpumask_intersects(&mask, nodemask))
 617			cpumask_and(&mask, &mask, nodemask);
 618	}
 619	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
 620	raw_spin_unlock(&mask_lock);
 621	return ret;
 622}
 623#else
 624/* Wrapper for ALPHA specific affinity selector magic */
 625int irq_setup_affinity(struct irq_desc *desc)
 626{
 627	return irq_select_affinity(irq_desc_get_irq(desc));
 628}
 629#endif /* CONFIG_AUTO_IRQ_AFFINITY */
 630#endif /* CONFIG_SMP */
 631
 632
 633/**
 634 *	irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
 635 *	@irq: interrupt number to set affinity
 636 *	@vcpu_info: vCPU specific data or pointer to a percpu array of vCPU
 637 *	            specific data for percpu_devid interrupts
 638 *
 639 *	This function uses the vCPU specific data to set the vCPU
 640 *	affinity for an irq. The vCPU specific data is passed from
 641 *	outside, such as KVM. One example code path is as below:
 642 *	KVM -> IOMMU -> irq_set_vcpu_affinity().
 643 */
 644int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
 645{
 646	unsigned long flags;
 647	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 648	struct irq_data *data;
 649	struct irq_chip *chip;
 650	int ret = -ENOSYS;
 651
 652	if (!desc)
 653		return -EINVAL;
 654
 655	data = irq_desc_get_irq_data(desc);
 656	do {
 657		chip = irq_data_get_irq_chip(data);
 658		if (chip && chip->irq_set_vcpu_affinity)
 659			break;
 660#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
 661		data = data->parent_data;
 662#else
 663		data = NULL;
 664#endif
 665	} while (data);
 666
 667	if (data)
 668		ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
 669	irq_put_desc_unlock(desc, flags);
 670
 671	return ret;
 672}
 673EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
 674
 675void __disable_irq(struct irq_desc *desc)
 676{
 677	if (!desc->depth++)
 678		irq_disable(desc);
 679}
 680
 681static int __disable_irq_nosync(unsigned int irq)
 682{
 683	unsigned long flags;
 684	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 685
 686	if (!desc)
 687		return -EINVAL;
 688	__disable_irq(desc);
 689	irq_put_desc_busunlock(desc, flags);
 690	return 0;
 691}
 692
 693/**
 694 *	disable_irq_nosync - disable an irq without waiting
 695 *	@irq: Interrupt to disable
 696 *
 697 *	Disable the selected interrupt line.  Disables and Enables are
 698 *	nested.
 699 *	Unlike disable_irq(), this function does not ensure existing
 700 *	instances of the IRQ handler have completed before returning.
 701 *
 702 *	This function may be called from IRQ context.
 703 */
 704void disable_irq_nosync(unsigned int irq)
 705{
 706	__disable_irq_nosync(irq);
 707}
 708EXPORT_SYMBOL(disable_irq_nosync);
 709
 710/**
 711 *	disable_irq - disable an irq and wait for completion
 712 *	@irq: Interrupt to disable
 713 *
 714 *	Disable the selected interrupt line.  Enables and Disables are
 715 *	nested.
 716 *	This function waits for any pending IRQ handlers for this interrupt
 717 *	to complete before returning. If you use this function while
 718 *	holding a resource the IRQ handler may need you will deadlock.
 719 *
 720 *	This function may be called - with care - from IRQ context.
 721 */
 722void disable_irq(unsigned int irq)
 723{
 724	if (!__disable_irq_nosync(irq))
 725		synchronize_irq(irq);
 726}
 727EXPORT_SYMBOL(disable_irq);
 728
 729/**
 730 *	disable_hardirq - disables an irq and waits for hardirq completion
 731 *	@irq: Interrupt to disable
 732 *
 733 *	Disable the selected interrupt line.  Enables and Disables are
 734 *	nested.
 735 *	This function waits for any pending hard IRQ handlers for this
 736 *	interrupt to complete before returning. If you use this function while
 737 *	holding a resource the hard IRQ handler may need you will deadlock.
 738 *
 739 *	When used to optimistically disable an interrupt from atomic context
 740 *	the return value must be checked.
 741 *
 742 *	Returns: false if a threaded handler is active.
 743 *
 744 *	This function may be called - with care - from IRQ context.
 745 */
 746bool disable_hardirq(unsigned int irq)
 747{
 748	if (!__disable_irq_nosync(irq))
 749		return synchronize_hardirq(irq);
 750
 751	return false;
 752}
 753EXPORT_SYMBOL_GPL(disable_hardirq);
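
/*
 * Illustrative sketch, not part of the original file, of the
 * "optimistically disable from atomic context" pattern noted above:
 * fall back to a deferred path when a threaded handler is still
 * active. my_irq and the helpers are hypothetical.
 */
#if 0
	if (disable_hardirq(my_irq))
		my_do_teardown();		/* hardirq quiesced */
	else
		my_schedule_deferred_teardown();	/* thread still active */
#endif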
 754
 755/**
 756 *	disable_nmi_nosync - disable an nmi without waiting
 757 *	@irq: Interrupt to disable
 758 *
 759 *	Disable the selected interrupt line. Disables and enables are
 760 *	nested.
 761 *	The interrupt to disable must have been requested through request_nmi.
 762 *	Unlike disable_nmi(), this function does not ensure existing
 763 *	instances of the IRQ handler have completed before returning.
 764 */
 765void disable_nmi_nosync(unsigned int irq)
 766{
 767	disable_irq_nosync(irq);
 768}
 769
 770void __enable_irq(struct irq_desc *desc)
 771{
 772	switch (desc->depth) {
 773	case 0:
 774 err_out:
 775		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n",
 776		     irq_desc_get_irq(desc));
 777		break;
 778	case 1: {
 779		if (desc->istate & IRQS_SUSPENDED)
 780			goto err_out;
 781		/* Prevent probing on this irq: */
 782		irq_settings_set_noprobe(desc);
 783		/*
 784		 * Call irq_startup() not irq_enable() here because the
 785		 * interrupt might be marked NOAUTOEN. So irq_startup()
 786		 * needs to be invoked when it gets enabled the first
 787		 * time. If it was already started up, then irq_startup()
 788		 * will invoke irq_enable() under the hood.
 789		 */
 790		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
 791		break;
 792	}
 793	default:
 794		desc->depth--;
 795	}
 796}
 797
 798/**
 799 *	enable_irq - enable handling of an irq
 800 *	@irq: Interrupt to enable
 801 *
 802 *	Undoes the effect of one call to disable_irq().  If this
 803 *	matches the last disable, processing of interrupts on this
 804 *	IRQ line is re-enabled.
 805 *
 806 *	This function may be called from IRQ context only when
 807 *	desc->irq_data.chip->bus_lock and ->bus_sync_unlock are NULL!
 808 */
 809void enable_irq(unsigned int irq)
 810{
 811	unsigned long flags;
 812	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 813
 814	if (!desc)
 815		return;
 816	if (WARN(!desc->irq_data.chip,
 817		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 818		goto out;
 819
 820	__enable_irq(desc);
 821out:
 822	irq_put_desc_busunlock(desc, flags);
 823}
 824EXPORT_SYMBOL(enable_irq);
 825
 826/**
 827 *	enable_nmi - enable handling of an nmi
 828 *	@irq: Interrupt to enable
 829 *
 830 *	The interrupt to enable must have been requested through request_nmi.
 831 *	Undoes the effect of one call to disable_nmi(). If this
 832 *	matches the last disable, processing of interrupts on this
 833 *	IRQ line is re-enabled.
 834 */
 835void enable_nmi(unsigned int irq)
 836{
 837	enable_irq(irq);
 838}
 839
 840static int set_irq_wake_real(unsigned int irq, unsigned int on)
 841{
 842	struct irq_desc *desc = irq_to_desc(irq);
 843	int ret = -ENXIO;
 844
 845	if (irq_desc_get_chip(desc)->flags &  IRQCHIP_SKIP_SET_WAKE)
 846		return 0;
 847
 848	if (desc->irq_data.chip->irq_set_wake)
 849		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 850
 851	return ret;
 852}
 853
 854/**
 855 *	irq_set_irq_wake - control irq power management wakeup
 856 *	@irq:	interrupt to control
 857 *	@on:	enable/disable power management wakeup
 858 *
 859 *	Enable/disable power management wakeup mode, which is
 860 *	disabled by default.  Enables and disables must match,
 861 *	just as they match for non-wakeup mode support.
 862 *
 863 *	Wakeup mode lets this IRQ wake the system from sleep
 864 *	states like "suspend to RAM".
 865 *
 866 *	Note: irq enable/disable state is completely orthogonal
 867 *	to the enable/disable state of irq wake. An irq can be
 868 *	disabled with disable_irq() and still wake the system as
 869 *	long as the irq has wake enabled. If this does not hold,
 870 *	then the underlying irq chip and the related driver need
 871 *	to be investigated.
 872 */
 873int irq_set_irq_wake(unsigned int irq, unsigned int on)
 874{
 875	unsigned long flags;
 876	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
 877	int ret = 0;
 878
 879	if (!desc)
 880		return -EINVAL;
 881
 882	/* Don't use NMIs as wake up interrupts please */
 883	if (desc->istate & IRQS_NMI) {
 884		ret = -EINVAL;
 885		goto out_unlock;
 886	}
 887
 888	/* wakeup-capable irqs can be shared between drivers that
 889	 * don't need to have the same sleep mode behaviors.
 890	 */
 891	if (on) {
 892		if (desc->wake_depth++ == 0) {
 893			ret = set_irq_wake_real(irq, on);
 894			if (ret)
 895				desc->wake_depth = 0;
 896			else
 897				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 898		}
 899	} else {
 900		if (desc->wake_depth == 0) {
 901			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 902		} else if (--desc->wake_depth == 0) {
 903			ret = set_irq_wake_real(irq, on);
 904			if (ret)
 905				desc->wake_depth = 1;
 906			else
 907				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 908		}
 909	}
 910
 911out_unlock:
 912	irq_put_desc_busunlock(desc, flags);
 913	return ret;
 914}
 915EXPORT_SYMBOL(irq_set_irq_wake);
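
/*
 * Illustrative sketch, not part of the original file: arming an
 * interrupt as a wakeup source across suspend. dev->wakeup_enabled and
 * dev->irq are hypothetical; enable and disable calls must balance.
 */
#if 0
static int my_suspend(struct device *d)
{
	struct my_dev *dev = dev_get_drvdata(d);

	if (device_may_wakeup(d))
		dev->wakeup_enabled = !irq_set_irq_wake(dev->irq, 1);
	return 0;
}

static int my_resume(struct device *d)
{
	struct my_dev *dev = dev_get_drvdata(d);

	if (dev->wakeup_enabled)
		irq_set_irq_wake(dev->irq, 0);
	return 0;
}
#endif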
 916
 917/*
 918 * Internal function that tells the architecture code whether a
 919 * particular irq has been exclusively allocated or is available
 920 * for driver use.
 921 */
 922int can_request_irq(unsigned int irq, unsigned long irqflags)
 923{
 924	unsigned long flags;
 925	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
 926	int canrequest = 0;
 927
 928	if (!desc)
 929		return 0;
 930
 931	if (irq_settings_can_request(desc)) {
 932		if (!desc->action ||
 933		    irqflags & desc->action->flags & IRQF_SHARED)
 934			canrequest = 1;
 935	}
 936	irq_put_desc_unlock(desc, flags);
 937	return canrequest;
 938}
 939
 940int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 941{
 942	struct irq_chip *chip = desc->irq_data.chip;
 943	int ret, unmask = 0;
 944
 945	if (!chip || !chip->irq_set_type) {
 946		/*
 947		 * IRQF_TRIGGER_* but the PIC does not support multiple
 948		 * flow-types?
 949		 */
 950		pr_debug("No set_type function for IRQ %d (%s)\n",
 951			 irq_desc_get_irq(desc),
 952			 chip ? (chip->name ? : "unknown") : "unknown");
 953		return 0;
 954	}
 955
 956	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 957		if (!irqd_irq_masked(&desc->irq_data))
 958			mask_irq(desc);
 959		if (!irqd_irq_disabled(&desc->irq_data))
 960			unmask = 1;
 961	}
 962
 963	/* Mask all flags except trigger mode */
 964	flags &= IRQ_TYPE_SENSE_MASK;
 965	ret = chip->irq_set_type(&desc->irq_data, flags);
 966
 967	switch (ret) {
 968	case IRQ_SET_MASK_OK:
 969	case IRQ_SET_MASK_OK_DONE:
 970		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 971		irqd_set(&desc->irq_data, flags);
 972		fallthrough;
 973
 974	case IRQ_SET_MASK_OK_NOCOPY:
 975		flags = irqd_get_trigger_type(&desc->irq_data);
 976		irq_settings_set_trigger_mask(desc, flags);
 977		irqd_clear(&desc->irq_data, IRQD_LEVEL);
 978		irq_settings_clr_level(desc);
 979		if (flags & IRQ_TYPE_LEVEL_MASK) {
 980			irq_settings_set_level(desc);
 981			irqd_set(&desc->irq_data, IRQD_LEVEL);
 982		}
 983
 984		ret = 0;
 985		break;
 986	default:
 987		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
 988		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 989	}
 990	if (unmask)
 991		unmask_irq(desc);
 992	return ret;
 993}
 994
 995#ifdef CONFIG_HARDIRQS_SW_RESEND
 996int irq_set_parent(int irq, int parent_irq)
 997{
 998	unsigned long flags;
 999	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
1000
1001	if (!desc)
1002		return -EINVAL;
1003
1004	desc->parent_irq = parent_irq;
1005
1006	irq_put_desc_unlock(desc, flags);
1007	return 0;
1008}
1009EXPORT_SYMBOL_GPL(irq_set_parent);
1010#endif
1011
1012/*
1013 * Default primary interrupt handler for threaded interrupts. Is
1014 * assigned as primary handler when request_threaded_irq is called
1015 * with handler == NULL. Useful for oneshot interrupts.
1016 */
1017static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
1018{
1019	return IRQ_WAKE_THREAD;
1020}
1021
1022/*
1023 * Primary handler for nested threaded interrupts. Should never be
1024 * called.
1025 */
1026static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
1027{
1028	WARN(1, "Primary handler called for nested irq %d\n", irq);
1029	return IRQ_NONE;
1030}
1031
1032static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
1033{
1034	WARN(1, "Secondary action handler called for irq %d\n", irq);
1035	return IRQ_NONE;
1036}
1037
1038static int irq_wait_for_interrupt(struct irqaction *action)
1039{
1040	for (;;) {
1041		set_current_state(TASK_INTERRUPTIBLE);
1042
1043		if (kthread_should_stop()) {
1044			/* may need to run one last time */
1045			if (test_and_clear_bit(IRQTF_RUNTHREAD,
1046					       &action->thread_flags)) {
1047				__set_current_state(TASK_RUNNING);
1048				return 0;
1049			}
1050			__set_current_state(TASK_RUNNING);
1051			return -1;
1052		}
1053
1054		if (test_and_clear_bit(IRQTF_RUNTHREAD,
1055				       &action->thread_flags)) {
1056			__set_current_state(TASK_RUNNING);
1057			return 0;
1058		}
1059		schedule();
1060	}
1061}
1062
1063/*
1064 * Oneshot interrupts keep the irq line masked until the threaded
1065 * handler has finished. Unmask the line if the interrupt has not
1066 * been disabled and is marked MASKED.
1067 */
1068static void irq_finalize_oneshot(struct irq_desc *desc,
1069				 struct irqaction *action)
1070{
1071	if (!(desc->istate & IRQS_ONESHOT) ||
1072	    action->handler == irq_forced_secondary_handler)
1073		return;
1074again:
1075	chip_bus_lock(desc);
1076	raw_spin_lock_irq(&desc->lock);
1077
1078	/*
1079	 * Implausible though it may be, we need to protect ourselves
1080	 * against the following scenario:
1081	 *
1082	 * The thread finishes before the hard interrupt handler on
1083	 * the other CPU. If we unmask the irq line now, the interrupt
1084	 * can come in again, mask the line and bail out due to
1085	 * IRQS_INPROGRESS, and the irq line stays masked forever.
1086	 *
1087	 * This also serializes the state of shared oneshot handlers
1088	 * versus "desc->threads_oneshot |= action->thread_mask;" in
1089	 * irq_wake_thread(). See the comment there which explains the
1090	 * serialization.
1091	 */
1092	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
1093		raw_spin_unlock_irq(&desc->lock);
1094		chip_bus_sync_unlock(desc);
1095		cpu_relax();
1096		goto again;
1097	}
1098
1099	/*
1100	 * Now check again, whether the thread should run. Otherwise
1101	 * we would clear the threads_oneshot bit of this thread which
1102	 * was just set.
1103	 */
1104	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1105		goto out_unlock;
1106
1107	desc->threads_oneshot &= ~action->thread_mask;
1108
1109	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
1110	    irqd_irq_masked(&desc->irq_data))
1111		unmask_threaded_irq(desc);
1112
1113out_unlock:
1114	raw_spin_unlock_irq(&desc->lock);
1115	chip_bus_sync_unlock(desc);
1116}
1117
1118#ifdef CONFIG_SMP
1119/*
1120 * Check whether we need to change the affinity of the interrupt thread.
1121 */
1122static void
1123irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
1124{
1125	cpumask_var_t mask;
1126	bool valid = true;
1127
1128	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
1129		return;
1130
1131	/*
1132	 * In case we are out of memory, set IRQTF_AFFINITY again and
1133	 * retry next time.
1134	 */
1135	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
1136		set_bit(IRQTF_AFFINITY, &action->thread_flags);
1137		return;
1138	}
1139
1140	raw_spin_lock_irq(&desc->lock);
1141	/*
1142	 * This code is triggered unconditionally. Check the affinity
1143	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
1144	 */
1145	if (cpumask_available(desc->irq_common_data.affinity)) {
1146		const struct cpumask *m;
1147
1148		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
1149		cpumask_copy(mask, m);
1150	} else {
1151		valid = false;
1152	}
1153	raw_spin_unlock_irq(&desc->lock);
1154
1155	if (valid)
1156		set_cpus_allowed_ptr(current, mask);
1157	free_cpumask_var(mask);
1158}
1159#else
1160static inline void
1161irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
1162#endif
1163
1164/*
1165 * Interrupts which are not explicitly requested as threaded
1166 * interrupts rely on the implicit bh/preempt disable of the hard irq
1167 * context. So we need to disable bh here to avoid deadlocks and other
1168 * side effects.
1169 */
1170static irqreturn_t
1171irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
1172{
1173	irqreturn_t ret;
1174
1175	local_bh_disable();
1176	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1177		local_irq_disable();
1178	ret = action->thread_fn(action->irq, action->dev_id);
1179	if (ret == IRQ_HANDLED)
1180		atomic_inc(&desc->threads_handled);
1181
1182	irq_finalize_oneshot(desc, action);
1183	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
1184		local_irq_enable();
1185	local_bh_enable();
1186	return ret;
1187}
1188
1189/*
1190 * Interrupts explicitly requested as threaded interrupts want to be
1191 * preemptible - many of them need to sleep and wait for slow buses to
1192 * complete.
1193 */
1194static irqreturn_t irq_thread_fn(struct irq_desc *desc,
1195		struct irqaction *action)
1196{
1197	irqreturn_t ret;
1198
1199	ret = action->thread_fn(action->irq, action->dev_id);
1200	if (ret == IRQ_HANDLED)
1201		atomic_inc(&desc->threads_handled);
1202
1203	irq_finalize_oneshot(desc, action);
1204	return ret;
1205}
1206
1207static void wake_threads_waitq(struct irq_desc *desc)
1208{
1209	if (atomic_dec_and_test(&desc->threads_active))
1210		wake_up(&desc->wait_for_threads);
1211}
1212
1213static void irq_thread_dtor(struct callback_head *unused)
1214{
1215	struct task_struct *tsk = current;
1216	struct irq_desc *desc;
1217	struct irqaction *action;
1218
1219	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
1220		return;
1221
1222	action = kthread_data(tsk);
1223
1224	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
1225	       tsk->comm, tsk->pid, action->irq);
1226
1227
1228	desc = irq_to_desc(action->irq);
1229	/*
1230	 * If IRQTF_RUNTHREAD is set, we need to decrement
1231	 * desc->threads_active and wake possible waiters.
1232	 */
1233	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
1234		wake_threads_waitq(desc);
1235
1236	/* Prevent a stale desc->threads_oneshot */
1237	irq_finalize_oneshot(desc, action);
1238}
1239
1240static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action)
1241{
1242	struct irqaction *secondary = action->secondary;
1243
1244	if (WARN_ON_ONCE(!secondary))
1245		return;
1246
1247	raw_spin_lock_irq(&desc->lock);
1248	__irq_wake_thread(desc, secondary);
1249	raw_spin_unlock_irq(&desc->lock);
1250}
1251
1252/*
1253 * Interrupt handler thread
1254 */
1255static int irq_thread(void *data)
1256{
1257	struct callback_head on_exit_work;
1258	struct irqaction *action = data;
1259	struct irq_desc *desc = irq_to_desc(action->irq);
1260	irqreturn_t (*handler_fn)(struct irq_desc *desc,
1261			struct irqaction *action);
1262
1263	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
1264					&action->thread_flags))
1265		handler_fn = irq_forced_thread_fn;
1266	else
1267		handler_fn = irq_thread_fn;
1268
1269	init_task_work(&on_exit_work, irq_thread_dtor);
1270	task_work_add(current, &on_exit_work, TWA_NONE);
1271
1272	irq_thread_check_affinity(desc, action);
1273
1274	while (!irq_wait_for_interrupt(action)) {
1275		irqreturn_t action_ret;
1276
1277		irq_thread_check_affinity(desc, action);
1278
1279		action_ret = handler_fn(desc, action);
1280		if (action_ret == IRQ_WAKE_THREAD)
1281			irq_wake_secondary(desc, action);
1282
1283		wake_threads_waitq(desc);
1284	}
1285
1286	/*
1287	 * This is the regular exit path. __free_irq() is stopping the
1288	 * thread via kthread_stop() after calling
1289	 * synchronize_hardirq(). So neither IRQTF_RUNTHREAD nor the
1290	 * oneshot mask bit can be set.
1291	 */
1292	task_work_cancel(current, irq_thread_dtor);
1293	return 0;
1294}
1295
1296/**
1297 *	irq_wake_thread - wake the irq thread for the action identified by dev_id
1298 *	@irq:		Interrupt line
1299 *	@dev_id:	Device identity for which the thread should be woken
1300 *
1301 */
1302void irq_wake_thread(unsigned int irq, void *dev_id)
1303{
1304	struct irq_desc *desc = irq_to_desc(irq);
1305	struct irqaction *action;
1306	unsigned long flags;
1307
1308	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1309		return;
1310
1311	raw_spin_lock_irqsave(&desc->lock, flags);
1312	for_each_action_of_desc(desc, action) {
1313		if (action->dev_id == dev_id) {
1314			if (action->thread)
1315				__irq_wake_thread(desc, action);
1316			break;
1317		}
1318	}
1319	raw_spin_unlock_irqrestore(&desc->lock, flags);
1320}
1321EXPORT_SYMBOL_GPL(irq_wake_thread);
1322
1323static int irq_setup_forced_threading(struct irqaction *new)
1324{
1325	if (!force_irqthreads)
1326		return 0;
1327	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
1328		return 0;
1329
1330	/*
1331	 * No further action required for interrupts which are requested as
1332	 * threaded interrupts already
1333	 */
1334	if (new->handler == irq_default_primary_handler)
1335		return 0;
1336
1337	new->flags |= IRQF_ONESHOT;
1338
1339	/*
1340	 * Handle the case where we have a real primary handler and a
1341	 * thread handler. We force thread them as well by creating a
1342	 * secondary action.
1343	 */
1344	if (new->handler && new->thread_fn) {
1345		/* Allocate the secondary action */
1346		new->secondary = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1347		if (!new->secondary)
1348			return -ENOMEM;
1349		new->secondary->handler = irq_forced_secondary_handler;
1350		new->secondary->thread_fn = new->thread_fn;
1351		new->secondary->dev_id = new->dev_id;
1352		new->secondary->irq = new->irq;
1353		new->secondary->name = new->name;
1354	}
1355	/* Deal with the primary handler */
1356	set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
1357	new->thread_fn = new->handler;
1358	new->handler = irq_default_primary_handler;
1359	return 0;
1360}
1361
1362static int irq_request_resources(struct irq_desc *desc)
1363{
1364	struct irq_data *d = &desc->irq_data;
1365	struct irq_chip *c = d->chip;
1366
1367	return c->irq_request_resources ? c->irq_request_resources(d) : 0;
1368}
1369
1370static void irq_release_resources(struct irq_desc *desc)
1371{
1372	struct irq_data *d = &desc->irq_data;
1373	struct irq_chip *c = d->chip;
1374
1375	if (c->irq_release_resources)
1376		c->irq_release_resources(d);
1377}
1378
1379static bool irq_supports_nmi(struct irq_desc *desc)
1380{
1381	struct irq_data *d = irq_desc_get_irq_data(desc);
1382
1383#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1384	/* Only IRQs directly managed by the root irqchip can be set as NMI */
1385	if (d->parent_data)
1386		return false;
1387#endif
1388	/* Don't support NMIs for chips behind a slow bus */
1389	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1390		return false;
1391
1392	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1393}
1394
1395static int irq_nmi_setup(struct irq_desc *desc)
1396{
1397	struct irq_data *d = irq_desc_get_irq_data(desc);
1398	struct irq_chip *c = d->chip;
1399
1400	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1401}
1402
1403static void irq_nmi_teardown(struct irq_desc *desc)
1404{
1405	struct irq_data *d = irq_desc_get_irq_data(desc);
1406	struct irq_chip *c = d->chip;
1407
1408	if (c->irq_nmi_teardown)
1409		c->irq_nmi_teardown(d);
1410}
1411
1412static int
1413setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1414{
1415	struct task_struct *t;
1416
1417	if (!secondary) {
1418		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
1419				   new->name);
1420	} else {
1421		t = kthread_create(irq_thread, new, "irq/%d-s-%s", irq,
1422				   new->name);
 
1423	}
1424
1425	if (IS_ERR(t))
1426		return PTR_ERR(t);
1427
1428	sched_set_fifo(t);
1429
1430	/*
1431	 * We keep the reference to the task struct even if
1432	 * the thread dies to avoid that the interrupt code
1433	 * references an already freed task_struct.
1434	 */
1435	new->thread = get_task_struct(t);
 
1436	/*
1437	 * Tell the thread to set its affinity. This is
1438	 * important for shared interrupt handlers as we do
1439	 * not invoke setup_affinity() for the secondary
1440	 * handlers as everything is already set up. Even for
1441	 * interrupts marked with IRQF_NO_BALANCE this is
1442	 * correct as we want the thread to move to the cpu(s)
1443	 * on which the requesting code placed the interrupt.
1444	 */
1445	set_bit(IRQTF_AFFINITY, &new->thread_flags);
1446	return 0;
1447}
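
/*
 * Illustrative note (not from the original source): the kthreads created
 * above show up in the process list under the names built here, e.g.
 * "irq/54-eth0" for the primary thread of irq 54 requested as "eth0" and
 * "irq/54-s-eth0" for a forced-threading secondary. The irq number and
 * device name are examples only.
 */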

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 *
 * Locking rules:
 *
 * desc->request_mutex	Provides serialization against a concurrent free_irq()
 *   chip_bus_lock	Provides serialization for slow bus operations
 *     desc->lock	Provides serialization against hard interrupts
 *
 * chip_bus_lock and desc->lock are sufficient for all other management and
 * interrupt related functions. desc->request_mutex solely serializes
 * request/free_irq().
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	new->irq = irq;

	/*
	 * If the trigger type is not specified by the caller,
	 * then use the default for this interrupt.
	 */
	if (!(new->flags & IRQF_TRIGGER_MASK))
		new->flags |= irqd_get_trigger_type(&desc->irq_data);

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler which was provided by
		 * the driver for non-nested interrupt handling with
		 * the dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc)) {
			ret = irq_setup_forced_threading(new);
			if (ret)
				goto out_mput;
		}
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		ret = setup_irq_thread(new, irq, false);
		if (ret)
			goto out_mput;
		if (new->secondary) {
			ret = setup_irq_thread(new->secondary, irq, true);
			if (ret)
				goto out_thread;
		}
	}

	/*
	 * Drivers are often written to work w/o knowledge about the
	 * underlying irq chip implementation, so a request for a
	 * threaded irq without a primary hard irq context handler
	 * requires the ONESHOT flag to be set. Some irq chips like
	 * MSI based interrupts are per se one shot safe. Check the
	 * chip flags, so we can avoid the unmask dance at the end of
	 * the threaded handler for those.
	 */
	if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
		new->flags &= ~IRQF_ONESHOT;

	/*
	 * Protects against a concurrent __free_irq() call which might wait
	 * for synchronize_hardirq() to complete without holding the optional
	 * chip bus lock and desc->lock. Also protects against handing out
	 * a recycled oneshot thread_mask bit while it's still in use by
	 * its previous owner.
	 */
	mutex_lock(&desc->request_mutex);

	/*
	 * Acquire bus lock as the irq_request_resources() callback below
	 * might rely on the serialization or the magic power management
	 * functions which are abusing the irq_bus_lock() callback.
	 */
	chip_bus_lock(desc);

	/* First installed action requests resources. */
	if (!desc->action) {
		ret = irq_request_resources(desc);
		if (ret) {
			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
			       new->name, irq, desc->irq_data.chip->name);
			goto out_bus_unlock;
		}
	}

	/*
	 * The following block of code has to be executed atomically
	 * protected against a concurrent interrupt and any of the other
	 * management calls which are not serialized via
	 * desc->request_mutex or the optional bus lock.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 * Interrupt lines used for NMIs cannot be shared.
		 */
		unsigned int oldtype;

		if (desc->istate & IRQS_NMI) {
			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
				new->name, irq, desc->irq_data.chip->name);
			ret = -EINVAL;
			goto out_unlock;
		}

		/*
		 * If nobody has set the trigger type yet, inherit the
		 * one provided by the requester.
		 */
		if (irqd_trigger_type_was_set(&desc->irq_data)) {
			oldtype = irqd_get_trigger_type(&desc->irq_data);
		} else {
			oldtype = new->flags & IRQF_TRIGGER_MASK;
			irqd_set_trigger_type(&desc->irq_data, oldtype);
		}

		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * OR all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 (or 64) irqs sharing one line,
		 * but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_unlock;
		}
		/*
		 * The thread_mask for the action is or'ed to
		 * desc->threads_active to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_active becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_active is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which ORs
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1UL << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler &&
		   !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is re-enabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
		       new->name, irq);
		ret = -EINVAL;
		goto out_unlock;
	}

	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc,
						new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_unlock;
		}

		/*
		 * Activate the interrupt. That activation must happen
		 * independently of IRQ_NOAUTOEN. request_irq() can fail
		 * and the callers are supposed to handle
		 * that. enable_irq() of an interrupt requested with
		 * IRQ_NOAUTOEN is not supposed to fail. The activation
		 * keeps it in shutdown mode, it merely associates
		 * resources if necessary and if that's not possible it
		 * fails. Interrupts which are in managed shutdown mode
		 * will simply ignore that activation request.
		 */
		ret = irq_activate(desc);
		if (ret)
			goto out_unlock;

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
			if (new->flags & IRQF_NO_DEBUG)
				irq_settings_set_no_debug(desc);
		}

		if (noirqdebug)
			irq_settings_set_no_debug(desc);

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		if (!(new->flags & IRQF_NO_AUTOEN) &&
		    irq_settings_can_autoenable(desc)) {
			irq_startup(desc, IRQ_RESEND, IRQ_START_COND);
		} else {
			/*
			 * Shared interrupts do not go well with disabling
			 * auto enable. The sharing interrupt might request
			 * it while it's still disabled and then wait for
			 * interrupts forever.
			 */
			WARN_ON_ONCE(new->flags & IRQF_SHARED);
			/* Undo nested disables: */
			desc->depth = 1;
		}

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irqd_get_trigger_type(&desc->irq_data);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warn("irq %d uses trigger mode %u; requested %u\n",
				irq, omsk, nmsk);
	}

	*old_ptr = new;

	irq_pm_install_action(desc, new);

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

	irq_setup_timings(desc, new);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);
	if (new->secondary)
		wake_up_process(new->secondary->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_unlock:
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (!desc->action)
		irq_release_resources(desc);
out_bus_unlock:
	chip_bus_sync_unlock(desc);
	mutex_unlock(&desc->request_mutex);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
	if (new->secondary && new->secondary->thread) {
		struct task_struct *t = new->secondary->thread;

		new->secondary->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(struct irq_desc *desc, void *dev_id)
{
	unsigned irq = desc->irq_data.irq;
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	mutex_lock(&desc->request_mutex);
	chip_bus_lock(desc);
	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);
			chip_bus_sync_unlock(desc);
			mutex_unlock(&desc->request_mutex);
			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	irq_pm_remove_action(desc, action);

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		irq_settings_clr_disable_unlazy(desc);
		/* Only shutdown. Deactivate after synchronize_hardirq() */
		irq_shutdown(desc);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	/*
	 * Drop bus_lock here so the changes which were done in the chip
	 * callbacks above are synced out to the irq chips which hang
	 * behind a slow bus (I2C, SPI) before calling synchronize_hardirq().
	 *
	 * Apart from that, the bus_lock can also be taken from the threaded
	 * handler in irq_finalize_oneshot() which results in a deadlock
	 * because kthread_stop() would wait forever for the thread to
	 * complete, which is blocked on the bus lock.
	 *
	 * The still held desc->request_mutex protects against a
	 * concurrent request_irq() of this irq so the release of resources
	 * and timing data is properly serialized.
	 */
	chip_bus_sync_unlock(desc);

	unregister_handler_proc(irq, action);

	/*
	 * Make sure it's not being used on another CPU and if the chip
	 * supports it also make sure that there is no (not yet serviced)
	 * interrupt in flight at the hardware level.
	 */
	__synchronize_hardirq(desc, true);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now, while it is being freed, so let's make
	 * sure that is the case by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	/*
	 * The action has already been removed above, but the thread writes
	 * its oneshot mask bit when it completes. However, request_mutex is
	 * held across this section, which prevents __setup_irq() from
	 * handing out the same bit to a newly requested action.
	 */
	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
		if (action->secondary && action->secondary->thread) {
			kthread_stop(action->secondary->thread);
			put_task_struct(action->secondary->thread);
		}
	}

	/* Last action releases resources */
	if (!desc->action) {
		/*
		 * Reacquire bus lock as irq_release_resources() might
		 * require it to deallocate resources over the slow bus.
		 */
		chip_bus_lock(desc);
		/*
		 * There is no interrupt on the fly anymore. Deactivate it
		 * completely.
		 */
		raw_spin_lock_irqsave(&desc->lock, flags);
		irq_domain_deactivate_irq(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		irq_release_resources(desc);
		chip_bus_sync_unlock(desc);
		irq_remove_timings(desc);
	}

	mutex_unlock(&desc->request_mutex);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	kfree(action->secondary);
	return action;
}

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 *
 *	Returns the devname argument passed to request_irq.
 */
const void *free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	const char *devname;

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	action = __free_irq(desc, dev_id);

	if (!action)
		return NULL;

	devname = action->name;
	kfree(action);
	return devname;
}
EXPORT_SYMBOL(free_irq);
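
/*
 * Example (illustrative, not part of this file): in a hypothetical
 * driver's teardown path the interrupt is quiesced at the device and
 * then released with the same dev_id cookie that was passed to
 * request_irq(). "foo" and the foo_* helper are made up for the sketch.
 *
 *	foo_disable_device_irq(foo);	// device-level masking, hypothetical
 *	free_irq(foo->irq, foo);	// waits for running handlers
 */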

/* This function must be called with desc->lock held */
static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
{
	const char *devname = NULL;

	desc->istate &= ~IRQS_NMI;

	if (!WARN_ON(desc->action == NULL)) {
		irq_pm_remove_action(desc, desc->action);
		devname = desc->action->name;
		unregister_handler_proc(irq, desc->action);

		kfree(desc->action);
		desc->action = NULL;
	}

	irq_settings_clr_disable_unlazy(desc);
	irq_shutdown_and_deactivate(desc);

	irq_release_resources(desc);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);

	return devname;
}

const void *free_nmi(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	const void *devname;

	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
		return NULL;

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return NULL;

	/* NMI still enabled */
	if (WARN_ON(desc->depth == 0))
		disable_nmi_nosync(irq);

	raw_spin_lock_irqsave(&desc->lock, flags);

	irq_nmi_teardown(desc);
	devname = __cleanup_nmi(irq, desc);

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return devname;
}
/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts.
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed.
 *	@thread_fn: Function called from the irq handler thread.
 *		    If NULL, no irq thread is created.
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 *
	 * Also shared interrupts do not go well with disabling auto enable.
	 * The sharing interrupt might request it while it's still disabled
	 * and then wait for interrupts forever.
	 *
	 * Also IRQF_COND_SUSPEND only makes sense for shared interrupts and
	 * it cannot be set along with IRQF_NO_SUSPEND.
	 */
	if (((irqflags & IRQF_SHARED) && !dev_id) ||
	    ((irqflags & IRQF_SHARED) && (irqflags & IRQF_NO_AUTOEN)) ||
	    (!(irqflags & IRQF_SHARED) && (irqflags & IRQF_COND_SUSPEND)) ||
	    ((irqflags & IRQF_NO_SUSPEND) && (irqflags & IRQF_COND_SUSPEND)))
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action->secondary);
		kfree(action);
	}

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
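
/*
 * Example (illustrative sketch, not part of this file): the split handler
 * design described above, for a hypothetical "foo" device. The primary
 * handler runs in hard irq context, checks ownership, quiesces the device
 * and defers the heavy lifting to the thread. All foo_* names are made up.
 *
 *	static irqreturn_t foo_primary(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_pending(foo))	// shared line, not ours
 *			return IRQ_NONE;
 *		foo_mask_device_irq(foo);	// stop the device asserting
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn()
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_process_events(foo);	// may sleep here
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	err = request_threaded_irq(foo->irq, foo_primary, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 */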

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc;
	int ret;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
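
/*
 * Example (illustrative, not part of this file): a driver that does not
 * know whether its parent irqchip runs handlers in hard irq or nested
 * thread context only needs to check for an error; the positive return
 * values merely report which context was chosen. foo_* is hypothetical.
 *
 *	ret = request_any_context_irq(irq, foo_handler, 0, "foo", foo);
 *	if (ret < 0)
 *		return ret;	// else IRQC_IS_HARDIRQ or IRQC_IS_NESTED
 */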

/**
 *	request_nmi - allocate an interrupt line for NMI delivery
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@irqflags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It sets up the IRQ line
 *	to be handled as an NMI.
 *
 *	An interrupt line delivering NMIs cannot be shared and IRQ handling
 *	cannot be threaded.
 *
 *	Interrupt lines requested for NMI delivery must produce per-CPU
 *	interrupts and have auto-enabling disabled.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If the interrupt line cannot be used to deliver NMIs, the function
 *	will fail and return a negative value.
 */
int request_nmi(unsigned int irq, irq_handler_t handler,
		unsigned long irqflags, const char *name, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (irq == IRQ_NOTCONNECTED)
		return -ENOTCONN;

	/* NMIs cannot be shared, nor used for polling */
	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
		return -EINVAL;

	if (!(irqflags & IRQF_PERCPU))
		return -EINVAL;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || (irq_settings_can_autoenable(desc) &&
	    !(irqflags & IRQF_NO_AUTOEN)) ||
	    !irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
	action->name = name;
	action->dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/* Setup NMI state */
	desc->istate |= IRQS_NMI;
	retval = irq_nmi_setup(desc);
	if (retval) {
		__cleanup_nmi(irq, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
		return -EINVAL;
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}

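/*
 * Example (illustrative, not part of this file): requesting an NMI on a
 * line that satisfies the constraints above. Note the flags: per-CPU
 * delivery and no auto-enable; the caller enables the line explicitly
 * afterwards. foo_nmi_handler and foo are hypothetical.
 *
 *	ret = request_nmi(irq, foo_nmi_handler,
 *			  IRQF_PERCPU | IRQF_NO_AUTOEN, "foo-nmi", foo);
 *	if (!ret)
 *		enable_nmi(irq);
 */
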
void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	/*
	 * If the trigger type is not specified by the caller, then
	 * use the default for this interrupt.
	 */
	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		type = irqd_get_trigger_type(&desc->irq_data);

	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(enable_percpu_irq);
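
/*
 * Example (illustrative, not part of this file): since the enable is CPU
 * local, one way a driver can apply it everywhere is a helper dispatched
 * via on_each_cpu() (CPU hotplug callbacks are the other common pattern).
 * foo_irq is a hypothetical global.
 *
 *	static void foo_enable_local(void *info)
 *	{
 *		enable_percpu_irq(foo_irq, IRQ_TYPE_NONE);
 *	}
 *
 *	on_each_cpu(foo_enable_local, NULL, 1);
 */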

void enable_percpu_nmi(unsigned int irq, unsigned int type)
{
	enable_percpu_irq(irq, type);
}

/**
 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
 * @irq:	Linux irq number to check for
 *
 * Must be called from a non-migratable context. Returns the enable
 * state of a per-cpu interrupt on the current cpu.
 */
bool irq_percpu_is_enabled(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;
	bool is_enabled;

	desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return false;

	is_enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
	irq_put_desc_unlock(desc, flags);

	return is_enabled;
}
EXPORT_SYMBOL_GPL(irq_percpu_is_enabled);

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(disable_percpu_irq);

void disable_percpu_nmi(unsigned int irq)
{
	disable_percpu_irq(irq);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	desc->istate &= ~IRQS_NMI;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	irq_chip_pm_put(&desc->irq_data);
	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
		__free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL_GPL(free_percpu_irq);

void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		return;

	kfree(__free_percpu_irq(irq, dev_id));
}

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		return retval;

	retval = __setup_irq(irq, desc, act);

	if (retval)
		irq_chip_pm_put(&desc->irq_data);

	return retval;
}

/**
 *	__request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@flags: Interrupt type flags (IRQF_TIMER only)
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt on the local CPU. If the interrupt is supposed to be
 *	enabled on other CPUs, it has to be done on each CPU using
 *	enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
			 unsigned long flags, const char *devname,
			 void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	if (flags && flags != IRQF_TIMER)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags | IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0) {
		kfree(action);
		return retval;
	}

	retval = __setup_irq(irq, desc, action);

	if (retval) {
		irq_chip_pm_put(&desc->irq_data);
		kfree(action);
	}

	return retval;
}
EXPORT_SYMBOL_GPL(__request_percpu_irq);
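
/*
 * Example (illustrative, not part of this file): the usual way in is the
 * request_percpu_irq() wrapper from <linux/interrupt.h>, which passes
 * flags == 0 to the function above. The cookie is a per-cpu variable, so
 * the handler sees the instance belonging to the CPU the interrupt
 * arrived on. foo_pcpu and foo_percpu_handler are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);
 *
 *	err = request_percpu_irq(irq, foo_percpu_handler, "foo",
 *				 &foo_pcpu);
 *	if (!err)
 *		enable_percpu_irq(irq, IRQ_TYPE_NONE);	// on each CPU
 */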

/**
 *	request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@name: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
 *	have to be setup on each CPU by calling prepare_percpu_nmi() before
 *	being enabled on the same CPU by using enable_percpu_nmi().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 *
 *	Interrupt lines requested for NMI delivery should have auto-enabling
 *	disabled.
 *
 *	If the interrupt line cannot be used to deliver NMIs, the function
 *	will fail, returning a negative value.
 */
int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
		       const char *name, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;
	int retval;

	if (!handler)
		return -EINVAL;

	desc = irq_to_desc(irq);

	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc) ||
	    irq_settings_can_autoenable(desc) ||
	    !irq_supports_nmi(desc))
		return -EINVAL;

	/* The line cannot already be NMI */
	if (desc->istate & IRQS_NMI)
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
		| IRQF_NOBALANCING;
	action->name = name;
	action->percpu_dev_id = dev_id;

	retval = irq_chip_pm_get(&desc->irq_data);
	if (retval < 0)
		goto err_out;

	retval = __setup_irq(irq, desc, action);
	if (retval)
		goto err_irq_setup;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->istate |= IRQS_NMI;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;

err_irq_setup:
	irq_chip_pm_put(&desc->irq_data);
err_out:
	kfree(action);

	return retval;
}

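/*
 * Example (illustrative, not part of this file): the full per-CPU NMI
 * flow described above. After the request, each CPU prepares and then
 * enables delivery locally, from a non-preemptible CPU-local context.
 * foo_nmi_handler and foo_nmi_pcpu are hypothetical.
 *
 *	static DEFINE_PER_CPU(struct foo_nmi, foo_nmi_pcpu);
 *
 *	err = request_percpu_nmi(irq, foo_nmi_handler, "foo-nmi",
 *				 &foo_nmi_pcpu);
 *
 *	// Later, on each CPU, with preemption disabled:
 *	if (!prepare_percpu_nmi(irq))
 *		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
 */
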
/**
 *	prepare_percpu_nmi - performs CPU local setup for NMI delivery
 *	@irq: Interrupt line to prepare for NMI delivery
 *
 *	This call prepares an interrupt line to deliver NMI on the current CPU,
 *	before that interrupt line gets enabled with enable_percpu_nmi().
 *
 *	As a CPU local operation, this should be called from non-preemptible
 *	context.
 *
 *	If the interrupt line cannot be used to deliver NMIs, the function
 *	will fail, returning a negative value.
 */
int prepare_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;
	int ret = 0;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return -EINVAL;

	if (WARN(!(desc->istate & IRQS_NMI),
		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
		 irq)) {
		ret = -EINVAL;
		goto out;
	}

	ret = irq_nmi_setup(desc);
	if (ret) {
		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
		goto out;
	}

out:
	irq_put_desc_unlock(desc, flags);
	return ret;
}

/**
 *	teardown_percpu_nmi - undoes NMI setup of IRQ line
 *	@irq: Interrupt line from which CPU local NMI configuration should be
 *	      removed
 *
 *	This call undoes the setup done by prepare_percpu_nmi().
 *
 *	IRQ line should not be enabled for the current CPU.
 *
 *	As a CPU local operation, this should be called from non-preemptible
 *	context.
 */
void teardown_percpu_nmi(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc;

	WARN_ON(preemptible());

	desc = irq_get_desc_lock(irq, &flags,
				 IRQ_GET_DESC_CHECK_PERCPU);
	if (!desc)
		return;

	if (WARN_ON(!(desc->istate & IRQS_NMI)))
		goto out;

	irq_nmi_teardown(desc);
out:
	irq_put_desc_unlock(desc, flags);
}

int __irq_get_irqchip_state(struct irq_data *data, enum irqchip_irq_state which,
			    bool *state)
{
	struct irq_chip *chip;
	int err = -EINVAL;

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip))
			return -ENODEV;
		if (chip->irq_get_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_get_irqchip_state(data, which, state);
	return err;
}

/**
 *	irq_get_irqchip_state - returns the irqchip state of an interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: One of IRQCHIP_STATE_* the caller wants to know about
 *	@state: a pointer to a boolean where the state is to be stored
 *
 *	This call snapshots the internal irqchip state of an
 *	interrupt, returning into @state the bit corresponding to
 *	state @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool *state)
{
	struct irq_desc *desc;
	struct irq_data *data;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	err = __irq_get_irqchip_state(data, which, state);

	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
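
/*
 * Example (illustrative, not part of this file): a VFIO-style user could
 * snapshot whether a forwarded interrupt is still pending at the irqchip
 * before handing its state to a VM. The irq variable is assumed to hold
 * a valid Linux irq number.
 *
 *	bool pending;
 *
 *	if (!irq_get_irqchip_state(irq, IRQCHIP_STATE_PENDING, &pending))
 *		pr_debug("irq %u pending: %d\n", irq, pending);
 */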

/**
 *	irq_set_irqchip_state - set the state of a forwarded interrupt.
 *	@irq: Interrupt line that is forwarded to a VM
 *	@which: State to be restored (one of IRQCHIP_STATE_*)
 *	@val: Value corresponding to @which
 *
 *	This call sets the internal irqchip state of an interrupt,
 *	depending on the value of @which.
 *
 *	This function should be called with preemption disabled if the
 *	interrupt controller has per-cpu registers.
 */
int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
			  bool val)
{
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;
	unsigned long flags;
	int err = -EINVAL;

	desc = irq_get_desc_buslock(irq, &flags, 0);
	if (!desc)
		return err;

	data = irq_desc_get_irq_data(desc);

	do {
		chip = irq_data_get_irq_chip(data);
		if (WARN_ON_ONCE(!chip)) {
			err = -ENODEV;
			goto out_unlock;
		}
		if (chip->irq_set_irqchip_state)
			break;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		data = data->parent_data;
#else
		data = NULL;
#endif
	} while (data);

	if (data)
		err = chip->irq_set_irqchip_state(data, which, val);

out_unlock:
	irq_put_desc_busunlock(desc, flags);
	return err;
}
EXPORT_SYMBOL_GPL(irq_set_irqchip_state);
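
/*
 * Example (illustrative, not part of this file): the counterpart to the
 * snapshot above, re-injecting a previously saved pending state when a
 * VM is resumed. The irq number and the saved "pending" flag are
 * assumptions of the sketch.
 *
 *	err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, pending);
 */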

/**
 * irq_has_action - Check whether an interrupt is requested
 * @irq:	The linux irq number
 *
 * Returns: A snapshot of the current state
 */
bool irq_has_action(unsigned int irq)
{
	bool res;

	rcu_read_lock();
	res = irq_desc_has_action(irq_to_desc(irq));
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_has_action);

/**
 * irq_check_status_bit - Check whether bits in the irq descriptor status are set
 * @irq:	The linux irq number
 * @bitmask:	The bitmask to evaluate
 *
 * Returns: True if one of the bits in @bitmask is set
 */
bool irq_check_status_bit(unsigned int irq, unsigned int bitmask)
{
	struct irq_desc *desc;
	bool res = false;

	rcu_read_lock();
	desc = irq_to_desc(irq);
	if (desc)
		res = !!(desc->status_use_accessors & bitmask);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL_GPL(irq_check_status_bit);
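
/*
 * Example (illustrative, not part of this file): both helpers above only
 * take a snapshot, so the answer may be stale by the time it is used; a
 * caller might use them for reporting, e.g.:
 *
 *	if (irq_has_action(irq) && irq_check_status_bit(irq, IRQ_LEVEL))
 *		pr_info("irq %u: requested, level triggered\n", irq);
 */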