v3.5.6
/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#define pr_fmt(fmt) "genirq: " fmt

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/task_work.h>

#include "internals.h"

#ifdef CONFIG_IRQ_FORCED_THREADING
__read_mostly bool force_irqthreads;

static int __init setup_forced_irqthreads(char *arg)
{
	force_irqthreads = true;
	return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
#endif
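
/*
 * Usage note (editor's illustration, not part of the original file):
 * booting with "threadirqs" on the kernel command line sets
 * force_irqthreads, and __setup_irq() below then runs most primary
 * handlers from a thread via irq_setup_forced_threading(). Actions
 * flagged IRQF_NO_THREAD, IRQF_PERCPU or IRQF_ONESHOT are left alone.
 */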

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	bool inprogress;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section.  This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (irqd_irq_inprogress(&desc->irq_data))
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		inprogress = irqd_irq_inprogress(&desc->irq_data);
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (inprogress);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
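
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a typical teardown path quiesces the device first, then lets
 * in-flight handlers drain. All foo_* names are hypothetical.
 *
 *	static void foo_shutdown(struct foo_dev *foo)
 *	{
 *		foo_mask_device_irqs(foo);	// stop new interrupts at the source
 *		synchronize_irq(foo->irq);	// wait out running handlers
 *		// now nothing the handler touches can still be in use
 *	}
 *
 * Do not call this while holding a lock the handler takes, or it will
 * deadlock, as the comment above warns.
 */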

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irqd_can_balance(&desc->irq_data) ||
	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We cannot call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_data *data)
{
	return irqd_can_move_in_process_context(data);
}
static inline bool irq_move_pending(struct irq_data *data)
{
	return irqd_is_setaffinity_pending(data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
static inline bool irq_move_pending(struct irq_data *data) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(data->affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
		ret = 0;
	}

	return ret;
}

int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
{
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct irq_desc *desc = irq_data_to_desc(data);
	int ret = 0;

	if (!chip || !chip->irq_set_affinity)
		return -EINVAL;

	if (irq_can_move_pcntxt(data)) {
		ret = irq_do_set_affinity(data, mask, false);
	} else {
		irqd_set_move_pending(data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irqd_set(data, IRQD_AFFINITY_SET);

	return ret;
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@mask:		cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->affinity_hint = m;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
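
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): pinning an irq to one CPU and publishing the same mask as a
 * hint for userspace irqbalance. The foo_* names are hypothetical.
 *
 *	static int foo_pin_irq(struct foo_dev *foo, int cpu)
 *	{
 *		cpumask_clear(&foo->irq_affinity);
 *		cpumask_set_cpu(cpu, &foo->irq_affinity);
 *		irq_set_affinity_hint(foo->irq, &foo->irq_affinity);
 *		return irq_set_affinity(foo->irq, &foo->irq_affinity);
 *	}
 *
 * The hint must be reset with irq_set_affinity_hint(irq, NULL) before
 * the mask's storage disappears or the irq is freed.
 */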

static void irq_affinity_notify(struct work_struct *work)
{
	struct irq_affinity_notify *notify =
		container_of(work, struct irq_affinity_notify, work);
	struct irq_desc *desc = irq_to_desc(notify->irq);
	cpumask_var_t cpumask;
	unsigned long flags;

	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
		goto out;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (irq_move_pending(&desc->irq_data))
		irq_get_pending(cpumask, desc);
	else
		cpumask_copy(cpumask, desc->irq_data.affinity);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	notify->notify(notify, cpumask);

	free_cpumask_var(cpumask);
out:
	kref_put(&notify->kref, notify->release);
}

/**
 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 *	@irq:		Interrupt for which to enable/disable notification
 *	@notify:	Context for notification, or %NULL to disable
 *			notification.  Function pointers must be initialised;
 *			the other fields will be initialised by this function.
 *
 *	Must be called in process context.  Notification may only be enabled
 *	after the IRQ is allocated and must be disabled before the IRQ is
 *	freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_affinity_notify *old_notify;
	unsigned long flags;

	/* The release function is promised process context */
	might_sleep();

	if (!desc)
		return -EINVAL;

	/* Complete initialisation of *notify */
	if (notify) {
		notify->irq = irq;
		kref_init(&notify->kref);
		INIT_WORK(&notify->work, irq_affinity_notify);
	}

	raw_spin_lock_irqsave(&desc->lock, flags);
	old_notify = desc->affinity_notify;
	desc->affinity_notify = notify;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	if (old_notify)
		kref_put(&old_notify->kref, old_notify->release);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
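
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a driver embedding the notify context in its device struct.
 * Both callbacks run in process context. foo_* names are hypothetical.
 *
 *	static void foo_affinity_changed(struct irq_affinity_notify *notify,
 *					 const cpumask_t *mask)
 *	{
 *		struct foo_dev *foo = container_of(notify, struct foo_dev,
 *						   affinity_notify);
 *
 *		foo_move_queues(foo, mask);	// follow the irq to the new CPUs
 *	}
 *
 *	static void foo_affinity_release(struct kref *ref)
 *	{
 *		// nothing to free: the context is embedded in foo_dev
 *	}
 *
 *	foo->affinity_notify.notify = foo_affinity_changed;
 *	foo->affinity_notify.release = foo_affinity_release;
 *	irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *
 * Disable with irq_set_affinity_notifier(foo->irq, NULL) before
 * free_irq(), as the kernel-doc above requires.
 */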

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	struct cpumask *set = irq_default_affinity;
	int node = desc->irq_data.node;

	/* Excludes PER_CPU and NO_BALANCE interrupts */
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
	}

	cpumask_and(mask, cpu_online_mask, set);
	if (node != NUMA_NO_NODE) {
		const struct cpumask *nodemask = cpumask_of_node(node);

		/* make sure at least one of the cpus in nodemask is online */
		if (cpumask_intersects(mask, nodemask))
			cpumask_and(mask, mask, nodemask);
	}
	irq_do_set_affinity(&desc->irq_data, mask, false);
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
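
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): because disables nest, a driver can bracket reconfiguration
 * with a disable/enable pair regardless of other callers. foo_* names
 * are hypothetical.
 *
 *	disable_irq(foo->irq);		// returns after handlers finish
 *	foo_rewrite_rx_ring(foo);	// handler cannot observe this state
 *	enable_irq(foo->irq);
 *
 * From the handler itself (or while holding anything the handler
 * takes), only disable_irq_nosync() is safe; disable_irq() would wait
 * for the very handler that called it.
 */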

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume) {
		if (!(desc->istate & IRQS_SUSPENDED)) {
			if (!desc->action)
				return;
			if (!(desc->action->flags & IRQF_FORCE_RESUME))
				return;
			/* Pretend that it got disabled ! */
			desc->depth++;
		}
		desc->istate &= ~IRQS_SUSPENDED;
	}

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		if (desc->istate & IRQS_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		irq_settings_set_noprobe(desc);
		irq_enable(desc);
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
		return 0;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 *	irq_set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
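
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a driver usually flips wake mode in its suspend/resume
 * callbacks, keyed off device_may_wakeup(). foo_* names hypothetical.
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *		if (device_may_wakeup(dev))
 *			irq_set_irq_wake(foo->irq, 1);
 *		return 0;
 *	}
 *
 * The matching resume callback calls irq_set_irq_wake(foo->irq, 0),
 * keeping the enable/disable counts balanced as required above.
 */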

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
	int canrequest = 0;

	if (!desc)
		return 0;

	if (irq_settings_can_request(desc)) {
		if (desc->action)
			if (irqflags & desc->action->flags & IRQF_SHARED)
				canrequest = 1;
	}
	irq_put_desc_unlock(desc, flags);
	return canrequest;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		      unsigned long flags)
{
	struct irq_chip *chip = desc->irq_data.chip;
	int ret, unmask = 0;

	if (!chip || !chip->irq_set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	flags &= IRQ_TYPE_SENSE_MASK;

	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
		if (!irqd_irq_masked(&desc->irq_data))
			mask_irq(desc);
		if (!irqd_irq_disabled(&desc->irq_data))
			unmask = 1;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->irq_set_type(&desc->irq_data, flags);

	switch (ret) {
	case IRQ_SET_MASK_OK:
		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
		irqd_set(&desc->irq_data, flags);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		flags = irqd_get_trigger_type(&desc->irq_data);
		irq_settings_set_trigger_mask(desc, flags);
		irqd_clear(&desc->irq_data, IRQD_LEVEL);
		irq_settings_clr_level(desc);
		if (flags & IRQ_TYPE_LEVEL_MASK) {
			irq_settings_set_level(desc);
			irqd_set(&desc->irq_data, IRQD_LEVEL);
		}

		ret = 0;
		break;
	default:
		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	set_current_state(TASK_INTERRUPTIBLE);

	while (!kthread_should_stop()) {

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
		set_current_state(TASK_INTERRUPTIBLE);
	}
	__set_current_state(TASK_RUNNING);
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask it if the interrupt has not been
 * disabled and is marked MASKED.
 */
static void irq_finalize_oneshot(struct irq_desc *desc,
				 struct irqaction *action)
{
	if (!(desc->istate & IRQS_ONESHOT))
		return;
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect against
	 * the following scenario:
	 *
	 * The thread finishes before the hard interrupt handler on
	 * the other CPU does. If we unmask the irq line now, the
	 * interrupt can come in again, get masked, and then bail out
	 * due to IRQS_INPROGRESS, leaving the irq line masked forever.
	 *
	 * This also serializes the state of shared oneshot handlers
	 * versus "desc->threads_oneshot |= action->thread_mask;" in
	 * irq_wake_thread(). See the comment there which explains the
	 * serialization.
	 */
	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	/*
	 * Now check again, whether the thread should run. Otherwise
	 * we would clear the threads_oneshot bit of this thread which
	 * was just set.
	 */
	if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		goto out_unlock;

	desc->threads_oneshot &= ~action->thread_mask;

	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data))
		unmask_irq(desc);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupts which are not explicitly requested as threaded
 * interrupts rely on the implicit bh/preempt disable of the hard irq
 * context. So we need to disable bh here to avoid deadlocks and other
 * side effects.
 */
static irqreturn_t
irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
{
	irqreturn_t ret;

	local_bh_disable();
	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	local_bh_enable();
	return ret;
}

/*
 * Interrupts explicitly requested as threaded interrupts want to be
 * preemptible - many of them need to sleep and wait for slow buses to
 * complete.
 */
static irqreturn_t irq_thread_fn(struct irq_desc *desc,
		struct irqaction *action)
{
	irqreturn_t ret;

	ret = action->thread_fn(action->irq, action->dev_id);
	irq_finalize_oneshot(desc, action);
	return ret;
}

static void wake_threads_waitq(struct irq_desc *desc)
{
	if (atomic_dec_and_test(&desc->threads_active) &&
	    waitqueue_active(&desc->wait_for_threads))
		wake_up(&desc->wait_for_threads);
}

static void irq_thread_dtor(struct task_work *unused)
{
	struct task_struct *tsk = current;
	struct irq_desc *desc;
	struct irqaction *action;

	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
		return;

	action = kthread_data(tsk);

	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);

	desc = irq_to_desc(action->irq);
	/*
	 * If IRQTF_RUNTHREAD is set, we need to decrement
	 * desc->threads_active and wake possible waiters.
	 */
	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		wake_threads_waitq(desc);

	/* Prevent a stale desc->threads_oneshot */
	irq_finalize_oneshot(desc, action);
}

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct task_work on_exit_work;
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	irqreturn_t (*handler_fn)(struct irq_desc *desc,
			struct irqaction *action);

	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
					&action->thread_flags))
		handler_fn = irq_forced_thread_fn;
	else
		handler_fn = irq_thread_fn;

	sched_setscheduler(current, SCHED_FIFO, &param);

	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
	task_work_add(current, &on_exit_work, false);

	while (!irq_wait_for_interrupt(action)) {
		irqreturn_t action_ret;

		irq_thread_check_affinity(desc, action);

		action_ret = handler_fn(desc, action);
		if (!noirqdebug)
			note_interrupt(action->irq, desc, action_ret);

		wake_threads_waitq(desc);
	}

	/*
	 * This is the regular exit path. __free_irq() is stopping the
	 * thread via kthread_stop() after calling
	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
	 * oneshot mask bit can be set. We cannot verify that as we
	 * cannot touch the oneshot mask at this point anymore as
	 * __setup_irq() might have given out current's thread_mask
	 * again.
	 */
	task_work_cancel(current, irq_thread_dtor);
	return 0;
}

static void irq_setup_forced_threading(struct irqaction *new)
{
	if (!force_irqthreads)
		return;
	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
		return;

	new->flags |= IRQF_ONESHOT;

	if (!new->thread_fn) {
		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
		new->thread_fn = new->handler;
		new->handler = irq_default_primary_handler;
	}
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	unsigned long flags, thread_mask = 0;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	if (!try_module_get(desc->owner))
		return -ENODEV;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn) {
			ret = -EINVAL;
			goto out_mput;
		}
		/*
		 * Replace the primary handler, which the driver
		 * provided for non-nested interrupt handling, with
		 * the dummy function that warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	} else {
		if (irq_settings_can_thread(desc))
			irq_setup_forced_threading(new);
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t)) {
			ret = PTR_ERR(t);
			goto out_mput;
		}
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies, so that the interrupt code never
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match. Also all must
		 * agree on ONESHOT.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
			goto mismatch;

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			/*
			 * Or all existing action->thread_mask bits,
			 * so we can find the next zero bit for this
			 * new action.
			 */
			thread_mask |= old->thread_mask;
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	/*
	 * Setup the thread mask for this irqaction for ONESHOT. For
	 * !ONESHOT irqs the thread mask is 0 so we can avoid a
	 * conditional in irq_wake_thread().
	 */
	if (new->flags & IRQF_ONESHOT) {
		/*
		 * Unlikely to have 32 (resp. 64) irqs sharing one
		 * line, but who knows.
		 */
		if (thread_mask == ~0UL) {
			ret = -EBUSY;
			goto out_mask;
		}
		/*
		 * The thread_mask for the action is or'ed into
		 * desc->threads_oneshot to indicate that the
		 * IRQF_ONESHOT thread handler has been woken, but not
		 * yet finished. The bit is cleared when a thread
		 * completes. When all threads of a shared interrupt
		 * line have completed desc->threads_oneshot becomes
		 * zero and the interrupt line is unmasked. See
		 * handle.c:irq_wake_thread() for further information.
		 *
		 * If no thread is woken by primary (hard irq context)
		 * interrupt handlers, then desc->threads_oneshot is
		 * also checked for zero to unmask the irq line in the
		 * affected hard irq flow handlers
		 * (handle_[fasteoi|level]_irq).
		 *
		 * The new action gets the first zero bit of
		 * thread_mask assigned. See the loop above which or's
		 * all existing action->thread_mask bits.
		 */
		new->thread_mask = 1 << ffz(thread_mask);

	} else if (new->handler == irq_default_primary_handler) {
		/*
		 * The interrupt was requested with handler = NULL, so
		 * we use the default primary handler for it. But it
		 * does not have the oneshot flag set. In combination
		 * with level interrupts this is deadly, because the
		 * default primary handler just wakes the thread, then
		 * the irq line is reenabled, but the device still
		 * has the level irq asserted. Rinse and repeat....
		 *
		 * While this works for edge type interrupts, we play
		 * it safe and reject unconditionally because we can't
		 * say for sure which type this interrupt really
		 * has. The type flags are unreliable as the
		 * underlying chip implementation can override them.
		 */
		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
		       irq);
		ret = -EINVAL;
		goto out_mask;
	}
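
	/*
	 * Worked example (editor's illustration): three ONESHOT
	 * actions sharing a line get thread_mask 0x1, 0x2 and 0x4. If
	 * a hard irq wakes the threads with mask 0x1 and 0x4,
	 * desc->threads_oneshot becomes 0x5 and the line stays masked
	 * until both threads have cleared their bit in
	 * irq_finalize_oneshot().
	 */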
 
	if (!shared) {
		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge, polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_ONESHOT | IRQS_WAITING);
		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc, true);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("irq %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);
	free_cpumask_var(mask);

	return 0;

mismatch:
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
		       irq, new->flags, new->name, old->flags, old->name);
#ifdef CONFIG_DEBUG_SHIRQ
		dump_stack();
#endif
	}
	ret = -EBUSY;

out_mask:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	free_cpumask_var(mask);

out_thread:
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		kthread_stop(t);
		put_task_struct(t);
	}
out_mput:
	module_put(desc->owner);
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now that it's being freed, so let's make
	 * sure that is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	module_put(desc->owner);
	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
	    __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc) ||
	    WARN_ON(irq_settings_is_per_cpu_devid(desc)))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
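
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): the canonical split-handler pair for a device on a slow bus.
 * The primary handler only checks and quiesces the device; the thread
 * does the sleeping I/O. All foo_* names are hypothetical.
 *
 *	static irqreturn_t foo_hardirq(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		if (!foo_irq_is_mine(foo))
 *			return IRQ_NONE;	// shared line, not ours
 *		foo_mask_device_irq(foo);	// silence the device
 *		return IRQ_WAKE_THREAD;		// run foo_thread_fn
 *	}
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo_dev *foo = dev_id;
 *
 *		foo_handle_events(foo);		// may sleep, e.g. i2c traffic
 *		foo_unmask_device_irq(foo);
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(foo->irq, foo_hardirq, foo_thread_fn,
 *				   IRQF_SHARED, "foo", foo);
 *
 * Teardown passes the same dev_id cookie back: free_irq(foo->irq, foo).
 */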

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (irq_settings_is_nested_thread(desc)) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
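
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): for a line that may sit behind a nested irqchip (e.g. a gpio
 * expander), check the sign of the result instead of assuming hardirq
 * context. foo_* names are hypothetical.
 *
 *	ret = request_any_context_irq(foo->irq, foo_handler,
 *				      IRQF_TRIGGER_LOW, "foo", foo);
 *	if (ret < 0)
 *		return ret;
 *	// else ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED
 */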

void enable_percpu_irq(unsigned int irq, unsigned int type)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type != IRQ_TYPE_NONE) {
		int ret;

		ret = __irq_set_trigger(desc, irq, type);

		if (ret) {
			WARN(1, "failed to set type for IRQ%d\n", irq);
			goto out;
		}
	}

	irq_percpu_enable(desc, cpu);
out:
	irq_put_desc_unlock(desc, flags);
}

void disable_percpu_irq(unsigned int irq)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);

	if (!desc)
		return;

	irq_percpu_disable(desc, cpu);
	irq_put_desc_unlock(desc, flags);
}

/*
 * Internal function to unregister a percpu irqaction.
 */
static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	action = desc->action;
	if (!action || action->percpu_dev_id != dev_id) {
		WARN(1, "Trying to free already-free IRQ %d\n", irq);
		goto bad;
	}

	if (!cpumask_empty(desc->percpu_enabled)) {
		WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
		     irq, cpumask_first(desc->percpu_enabled));
		goto bad;
	}

	/* Found it - now remove it from the list of entries: */
	desc->action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	module_put(desc->owner);
	return action;

bad:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return NULL;
}

/**
 *	remove_percpu_irq - free a per-cpu interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc && irq_settings_is_per_cpu_devid(desc))
	    __free_percpu_irq(irq, act->percpu_dev_id);
}

/**
 *	free_percpu_irq - free an interrupt allocated with request_percpu_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove a percpu interrupt handler. The handler is removed, but
 *	the interrupt line is not disabled. This must be done on each
 *	CPU before calling this function. The function does not return
 *	until any executing interrupts for this IRQ have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return;

	chip_bus_lock(desc);
	kfree(__free_percpu_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}

/**
 *	setup_percpu_irq - setup a per-cpu interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup per-cpu interrupts in the early boot process.
 */
int setup_percpu_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int retval;

	if (!desc || !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;
	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}

/**
 *	request_percpu_irq - allocate a percpu interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A percpu cookie passed back to the handler function
 *
 *	This call allocates interrupt resources, but doesn't
 *	automatically enable the interrupt. It has to be done on each
 *	CPU using enable_percpu_irq().
 *
 *	Dev_id must be globally unique. It is a per-cpu variable, and
 *	the handler gets called with the interrupted CPU's instance of
 *	that variable.
 */
int request_percpu_irq(unsigned int irq, irq_handler_t handler,
		       const char *devname, void __percpu *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	if (!dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc || !irq_settings_can_request(desc) ||
	    !irq_settings_is_per_cpu_devid(desc))
		return -EINVAL;

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
	action->name = devname;
	action->percpu_dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

	return retval;
}
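
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): per-cpu irqs (e.g. a local timer) take a __percpu cookie and
 * must be enabled on each CPU separately. foo_* names hypothetical.
 *
 *	static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu);
 *
 *	ret = request_percpu_irq(irq, foo_timer_handler, "foo_timer",
 *				 &foo_pcpu);
 *
 *	// then, from each CPU's bringup notifier:
 *	enable_percpu_irq(irq, IRQ_TYPE_NONE);
 */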
v3.1
   1/*
   2 * linux/kernel/irq/manage.c
   3 *
   4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
   5 * Copyright (C) 2005-2006 Thomas Gleixner
   6 *
   7 * This file contains driver APIs to the irq subsystem.
   8 */
   9
 
 
  10#include <linux/irq.h>
  11#include <linux/kthread.h>
  12#include <linux/module.h>
  13#include <linux/random.h>
  14#include <linux/interrupt.h>
  15#include <linux/slab.h>
  16#include <linux/sched.h>
 
  17
  18#include "internals.h"
  19
  20#ifdef CONFIG_IRQ_FORCED_THREADING
  21__read_mostly bool force_irqthreads;
  22
  23static int __init setup_forced_irqthreads(char *arg)
  24{
  25	force_irqthreads = true;
  26	return 0;
  27}
  28early_param("threadirqs", setup_forced_irqthreads);
  29#endif
  30
  31/**
  32 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  33 *	@irq: interrupt number to wait for
  34 *
  35 *	This function waits for any pending IRQ handlers for this interrupt
  36 *	to complete before returning. If you use this function while
  37 *	holding a resource the IRQ handler may need you will deadlock.
  38 *
  39 *	This function may be called - with care - from IRQ context.
  40 */
  41void synchronize_irq(unsigned int irq)
  42{
  43	struct irq_desc *desc = irq_to_desc(irq);
  44	bool inprogress;
  45
  46	if (!desc)
  47		return;
  48
  49	do {
  50		unsigned long flags;
  51
  52		/*
  53		 * Wait until we're out of the critical section.  This might
  54		 * give the wrong answer due to the lack of memory barriers.
  55		 */
  56		while (irqd_irq_inprogress(&desc->irq_data))
  57			cpu_relax();
  58
  59		/* Ok, that indicated we're done: double-check carefully. */
  60		raw_spin_lock_irqsave(&desc->lock, flags);
  61		inprogress = irqd_irq_inprogress(&desc->irq_data);
  62		raw_spin_unlock_irqrestore(&desc->lock, flags);
  63
  64		/* Oops, that failed? */
  65	} while (inprogress);
  66
  67	/*
  68	 * We made sure that no hardirq handler is running. Now verify
  69	 * that no threaded handlers are active.
  70	 */
  71	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
  72}
  73EXPORT_SYMBOL(synchronize_irq);
  74
  75#ifdef CONFIG_SMP
  76cpumask_var_t irq_default_affinity;
  77
  78/**
  79 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
  80 *	@irq:		Interrupt to check
  81 *
  82 */
  83int irq_can_set_affinity(unsigned int irq)
  84{
  85	struct irq_desc *desc = irq_to_desc(irq);
  86
  87	if (!desc || !irqd_can_balance(&desc->irq_data) ||
  88	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
  89		return 0;
  90
  91	return 1;
  92}
  93
  94/**
  95 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
  96 *	@desc:		irq descriptor which has affitnity changed
  97 *
  98 *	We just set IRQTF_AFFINITY and delegate the affinity setting
  99 *	to the interrupt thread itself. We can not call
 100 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 101 *	code can be called from hard interrupt context.
 102 */
 103void irq_set_thread_affinity(struct irq_desc *desc)
 104{
 105	struct irqaction *action = desc->action;
 106
 107	while (action) {
 108		if (action->thread)
 109			set_bit(IRQTF_AFFINITY, &action->thread_flags);
 110		action = action->next;
 111	}
 112}
 113
 114#ifdef CONFIG_GENERIC_PENDING_IRQ
 115static inline bool irq_can_move_pcntxt(struct irq_data *data)
 116{
 117	return irqd_can_move_in_process_context(data);
 118}
 119static inline bool irq_move_pending(struct irq_data *data)
 120{
 121	return irqd_is_setaffinity_pending(data);
 122}
 123static inline void
 124irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
 125{
 126	cpumask_copy(desc->pending_mask, mask);
 127}
 128static inline void
 129irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
 130{
 131	cpumask_copy(mask, desc->pending_mask);
 132}
 133#else
 134static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
 135static inline bool irq_move_pending(struct irq_data *data) { return false; }
 136static inline void
 137irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
 138static inline void
 139irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 140#endif
 141
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 142int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 143{
 144	struct irq_chip *chip = irq_data_get_irq_chip(data);
 145	struct irq_desc *desc = irq_data_to_desc(data);
 146	int ret = 0;
 147
 148	if (!chip || !chip->irq_set_affinity)
 149		return -EINVAL;
 150
 151	if (irq_can_move_pcntxt(data)) {
 152		ret = chip->irq_set_affinity(data, mask, false);
 153		switch (ret) {
 154		case IRQ_SET_MASK_OK:
 155			cpumask_copy(data->affinity, mask);
 156		case IRQ_SET_MASK_OK_NOCOPY:
 157			irq_set_thread_affinity(desc);
 158			ret = 0;
 159		}
 160	} else {
 161		irqd_set_move_pending(data);
 162		irq_copy_pending(desc, mask);
 163	}
 164
 165	if (desc->affinity_notify) {
 166		kref_get(&desc->affinity_notify->kref);
 167		schedule_work(&desc->affinity_notify->work);
 168	}
 169	irqd_set(data, IRQD_AFFINITY_SET);
 170
 171	return ret;
 172}
 173
 174/**
 175 *	irq_set_affinity - Set the irq affinity of a given irq
 176 *	@irq:		Interrupt to set affinity
 177 *	@mask:		cpumask
 178 *
 179 */
 180int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 181{
 182	struct irq_desc *desc = irq_to_desc(irq);
 183	unsigned long flags;
 184	int ret;
 185
 186	if (!desc)
 187		return -EINVAL;
 188
 189	raw_spin_lock_irqsave(&desc->lock, flags);
 190	ret =  __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
 191	raw_spin_unlock_irqrestore(&desc->lock, flags);
 192	return ret;
 193}
 194
 195int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 196{
 197	unsigned long flags;
 198	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 199
 200	if (!desc)
 201		return -EINVAL;
 202	desc->affinity_hint = m;
 203	irq_put_desc_unlock(desc, flags);
 204	return 0;
 205}
 206EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 207
 208static void irq_affinity_notify(struct work_struct *work)
 209{
 210	struct irq_affinity_notify *notify =
 211		container_of(work, struct irq_affinity_notify, work);
 212	struct irq_desc *desc = irq_to_desc(notify->irq);
 213	cpumask_var_t cpumask;
 214	unsigned long flags;
 215
 216	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
 217		goto out;
 218
 219	raw_spin_lock_irqsave(&desc->lock, flags);
 220	if (irq_move_pending(&desc->irq_data))
 221		irq_get_pending(cpumask, desc);
 222	else
 223		cpumask_copy(cpumask, desc->irq_data.affinity);
 224	raw_spin_unlock_irqrestore(&desc->lock, flags);
 225
 226	notify->notify(notify, cpumask);
 227
 228	free_cpumask_var(cpumask);
 229out:
 230	kref_put(&notify->kref, notify->release);
 231}
 232
 233/**
 234 *	irq_set_affinity_notifier - control notification of IRQ affinity changes
 235 *	@irq:		Interrupt for which to enable/disable notification
 236 *	@notify:	Context for notification, or %NULL to disable
 237 *			notification.  Function pointers must be initialised;
 238 *			the other fields will be initialised by this function.
 239 *
 240 *	Must be called in process context.  Notification may only be enabled
 241 *	after the IRQ is allocated and must be disabled before the IRQ is
 242 *	freed using free_irq().
 243 */
 244int
 245irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 246{
 247	struct irq_desc *desc = irq_to_desc(irq);
 248	struct irq_affinity_notify *old_notify;
 249	unsigned long flags;
 250
 251	/* The release function is promised process context */
 252	might_sleep();
 253
 254	if (!desc)
 255		return -EINVAL;
 256
 257	/* Complete initialisation of *notify */
 258	if (notify) {
 259		notify->irq = irq;
 260		kref_init(&notify->kref);
 261		INIT_WORK(&notify->work, irq_affinity_notify);
 262	}
 263
 264	raw_spin_lock_irqsave(&desc->lock, flags);
 265	old_notify = desc->affinity_notify;
 266	desc->affinity_notify = notify;
 267	raw_spin_unlock_irqrestore(&desc->lock, flags);
 268
 269	if (old_notify)
 270		kref_put(&old_notify->kref, old_notify->release);
 271
 272	return 0;
 273}
 274EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 275
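/*
 * Usage sketch (hypothetical): receiving affinity change notifications.
 * Only the two function pointers must be filled in before registration;
 * irq, kref and work are initialised by irq_set_affinity_notifier().
 */
#if 0
static void example_notify(struct irq_affinity_notify *notify,
			   const cpumask_t *mask)
{
	pr_info("irq %u affinity changed\n", notify->irq);
}

static void example_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);

	/* Free the embedding object here if it was allocated dynamically */
}

static struct irq_affinity_notify example_notifier = {
	.notify  = example_notify,
	.release = example_release,
};

/*
 * irq_set_affinity_notifier(irq, &example_notifier) registers it;
 * irq_set_affinity_notifier(irq, NULL) must disable it before free_irq().
 */
#endif
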
 276#ifndef CONFIG_AUTO_IRQ_AFFINITY
 277/*
 278 * Generic version of the affinity autoselector.
 279 */
 280static int
 281setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 282{
 283	struct irq_chip *chip = irq_desc_get_chip(desc);
 284	struct cpumask *set = irq_default_affinity;
 285	int ret;
 286
 287	/* Excludes PER_CPU and NO_BALANCE interrupts */
 288	if (!irq_can_set_affinity(irq))
 289		return 0;
 290
 291	/*
 292	 * Preserve a userspace affinity setup, but make sure that
 293	 * one of the targets is online.
 294	 */
 295	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
 296		if (cpumask_intersects(desc->irq_data.affinity,
 297				       cpu_online_mask))
 298			set = desc->irq_data.affinity;
 299		else
 300			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 301	}
 302
 303	cpumask_and(mask, cpu_online_mask, set);
 304	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
 305	switch (ret) {
 306	case IRQ_SET_MASK_OK:
 307		cpumask_copy(desc->irq_data.affinity, mask);	/* fall through */
 308	case IRQ_SET_MASK_OK_NOCOPY:
 309		irq_set_thread_affinity(desc);
 310	}
 311	return 0;
 312}
 313#else
 314static inline int
 315setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 316{
 317	return irq_select_affinity(irq);
 318}
 319#endif
 320
 321/*
 322 * Called when affinity is set via /proc/irq
 323 */
 324int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 325{
 326	struct irq_desc *desc = irq_to_desc(irq);
 327	unsigned long flags;
 328	int ret;
 329
 330	raw_spin_lock_irqsave(&desc->lock, flags);
 331	ret = setup_affinity(irq, desc, mask);
 332	raw_spin_unlock_irqrestore(&desc->lock, flags);
 333	return ret;
 334}
 335
 336#else
 337static inline int
 338setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 339{
 340	return 0;
 341}
 342#endif
 343
 344void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 345{
 346	if (suspend) {
 347		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
 348			return;
 349		desc->istate |= IRQS_SUSPENDED;
 350	}
 351
 352	if (!desc->depth++)
 353		irq_disable(desc);
 354}
 355
 356static int __disable_irq_nosync(unsigned int irq)
 357{
 358	unsigned long flags;
 359	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 360
 361	if (!desc)
 362		return -EINVAL;
 363	__disable_irq(desc, irq, false);
 364	irq_put_desc_busunlock(desc, flags);
 365	return 0;
 366}
 367
 368/**
 369 *	disable_irq_nosync - disable an irq without waiting
 370 *	@irq: Interrupt to disable
 371 *
 372 *	Disable the selected interrupt line.  Disables and Enables are
 373 *	nested.
 374 *	Unlike disable_irq(), this function does not ensure existing
 375 *	instances of the IRQ handler have completed before returning.
 376 *
 377 *	This function may be called from IRQ context.
 378 */
 379void disable_irq_nosync(unsigned int irq)
 380{
 381	__disable_irq_nosync(irq);
 382}
 383EXPORT_SYMBOL(disable_irq_nosync);
 384
 385/**
 386 *	disable_irq - disable an irq and wait for completion
 387 *	@irq: Interrupt to disable
 388 *
 389 *	Disable the selected interrupt line.  Enables and Disables are
 390 *	nested.
 391 *	This function waits for any pending IRQ handlers for this interrupt
 392 *	to complete before returning. If you use this function while
 393 *	holding a resource the IRQ handler may need you will deadlock.
 394 *
 395 *	This function may be called - with care - from IRQ context.
 396 */
 397void disable_irq(unsigned int irq)
 398{
 399	if (!__disable_irq_nosync(irq))
 400		synchronize_irq(irq);
 401}
 402EXPORT_SYMBOL(disable_irq);
 403
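/*
 * Usage sketch (hypothetical): the classic bracket around code that
 * must not race with the handler. disable_irq() may sleep waiting for
 * a running handler, so this variant is for process context only; use
 * disable_irq_nosync() from atomic context.
 */
#if 0
static void example_quiesce(unsigned int irq)
{
	disable_irq(irq);		/* waits for running handlers */
	/* ... touch state shared with the handler ... */
	enable_irq(irq);		/* balanced with the disable above */
}
#endif
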
 404void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 405{
 406	if (resume) {
 407		if (!(desc->istate & IRQS_SUSPENDED)) {
 408			if (!desc->action)
 409				return;
 410			if (!(desc->action->flags & IRQF_FORCE_RESUME))
 411				return;
 412			/* Pretend that it got disabled ! */
 413			desc->depth++;
 414		}
 415		desc->istate &= ~IRQS_SUSPENDED;
 416	}
 417
 418	switch (desc->depth) {
 419	case 0:
 420 err_out:
 421		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 422		break;
 423	case 1: {
 424		if (desc->istate & IRQS_SUSPENDED)
 425			goto err_out;
 426		/* Prevent probing on this irq: */
 427		irq_settings_set_noprobe(desc);
 428		irq_enable(desc);
 429		check_irq_resend(desc, irq);
 430		/* fall-through */
 431	}
 432	default:
 433		desc->depth--;
 434	}
 435}
 436
 437/**
 438 *	enable_irq - enable handling of an irq
 439 *	@irq: Interrupt to enable
 440 *
 441 *	Undoes the effect of one call to disable_irq().  If this
 442 *	matches the last disable, processing of interrupts on this
 443 *	IRQ line is re-enabled.
 444 *
 445 *	This function may be called from IRQ context only when
 446 *	desc->irq_data.chip->bus_lock and ->bus_sync_unlock are NULL !
 447 */
 448void enable_irq(unsigned int irq)
 449{
 450	unsigned long flags;
 451	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 452
 453	if (!desc)
 454		return;
 455	if (WARN(!desc->irq_data.chip,
 456		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 457		goto out;
 458
 459	__enable_irq(desc, irq, false);
 460out:
 461	irq_put_desc_busunlock(desc, flags);
 462}
 463EXPORT_SYMBOL(enable_irq);
 464
 465static int set_irq_wake_real(unsigned int irq, unsigned int on)
 466{
 467	struct irq_desc *desc = irq_to_desc(irq);
 468	int ret = -ENXIO;
 469
 470	if (desc->irq_data.chip->irq_set_wake)
 471		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
 472
 473	return ret;
 474}
 475
 476/**
 477 *	irq_set_irq_wake - control irq power management wakeup
 478 *	@irq:	interrupt to control
 479 *	@on:	enable/disable power management wakeup
 480 *
 481 *	Enable/disable power management wakeup mode, which is
 482 *	disabled by default.  Enables and disables must match,
 483 *	just as they match for non-wakeup mode support.
 484 *
 485 *	Wakeup mode lets this IRQ wake the system from sleep
 486 *	states like "suspend to RAM".
 487 */
 488int irq_set_irq_wake(unsigned int irq, unsigned int on)
 489{
 490	unsigned long flags;
 491	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 492	int ret = 0;
 493
 494	if (!desc)
 495		return -EINVAL;
 496
 497	/* wakeup-capable irqs can be shared between drivers that
 498	 * don't need to have the same sleep mode behaviors.
 499	 */
 500	if (on) {
 501		if (desc->wake_depth++ == 0) {
 502			ret = set_irq_wake_real(irq, on);
 503			if (ret)
 504				desc->wake_depth = 0;
 505			else
 506				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 507		}
 508	} else {
 509		if (desc->wake_depth == 0) {
 510			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
 511		} else if (--desc->wake_depth == 0) {
 512			ret = set_irq_wake_real(irq, on);
 513			if (ret)
 514				desc->wake_depth = 1;
 515			else
 516				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 517		}
 518	}
 519	irq_put_desc_busunlock(desc, flags);
 520	return ret;
 521}
 522EXPORT_SYMBOL(irq_set_irq_wake);
 523
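/*
 * Usage sketch (hypothetical): arming an irq as a wakeup source from a
 * driver's suspend callback and disarming it on resume, keeping enables
 * and disables balanced as required above. enable_irq_wake() and
 * disable_irq_wake() are the inline wrappers around irq_set_irq_wake().
 * struct example_priv is made up for the example.
 */
#if 0
static int example_suspend(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(priv->irq);
	return 0;
}

static int example_resume(struct device *dev)
{
	struct example_priv *priv = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(priv->irq);
	return 0;
}
#endif
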
 524/*
 525 * Internal function that tells the architecture code whether a
 526 * particular irq has been exclusively allocated or is available
 527 * for driver use.
 528 */
 529int can_request_irq(unsigned int irq, unsigned long irqflags)
 530{
 531	unsigned long flags;
 532	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
 533	int canrequest = 0;
 534
 535	if (!desc)
 536		return 0;
 537
 538	if (irq_settings_can_request(desc)) {
 539		if (desc->action)
 540			if (irqflags & desc->action->flags & IRQF_SHARED)
 541				canrequest = 1;
 542	}
 543	irq_put_desc_unlock(desc, flags);
 544	return canrequest;
 545}
 546
 547int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 548		      unsigned long flags)
 549{
 550	struct irq_chip *chip = desc->irq_data.chip;
 551	int ret, unmask = 0;
 552
 553	if (!chip || !chip->irq_set_type) {
 554		/*
 555		 * IRQF_TRIGGER_* but the PIC does not support multiple
 556		 * flow-types?
 557		 */
 558		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 559				chip ? (chip->name ? : "unknown") : "unknown");
 560		return 0;
 561	}
 562
 563	flags &= IRQ_TYPE_SENSE_MASK;
 564
 565	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
 566		if (!irqd_irq_masked(&desc->irq_data))
 567			mask_irq(desc);
 568		if (!irqd_irq_disabled(&desc->irq_data))
 569			unmask = 1;
 570	}
 571
 572	/* caller masked out all except trigger mode flags */
 573	ret = chip->irq_set_type(&desc->irq_data, flags);
 574
 575	switch (ret) {
 576	case IRQ_SET_MASK_OK:
 577		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 578		irqd_set(&desc->irq_data, flags);
 579		/* fall through */
 580	case IRQ_SET_MASK_OK_NOCOPY:
 581		flags = irqd_get_trigger_type(&desc->irq_data);
 582		irq_settings_set_trigger_mask(desc, flags);
 583		irqd_clear(&desc->irq_data, IRQD_LEVEL);
 584		irq_settings_clr_level(desc);
 585		if (flags & IRQ_TYPE_LEVEL_MASK) {
 586			irq_settings_set_level(desc);
 587			irqd_set(&desc->irq_data, IRQD_LEVEL);
 588		}
 589
 590		ret = 0;
 591		break;
 592	default:
 593		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
 594		       flags, irq, chip->irq_set_type);
 595	}
 596	if (unmask)
 597		unmask_irq(desc);
 598	return ret;
 599}
 600
 601/*
 602 * Default primary interrupt handler for threaded interrupts. Is
 603 * assigned as primary handler when request_threaded_irq is called
 604 * with handler == NULL. Useful for oneshot interrupts.
 605 */
 606static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
 607{
 608	return IRQ_WAKE_THREAD;
 609}
 610
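/*
 * Usage sketch (hypothetical): a purely threaded, oneshot request. With
 * handler == NULL the default primary handler above is installed and,
 * with IRQF_ONESHOT, the line stays masked until the thread function
 * returns - the usual pattern for devices behind slow buses like I2C.
 */
#if 0
static irqreturn_t example_thread_fn(int irq, void *dev_id)
{
	/* May sleep: talk to the device over the slow bus here */
	return IRQ_HANDLED;
}

/* request_threaded_irq(irq, NULL, example_thread_fn, IRQF_ONESHOT,
 *		        "example", dev); */
#endif
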
 611/*
 612 * Primary handler for nested threaded interrupts. Should never be
 613 * called.
 614 */
 615static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
 616{
 617	WARN(1, "Primary handler called for nested irq %d\n", irq);
 618	return IRQ_NONE;
 619}
 620
 621static int irq_wait_for_interrupt(struct irqaction *action)
 622{
 623	while (!kthread_should_stop()) {
 624		set_current_state(TASK_INTERRUPTIBLE);
 625
 626		if (test_and_clear_bit(IRQTF_RUNTHREAD,
 627				       &action->thread_flags)) {
 628			__set_current_state(TASK_RUNNING);
 629			return 0;
 630		}
 631		schedule();
 632	}
 633	return -1;
 634}
 635
 636/*
 637 * Oneshot interrupts keep the irq line masked until the threaded
 638 * handler has finished. Unmask if the interrupt has not been disabled
 639 * and is marked MASKED.
 640 */
 641static void irq_finalize_oneshot(struct irq_desc *desc,
 642				 struct irqaction *action, bool force)
 643{
 644	if (!(desc->istate & IRQS_ONESHOT))
 645		return;
 646again:
 647	chip_bus_lock(desc);
 648	raw_spin_lock_irq(&desc->lock);
 649
 650	/*
 651	 * Implausible though it may be, we need to protect against
 652	 * the following scenario:
 653	 *
 654	 * The thread finishes faster than the hard interrupt handler
 655	 * on the other CPU. If we unmask the irq line then the
 656	 * interrupt can come in again, mask the line and bail out due
 657	 * to IRQS_INPROGRESS, and the irq line stays masked forever.
 658	 *
 659	 * This also serializes the state of shared oneshot handlers
 660	 * versus "desc->threads_oneshot |= action->thread_mask;" in
 661	 * irq_wake_thread(). See the comment there which explains the
 662	 * serialization.
 663	 */
 664	if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
 665		raw_spin_unlock_irq(&desc->lock);
 666		chip_bus_sync_unlock(desc);
 667		cpu_relax();
 668		goto again;
 669	}
 670
 671	/*
 672	 * Now check again whether the thread should run. Otherwise
 673	 * we would clear the threads_oneshot bit of this thread which
 674	 * was just set.
 675	 */
 676	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
 677		goto out_unlock;
 678
 679	desc->threads_oneshot &= ~action->thread_mask;
 680
 681	if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
 682	    irqd_irq_masked(&desc->irq_data))
 683		unmask_irq(desc);
 684
 685out_unlock:
 686	raw_spin_unlock_irq(&desc->lock);
 687	chip_bus_sync_unlock(desc);
 688}
 689
 690#ifdef CONFIG_SMP
 691/*
 692 * Check whether we need to change the affinity of the interrupt thread.
 693 */
 694static void
 695irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
 696{
 697	cpumask_var_t mask;
 698
 699	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
 700		return;
 701
 702	/*
 703	 * In case we are out of memory we set IRQTF_AFFINITY again and
 704	 * try again next time
 705	 */
 706	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 707		set_bit(IRQTF_AFFINITY, &action->thread_flags);
 708		return;
 709	}
 710
 711	raw_spin_lock_irq(&desc->lock);
 712	cpumask_copy(mask, desc->irq_data.affinity);
 713	raw_spin_unlock_irq(&desc->lock);
 714
 715	set_cpus_allowed_ptr(current, mask);
 716	free_cpumask_var(mask);
 717}
 718#else
 719static inline void
 720irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 721#endif
 722
 723/*
 724 * Interrupts which are not explicitly requested as threaded
 725 * interrupts rely on the implicit bh/preempt disable of the hard irq
 726 * context. So we need to disable bh here to avoid deadlocks and other
 727 * side effects.
 728 */
 729static irqreturn_t
 730irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 731{
 732	irqreturn_t ret;
 733
 734	local_bh_disable();
 735	ret = action->thread_fn(action->irq, action->dev_id);
 736	irq_finalize_oneshot(desc, action, false);
 737	local_bh_enable();
 738	return ret;
 739}
 740
 741/*
 742 * Interrupts explicitly requested as threaded interrupts want to be
 743 * preemptible - many of them need to sleep and wait for slow buses to
 744 * complete.
 745 */
 746static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 747		struct irqaction *action)
 748{
 749	irqreturn_t ret;
 750
 751	ret = action->thread_fn(action->irq, action->dev_id);
 752	irq_finalize_oneshot(desc, action, false);
 753	return ret;
 754}
 755
 756/*
 757 * Interrupt handler thread
 758 */
 759static int irq_thread(void *data)
 760{
 761	static const struct sched_param param = {
 762		.sched_priority = MAX_USER_RT_PRIO/2,
 763	};
 764	struct irqaction *action = data;
 765	struct irq_desc *desc = irq_to_desc(action->irq);
 766	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 767			struct irqaction *action);
 768	int wake;
 769
 770	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 771					&action->thread_flags))
 772		handler_fn = irq_forced_thread_fn;
 773	else
 774		handler_fn = irq_thread_fn;
 775
 776	sched_setscheduler(current, SCHED_FIFO, &param);
 777	current->irqaction = action;
 778
 779	while (!irq_wait_for_interrupt(action)) {
 780
 781		irq_thread_check_affinity(desc, action);
 782
 783		atomic_inc(&desc->threads_active);
 784
 785		raw_spin_lock_irq(&desc->lock);
 786		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
 787			/*
 788			 * CHECKME: We might need a dedicated
 789			 * IRQ_THREAD_PENDING flag here, which
 790			 * retriggers the thread in check_irq_resend()
 791			 * but AFAICT IRQS_PENDING should be fine as it
 792			 * retriggers the interrupt itself --- tglx
 793			 */
 794			desc->istate |= IRQS_PENDING;
 795			raw_spin_unlock_irq(&desc->lock);
 796		} else {
 797			irqreturn_t action_ret;
 798
 799			raw_spin_unlock_irq(&desc->lock);
 800			action_ret = handler_fn(desc, action);
 801			if (!noirqdebug)
 802				note_interrupt(action->irq, desc, action_ret);
 803		}
 804
 805		wake = atomic_dec_and_test(&desc->threads_active);
 806
 807		if (wake && waitqueue_active(&desc->wait_for_threads))
 808			wake_up(&desc->wait_for_threads);
 809	}
 810
 811	/* Prevent a stale desc->threads_oneshot */
 812	irq_finalize_oneshot(desc, action, true);
 813
 814	/*
 815	 * Clear irqaction. Otherwise exit_irq_thread() would complain
 816	 * about an active irq thread going into nirvana.
 817	 */
 818	current->irqaction = NULL;
 819	return 0;
 820}
 821
 822/*
 823 * Called from do_exit()
 824 */
 825void exit_irq_thread(void)
 826{
 827	struct task_struct *tsk = current;
 828	struct irq_desc *desc;
 829
 830	if (!tsk->irqaction)
 831		return;
 832
 833	printk(KERN_ERR
 834	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 835	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 836
 837	desc = irq_to_desc(tsk->irqaction->irq);
 838
 839	/*
 840	 * Prevent a stale desc->threads_oneshot. Must be called
 841	 * before setting the IRQTF_DIED flag.
 842	 */
 843	irq_finalize_oneshot(desc, tsk->irqaction, true);
 844
 845	/*
 846	 * Set the THREAD DIED flag to prevent further wakeups of the
 847	 * soon to be gone threaded handler.
 848	 */
 849	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
 850}
 851
 852static void irq_setup_forced_threading(struct irqaction *new)
 853{
 854	if (!force_irqthreads)
 855		return;
 856	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
 857		return;
 858
 859	new->flags |= IRQF_ONESHOT;
 860
 861	if (!new->thread_fn) {
 862		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
 863		new->thread_fn = new->handler;
 864		new->handler = irq_default_primary_handler;
 865	}
 866}
 867
 868/*
 869 * Internal function to register an irqaction - typically used to
 870 * allocate special interrupts that are part of the architecture.
 871 */
 872static int
 873__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 874{
 875	struct irqaction *old, **old_ptr;
 876	const char *old_name = NULL;
 877	unsigned long flags, thread_mask = 0;
 878	int ret, nested, shared = 0;
 879	cpumask_var_t mask;
 880
 881	if (!desc)
 882		return -EINVAL;
 883
 884	if (desc->irq_data.chip == &no_irq_chip)
 885		return -ENOSYS;
 886	if (!try_module_get(desc->owner))
 887		return -ENODEV;
 888	/*
 889	 * Some drivers like serial.c use request_irq() heavily,
 890	 * so we have to be careful not to interfere with a
 891	 * running system.
 892	 */
 893	if (new->flags & IRQF_SAMPLE_RANDOM) {
 894		/*
 895		 * This function might sleep, we want to call it first,
 896		 * outside of the atomic block.
 897		 * Yes, this might clear the entropy pool if the wrong
 898		 * driver is attempted to be loaded, without actually
 899		 * installing a new handler. But is this really a problem?
 900		 * Only the sysadmin is able to do this.
 901		 */
 902		rand_initialize_irq(irq);
 903	}
 904
 905	/*
 906	 * Check whether the interrupt nests into another interrupt
 907	 * thread.
 908	 */
 909	nested = irq_settings_is_nested_thread(desc);
 910	if (nested) {
 911		if (!new->thread_fn) {
 912			ret = -EINVAL;
 913			goto out_mput;
 914		}
 915		/*
 916		 * Replace the primary handler which was provided from
 917		 * the driver for non nested interrupt handling by the
 918		 * dummy function which warns when called.
 919		 */
 920		new->handler = irq_nested_primary_handler;
 921	} else {
 922		if (irq_settings_can_thread(desc))
 923			irq_setup_forced_threading(new);
 924	}
 925
 926	/*
 927	 * Create a handler thread when a thread function is supplied
 928	 * and the interrupt does not nest into another interrupt
 929	 * thread.
 930	 */
 931	if (new->thread_fn && !nested) {
 932		struct task_struct *t;
 933
 934		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
 935				   new->name);
 936		if (IS_ERR(t)) {
 937			ret = PTR_ERR(t);
 938			goto out_mput;
 939		}
 940		/*
 941		 * We keep the reference to the task struct even if
 942		 * the thread dies, so that the interrupt code does not
 943		 * reference an already freed task_struct.
 944		 */
 945		get_task_struct(t);
 946		new->thread = t;
 947	}
 948
 949	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
 950		ret = -ENOMEM;
 951		goto out_thread;
 952	}
 953
 954	/*
 955	 * The following block of code has to be executed atomically
 956	 */
 957	raw_spin_lock_irqsave(&desc->lock, flags);
 958	old_ptr = &desc->action;
 959	old = *old_ptr;
 960	if (old) {
 961		/*
 962		 * Can't share interrupts unless both agree to and are
 963		 * the same type (level, edge, polarity). So both flag
 964		 * fields must have IRQF_SHARED set and the bits which
 965		 * set the trigger type must match. Also all must
 966		 * agree on ONESHOT.
 967		 */
 968		if (!((old->flags & new->flags) & IRQF_SHARED) ||
 969		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
 970		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
 971			old_name = old->name;
 972			goto mismatch;
 973		}
 974
 975		/* All handlers must agree on per-cpuness */
 976		if ((old->flags & IRQF_PERCPU) !=
 977		    (new->flags & IRQF_PERCPU))
 978			goto mismatch;
 979
 980		/* add new interrupt at end of irq queue */
 981		do {
 982			thread_mask |= old->thread_mask;
 983			old_ptr = &old->next;
 984			old = *old_ptr;
 985		} while (old);
 986		shared = 1;
 987	}
 988
 989	/*
 990	 * Set up the thread mask for this irqaction. Unlikely to have
 991	 * 32 resp. 64 irqs sharing one line, but who knows.
 992	 */
 993	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
 994		ret = -EBUSY;
 995		goto out_mask;
 996	}
 997	new->thread_mask = 1 << ffz(thread_mask);
 998
 999	if (!shared) {
1000		init_waitqueue_head(&desc->wait_for_threads);
1001
1002		/* Set up the type (level, edge, polarity) if configured: */
1003		if (new->flags & IRQF_TRIGGER_MASK) {
1004			ret = __irq_set_trigger(desc, irq,
1005					new->flags & IRQF_TRIGGER_MASK);
1006
1007			if (ret)
1008				goto out_mask;
1009		}
1010
1011		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
1012				  IRQS_ONESHOT | IRQS_WAITING);
1013		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1014
1015		if (new->flags & IRQF_PERCPU) {
1016			irqd_set(&desc->irq_data, IRQD_PER_CPU);
1017			irq_settings_set_per_cpu(desc);
1018		}
1019
1020		if (new->flags & IRQF_ONESHOT)
1021			desc->istate |= IRQS_ONESHOT;
1022
1023		if (irq_settings_can_autoenable(desc))
1024			irq_startup(desc);
1025		else
1026			/* Undo nested disables: */
1027			desc->depth = 1;
1028
1029		/* Exclude IRQ from balancing if requested */
1030		if (new->flags & IRQF_NOBALANCING) {
1031			irq_settings_set_no_balancing(desc);
1032			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1033		}
1034
1035		/* Set default affinity mask once everything is setup */
1036		setup_affinity(irq, desc, mask);
1037
1038	} else if (new->flags & IRQF_TRIGGER_MASK) {
1039		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1040		unsigned int omsk = irq_settings_get_trigger_mask(desc);
1041
1042		if (nmsk != omsk)
1043			/* hope the handler works with the current trigger mode */
1044			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
1045				   irq, omsk, nmsk);
1046	}
1047
1048	new->irq = irq;
1049	*old_ptr = new;
1050
1051	/* Reset broken irq detection when installing new handler */
1052	desc->irq_count = 0;
1053	desc->irqs_unhandled = 0;
1054
1055	/*
1056	 * Check whether we disabled the irq via the spurious handler
1057	 * before. Reenable it and give it another chance.
1058	 */
1059	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1060		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1061		__enable_irq(desc, irq, false);
1062	}
1063
1064	raw_spin_unlock_irqrestore(&desc->lock, flags);
1065
1066	/*
1067	 * Strictly no need to wake it up, but hung_task complains
1068	 * when no hard interrupt wakes the thread up.
1069	 */
1070	if (new->thread)
1071		wake_up_process(new->thread);
1072
1073	register_irq_proc(irq, desc);
1074	new->dir = NULL;
1075	register_handler_proc(irq, new);
1076	free_cpumask_var(mask);
1077
1078	return 0;
1079
1080mismatch:
1081#ifdef CONFIG_DEBUG_SHIRQ
1082	if (!(new->flags & IRQF_PROBE_SHARED)) {
1083		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
1084		if (old_name)
1085			printk(KERN_ERR "current handler: %s\n", old_name);
1086		dump_stack();
1087	}
1088#endif
1089	ret = -EBUSY;
1090
1091out_mask:
1092	raw_spin_unlock_irqrestore(&desc->lock, flags);
1093	free_cpumask_var(mask);
1094
1095out_thread:
1096	if (new->thread) {
1097		struct task_struct *t = new->thread;
1098
1099		new->thread = NULL;
1100		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
1101			kthread_stop(t);
1102		put_task_struct(t);
1103	}
1104out_mput:
1105	module_put(desc->owner);
1106	return ret;
1107}
1108
1109/**
1110 *	setup_irq - setup an interrupt
1111 *	@irq: Interrupt line to setup
1112 *	@act: irqaction for the interrupt
1113 *
1114 * Used to statically setup interrupts in the early boot process.
1115 */
1116int setup_irq(unsigned int irq, struct irqaction *act)
1117{
1118	int retval;
1119	struct irq_desc *desc = irq_to_desc(irq);
1120
1121	chip_bus_lock(desc);
1122	retval = __setup_irq(irq, desc, act);
1123	chip_bus_sync_unlock(desc);
1124
1125	return retval;
1126}
1127EXPORT_SYMBOL_GPL(setup_irq);
1128
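/*
 * Usage sketch (hypothetical): architecture boot code wiring up a timer
 * interrupt with a static irqaction via setup_irq(), bypassing the
 * request_irq() allocation path. The irq number and names are made up.
 */
#if 0
static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
{
	/* ... tick bookkeeping ... */
	return IRQ_HANDLED;
}

static struct irqaction example_timer_irqaction = {
	.handler = example_timer_interrupt,
	.flags	 = IRQF_TIMER,
	.name	 = "example-timer",
};

/* Early in boot: setup_irq(EXAMPLE_TIMER_IRQ, &example_timer_irqaction); */
#endif
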
1129/*
1130 * Internal function to unregister an irqaction - used to free
1131 * regular and special interrupts that are part of the architecture.
1132 */
1133static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1134{
1135	struct irq_desc *desc = irq_to_desc(irq);
1136	struct irqaction *action, **action_ptr;
1137	unsigned long flags;
1138
1139	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1140
1141	if (!desc)
1142		return NULL;
1143
1144	raw_spin_lock_irqsave(&desc->lock, flags);
1145
1146	/*
1147	 * There can be multiple actions per IRQ descriptor, find the right
1148	 * one based on the dev_id:
1149	 */
1150	action_ptr = &desc->action;
1151	for (;;) {
1152		action = *action_ptr;
1153
1154		if (!action) {
1155			WARN(1, "Trying to free already-free IRQ %d\n", irq);
1156			raw_spin_unlock_irqrestore(&desc->lock, flags);
1157
1158			return NULL;
1159		}
1160
1161		if (action->dev_id == dev_id)
1162			break;
1163		action_ptr = &action->next;
1164	}
1165
1166	/* Found it - now remove it from the list of entries: */
1167	*action_ptr = action->next;
1168
1169	/* Currently used only by UML, might disappear one day: */
1170#ifdef CONFIG_IRQ_RELEASE_METHOD
1171	if (desc->irq_data.chip->release)
1172		desc->irq_data.chip->release(irq, dev_id);
1173#endif
1174
1175	/* If this was the last handler, shut down the IRQ line: */
1176	if (!desc->action)
1177		irq_shutdown(desc);
1178
1179#ifdef CONFIG_SMP
1180	/* make sure affinity_hint is cleaned up */
1181	if (WARN_ON_ONCE(desc->affinity_hint))
1182		desc->affinity_hint = NULL;
1183#endif
1184
1185	raw_spin_unlock_irqrestore(&desc->lock, flags);
1186
1187	unregister_handler_proc(irq, action);
1188
1189	/* Make sure it's not being used on another CPU: */
1190	synchronize_irq(irq);
1191
1192#ifdef CONFIG_DEBUG_SHIRQ
1193	/*
1194	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1195	 * event to happen even now that it's being freed, so let's make
1196	 * sure that is so by doing an extra call to the handler ....
1197	 *
1198	 * ( We do this after actually deregistering it, to make sure that a
1199	 *   'real' IRQ doesn't run in parallel with our fake. )
1200	 */
1201	if (action->flags & IRQF_SHARED) {
1202		local_irq_save(flags);
1203		action->handler(irq, dev_id);
1204		local_irq_restore(flags);
1205	}
1206#endif
1207
1208	if (action->thread) {
1209		if (!test_bit(IRQTF_DIED, &action->thread_flags))
1210			kthread_stop(action->thread);
1211		put_task_struct(action->thread);
1212	}
1213
1214	module_put(desc->owner);
1215	return action;
1216}
1217
1218/**
1219 *	remove_irq - free an interrupt
1220 *	@irq: Interrupt line to free
1221 *	@act: irqaction for the interrupt
1222 *
1223 * Used to remove interrupts statically setup by the early boot process.
1224 */
1225void remove_irq(unsigned int irq, struct irqaction *act)
1226{
1227	__free_irq(irq, act->dev_id);
1228}
1229EXPORT_SYMBOL_GPL(remove_irq);
1230
1231/**
1232 *	free_irq - free an interrupt allocated with request_irq
1233 *	@irq: Interrupt line to free
1234 *	@dev_id: Device identity to free
1235 *
1236 *	Remove an interrupt handler. The handler is removed and if the
1237 *	interrupt line is no longer in use by any driver it is disabled.
1238 *	On a shared IRQ the caller must ensure the interrupt is disabled
1239 *	on the card it drives before calling this function. The function
1240 *	does not return until any executing interrupts for this IRQ
1241 *	have completed.
1242 *
1243 *	This function must not be called from interrupt context.
1244 */
1245void free_irq(unsigned int irq, void *dev_id)
1246{
1247	struct irq_desc *desc = irq_to_desc(irq);
1248
1249	if (!desc)
1250		return;
1251
1252#ifdef CONFIG_SMP
1253	if (WARN_ON(desc->affinity_notify))
1254		desc->affinity_notify = NULL;
1255#endif
1256
1257	chip_bus_lock(desc);
1258	kfree(__free_irq(irq, dev_id));
1259	chip_bus_sync_unlock(desc);
1260}
1261EXPORT_SYMBOL(free_irq);
1262
1263/**
1264 *	request_threaded_irq - allocate an interrupt line
1265 *	@irq: Interrupt line to allocate
1266 *	@handler: Function to be called when the IRQ occurs.
1267 *		  Primary handler for threaded interrupts
1268 *		  If NULL and thread_fn != NULL the default
1269 *		  primary handler is installed
1270 *	@thread_fn: Function called from the irq handler thread
1271 *		    If NULL, no irq thread is created
1272 *	@irqflags: Interrupt type flags
1273 *	@devname: An ascii name for the claiming device
1274 *	@dev_id: A cookie passed back to the handler function
1275 *
1276 *	This call allocates interrupt resources and enables the
1277 *	interrupt line and IRQ handling. From the point this
1278 *	call is made your handler function may be invoked. Since
1279 *	your handler function must clear any interrupt the board
1280 *	raises, you must take care both to initialise your hardware
1281 *	and to set up the interrupt handler in the right order.
1282 *
1283 *	If you want to set up a threaded irq handler for your device
1284 *	then you need to supply @handler and @thread_fn. @handler is
1285 *	still called in hard interrupt context and has to check
1286 *	whether the interrupt originates from the device. If yes, it
1287 *	needs to disable the interrupt on the device and return
1288 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
1289 *	@thread_fn. This split handler design is necessary to support
1290 *	shared interrupts.
1291 *
1292 *	Dev_id must be globally unique. Normally the address of the
1293 *	device data structure is used as the cookie. Since the handler
1294 *	receives this value it makes sense to use it.
1295 *
1296 *	If your interrupt is shared you must pass a non NULL dev_id
1297 *	as this is required when freeing the interrupt.
1298 *
1299 *	Flags:
1300 *
1301 *	IRQF_SHARED		Interrupt is shared
1302 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
1303 *	IRQF_TRIGGER_*		Specify active edge(s) or level
1304 *
1305 */
1306int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1307			 irq_handler_t thread_fn, unsigned long irqflags,
1308			 const char *devname, void *dev_id)
1309{
1310	struct irqaction *action;
1311	struct irq_desc *desc;
1312	int retval;
1313
1314	/*
1315	 * Sanity-check: shared interrupts must pass in a real dev-ID,
1316	 * otherwise we'll have trouble later trying to figure out
1317	 * which interrupt is which (messes up the interrupt freeing
1318	 * logic etc).
1319	 */
1320	if ((irqflags & IRQF_SHARED) && !dev_id)
1321		return -EINVAL;
1322
1323	desc = irq_to_desc(irq);
1324	if (!desc)
1325		return -EINVAL;
1326
1327	if (!irq_settings_can_request(desc))
1328		return -EINVAL;
1329
1330	if (!handler) {
1331		if (!thread_fn)
1332			return -EINVAL;
1333		handler = irq_default_primary_handler;
1334	}
1335
1336	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1337	if (!action)
1338		return -ENOMEM;
1339
1340	action->handler = handler;
1341	action->thread_fn = thread_fn;
1342	action->flags = irqflags;
1343	action->name = devname;
1344	action->dev_id = dev_id;
1345
1346	chip_bus_lock(desc);
1347	retval = __setup_irq(irq, desc, action);
1348	chip_bus_sync_unlock(desc);
1349
1350	if (retval)
1351		kfree(action);
1352
1353#ifdef CONFIG_DEBUG_SHIRQ_FIXME
1354	if (!retval && (irqflags & IRQF_SHARED)) {
1355		/*
1356		 * It's a shared IRQ -- the driver ought to be prepared for it
1357		 * to happen immediately, so let's make sure....
1358		 * We disable the irq to make sure that a 'real' IRQ doesn't
1359		 * run in parallel with our fake.
1360		 */
1361		unsigned long flags;
1362
1363		disable_irq(irq);
1364		local_irq_save(flags);
1365
1366		handler(irq, dev_id);
1367
1368		local_irq_restore(flags);
1369		enable_irq(irq);
1370	}
1371#endif
1372	return retval;
1373}
1374EXPORT_SYMBOL(request_threaded_irq);
1375
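/*
 * Usage sketch of the split handler design described above (all
 * example_* names and the device structure are hypothetical): the hard
 * irq half checks and quiets the device, the threaded half does the
 * slow, possibly sleeping work.
 */
#if 0
static irqreturn_t example_hardirq(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	if (!example_irq_is_ours(ed))	/* shared line, not our device */
		return IRQ_NONE;

	example_mask_device_irq(ed);	/* quiet the device */
	return IRQ_WAKE_THREAD;		/* run example_thread() */
}

static irqreturn_t example_thread(int irq, void *dev_id)
{
	struct example_dev *ed = dev_id;

	example_handle_events(ed);	/* may sleep */
	example_unmask_device_irq(ed);
	return IRQ_HANDLED;
}

/* ret = request_threaded_irq(ed->irq, example_hardirq, example_thread,
 *			      IRQF_SHARED, "example", ed); */
#endif
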
1376/**
1377 *	request_any_context_irq - allocate an interrupt line
1378 *	@irq: Interrupt line to allocate
1379 *	@handler: Function to be called when the IRQ occurs.
1380 *		  Threaded handler for threaded interrupts.
1381 *	@flags: Interrupt type flags
1382 *	@name: An ascii name for the claiming device
1383 *	@dev_id: A cookie passed back to the handler function
1384 *
1385 *	This call allocates interrupt resources and enables the
1386 *	interrupt line and IRQ handling. It selects either a
1387 *	hardirq or threaded handling method depending on the
1388 *	context.
1389 *
1390 *	On failure, it returns a negative value. On success,
1391 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1392 */
1393int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1394			    unsigned long flags, const char *name, void *dev_id)
1395{
1396	struct irq_desc *desc = irq_to_desc(irq);
1397	int ret;
1398
1399	if (!desc)
1400		return -EINVAL;
1401
1402	if (irq_settings_is_nested_thread(desc)) {
1403		ret = request_threaded_irq(irq, NULL, handler,
1404					   flags, name, dev_id);
1405		return !ret ? IRQC_IS_NESTED : ret;
1406	}
1407
1408	ret = request_irq(irq, handler, flags, name, dev_id);
1409	return !ret ? IRQC_IS_HARDIRQ : ret;
1410}
1411EXPORT_SYMBOL_GPL(request_any_context_irq);
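
/*
 * Usage sketch (hypothetical): request_any_context_irq() for a driver
 * whose irq may sit behind a sleeping bus. On success the return value
 * tells which context the handler will run in; example_handler is a
 * made-up irq_handler_t.
 */
#if 0
static int example_request(unsigned int irq, void *dev)
{
	int ret;

	ret = request_any_context_irq(irq, example_handler, 0,
				      "example", dev);
	if (ret < 0)
		return ret;	/* plain error code */

	/* ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success */
	return 0;
}
#endif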