// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new reads/writes from happening and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

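/*
 * The AFFINITY/EFFECTIVE variants print the mask as a hex bitmap via
 * "%*pb" (for example "ff" on an 8-CPU system), while the *_LIST
 * variants print it as a CPU range list via "%*pbl" (for example "0-7").
 */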
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

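/*
 * /proc/irq/<irq>/affinity_hint is read-only: it reports the mask a
 * driver suggested (e.g. via irq_set_affinity_hint()) so that user space
 * tools such as irqbalance can take the hint into account. If no hint
 * was set, an all-zero mask is printed.
 */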
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is assigned to an online CPU already. There is no
	 * point in moving it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not then any change to the affinity is pointless because the
	 * startup code invokes irq_setup_affinity() which will select
	 * an online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif

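/*
 * Write handler shared by /proc/irq/<irq>/smp_affinity (type == 0, hex
 * bitmask) and /proc/irq/<irq>/smp_affinity_list (type == 1, CPU list).
 * Illustrative shell usage (the IRQ number is just an example):
 *
 *	echo 0c  > /proc/irq/24/smp_affinity
 *	echo 2-3 > /proc/irq/24/smp_affinity_list
 *
 * Both of the above restrict IRQ 24 to CPUs 2 and 3.
 */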
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)pde_data(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, pde_data(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open	= irq_affinity_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open	= irq_affinity_list_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

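/*
 * /proc/irq/default_smp_affinity shows and sets irq_default_affinity,
 * the mask the core uses as the starting affinity for interrupts that
 * are set up afterwards (see irq_setup_affinity()); interrupts that are
 * already requested keep their current affinity.
 */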
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, pde_data(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
	.proc_open	= default_affinity_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= default_affinity_write,
};

static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
#endif

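/*
 * /proc/irq/<irq>/spurious reports the counters maintained by the
 * spurious-interrupt detector (see note_interrupt() in spurious.c):
 * how often the IRQ fired, how many of those occurrences were
 * unhandled, and when the last unhandled one occurred.
 */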
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128

static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
		    !strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
	    !name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

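/*
 * Populate /proc/irq/<irq>/ for a descriptor. On an SMP kernel the
 * resulting layout looks roughly like this (entries depend on config):
 *
 *	/proc/irq/<irq>/smp_affinity
 *	/proc/irq/<irq>/smp_affinity_list
 *	/proc/irq/<irq>/affinity_hint
 *	/proc/irq/<irq>/node
 *	/proc/irq/<irq>/effective_affinity[_list]	(if supported)
 *	/proc/irq/<irq>/spurious
 */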
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_ops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
				irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_ops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
				irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
				irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
				irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
				irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

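/*
 * Emit one /proc/interrupts row per interrupt, e.g. (illustrative
 * values; column widths depend on nr_irqs and the number of CPUs):
 *
 *	 25:     12345          0   PCI-MSI   524288-edge   eth0
 *
 * i.e. per-CPU counts, the chip name, the hwirq number with the flow
 * handler name, and the action name(s).
 */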
int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		goto outsparse;

	if (desc->kstat_irqs) {
		for_each_online_cpu(j)
			any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
	}

	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
		goto outsparse;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", desc->kstat_irqs ?
					*per_cpu_ptr(desc->kstat_irqs, j) : 0);

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
#endif