// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new read/writes to happen and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) is valid as long as the read/writes are
 * permitted by procfs.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};

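/*
 * Common show helper for the /proc/irq/<nr>/smp_affinity[_list] and
 * effective_affinity[_list] files: pick the requested cpumask and print
 * it either as a hex bitmask or as a CPU list.
 */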
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
#ifdef CONFIG_GENERIC_PENDING_IRQ
		if (irqd_is_setaffinity_pending(&desc->irq_data))
			mask = desc->pending_mask;
#endif
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}

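/*
 * /proc/irq/<nr>/affinity_hint: the affinity hint provided by the driver
 * (desc->affinity_hint). The hint is copied under desc->lock into a
 * temporary cpumask so it can be printed without holding the lock.
 */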
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is assigned to an online CPU already. There is no
	 * point in moving it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not then any change to the affinity is pointless because the
	 * startup code invokes irq_setup_affinity() which will select
	 * an online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif

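/*
 * Common write handler for /proc/irq/<nr>/smp_affinity and
 * smp_affinity_list. @type selects the input format: non-zero parses a
 * CPU list, zero parses a hex bitmask.
 */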
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)pde_data(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EIO;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, pde_data(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open	= irq_affinity_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open	= irq_affinity_list_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif

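/*
 * /proc/irq/default_smp_affinity: the default affinity mask applied when
 * an interrupt is set up. Writes must leave at least one online CPU in
 * the mask.
 */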
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, pde_data(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
	.proc_open	= default_affinity_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= default_affinity_write,
};

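/* /proc/irq/<nr>/node: the NUMA node associated with the interrupt. */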
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
#endif

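/*
 * /proc/irq/<nr>/spurious: statistics used by the spurious interrupt
 * detector - total count, unhandled count and the timestamp (in
 * milliseconds) of the last unhandled interrupt.
 */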
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128

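/*
 * Check whether an action name is already used by another handler on
 * this interrupt, so that /proc/irq/<nr>/<name> directories stay unique.
 */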
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
				!strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name [MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
					!name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10

void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name [MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", 0644, desc->dir,
			 &irq_affinity_proc_ops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
				irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", 0644, desc->dir,
			 &irq_affinity_list_proc_ops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
				irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
				irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
				irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
				irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name [MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

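/* Remove the /proc/irq/<nr>/<name> directory created by register_handler_proc(). */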
void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW

int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS nr_irqs
#endif

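/*
 * seq_file show callback backing /proc/interrupts: one line per populated
 * interrupt with per-CPU counts, chip name, hwirq number, trigger type and
 * the registered action names. arch_show_interrupts() is called after the
 * last interrupt to append architecture specific counters.
 */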
int show_interrupts(struct seq_file *p, void *v)
{
	static int prec;

	unsigned long flags, any_count = 0;
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		goto outsparse;

	if (desc->kstat_irqs) {
		for_each_online_cpu(j)
			any_count |= data_race(*per_cpu_ptr(desc->kstat_irqs, j));
	}

	if ((!desc->action || irq_desc_is_chained(desc)) && !any_count)
		goto outsparse;

	seq_printf(p, "%*d: ", prec, i);
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", desc->kstat_irqs ?
					*per_cpu_ptr(desc->kstat_irqs, j) : 0);

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, " %8s", desc->irq_data.chip->name);
		else
			seq_printf(p, " %8s", "-");
	} else {
		seq_printf(p, " %8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
#endif