// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains the /proc/irq/ handling code.
 */

#include <linux/irq.h>
#include <linux/gfp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/mutex.h>

#include "internals.h"

/*
 * Access rules:
 *
 * procfs protects read/write of /proc/irq/N/ files against a
 * concurrent free of the interrupt descriptor. remove_proc_entry()
 * immediately prevents new reads/writes from happening and waits for
 * already running read/write functions to complete.
 *
 * We remove the proc entries first and then delete the interrupt
 * descriptor from the radix tree and free it. So it is guaranteed
 * that irq_to_desc(N) stays valid for as long as procfs permits the
 * reads/writes.
 *
 * The read from /proc/interrupts is a different problem because there
 * is no protection. So the lookup and the access to irqdesc
 * information must be protected by sparse_irq_lock.
 */
static struct proc_dir_entry *root_irq_dir;

#ifdef CONFIG_SMP

enum {
	AFFINITY,
	AFFINITY_LIST,
	EFFECTIVE,
	EFFECTIVE_LIST,
};
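
/*
 * Show one of the four affinity views selected by @type: the masks as
 * hex bitmaps ("%*pb") or as CPU range lists ("%*pbl"). Illustrative
 * reads on an 8-CPU system with the IRQ affine to CPUs 0-3 (IRQ
 * number 30 is just an example):
 *
 *	# cat /proc/irq/30/smp_affinity
 *	0f
 *	# cat /proc/irq/30/smp_affinity_list
 *	0-3
 */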
static int show_irq_affinity(int type, struct seq_file *m)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	const struct cpumask *mask;

	switch (type) {
	case AFFINITY:
	case AFFINITY_LIST:
		mask = desc->irq_common_data.affinity;
		if (irq_move_pending(&desc->irq_data))
			mask = irq_desc_get_pending_mask(desc);
		break;
	case EFFECTIVE:
	case EFFECTIVE_LIST:
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		mask = irq_data_get_effective_affinity_mask(&desc->irq_data);
		break;
#endif
	default:
		return -EINVAL;
	}

	switch (type) {
	case AFFINITY_LIST:
	case EFFECTIVE_LIST:
		seq_printf(m, "%*pbl\n", cpumask_pr_args(mask));
		break;
	case AFFINITY:
	case EFFECTIVE:
		seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
		break;
	}
	return 0;
}
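
/*
 * Show the driver-supplied affinity hint. The hint is snapshotted
 * under the descriptor lock; when no hint was ever set, the
 * zero-initialized mask is shown, i.e. an all-zero bitmap.
 */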
static int irq_affinity_hint_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long)m->private);
	unsigned long flags;
	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->affinity_hint)
		cpumask_copy(mask, desc->affinity_hint);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	seq_printf(m, "%*pb\n", cpumask_pr_args(mask));
	free_cpumask_var(mask);

	return 0;
}

int no_irq_affinity;
static int irq_affinity_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY, m);
}

static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(AFFINITY_LIST, m);
}

#ifndef CONFIG_AUTO_IRQ_AFFINITY
static inline int irq_select_affinity_usr(unsigned int irq)
{
	/*
	 * If the interrupt is started up already then this fails. The
	 * interrupt is already assigned to an online CPU. There is no
	 * point in moving it around randomly. Tell user space that the
	 * selected mask is bogus.
	 *
	 * If not, then any change to the affinity is pointless because
	 * the startup code invokes irq_setup_affinity(), which will
	 * select an online CPU anyway.
	 */
	return -EINVAL;
}
#else
/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
static inline int irq_select_affinity_usr(unsigned int irq)
{
	return irq_select_affinity(irq);
}
#endif
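
/*
 * Common write handler for smp_affinity (@type == 0, hex mask) and
 * smp_affinity_list (@type == 1, CPU range list). Illustrative and
 * equivalent writes restricting example IRQ 30 to CPUs 1-2:
 *
 *	# echo 06 > /proc/irq/30/smp_affinity
 *	# echo 1-2 > /proc/irq/30/smp_affinity_list
 *
 * A mask without any online CPU is only accepted when the
 * architecture's auto selector can pick a sane default instead.
 */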
static ssize_t write_irq_affinity(int type, struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	unsigned int irq = (int)(long)pde_data(file_inode(file));
	cpumask_var_t new_value;
	int err;

	if (!irq_can_set_affinity_usr(irq) || no_irq_affinity)
		return -EPERM;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	if (type)
		err = cpumask_parselist_user(buffer, count, new_value);
	else
		err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto free_cpumask;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		/*
		 * Special case for empty set - allow the architecture code
		 * to set default SMP affinity.
		 */
		err = irq_select_affinity_usr(irq) ? -EINVAL : count;
	} else {
		err = irq_set_affinity(irq, new_value);
		if (!err)
			err = count;
	}

free_cpumask:
	free_cpumask_var(new_value);
	return err;
}

static ssize_t irq_affinity_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(0, file, buffer, count, pos);
}

static ssize_t irq_affinity_list_proc_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *pos)
{
	return write_irq_affinity(1, file, buffer, count, pos);
}

static int irq_affinity_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_proc_show, pde_data(inode));
}

static int irq_affinity_list_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, irq_affinity_list_proc_show, pde_data(inode));
}

static const struct proc_ops irq_affinity_proc_ops = {
	.proc_open = irq_affinity_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = irq_affinity_proc_write,
};

static const struct proc_ops irq_affinity_list_proc_ops = {
	.proc_open = irq_affinity_list_proc_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = irq_affinity_list_proc_write,
};

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
static int irq_effective_aff_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE, m);
}

static int irq_effective_aff_list_proc_show(struct seq_file *m, void *v)
{
	return show_irq_affinity(EFFECTIVE_LIST, m);
}
#endif
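
/*
 * /proc/irq/default_smp_affinity: the mask applied to interrupts
 * which are set up after it is written. Illustrative write limiting
 * newly requested IRQs to the first four CPUs:
 *
 *	# echo 0f > /proc/irq/default_smp_affinity
 *
 * Interrupts which are already set up keep their current affinity.
 */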
static int default_affinity_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%*pb\n", cpumask_pr_args(irq_default_affinity));
	return 0;
}

static ssize_t default_affinity_write(struct file *file,
		const char __user *buffer, size_t count, loff_t *ppos)
{
	cpumask_var_t new_value;
	int err;

	if (!zalloc_cpumask_var(&new_value, GFP_KERNEL))
		return -ENOMEM;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		goto out;

	/*
	 * Do not allow disabling IRQs completely - it's too easy a
	 * way to make the system unusable accidentally :-) At least
	 * one online CPU still has to be targeted.
	 */
	if (!cpumask_intersects(new_value, cpu_online_mask)) {
		err = -EINVAL;
		goto out;
	}

	cpumask_copy(irq_default_affinity, new_value);
	err = count;

out:
	free_cpumask_var(new_value);
	return err;
}

static int default_affinity_open(struct inode *inode, struct file *file)
{
	return single_open(file, default_affinity_show, pde_data(inode));
}

static const struct proc_ops default_affinity_proc_ops = {
	.proc_open = default_affinity_open,
	.proc_read = seq_read,
	.proc_lseek = seq_lseek,
	.proc_release = single_release,
	.proc_write = default_affinity_write,
};
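
/*
 * /proc/irq/<irq>/node: the NUMA node associated with the interrupt
 * descriptor, or -1 (NUMA_NO_NODE) when no node was assigned.
 */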
static int irq_node_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "%d\n", irq_desc_get_node(desc));
	return 0;
}
#endif
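
/*
 * /proc/irq/<irq>/spurious: counters kept by the spurious interrupt
 * detector. Illustrative output:
 *
 *	count 4242
 *	unhandled 0
 *	last_unhandled 0 ms
 */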
static int irq_spurious_proc_show(struct seq_file *m, void *v)
{
	struct irq_desc *desc = irq_to_desc((long) m->private);

	seq_printf(m, "count %u\n" "unhandled %u\n" "last_unhandled %u ms\n",
		   desc->irq_count, desc->irqs_unhandled,
		   jiffies_to_msecs(desc->last_unhandled));
	return 0;
}

#define MAX_NAMELEN 128
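
/*
 * Check that no other action on this descriptor already uses the same
 * name, so that each /proc/irq/<irq>/<name> handler directory stays
 * unambiguous.
 */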
static int name_unique(unsigned int irq, struct irqaction *new_action)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;
	int ret = 1;

	raw_spin_lock_irqsave(&desc->lock, flags);
	for_each_action_of_desc(desc, action) {
		if ((action != new_action) && action->name &&
		    !strcmp(new_action->name, action->name)) {
			ret = 0;
			break;
		}
	}
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

void register_handler_proc(unsigned int irq, struct irqaction *action)
{
	char name[MAX_NAMELEN];
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc->dir || action->dir || !action->name ||
	    !name_unique(irq, action))
		return;

	snprintf(name, MAX_NAMELEN, "%s", action->name);

	/* create /proc/irq/1234/handler/ */
	action->dir = proc_mkdir(name, desc->dir);
}

#undef MAX_NAMELEN

#define MAX_NAMELEN 10
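
/*
 * Create /proc/irq/<irq>/ and populate it: smp_affinity,
 * affinity_hint, smp_affinity_list and node on SMP kernels,
 * effective_affinity{,_list} when the architecture maintains the
 * effective mask, and spurious everywhere.
 */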
void register_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	static DEFINE_MUTEX(register_lock);
	void __maybe_unused *irqp = (void *)(unsigned long) irq;
	char name[MAX_NAMELEN];

	if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
		return;

	/*
	 * irq directories are registered only when a handler is
	 * added, not when the descriptor is created, so multiple
	 * tasks might try to register at the same time.
	 */
	mutex_lock(&register_lock);

	if (desc->dir)
		goto out_unlock;

	sprintf(name, "%d", irq);

	/* create /proc/irq/1234 */
	desc->dir = proc_mkdir(name, root_irq_dir);
	if (!desc->dir)
		goto out_unlock;

#ifdef CONFIG_SMP
	umode_t umode = S_IRUGO;

	if (irq_can_set_affinity_usr(desc->irq_data.irq))
		umode |= S_IWUSR;

	/* create /proc/irq/<irq>/smp_affinity */
	proc_create_data("smp_affinity", umode, desc->dir,
			 &irq_affinity_proc_ops, irqp);

	/* create /proc/irq/<irq>/affinity_hint */
	proc_create_single_data("affinity_hint", 0444, desc->dir,
				irq_affinity_hint_proc_show, irqp);

	/* create /proc/irq/<irq>/smp_affinity_list */
	proc_create_data("smp_affinity_list", umode, desc->dir,
			 &irq_affinity_list_proc_ops, irqp);

	proc_create_single_data("node", 0444, desc->dir, irq_node_proc_show,
				irqp);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	proc_create_single_data("effective_affinity", 0444, desc->dir,
				irq_effective_aff_proc_show, irqp);
	proc_create_single_data("effective_affinity_list", 0444, desc->dir,
				irq_effective_aff_list_proc_show, irqp);
# endif
#endif
	proc_create_single_data("spurious", 0444, desc->dir,
				irq_spurious_proc_show, (void *)(long)irq);

out_unlock:
	mutex_unlock(&register_lock);
}

void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
{
	char name[MAX_NAMELEN];

	if (!root_irq_dir || !desc->dir)
		return;
#ifdef CONFIG_SMP
	remove_proc_entry("smp_affinity", desc->dir);
	remove_proc_entry("affinity_hint", desc->dir);
	remove_proc_entry("smp_affinity_list", desc->dir);
	remove_proc_entry("node", desc->dir);
# ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	remove_proc_entry("effective_affinity", desc->dir);
	remove_proc_entry("effective_affinity_list", desc->dir);
# endif
#endif
	remove_proc_entry("spurious", desc->dir);

	sprintf(name, "%u", irq);
	remove_proc_entry(name, root_irq_dir);
}

#undef MAX_NAMELEN

void unregister_handler_proc(unsigned int irq, struct irqaction *action)
{
	proc_remove(action->dir);
}

static void register_default_affinity_proc(void)
{
#ifdef CONFIG_SMP
	proc_create("irq/default_smp_affinity", 0644, NULL,
		    &default_affinity_proc_ops);
#endif
}

void init_irq_proc(void)
{
	unsigned int irq;
	struct irq_desc *desc;

	/* create /proc/irq */
	root_irq_dir = proc_mkdir("irq", NULL);
	if (!root_irq_dir)
		return;

	register_default_affinity_proc();

	/*
	 * Create entries for all existing IRQs.
	 */
	for_each_irq_desc(irq, desc)
		register_irq_proc(irq, desc);
}

#ifdef CONFIG_GENERIC_IRQ_SHOW
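
/*
 * Weak stub; architectures can override this to append their own
 * rows (for example the x86 NMI and LOC counters) after the per-IRQ
 * lines of /proc/interrupts.
 */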
int __weak arch_show_interrupts(struct seq_file *p, int prec)
{
	return 0;
}

#ifndef ACTUAL_NR_IRQS
# define ACTUAL_NR_IRQS irq_get_nr_irqs()
#endif
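
/*
 * seq_file ->show() callback for /proc/interrupts. Emits one line per
 * active interrupt: per-CPU counts, chip name, hwirq number, trigger
 * type (with CONFIG_GENERIC_IRQ_SHOW_LEVEL) and the action name(s).
 * Illustrative line (exact columns vary by architecture and config):
 *
 *	          CPU0       CPU1
 *	 30:    120450          0   IO-APIC   30-edge   eth0
 */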
int show_interrupts(struct seq_file *p, void *v)
{
	const unsigned int nr_irqs = irq_get_nr_irqs();
	static int prec;

	int i = *(loff_t *) v, j;
	struct irqaction *action;
	struct irq_desc *desc;
	unsigned long flags;

	if (i > ACTUAL_NR_IRQS)
		return 0;

	if (i == ACTUAL_NR_IRQS)
		return arch_show_interrupts(p, prec);

	/* print header and calculate the width of the first column */
	if (i == 0) {
		for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
			j *= 10;

		seq_printf(p, "%*s", prec + 8, "");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%-8d", j);
		seq_putc(p, '\n');
	}

	rcu_read_lock();
	desc = irq_to_desc(i);
	if (!desc || irq_settings_is_hidden(desc))
		goto outsparse;

	if (!desc->action || irq_desc_is_chained(desc) || !desc->kstat_irqs)
		goto outsparse;

	seq_printf(p, "%*d:", prec, i);
	for_each_online_cpu(j) {
		unsigned int cnt = desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, j) : 0;

		seq_put_decimal_ull_width(p, " ", cnt, 10);
	}
	seq_putc(p, ' ');

	raw_spin_lock_irqsave(&desc->lock, flags);
	if (desc->irq_data.chip) {
		if (desc->irq_data.chip->irq_print_chip)
			desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
		else if (desc->irq_data.chip->name)
			seq_printf(p, "%8s", desc->irq_data.chip->name);
		else
			seq_printf(p, "%8s", "-");
	} else {
		seq_printf(p, "%8s", "None");
	}
	if (desc->irq_data.domain)
		seq_printf(p, " %*lu", prec, desc->irq_data.hwirq);
	else
		seq_printf(p, " %*s", prec, "");
#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
#endif
	if (desc->name)
		seq_printf(p, "-%-8s", desc->name);

	action = desc->action;
	if (action) {
		seq_printf(p, " %s", action->name);
		while ((action = action->next) != NULL)
			seq_printf(p, ", %s", action->name);
	}

	seq_putc(p, '\n');
	raw_spin_unlock_irqrestore(&desc->lock, flags);
outsparse:
	rcu_read_unlock();
	return 0;
}
#endif