Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * kernel/lockdep_proc.c
4 *
5 * Runtime locking correctness validator
6 *
7 * Started by Ingo Molnar:
8 *
9 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
11 *
12 * Code for /proc/lockdep and /proc/lockdep_stats:
13 *
14 */
15#include <linux/export.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18#include <linux/kallsyms.h>
19#include <linux/debug_locks.h>
20#include <linux/vmalloc.h>
21#include <linux/sort.h>
22#include <linux/uaccess.h>
23#include <asm/div64.h>
24
25#include "lockdep_internals.h"
26
27static void *l_next(struct seq_file *m, void *v, loff_t *pos)
28{
29 return seq_list_next(v, &all_lock_classes, pos);
30}
31
32static void *l_start(struct seq_file *m, loff_t *pos)
33{
34 return seq_list_start_head(&all_lock_classes, *pos);
35}
36
37static void l_stop(struct seq_file *m, void *v)
38{
39}
40
41static void print_name(struct seq_file *m, struct lock_class *class)
42{
43 char str[KSYM_NAME_LEN];
44 const char *name = class->name;
45
46 if (!name) {
47 name = __get_key_name(class->key, str);
48 seq_printf(m, "%s", name);
49 } else{
50 seq_printf(m, "%s", name);
51 if (class->name_version > 1)
52 seq_printf(m, "#%d", class->name_version);
53 if (class->subclass)
54 seq_printf(m, "/%d", class->subclass);
55 }
56}
57
58static int l_show(struct seq_file *m, void *v)
59{
60 struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
61 struct lock_list *entry;
62 char usage[LOCK_USAGE_CHARS];
63
64 if (v == &all_lock_classes) {
65 seq_printf(m, "all lock classes:\n");
66 return 0;
67 }
68
69 seq_printf(m, "%p", class->key);
70#ifdef CONFIG_DEBUG_LOCKDEP
71 seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
72#endif
73#ifdef CONFIG_PROVE_LOCKING
74 seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
75 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
76#endif
77
78 get_usage_chars(class, usage);
79 seq_printf(m, " %s", usage);
80
81 seq_printf(m, ": ");
82 print_name(m, class);
83 seq_puts(m, "\n");
84
85 list_for_each_entry(entry, &class->locks_after, entry) {
86 if (entry->distance == 1) {
87 seq_printf(m, " -> [%p] ", entry->class->key);
88 print_name(m, entry->class);
89 seq_puts(m, "\n");
90 }
91 }
92 seq_puts(m, "\n");
93
94 return 0;
95}
96
97static const struct seq_operations lockdep_ops = {
98 .start = l_start,
99 .next = l_next,
100 .stop = l_stop,
101 .show = l_show,
102};
103
104#ifdef CONFIG_PROVE_LOCKING
105static void *lc_start(struct seq_file *m, loff_t *pos)
106{
107 if (*pos < 0)
108 return NULL;
109
110 if (*pos == 0)
111 return SEQ_START_TOKEN;
112
113 return lock_chains + (*pos - 1);
114}
115
116static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
117{
118 *pos = lockdep_next_lockchain(*pos - 1) + 1;
119 return lc_start(m, pos);
120}
121
122static void lc_stop(struct seq_file *m, void *v)
123{
124}
125
126static int lc_show(struct seq_file *m, void *v)
127{
128 struct lock_chain *chain = v;
129 struct lock_class *class;
130 int i;
131
132 if (v == SEQ_START_TOKEN) {
133 if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
134 seq_printf(m, "(buggered) ");
135 seq_printf(m, "all lock chains:\n");
136 return 0;
137 }
138
139 seq_printf(m, "irq_context: %d\n", chain->irq_context);
140
141 for (i = 0; i < chain->depth; i++) {
142 class = lock_chain_get_class(chain, i);
143 if (!class->key)
144 continue;
145
146 seq_printf(m, "[%p] ", class->key);
147 print_name(m, class);
148 seq_puts(m, "\n");
149 }
150 seq_puts(m, "\n");
151
152 return 0;
153}
154
155static const struct seq_operations lockdep_chains_ops = {
156 .start = lc_start,
157 .next = lc_next,
158 .stop = lc_stop,
159 .show = lc_show,
160};
161#endif /* CONFIG_PROVE_LOCKING */
162
/*
 * Dump the CONFIG_DEBUG_LOCKDEP-only event counters (chain lookups,
 * graph-walk checks, and (red)irq-state transition events) into the
 * lockdep_stats output. Compiles to a no-op otherwise.
 */
static void lockdep_stats_debug_show(struct seq_file *m)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
			   hi2 = debug_atomic_read(hardirqs_off_events),
			   hr1 = debug_atomic_read(redundant_hardirqs_on),
			   hr2 = debug_atomic_read(redundant_hardirqs_off),
			   si1 = debug_atomic_read(softirqs_on_events),
			   si2 = debug_atomic_read(softirqs_off_events),
			   sr1 = debug_atomic_read(redundant_softirqs_on),
			   sr2 = debug_atomic_read(redundant_softirqs_off);

	seq_printf(m, " chain lookup misses: %11llu\n",
		debug_atomic_read(chain_lookup_misses));
	seq_printf(m, " chain lookup hits: %11llu\n",
		debug_atomic_read(chain_lookup_hits));
	seq_printf(m, " cyclic checks: %11llu\n",
		debug_atomic_read(nr_cyclic_checks));
	seq_printf(m, " redundant checks: %11llu\n",
		debug_atomic_read(nr_redundant_checks));
	seq_printf(m, " redundant links: %11llu\n",
		debug_atomic_read(nr_redundant));
	seq_printf(m, " find-mask forwards checks: %11llu\n",
		debug_atomic_read(nr_find_usage_forwards_checks));
	seq_printf(m, " find-mask backwards checks: %11llu\n",
		debug_atomic_read(nr_find_usage_backwards_checks));

	seq_printf(m, " hardirq on events: %11llu\n", hi1);
	seq_printf(m, " hardirq off events: %11llu\n", hi2);
	seq_printf(m, " redundant hardirq ons: %11llu\n", hr1);
	seq_printf(m, " redundant hardirq offs: %11llu\n", hr2);
	seq_printf(m, " softirq on events: %11llu\n", si1);
	seq_printf(m, " softirq off events: %11llu\n", si2);
	seq_printf(m, " redundant softirq ons: %11llu\n", sr1);
	seq_printf(m, " redundant softirq offs: %11llu\n", sr2);
#endif
}
200
201static int lockdep_stats_show(struct seq_file *m, void *v)
202{
203 unsigned long nr_unused = 0, nr_uncategorized = 0,
204 nr_irq_safe = 0, nr_irq_unsafe = 0,
205 nr_softirq_safe = 0, nr_softirq_unsafe = 0,
206 nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
207 nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
208 nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
209 nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
210 sum_forward_deps = 0;
211
212#ifdef CONFIG_PROVE_LOCKING
213 struct lock_class *class;
214
215 list_for_each_entry(class, &all_lock_classes, lock_entry) {
216
217 if (class->usage_mask == 0)
218 nr_unused++;
219 if (class->usage_mask == LOCKF_USED)
220 nr_uncategorized++;
221 if (class->usage_mask & LOCKF_USED_IN_IRQ)
222 nr_irq_safe++;
223 if (class->usage_mask & LOCKF_ENABLED_IRQ)
224 nr_irq_unsafe++;
225 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
226 nr_softirq_safe++;
227 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
228 nr_softirq_unsafe++;
229 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
230 nr_hardirq_safe++;
231 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
232 nr_hardirq_unsafe++;
233 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
234 nr_irq_read_safe++;
235 if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
236 nr_irq_read_unsafe++;
237 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
238 nr_softirq_read_safe++;
239 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
240 nr_softirq_read_unsafe++;
241 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
242 nr_hardirq_read_safe++;
243 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
244 nr_hardirq_read_unsafe++;
245
246 sum_forward_deps += lockdep_count_forward_deps(class);
247 }
248#ifdef CONFIG_DEBUG_LOCKDEP
249 DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
250#endif
251
252#endif
253 seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
254 nr_lock_classes, MAX_LOCKDEP_KEYS);
255 seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
256 nr_list_entries, MAX_LOCKDEP_ENTRIES);
257 seq_printf(m, " indirect dependencies: %11lu\n",
258 sum_forward_deps);
259
260 /*
261 * Total number of dependencies:
262 *
263 * All irq-safe locks may nest inside irq-unsafe locks,
264 * plus all the other known dependencies:
265 */
266 seq_printf(m, " all direct dependencies: %11lu\n",
267 nr_irq_unsafe * nr_irq_safe +
268 nr_hardirq_unsafe * nr_hardirq_safe +
269 nr_list_entries);
270
271#ifdef CONFIG_PROVE_LOCKING
272 seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
273 lock_chain_count(), MAX_LOCKDEP_CHAINS);
274 seq_printf(m, " dependency chain hlocks: %11d [max: %lu]\n",
275 nr_chain_hlocks, MAX_LOCKDEP_CHAIN_HLOCKS);
276#endif
277
278#ifdef CONFIG_TRACE_IRQFLAGS
279 seq_printf(m, " in-hardirq chains: %11u\n",
280 nr_hardirq_chains);
281 seq_printf(m, " in-softirq chains: %11u\n",
282 nr_softirq_chains);
283#endif
284 seq_printf(m, " in-process chains: %11u\n",
285 nr_process_chains);
286 seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
287 nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
288#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
289 seq_printf(m, " number of stack traces: %llu\n",
290 lockdep_stack_trace_count());
291 seq_printf(m, " number of stack hash chains: %llu\n",
292 lockdep_stack_hash_count());
293#endif
294 seq_printf(m, " combined max dependencies: %11u\n",
295 (nr_hardirq_chains + 1) *
296 (nr_softirq_chains + 1) *
297 (nr_process_chains + 1)
298 );
299 seq_printf(m, " hardirq-safe locks: %11lu\n",
300 nr_hardirq_safe);
301 seq_printf(m, " hardirq-unsafe locks: %11lu\n",
302 nr_hardirq_unsafe);
303 seq_printf(m, " softirq-safe locks: %11lu\n",
304 nr_softirq_safe);
305 seq_printf(m, " softirq-unsafe locks: %11lu\n",
306 nr_softirq_unsafe);
307 seq_printf(m, " irq-safe locks: %11lu\n",
308 nr_irq_safe);
309 seq_printf(m, " irq-unsafe locks: %11lu\n",
310 nr_irq_unsafe);
311
312 seq_printf(m, " hardirq-read-safe locks: %11lu\n",
313 nr_hardirq_read_safe);
314 seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
315 nr_hardirq_read_unsafe);
316 seq_printf(m, " softirq-read-safe locks: %11lu\n",
317 nr_softirq_read_safe);
318 seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
319 nr_softirq_read_unsafe);
320 seq_printf(m, " irq-read-safe locks: %11lu\n",
321 nr_irq_read_safe);
322 seq_printf(m, " irq-read-unsafe locks: %11lu\n",
323 nr_irq_read_unsafe);
324
325 seq_printf(m, " uncategorized locks: %11lu\n",
326 nr_uncategorized);
327 seq_printf(m, " unused locks: %11lu\n",
328 nr_unused);
329 seq_printf(m, " max locking depth: %11u\n",
330 max_lockdep_depth);
331#ifdef CONFIG_PROVE_LOCKING
332 seq_printf(m, " max bfs queue depth: %11u\n",
333 max_bfs_queue_depth);
334#endif
335 lockdep_stats_debug_show(m);
336 seq_printf(m, " debug_locks: %11u\n",
337 debug_locks);
338
339 return 0;
340}
341
342#ifdef CONFIG_LOCK_STAT
343
344struct lock_stat_data {
345 struct lock_class *class;
346 struct lock_class_stats stats;
347};
348
349struct lock_stat_seq {
350 struct lock_stat_data *iter_end;
351 struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
352};
353
354/*
355 * sort on absolute number of contentions
356 */
357static int lock_stat_cmp(const void *l, const void *r)
358{
359 const struct lock_stat_data *dl = l, *dr = r;
360 unsigned long nl, nr;
361
362 nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
363 nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
364
365 return nr - nl;
366}
367
/*
 * Emit 'offset' spaces, then 'length' copies of character 'c', then a
 * newline — used to draw the separator rules in the lock_stat output.
 */
static void seq_line(struct seq_file *m, char c, int offset, int length)
{
	int i;

	for (i = 0; i < offset; i++)
		seq_putc(m, ' ');
	for (i = 0; i < length; i++)
		seq_putc(m, c);
	seq_putc(m, '\n');
}
378
379static void snprint_time(char *buf, size_t bufsiz, s64 nr)
380{
381 s64 div;
382 s32 rem;
383
384 nr += 5; /* for display rounding */
385 div = div_s64_rem(nr, 1000, &rem);
386 snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
387}
388
389static void seq_time(struct seq_file *m, s64 time)
390{
391 char num[15];
392
393 snprint_time(num, sizeof(num), time);
394 seq_printf(m, " %14s", num);
395}
396
397static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
398{
399 seq_printf(m, "%14lu", lt->nr);
400 seq_time(m, lt->min);
401 seq_time(m, lt->max);
402 seq_time(m, lt->total);
403 seq_time(m, lt->nr ? div_s64(lt->total, lt->nr) : 0);
404}
405
406static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
407{
408 const struct lockdep_subclass_key *ckey;
409 struct lock_class_stats *stats;
410 struct lock_class *class;
411 const char *cname;
412 int i, namelen;
413 char name[39];
414
415 class = data->class;
416 stats = &data->stats;
417
418 namelen = 38;
419 if (class->name_version > 1)
420 namelen -= 2; /* XXX truncates versions > 9 */
421 if (class->subclass)
422 namelen -= 2;
423
424 rcu_read_lock_sched();
425 cname = rcu_dereference_sched(class->name);
426 ckey = rcu_dereference_sched(class->key);
427
428 if (!cname && !ckey) {
429 rcu_read_unlock_sched();
430 return;
431
432 } else if (!cname) {
433 char str[KSYM_NAME_LEN];
434 const char *key_name;
435
436 key_name = __get_key_name(ckey, str);
437 snprintf(name, namelen, "%s", key_name);
438 } else {
439 snprintf(name, namelen, "%s", cname);
440 }
441 rcu_read_unlock_sched();
442
443 namelen = strlen(name);
444 if (class->name_version > 1) {
445 snprintf(name+namelen, 3, "#%d", class->name_version);
446 namelen += 2;
447 }
448 if (class->subclass) {
449 snprintf(name+namelen, 3, "/%d", class->subclass);
450 namelen += 2;
451 }
452
453 if (stats->write_holdtime.nr) {
454 if (stats->read_holdtime.nr)
455 seq_printf(m, "%38s-W:", name);
456 else
457 seq_printf(m, "%40s:", name);
458
459 seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
460 seq_lock_time(m, &stats->write_waittime);
461 seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
462 seq_lock_time(m, &stats->write_holdtime);
463 seq_puts(m, "\n");
464 }
465
466 if (stats->read_holdtime.nr) {
467 seq_printf(m, "%38s-R:", name);
468 seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
469 seq_lock_time(m, &stats->read_waittime);
470 seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
471 seq_lock_time(m, &stats->read_holdtime);
472 seq_puts(m, "\n");
473 }
474
475 if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
476 return;
477
478 if (stats->read_holdtime.nr)
479 namelen += 2;
480
481 for (i = 0; i < LOCKSTAT_POINTS; i++) {
482 char ip[32];
483
484 if (class->contention_point[i] == 0)
485 break;
486
487 if (!i)
488 seq_line(m, '-', 40-namelen, namelen);
489
490 snprintf(ip, sizeof(ip), "[<%p>]",
491 (void *)class->contention_point[i]);
492 seq_printf(m, "%40s %14lu %29s %pS\n",
493 name, stats->contention_point[i],
494 ip, (void *)class->contention_point[i]);
495 }
496 for (i = 0; i < LOCKSTAT_POINTS; i++) {
497 char ip[32];
498
499 if (class->contending_point[i] == 0)
500 break;
501
502 if (!i)
503 seq_line(m, '-', 40-namelen, namelen);
504
505 snprintf(ip, sizeof(ip), "[<%p>]",
506 (void *)class->contending_point[i]);
507 seq_printf(m, "%40s %14lu %29s %pS\n",
508 name, stats->contending_point[i],
509 ip, (void *)class->contending_point[i]);
510 }
511 if (i) {
512 seq_puts(m, "\n");
513 seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
514 seq_puts(m, "\n");
515 }
516}
517
518static void seq_header(struct seq_file *m)
519{
520 seq_puts(m, "lock_stat version 0.4\n");
521
522 if (unlikely(!debug_locks))
523 seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
524
525 seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
526 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
527 "%14s %14s\n",
528 "class name",
529 "con-bounces",
530 "contentions",
531 "waittime-min",
532 "waittime-max",
533 "waittime-total",
534 "waittime-avg",
535 "acq-bounces",
536 "acquisitions",
537 "holdtime-min",
538 "holdtime-max",
539 "holdtime-total",
540 "holdtime-avg");
541 seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
542 seq_printf(m, "\n");
543}
544
545static void *ls_start(struct seq_file *m, loff_t *pos)
546{
547 struct lock_stat_seq *data = m->private;
548 struct lock_stat_data *iter;
549
550 if (*pos == 0)
551 return SEQ_START_TOKEN;
552
553 iter = data->stats + (*pos - 1);
554 if (iter >= data->iter_end)
555 iter = NULL;
556
557 return iter;
558}
559
560static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
561{
562 (*pos)++;
563 return ls_start(m, pos);
564}
565
566static void ls_stop(struct seq_file *m, void *v)
567{
568}
569
570static int ls_show(struct seq_file *m, void *v)
571{
572 if (v == SEQ_START_TOKEN)
573 seq_header(m);
574 else
575 seq_stats(m, v);
576
577 return 0;
578}
579
580static const struct seq_operations lockstat_ops = {
581 .start = ls_start,
582 .next = ls_next,
583 .stop = ls_stop,
584 .show = ls_show,
585};
586
587static int lock_stat_open(struct inode *inode, struct file *file)
588{
589 int res;
590 struct lock_class *class;
591 struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
592
593 if (!data)
594 return -ENOMEM;
595
596 res = seq_open(file, &lockstat_ops);
597 if (!res) {
598 struct lock_stat_data *iter = data->stats;
599 struct seq_file *m = file->private_data;
600
601 list_for_each_entry(class, &all_lock_classes, lock_entry) {
602 iter->class = class;
603 iter->stats = lock_stats(class);
604 iter++;
605 }
606 data->iter_end = iter;
607
608 sort(data->stats, data->iter_end - data->stats,
609 sizeof(struct lock_stat_data),
610 lock_stat_cmp, NULL);
611
612 m->private = data;
613 } else
614 vfree(data);
615
616 return res;
617}
618
619static ssize_t lock_stat_write(struct file *file, const char __user *buf,
620 size_t count, loff_t *ppos)
621{
622 struct lock_class *class;
623 char c;
624
625 if (count) {
626 if (get_user(c, buf))
627 return -EFAULT;
628
629 if (c != '0')
630 return count;
631
632 list_for_each_entry(class, &all_lock_classes, lock_entry)
633 clear_lock_stats(class);
634 }
635 return count;
636}
637
638static int lock_stat_release(struct inode *inode, struct file *file)
639{
640 struct seq_file *seq = file->private_data;
641
642 vfree(seq->private);
643 return seq_release(inode, file);
644}
645
646static const struct file_operations proc_lock_stat_operations = {
647 .open = lock_stat_open,
648 .write = lock_stat_write,
649 .read = seq_read,
650 .llseek = seq_lseek,
651 .release = lock_stat_release,
652};
653#endif /* CONFIG_LOCK_STAT */
654
655static int __init lockdep_proc_init(void)
656{
657 proc_create_seq("lockdep", S_IRUSR, NULL, &lockdep_ops);
658#ifdef CONFIG_PROVE_LOCKING
659 proc_create_seq("lockdep_chains", S_IRUSR, NULL, &lockdep_chains_ops);
660#endif
661 proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
662#ifdef CONFIG_LOCK_STAT
663 proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
664 &proc_lock_stat_operations);
665#endif
666
667 return 0;
668}
669
670__initcall(lockdep_proc_init);
671
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * kernel/lockdep_proc.c
4 *
5 * Runtime locking correctness validator
6 *
7 * Started by Ingo Molnar:
8 *
9 * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
10 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
11 *
12 * Code for /proc/lockdep and /proc/lockdep_stats:
13 *
14 */
15#include <linux/export.h>
16#include <linux/proc_fs.h>
17#include <linux/seq_file.h>
18#include <linux/kallsyms.h>
19#include <linux/debug_locks.h>
20#include <linux/vmalloc.h>
21#include <linux/sort.h>
22#include <linux/uaccess.h>
23#include <asm/div64.h>
24
25#include "lockdep_internals.h"
26
27static void *l_next(struct seq_file *m, void *v, loff_t *pos)
28{
29 return seq_list_next(v, &all_lock_classes, pos);
30}
31
32static void *l_start(struct seq_file *m, loff_t *pos)
33{
34 return seq_list_start_head(&all_lock_classes, *pos);
35}
36
37static void l_stop(struct seq_file *m, void *v)
38{
39}
40
41static void print_name(struct seq_file *m, struct lock_class *class)
42{
43 char str[KSYM_NAME_LEN];
44 const char *name = class->name;
45
46 if (!name) {
47 name = __get_key_name(class->key, str);
48 seq_printf(m, "%s", name);
49 } else{
50 seq_printf(m, "%s", name);
51 if (class->name_version > 1)
52 seq_printf(m, "#%d", class->name_version);
53 if (class->subclass)
54 seq_printf(m, "/%d", class->subclass);
55 }
56}
57
58static int l_show(struct seq_file *m, void *v)
59{
60 struct lock_class *class = list_entry(v, struct lock_class, lock_entry);
61 struct lock_list *entry;
62 char usage[LOCK_USAGE_CHARS];
63
64 if (v == &all_lock_classes) {
65 seq_printf(m, "all lock classes:\n");
66 return 0;
67 }
68
69 seq_printf(m, "%p", class->key);
70#ifdef CONFIG_DEBUG_LOCKDEP
71 seq_printf(m, " OPS:%8ld", debug_class_ops_read(class));
72#endif
73 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
74 seq_printf(m, " FD:%5ld", lockdep_count_forward_deps(class));
75 seq_printf(m, " BD:%5ld", lockdep_count_backward_deps(class));
76
77 get_usage_chars(class, usage);
78 seq_printf(m, " %s", usage);
79 }
80
81 seq_printf(m, ": ");
82 print_name(m, class);
83 seq_puts(m, "\n");
84
85 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
86 list_for_each_entry(entry, &class->locks_after, entry) {
87 if (entry->distance == 1) {
88 seq_printf(m, " -> [%p] ", entry->class->key);
89 print_name(m, entry->class);
90 seq_puts(m, "\n");
91 }
92 }
93 seq_puts(m, "\n");
94 }
95
96 return 0;
97}
98
99static const struct seq_operations lockdep_ops = {
100 .start = l_start,
101 .next = l_next,
102 .stop = l_stop,
103 .show = l_show,
104};
105
106#ifdef CONFIG_PROVE_LOCKING
107static void *lc_start(struct seq_file *m, loff_t *pos)
108{
109 if (*pos < 0)
110 return NULL;
111
112 if (*pos == 0)
113 return SEQ_START_TOKEN;
114
115 return lock_chains + (*pos - 1);
116}
117
118static void *lc_next(struct seq_file *m, void *v, loff_t *pos)
119{
120 *pos = lockdep_next_lockchain(*pos - 1) + 1;
121 return lc_start(m, pos);
122}
123
124static void lc_stop(struct seq_file *m, void *v)
125{
126}
127
128static int lc_show(struct seq_file *m, void *v)
129{
130 struct lock_chain *chain = v;
131 struct lock_class *class;
132 int i;
133 static const char * const irq_strs[] = {
134 [0] = "0",
135 [LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq",
136 [LOCK_CHAIN_SOFTIRQ_CONTEXT] = "softirq",
137 [LOCK_CHAIN_SOFTIRQ_CONTEXT|
138 LOCK_CHAIN_HARDIRQ_CONTEXT] = "hardirq|softirq",
139 };
140
141 if (v == SEQ_START_TOKEN) {
142 if (!nr_free_chain_hlocks)
143 seq_printf(m, "(buggered) ");
144 seq_printf(m, "all lock chains:\n");
145 return 0;
146 }
147
148 seq_printf(m, "irq_context: %s\n", irq_strs[chain->irq_context]);
149
150 for (i = 0; i < chain->depth; i++) {
151 class = lock_chain_get_class(chain, i);
152 if (!class->key)
153 continue;
154
155 seq_printf(m, "[%p] ", class->key);
156 print_name(m, class);
157 seq_puts(m, "\n");
158 }
159 seq_puts(m, "\n");
160
161 return 0;
162}
163
164static const struct seq_operations lockdep_chains_ops = {
165 .start = lc_start,
166 .next = lc_next,
167 .stop = lc_stop,
168 .show = lc_show,
169};
170#endif /* CONFIG_PROVE_LOCKING */
171
172static void lockdep_stats_debug_show(struct seq_file *m)
173{
174#ifdef CONFIG_DEBUG_LOCKDEP
175 unsigned long long hi1 = debug_atomic_read(hardirqs_on_events),
176 hi2 = debug_atomic_read(hardirqs_off_events),
177 hr1 = debug_atomic_read(redundant_hardirqs_on),
178 hr2 = debug_atomic_read(redundant_hardirqs_off),
179 si1 = debug_atomic_read(softirqs_on_events),
180 si2 = debug_atomic_read(softirqs_off_events),
181 sr1 = debug_atomic_read(redundant_softirqs_on),
182 sr2 = debug_atomic_read(redundant_softirqs_off);
183
184 seq_printf(m, " chain lookup misses: %11llu\n",
185 debug_atomic_read(chain_lookup_misses));
186 seq_printf(m, " chain lookup hits: %11llu\n",
187 debug_atomic_read(chain_lookup_hits));
188 seq_printf(m, " cyclic checks: %11llu\n",
189 debug_atomic_read(nr_cyclic_checks));
190 seq_printf(m, " redundant checks: %11llu\n",
191 debug_atomic_read(nr_redundant_checks));
192 seq_printf(m, " redundant links: %11llu\n",
193 debug_atomic_read(nr_redundant));
194 seq_printf(m, " find-mask forwards checks: %11llu\n",
195 debug_atomic_read(nr_find_usage_forwards_checks));
196 seq_printf(m, " find-mask backwards checks: %11llu\n",
197 debug_atomic_read(nr_find_usage_backwards_checks));
198
199 seq_printf(m, " hardirq on events: %11llu\n", hi1);
200 seq_printf(m, " hardirq off events: %11llu\n", hi2);
201 seq_printf(m, " redundant hardirq ons: %11llu\n", hr1);
202 seq_printf(m, " redundant hardirq offs: %11llu\n", hr2);
203 seq_printf(m, " softirq on events: %11llu\n", si1);
204 seq_printf(m, " softirq off events: %11llu\n", si2);
205 seq_printf(m, " redundant softirq ons: %11llu\n", sr1);
206 seq_printf(m, " redundant softirq offs: %11llu\n", sr2);
207#endif
208}
209
210static int lockdep_stats_show(struct seq_file *m, void *v)
211{
212 unsigned long nr_unused = 0, nr_uncategorized = 0,
213 nr_irq_safe = 0, nr_irq_unsafe = 0,
214 nr_softirq_safe = 0, nr_softirq_unsafe = 0,
215 nr_hardirq_safe = 0, nr_hardirq_unsafe = 0,
216 nr_irq_read_safe = 0, nr_irq_read_unsafe = 0,
217 nr_softirq_read_safe = 0, nr_softirq_read_unsafe = 0,
218 nr_hardirq_read_safe = 0, nr_hardirq_read_unsafe = 0,
219 sum_forward_deps = 0;
220
221#ifdef CONFIG_PROVE_LOCKING
222 struct lock_class *class;
223
224 list_for_each_entry(class, &all_lock_classes, lock_entry) {
225
226 if (class->usage_mask == 0)
227 nr_unused++;
228 if (class->usage_mask == LOCKF_USED)
229 nr_uncategorized++;
230 if (class->usage_mask & LOCKF_USED_IN_IRQ)
231 nr_irq_safe++;
232 if (class->usage_mask & LOCKF_ENABLED_IRQ)
233 nr_irq_unsafe++;
234 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ)
235 nr_softirq_safe++;
236 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ)
237 nr_softirq_unsafe++;
238 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ)
239 nr_hardirq_safe++;
240 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ)
241 nr_hardirq_unsafe++;
242 if (class->usage_mask & LOCKF_USED_IN_IRQ_READ)
243 nr_irq_read_safe++;
244 if (class->usage_mask & LOCKF_ENABLED_IRQ_READ)
245 nr_irq_read_unsafe++;
246 if (class->usage_mask & LOCKF_USED_IN_SOFTIRQ_READ)
247 nr_softirq_read_safe++;
248 if (class->usage_mask & LOCKF_ENABLED_SOFTIRQ_READ)
249 nr_softirq_read_unsafe++;
250 if (class->usage_mask & LOCKF_USED_IN_HARDIRQ_READ)
251 nr_hardirq_read_safe++;
252 if (class->usage_mask & LOCKF_ENABLED_HARDIRQ_READ)
253 nr_hardirq_read_unsafe++;
254
255 sum_forward_deps += lockdep_count_forward_deps(class);
256 }
257#ifdef CONFIG_DEBUG_LOCKDEP
258 DEBUG_LOCKS_WARN_ON(debug_atomic_read(nr_unused_locks) != nr_unused);
259#endif
260
261#endif
262 seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
263 nr_lock_classes, MAX_LOCKDEP_KEYS);
264 seq_printf(m, " direct dependencies: %11lu [max: %lu]\n",
265 nr_list_entries, MAX_LOCKDEP_ENTRIES);
266 seq_printf(m, " indirect dependencies: %11lu\n",
267 sum_forward_deps);
268
269 /*
270 * Total number of dependencies:
271 *
272 * All irq-safe locks may nest inside irq-unsafe locks,
273 * plus all the other known dependencies:
274 */
275 seq_printf(m, " all direct dependencies: %11lu\n",
276 nr_irq_unsafe * nr_irq_safe +
277 nr_hardirq_unsafe * nr_hardirq_safe +
278 nr_list_entries);
279
280#ifdef CONFIG_PROVE_LOCKING
281 seq_printf(m, " dependency chains: %11lu [max: %lu]\n",
282 lock_chain_count(), MAX_LOCKDEP_CHAINS);
283 seq_printf(m, " dependency chain hlocks used: %11lu [max: %lu]\n",
284 MAX_LOCKDEP_CHAIN_HLOCKS -
285 (nr_free_chain_hlocks + nr_lost_chain_hlocks),
286 MAX_LOCKDEP_CHAIN_HLOCKS);
287 seq_printf(m, " dependency chain hlocks lost: %11u\n",
288 nr_lost_chain_hlocks);
289#endif
290
291#ifdef CONFIG_TRACE_IRQFLAGS
292 seq_printf(m, " in-hardirq chains: %11u\n",
293 nr_hardirq_chains);
294 seq_printf(m, " in-softirq chains: %11u\n",
295 nr_softirq_chains);
296#endif
297 seq_printf(m, " in-process chains: %11u\n",
298 nr_process_chains);
299 seq_printf(m, " stack-trace entries: %11lu [max: %lu]\n",
300 nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
301#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
302 seq_printf(m, " number of stack traces: %11llu\n",
303 lockdep_stack_trace_count());
304 seq_printf(m, " number of stack hash chains: %11llu\n",
305 lockdep_stack_hash_count());
306#endif
307 seq_printf(m, " combined max dependencies: %11u\n",
308 (nr_hardirq_chains + 1) *
309 (nr_softirq_chains + 1) *
310 (nr_process_chains + 1)
311 );
312 seq_printf(m, " hardirq-safe locks: %11lu\n",
313 nr_hardirq_safe);
314 seq_printf(m, " hardirq-unsafe locks: %11lu\n",
315 nr_hardirq_unsafe);
316 seq_printf(m, " softirq-safe locks: %11lu\n",
317 nr_softirq_safe);
318 seq_printf(m, " softirq-unsafe locks: %11lu\n",
319 nr_softirq_unsafe);
320 seq_printf(m, " irq-safe locks: %11lu\n",
321 nr_irq_safe);
322 seq_printf(m, " irq-unsafe locks: %11lu\n",
323 nr_irq_unsafe);
324
325 seq_printf(m, " hardirq-read-safe locks: %11lu\n",
326 nr_hardirq_read_safe);
327 seq_printf(m, " hardirq-read-unsafe locks: %11lu\n",
328 nr_hardirq_read_unsafe);
329 seq_printf(m, " softirq-read-safe locks: %11lu\n",
330 nr_softirq_read_safe);
331 seq_printf(m, " softirq-read-unsafe locks: %11lu\n",
332 nr_softirq_read_unsafe);
333 seq_printf(m, " irq-read-safe locks: %11lu\n",
334 nr_irq_read_safe);
335 seq_printf(m, " irq-read-unsafe locks: %11lu\n",
336 nr_irq_read_unsafe);
337
338 seq_printf(m, " uncategorized locks: %11lu\n",
339 nr_uncategorized);
340 seq_printf(m, " unused locks: %11lu\n",
341 nr_unused);
342 seq_printf(m, " max locking depth: %11u\n",
343 max_lockdep_depth);
344#ifdef CONFIG_PROVE_LOCKING
345 seq_printf(m, " max bfs queue depth: %11u\n",
346 max_bfs_queue_depth);
347#endif
348 lockdep_stats_debug_show(m);
349 seq_printf(m, " debug_locks: %11u\n",
350 debug_locks);
351
352 /*
353 * Zapped classes and lockdep data buffers reuse statistics.
354 */
355 seq_puts(m, "\n");
356 seq_printf(m, " zapped classes: %11lu\n",
357 nr_zapped_classes);
358#ifdef CONFIG_PROVE_LOCKING
359 seq_printf(m, " zapped lock chains: %11lu\n",
360 nr_zapped_lock_chains);
361 seq_printf(m, " large chain blocks: %11u\n",
362 nr_large_chain_blocks);
363#endif
364 return 0;
365}
366
367#ifdef CONFIG_LOCK_STAT
368
369struct lock_stat_data {
370 struct lock_class *class;
371 struct lock_class_stats stats;
372};
373
374struct lock_stat_seq {
375 struct lock_stat_data *iter_end;
376 struct lock_stat_data stats[MAX_LOCKDEP_KEYS];
377};
378
379/*
380 * sort on absolute number of contentions
381 */
382static int lock_stat_cmp(const void *l, const void *r)
383{
384 const struct lock_stat_data *dl = l, *dr = r;
385 unsigned long nl, nr;
386
387 nl = dl->stats.read_waittime.nr + dl->stats.write_waittime.nr;
388 nr = dr->stats.read_waittime.nr + dr->stats.write_waittime.nr;
389
390 return nr - nl;
391}
392
393static void seq_line(struct seq_file *m, char c, int offset, int length)
394{
395 int i;
396
397 for (i = 0; i < offset; i++)
398 seq_puts(m, " ");
399 for (i = 0; i < length; i++)
400 seq_printf(m, "%c", c);
401 seq_puts(m, "\n");
402}
403
404static void snprint_time(char *buf, size_t bufsiz, s64 nr)
405{
406 s64 div;
407 s32 rem;
408
409 nr += 5; /* for display rounding */
410 div = div_s64_rem(nr, 1000, &rem);
411 snprintf(buf, bufsiz, "%lld.%02d", (long long)div, (int)rem/10);
412}
413
414static void seq_time(struct seq_file *m, s64 time)
415{
416 char num[15];
417
418 snprint_time(num, sizeof(num), time);
419 seq_printf(m, " %14s", num);
420}
421
422static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
423{
424 seq_printf(m, "%14lu", lt->nr);
425 seq_time(m, lt->min);
426 seq_time(m, lt->max);
427 seq_time(m, lt->total);
428 seq_time(m, lt->nr ? div64_u64(lt->total, lt->nr) : 0);
429}
430
/*
 * Emit all statistics for one lock class: an optional write-side row,
 * an optional read-side row, and — when the class saw any contention —
 * the per-callsite contention point tables.
 */
static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
{
	const struct lockdep_subclass_key *ckey;
	struct lock_class_stats *stats;
	struct lock_class *class;
	const char *cname;
	int i, namelen;
	char name[39];

	class = data->class;
	stats = &data->stats;

	/* Reserve room in name[] for the "#<ver>" / "/<sub>" suffixes. */
	namelen = 38;
	if (class->name_version > 1)
		namelen -= 2; /* XXX truncates versions > 9 */
	if (class->subclass)
		namelen -= 2;

	/*
	 * Sample name and key under rcu_read_lock_sched(): the class may
	 * be zapped concurrently, in which case both have been cleared.
	 */
	rcu_read_lock_sched();
	cname = rcu_dereference_sched(class->name);
	ckey = rcu_dereference_sched(class->key);

	if (!cname && !ckey) {
		/* class being freed — nothing meaningful to print */
		rcu_read_unlock_sched();
		return;

	} else if (!cname) {
		char str[KSYM_NAME_LEN];
		const char *key_name;

		/* unnamed class: fall back to the symbol name of its key */
		key_name = __get_key_name(ckey, str);
		snprintf(name, namelen, "%s", key_name);
	} else {
		snprintf(name, namelen, "%s", cname);
	}
	rcu_read_unlock_sched();

	/* Append the suffixes the truncation above made room for. */
	namelen = strlen(name);
	if (class->name_version > 1) {
		snprintf(name+namelen, 3, "#%d", class->name_version);
		namelen += 2;
	}
	if (class->subclass) {
		snprintf(name+namelen, 3, "/%d", class->subclass);
		namelen += 2;
	}

	if (stats->write_holdtime.nr) {
		/* "-W:" only when a read row follows; else pad to 40 cols */
		if (stats->read_holdtime.nr)
			seq_printf(m, "%38s-W:", name);
		else
			seq_printf(m, "%40s:", name);

		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_write]);
		seq_lock_time(m, &stats->write_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_write]);
		seq_lock_time(m, &stats->write_holdtime);
		seq_puts(m, "\n");
	}

	if (stats->read_holdtime.nr) {
		seq_printf(m, "%38s-R:", name);
		seq_printf(m, "%14lu ", stats->bounces[bounce_contended_read]);
		seq_lock_time(m, &stats->read_waittime);
		seq_printf(m, " %14lu ", stats->bounces[bounce_acquired_read]);
		seq_lock_time(m, &stats->read_holdtime);
		seq_puts(m, "\n");
	}

	/* never contended: skip the callsite tables entirely */
	if (stats->read_waittime.nr + stats->write_waittime.nr == 0)
		return;

	/* account for the "-W"/"-R" suffix when underlining the name */
	if (stats->read_holdtime.nr)
		namelen += 2;

	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		char ip[32];

		if (class->contention_point[i] == 0)
			break;

		/* underline the class name before the first row only */
		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contention_point[i]);
		seq_printf(m, "%40s %14lu %29s %pS\n",
			   name, stats->contention_point[i],
			   ip, (void *)class->contention_point[i]);
	}
	for (i = 0; i < LOCKSTAT_POINTS; i++) {
		char ip[32];

		if (class->contending_point[i] == 0)
			break;

		if (!i)
			seq_line(m, '-', 40-namelen, namelen);

		snprintf(ip, sizeof(ip), "[<%p>]",
				(void *)class->contending_point[i]);
		seq_printf(m, "%40s %14lu %29s %pS\n",
			   name, stats->contending_point[i],
			   ip, (void *)class->contending_point[i]);
	}
	/* i != 0 means at least one contending point was printed */
	if (i) {
		seq_puts(m, "\n");
		seq_line(m, '.', 0, 40 + 1 + 12 * (14 + 1));
		seq_puts(m, "\n");
	}
}
542
543static void seq_header(struct seq_file *m)
544{
545 seq_puts(m, "lock_stat version 0.4\n");
546
547 if (unlikely(!debug_locks))
548 seq_printf(m, "*WARNING* lock debugging disabled!! - possibly due to a lockdep warning\n");
549
550 seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
551 seq_printf(m, "%40s %14s %14s %14s %14s %14s %14s %14s %14s %14s %14s "
552 "%14s %14s\n",
553 "class name",
554 "con-bounces",
555 "contentions",
556 "waittime-min",
557 "waittime-max",
558 "waittime-total",
559 "waittime-avg",
560 "acq-bounces",
561 "acquisitions",
562 "holdtime-min",
563 "holdtime-max",
564 "holdtime-total",
565 "holdtime-avg");
566 seq_line(m, '-', 0, 40 + 1 + 12 * (14 + 1));
567 seq_printf(m, "\n");
568}
569
570static void *ls_start(struct seq_file *m, loff_t *pos)
571{
572 struct lock_stat_seq *data = m->private;
573 struct lock_stat_data *iter;
574
575 if (*pos == 0)
576 return SEQ_START_TOKEN;
577
578 iter = data->stats + (*pos - 1);
579 if (iter >= data->iter_end)
580 iter = NULL;
581
582 return iter;
583}
584
585static void *ls_next(struct seq_file *m, void *v, loff_t *pos)
586{
587 (*pos)++;
588 return ls_start(m, pos);
589}
590
/*
 * seq_file ->stop: nothing to release per iteration; the snapshot in
 * m->private lives until lock_stat_release().
 */
static void ls_stop(struct seq_file *m, void *v)
{
}
594
595static int ls_show(struct seq_file *m, void *v)
596{
597 if (v == SEQ_START_TOKEN)
598 seq_header(m);
599 else
600 seq_stats(m, v);
601
602 return 0;
603}
604
/* seq_file iterator over the sorted lock_stat snapshot. */
static const struct seq_operations lockstat_ops = {
	.start	= ls_start,
	.next	= ls_next,
	.stop	= ls_stop,
	.show	= ls_show,
};
611
612static int lock_stat_open(struct inode *inode, struct file *file)
613{
614 int res;
615 struct lock_class *class;
616 struct lock_stat_seq *data = vmalloc(sizeof(struct lock_stat_seq));
617
618 if (!data)
619 return -ENOMEM;
620
621 res = seq_open(file, &lockstat_ops);
622 if (!res) {
623 struct lock_stat_data *iter = data->stats;
624 struct seq_file *m = file->private_data;
625
626 list_for_each_entry(class, &all_lock_classes, lock_entry) {
627 iter->class = class;
628 iter->stats = lock_stats(class);
629 iter++;
630 }
631 data->iter_end = iter;
632
633 sort(data->stats, data->iter_end - data->stats,
634 sizeof(struct lock_stat_data),
635 lock_stat_cmp, NULL);
636
637 m->private = data;
638 } else
639 vfree(data);
640
641 return res;
642}
643
644static ssize_t lock_stat_write(struct file *file, const char __user *buf,
645 size_t count, loff_t *ppos)
646{
647 struct lock_class *class;
648 char c;
649
650 if (count) {
651 if (get_user(c, buf))
652 return -EFAULT;
653
654 if (c != '0')
655 return count;
656
657 list_for_each_entry(class, &all_lock_classes, lock_entry)
658 clear_lock_stats(class);
659 }
660 return count;
661}
662
663static int lock_stat_release(struct inode *inode, struct file *file)
664{
665 struct seq_file *seq = file->private_data;
666
667 vfree(seq->private);
668 return seq_release(inode, file);
669}
670
/* /proc/lock_stat: seq_file reads, plus a write hook to reset the stats. */
static const struct proc_ops lock_stat_proc_ops = {
	.proc_open	= lock_stat_open,
	.proc_write	= lock_stat_write,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= lock_stat_release,
};
678#endif /* CONFIG_LOCK_STAT */
679
/*
 * Create the lockdep procfs entries.  All files are root-readable only;
 * /proc/lock_stat is additionally root-writable so the stats can be reset.
 */
static int __init lockdep_proc_init(void)
{
	proc_create_seq("lockdep", S_IRUSR, NULL, &lockdep_ops);
#ifdef CONFIG_PROVE_LOCKING
	proc_create_seq("lockdep_chains", S_IRUSR, NULL, &lockdep_chains_ops);
#endif
	proc_create_single("lockdep_stats", S_IRUSR, NULL, lockdep_stats_show);
#ifdef CONFIG_LOCK_STAT
	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL, &lock_stat_proc_ops);
#endif

	return 0;
}

/* register at the default initcall level, after procfs is up */
__initcall(lockdep_proc_init);
695