// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);		/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */

/* module parameters */
static unsigned long	filter_offset;
static bool		nommiotrace;
static bool		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

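/*
 * Unlocked read of the enabled flag. Callers that must not race with
 * disable_mmiotrace() take trace_lock and re-check (see locking notes
 * above).
 */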
static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);

	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
	pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx rcx: %016lx rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx rdi: %016lx rbp: %016lx rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

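/*
 * kmmio pre-handler, called from the page fault before the faulting
 * instruction is single-stepped. Decode the instruction at regs->ip and
 * fill in the per-cpu mmiotrace_rw record; a read's value is not known
 * yet, so post() fills it in after the instruction has executed.
 */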
static void pre(struct kmmio_probe *p, struct pt_regs *regs,
		unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
							*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

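/*
 * kmmio post-handler, called once the faulting instruction has been
 * single-stepped. For a read the destination register now holds the MMIO
 * value, so fetch it here, then push the completed record to the trace
 * buffer.
 */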
static void post(struct kmmio_probe *p, unsigned long condition,
		 struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

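/*
 * Record a fresh MMIO mapping: allocate a remap_trace carrying a kmmio
 * probe that covers the whole mapping, log an MMIO_PROBE event, and arm
 * the probe unless nommiotrace is set. The enabled check is redone under
 * trace_lock so this cannot race with disable_mmiotrace().
 */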
static void ioremap_trace_core(resource_size_t offset, unsigned long size,
			       void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

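/*
 * Entry point from the ioremap code. The unlocked is_enabled() test is
 * only a fast-path filter; ioremap_trace_core() re-checks under
 * trace_lock. When filter_offset is set, only the mapping starting at
 * exactly that physical address is traced.
 */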
void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
		       void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if ((filter_offset) && (offset != filter_offset))
		return;
	ioremap_trace_core(offset, size, addr);
}

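/*
 * Tear down tracing of a mapping: look up the remap_trace by virtual
 * address, unregister its probe, and log an MMIO_UNPROBE event. The
 * kfree() must wait out an RCU grace period after unregistering, hence
 * the synchronize_rcu() after dropping the lock.
 */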
static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

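/*
 * Inject a free-form text marker into the trace stream, e.g. to label
 * phases of a test run from other kernel code. Returns 0 when tracing
 * is disabled.
 */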
int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;

	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);

static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

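/*
 * kmmio catches an access by faulting and single-stepping on one CPU;
 * a concurrent access from another CPU to the same page would go
 * unnoticed. To avoid losing events, take down every CPU except the
 * first online one while tracing when hotplug is available, and bring
 * them back afterwards. Without hotplug all we can do is warn.
 */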
#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	cpus_read_lock();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	cpus_read_unlock();

	for_each_cpu(cpu, downed_cpus) {
		err = remove_cpu(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs still online, may miss events.\n");
}

static void leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) || cpumask_empty(downed_cpus))
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = add_cpu(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs are online, may miss events. "
			"Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

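/*
 * Switch tracing on (invoked via the ftrace mmiotrace tracer). The
 * enabled flag is flipped under trace_lock so the recording paths see
 * a consistent state.
 */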
void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

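/*
 * Reverse of enable_mmiotrace(): clear the enabled flag under trace_lock,
 * then purge any mappings that were never iounmapped. Once the flag is
 * clear and clear_trace_list() has run, no kmmio callback can fire again.
 */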
void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}