v3.1

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#define DEBUG 1

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/version.h>
#include <linux/kallsyms.h>
#include <asm/pgtable.h>
#include <linux/mmiotrace.h>
#include <asm/e820.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);		/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */

/* module parameters */
static unsigned long	filter_offset;
static int		nommiotrace;
static int		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	print_symbol(KERN_EMERG "faulting IP is at %s\n", regs->ip);
	print_symbol(KERN_EMERG "last faulting IP was at %s\n", my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

static void post(struct kmmio_probe *p, unsigned long condition,
							struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

static void ioremap_trace_core(resource_size_t offset, unsigned long size,
							void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if ((filter_offset) && (offset != filter_offset))
		return;
	ioremap_trace_core(offset, size, addr);
}

static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;
	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);

static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (downed_cpus == NULL &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	get_online_cpus();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	put_online_cpus();

	for_each_cpu(cpu, downed_cpus) {
		err = cpu_down(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warning("multiple CPUs still online, may miss events.\n");
}

/* __ref because leave_uniprocessor calls cpu_up which is __cpuinit,
   but this whole function is ifdefed CONFIG_HOTPLUG_CPU */
static void __ref leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (downed_cpus == NULL || cpumask_weight(downed_cpus) == 0)
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = cpu_up(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warning("multiple CPUs are online, may miss events. "
			   "Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
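
The mmiotrace_ioremap() and mmiotrace_iounmap() entry points above are not called by drivers directly; in the kernel proper the hooks sit in the x86 ioremap()/iounmap() implementation (arch/x86/mm/ioremap.c). A minimal sketch of that calling pattern follows; the traced_ioremap()/traced_iounmap() wrapper names are purely illustrative and not part of the file above.

/*
 * Sketch only: where the mmiotrace hooks sit relative to a mapping
 * call. traced_ioremap()/traced_iounmap() are hypothetical names.
 */
#include <linux/io.h>
#include <linux/mmiotrace.h>

static void __iomem *traced_ioremap(resource_size_t phys, unsigned long size)
{
	void __iomem *addr = ioremap(phys, size);

	/* Cheap unlocked check; ioremap_trace_core() rechecks under trace_lock. */
	if (addr)
		mmiotrace_ioremap(phys, size, addr);
	return addr;
}

static void traced_iounmap(volatile void __iomem *addr)
{
	/* Must run before the mapping disappears; may sleep. */
	mmiotrace_iounmap(addr);
	iounmap(addr);
}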
v5.9

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) IBM Corporation, 2005
 *               Jeff Muizelaar, 2006, 2007
 *               Pekka Paalanen, 2008 <pq@iki.fi>
 *
 * Derived from the read-mod example from relay-examples by Tom Zanussi.
 */

#define pr_fmt(fmt) "mmiotrace: " fmt

#define DEBUG 1

#include <linux/moduleparam.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/mmiotrace.h>
#include <linux/pgtable.h>
#include <asm/e820/api.h> /* for ISA_START_ADDRESS */
#include <linux/atomic.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include "pf_in.h"

struct trap_reason {
	unsigned long addr;
	unsigned long ip;
	enum reason_type type;
	int active_traces;
};

struct remap_trace {
	struct list_head list;
	struct kmmio_probe probe;
	resource_size_t phys;
	unsigned long id;
};

/* Accessed per-cpu. */
static DEFINE_PER_CPU(struct trap_reason, pf_reason);
static DEFINE_PER_CPU(struct mmiotrace_rw, cpu_trace);

static DEFINE_MUTEX(mmiotrace_mutex);
static DEFINE_SPINLOCK(trace_lock);
static atomic_t mmiotrace_enabled;
static LIST_HEAD(trace_list);		/* struct remap_trace */

/*
 * Locking in this file:
 * - mmiotrace_mutex enforces enable/disable_mmiotrace() critical sections.
 * - mmiotrace_enabled may be modified only when holding mmiotrace_mutex
 *   and trace_lock.
 * - Routines depending on is_enabled() must take trace_lock.
 * - trace_list users must hold trace_lock.
 * - is_enabled() guarantees that mmio_trace_{rw,mapping} are allowed.
 * - pre/post callbacks assume the effect of is_enabled() being true.
 */

/* module parameters */
static unsigned long	filter_offset;
static bool		nommiotrace;
static bool		trace_pc;

module_param(filter_offset, ulong, 0);
module_param(nommiotrace, bool, 0);
module_param(trace_pc, bool, 0);

MODULE_PARM_DESC(filter_offset, "Start address of traced mappings.");
MODULE_PARM_DESC(nommiotrace, "Disable actual MMIO tracing.");
MODULE_PARM_DESC(trace_pc, "Record address of faulting instructions.");

static bool is_enabled(void)
{
	return atomic_read(&mmiotrace_enabled);
}

static void print_pte(unsigned long address)
{
	unsigned int level;
	pte_t *pte = lookup_address(address, &level);

	if (!pte) {
		pr_err("Error in %s: no pte for page 0x%08lx\n",
		       __func__, address);
		return;
	}

	if (level == PG_LEVEL_2M) {
		pr_emerg("4MB pages are not currently supported: 0x%08lx\n",
			 address);
		BUG();
	}
	pr_info("pte for 0x%lx: 0x%llx 0x%llx\n",
		address,
		(unsigned long long)pte_val(*pte),
		(unsigned long long)pte_val(*pte) & _PAGE_PRESENT);
}

/*
 * For some reason the pre/post pairs have been called in an
 * unmatched order. Report and die.
 */
static void die_kmmio_nesting_error(struct pt_regs *regs, unsigned long addr)
{
	const struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	pr_emerg("unexpected fault for address: 0x%08lx, last fault for address: 0x%08lx\n",
		 addr, my_reason->addr);
	print_pte(addr);
	pr_emerg("faulting IP is at %pS\n", (void *)regs->ip);
	pr_emerg("last faulting IP was at %pS\n", (void *)my_reason->ip);
#ifdef __i386__
	pr_emerg("eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
		 regs->ax, regs->bx, regs->cx, regs->dx);
	pr_emerg("esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#else
	pr_emerg("rax: %016lx   rcx: %016lx   rdx: %016lx\n",
		 regs->ax, regs->cx, regs->dx);
	pr_emerg("rsi: %016lx   rdi: %016lx   rbp: %016lx   rsp: %016lx\n",
		 regs->si, regs->di, regs->bp, regs->sp);
#endif
	put_cpu_var(pf_reason);
	BUG();
}

static void pre(struct kmmio_probe *p, struct pt_regs *regs,
						unsigned long addr)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);
	const unsigned long instptr = instruction_pointer(regs);
	const enum reason_type type = get_ins_type(instptr);
	struct remap_trace *trace = p->private;

	/* it doesn't make sense to have more than one active trace per cpu */
	if (my_reason->active_traces)
		die_kmmio_nesting_error(regs, addr);
	else
		my_reason->active_traces++;

	my_reason->type = type;
	my_reason->addr = addr;
	my_reason->ip = instptr;

	my_trace->phys = addr - trace->probe.addr + trace->phys;
	my_trace->map_id = trace->id;

	/*
	 * Only record the program counter when requested.
	 * It may taint clean-room reverse engineering.
	 */
	if (trace_pc)
		my_trace->pc = instptr;
	else
		my_trace->pc = 0;

	/*
	 * XXX: the timestamp recorded will be *after* the tracing has been
	 * done, not at the time we hit the instruction. SMP implications
	 * on event ordering?
	 */

	switch (type) {
	case REG_READ:
		my_trace->opcode = MMIO_READ;
		my_trace->width = get_ins_mem_width(instptr);
		break;
	case REG_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_reg_val(instptr, regs);
		break;
	case IMM_WRITE:
		my_trace->opcode = MMIO_WRITE;
		my_trace->width = get_ins_mem_width(instptr);
		my_trace->value = get_ins_imm_val(instptr);
		break;
	default:
		{
			unsigned char *ip = (unsigned char *)instptr;
			my_trace->opcode = MMIO_UNKNOWN_OP;
			my_trace->width = 0;
			my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
								*(ip + 2);
		}
	}
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

static void post(struct kmmio_probe *p, unsigned long condition,
							struct pt_regs *regs)
{
	struct trap_reason *my_reason = &get_cpu_var(pf_reason);
	struct mmiotrace_rw *my_trace = &get_cpu_var(cpu_trace);

	/* this should always return the active_trace count to 0 */
	my_reason->active_traces--;
	if (my_reason->active_traces) {
		pr_emerg("unexpected post handler");
		BUG();
	}

	switch (my_reason->type) {
	case REG_READ:
		my_trace->value = get_ins_reg_val(my_reason->ip, regs);
		break;
	default:
		break;
	}

	mmio_trace_rw(my_trace);
	put_cpu_var(cpu_trace);
	put_cpu_var(pf_reason);
}

static void ioremap_trace_core(resource_size_t offset, unsigned long size,
							void __iomem *addr)
{
	static atomic_t next_id;
	struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
	/* These are page-unaligned. */
	struct mmiotrace_map map = {
		.phys = offset,
		.virt = (unsigned long)addr,
		.len = size,
		.opcode = MMIO_PROBE
	};

	if (!trace) {
		pr_err("kmalloc failed in ioremap\n");
		return;
	}

	*trace = (struct remap_trace) {
		.probe = {
			.addr = (unsigned long)addr,
			.len = size,
			.pre_handler = pre,
			.post_handler = post,
			.private = trace
		},
		.phys = offset,
		.id = atomic_inc_return(&next_id)
	};
	map.map_id = trace->id;

	spin_lock_irq(&trace_lock);
	if (!is_enabled()) {
		kfree(trace);
		goto not_enabled;
	}

	mmio_trace_mapping(&map);
	list_add_tail(&trace->list, &trace_list);
	if (!nommiotrace)
		register_kmmio_probe(&trace->probe);

not_enabled:
	spin_unlock_irq(&trace_lock);
}

void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
						void __iomem *addr)
{
	if (!is_enabled()) /* recheck and proper locking in *_core() */
		return;

	pr_debug("ioremap_*(0x%llx, 0x%lx) = %p\n",
		 (unsigned long long)offset, size, addr);
	if ((filter_offset) && (offset != filter_offset))
		return;
	ioremap_trace_core(offset, size, addr);
}

static void iounmap_trace_core(volatile void __iomem *addr)
{
	struct mmiotrace_map map = {
		.phys = 0,
		.virt = (unsigned long)addr,
		.len = 0,
		.opcode = MMIO_UNPROBE
	};
	struct remap_trace *trace;
	struct remap_trace *tmp;
	struct remap_trace *found_trace = NULL;

	pr_debug("Unmapping %p.\n", addr);

	spin_lock_irq(&trace_lock);
	if (!is_enabled())
		goto not_enabled;

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		if ((unsigned long)addr == trace->probe.addr) {
			if (!nommiotrace)
				unregister_kmmio_probe(&trace->probe);
			list_del(&trace->list);
			found_trace = trace;
			break;
		}
	}
	map.map_id = (found_trace) ? found_trace->id : -1;
	mmio_trace_mapping(&map);

not_enabled:
	spin_unlock_irq(&trace_lock);
	if (found_trace) {
		synchronize_rcu(); /* unregister_kmmio_probe() requirement */
		kfree(found_trace);
	}
}

void mmiotrace_iounmap(volatile void __iomem *addr)
{
	might_sleep();
	if (is_enabled()) /* recheck and proper locking in *_core() */
		iounmap_trace_core(addr);
}

int mmiotrace_printk(const char *fmt, ...)
{
	int ret = 0;
	va_list args;
	unsigned long flags;
	va_start(args, fmt);

	spin_lock_irqsave(&trace_lock, flags);
	if (is_enabled())
		ret = mmio_trace_printk(fmt, args);
	spin_unlock_irqrestore(&trace_lock, flags);

	va_end(args);
	return ret;
}
EXPORT_SYMBOL(mmiotrace_printk);

static void clear_trace_list(void)
{
	struct remap_trace *trace;
	struct remap_trace *tmp;

	/*
	 * No locking required, because the caller ensures we are in a
	 * critical section via mutex, and is_enabled() is false,
	 * i.e. nothing can traverse or modify this list.
	 * Caller also ensures is_enabled() cannot change.
	 */
	list_for_each_entry(trace, &trace_list, list) {
		pr_notice("purging non-iounmapped trace @0x%08lx, size 0x%lx.\n",
			  trace->probe.addr, trace->probe.len);
		if (!nommiotrace)
			unregister_kmmio_probe(&trace->probe);
	}
	synchronize_rcu(); /* unregister_kmmio_probe() requirement */

	list_for_each_entry_safe(trace, tmp, &trace_list, list) {
		list_del(&trace->list);
		kfree(trace);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static cpumask_var_t downed_cpus;

static void enter_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) &&
	    !alloc_cpumask_var(&downed_cpus, GFP_KERNEL)) {
		pr_notice("Failed to allocate mask\n");
		goto out;
	}

	get_online_cpus();
	cpumask_copy(downed_cpus, cpu_online_mask);
	cpumask_clear_cpu(cpumask_first(cpu_online_mask), downed_cpus);
	if (num_online_cpus() > 1)
		pr_notice("Disabling non-boot CPUs...\n");
	put_online_cpus();

	for_each_cpu(cpu, downed_cpus) {
		err = remove_cpu(cpu);
		if (!err)
			pr_info("CPU%d is down.\n", cpu);
		else
			pr_err("Error taking CPU%d down: %d\n", cpu, err);
	}
out:
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs still online, may miss events.\n");
}

static void leave_uniprocessor(void)
{
	int cpu;
	int err;

	if (!cpumask_available(downed_cpus) || cpumask_weight(downed_cpus) == 0)
		return;
	pr_notice("Re-enabling CPUs...\n");
	for_each_cpu(cpu, downed_cpus) {
		err = add_cpu(cpu);
		if (!err)
			pr_info("enabled CPU%d.\n", cpu);
		else
			pr_err("cannot re-enable CPU%d: %d\n", cpu, err);
	}
}

#else /* !CONFIG_HOTPLUG_CPU */
static void enter_uniprocessor(void)
{
	if (num_online_cpus() > 1)
		pr_warn("multiple CPUs are online, may miss events. "
			"Suggest booting with maxcpus=1 kernel argument.\n");
}

static void leave_uniprocessor(void)
{
}
#endif

void enable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (is_enabled())
		goto out;

	if (nommiotrace)
		pr_info("MMIO tracing disabled.\n");
	kmmio_init();
	enter_uniprocessor();
	spin_lock_irq(&trace_lock);
	atomic_inc(&mmiotrace_enabled);
	spin_unlock_irq(&trace_lock);
	pr_info("enabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}

void disable_mmiotrace(void)
{
	mutex_lock(&mmiotrace_mutex);
	if (!is_enabled())
		goto out;

	spin_lock_irq(&trace_lock);
	atomic_dec(&mmiotrace_enabled);
	BUG_ON(is_enabled());
	spin_unlock_irq(&trace_lock);

	clear_trace_list(); /* guarantees: no more kmmio callbacks */
	leave_uniprocessor();
	kmmio_cleanup();
	pr_info("disabled.\n");
out:
	mutex_unlock(&mmiotrace_mutex);
}
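
For completeness, enable_mmiotrace() and disable_mmiotrace() are normally reached from userspace by selecting the mmiotrace tracer through the tracing interface (see Documentation/trace/mmiotrace.rst). A minimal userspace sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug path:

/*
 * Minimal sketch: enable mmiotrace from userspace. Equivalent to
 *   echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
 * Assumes debugfs is mounted at /sys/kernel/debug.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/tracing/current_tracer", "w");

	if (!f) {
		perror("current_tracer");
		return EXIT_FAILURE;
	}
	fputs("mmiotrace", f);	/* the kernel side runs enable_mmiotrace() */
	fclose(f);
	/* Trace events can then be read from .../tracing/trace_pipe. */
	return EXIT_SUCCESS;
}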