kernel/trace/trace_mmiotrace.c — Memory mapped I/O tracing.

Two kernel versions of this file follow for comparison.

Version: v3.15
 
  1/*
  2 * Memory mapped I/O tracing
  3 *
  4 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
  5 */
  6
  7#define DEBUG 1
  8
  9#include <linux/kernel.h>
 10#include <linux/mmiotrace.h>
 11#include <linux/pci.h>
 12#include <linux/slab.h>
 13#include <linux/time.h>
 14
 15#include <linux/atomic.h>
 16
 17#include "trace.h"
 18#include "trace_output.h"
 19
/*
 * Iterator state used to emit the PCIDEV header lines at the start of a
 * pipe read; walks the global PCI device list one device per read.
 */
struct header_iter {
	struct pci_dev *dev;	/* next device to print; ref from pci_get_device() */
};
 23
static struct trace_array *mmio_trace_array;	/* set in mmio_trace_init(), cleared in mmio_trace_reset() */
static bool overrun_detected;			/* true once lost events were reported to the reader */
static unsigned long prev_overruns;		/* last ring_buffer_overruns() sample, see count_overruns() */
static atomic_t dropped_count;			/* events lost to failed ring-buffer reservations */
 28
/* Reset overrun bookkeeping and clear the per-CPU trace buffers. */
static void mmio_reset_data(struct trace_array *tr)
{
	overrun_detected = false;
	prev_overruns = 0;

	tracing_reset_online_cpus(&tr->trace_buffer);
}
 36
/*
 * Tracer ->init callback: publish the trace_array for the event
 * producers, discard old data and arm the mmiotrace hooks.
 */
static int mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;	/* must be visible before events are produced */

	mmio_reset_data(tr);
	enable_mmiotrace();
	return 0;
}
 46
/* Tracer ->reset callback: disarm the hooks first, then drop state. */
static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);

	disable_mmiotrace();	/* stop producers before tearing down */
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}
 55
/* Tracer ->start callback: discard stale data on (re)start. */
static void mmio_trace_start(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_reset_data(tr);
}
 61
/*
 * Emit one "PCIDEV" header line for @dev into @s: bus/devfn, vendor/device
 * IDs and IRQ, then for each of the seven standard PCI resources the base
 * address (with region flags OR-ed in) followed by the size, and finally
 * the bound driver's name (or a blank field if unbound).
 * Returns the accumulated trace_seq_printf() results (see XXX below).
 */
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int ret = 0;
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	/* XXX: incomplete checks for trace_seq_printf() return value */
	ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
				dev->bus->number, dev->devfn,
				dev->vendor, dev->device, dev->irq);
	/*
	 * XXX: is pci_resource_to_user() appropriate, since we are
	 * supposed to interpret the __ioremap() phys_addr argument based on
	 * these printed values?
	 */
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		ret += trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
		/*
		 * NOTE(review): the emptiness test uses the raw resource
		 * bounds while the size is computed from the user-translated
		 * start/end — confirm these agree on all architectures.
		 */
		ret += trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		ret += trace_seq_printf(s, " %s\n", drv->name);
	else
		ret += trace_seq_puts(s, " \n");
	return ret;
}
 96
 97static void destroy_header_iter(struct header_iter *hiter)
 98{
 99	if (!hiter)
100		return;
101	pci_dev_put(hiter->dev);
102	kfree(hiter);
103}
104
/*
 * Tracer ->pipe_open callback: write the protocol version line and set up
 * the PCI header iterator so mmio_read() can emit one PCIDEV line per read.
 * Allocation failure is tolerated — the header section is simply skipped.
 */
static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_puts(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	/* First device in the global list; ref dropped in destroy_header_iter(). */
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}
119
/* XXX: This is not called when the pipe is closed! */
/* Tracer ->close callback: tear down the header iterator, if any. */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);	/* tolerates NULL */
	iter->private = NULL;
}
127
128static unsigned long count_overruns(struct trace_iterator *iter)
129{
130	unsigned long cnt = atomic_xchg(&dropped_count, 0);
131	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
132
133	if (over > prev_overruns)
134		cnt += over - prev_overruns;
135	prev_overruns = over;
136	return cnt;
137}
138
/*
 * Tracer ->read callback. Lost events are reported first as a MARK line;
 * otherwise, while the header iterator is live, one PCIDEV line is emitted
 * per call until the PCI device list is exhausted.
 */
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
				char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warning("mmiotrace has lost events.\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	/* Advance; pci_get_device() drops the reference on the old device. */
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}
172
/*
 * Format one MMIO read/write event into the seq buffer as an "R", "W" or
 * "UNKNOWN" line of the mmiotrace log format (width, timestamp, map id,
 * physical address, value, pc). Returns TRACE_TYPE_PARTIAL_LINE when the
 * seq buffer filled up mid-line.
 */
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;
	int ret = 1;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		ret = trace_seq_printf(s,
			"R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		ret = trace_seq_printf(s,
			"W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		/* value holds three opcode bytes, printed comma-separated */
		ret = trace_seq_printf(s,
			"UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
			"%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		ret = trace_seq_puts(s, "rw what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}
219
/*
 * Format one ioremap/iounmap event as a "MAP" or "UNMAP" line.
 * Returns TRACE_TYPE_PARTIAL_LINE when the seq buffer filled up.
 */
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;
	int ret;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		ret = trace_seq_printf(s,
			"MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		ret = trace_seq_printf(s,
			"UNMAP %u.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		ret = trace_seq_puts(s, "map what?\n");
		break;
	}
	if (ret)
		return TRACE_TYPE_HANDLED;
	return TRACE_TYPE_PARTIAL_LINE;
}
255
/*
 * Format a user-injected marker (TRACE_PRINT entry) as a "MARK" line.
 * Returns TRACE_TYPE_PARTIAL_LINE when the seq buffer filled up.
 */
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg		= print->buf;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;
	int ret;

	/* The trailing newline must be in the message. */
	ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}
274
275static enum print_line_t mmio_print_line(struct trace_iterator *iter)
276{
277	switch (iter->ent->type) {
278	case TRACE_MMIO_RW:
279		return mmio_print_rw(iter);
280	case TRACE_MMIO_MAP:
281		return mmio_print_map(iter);
282	case TRACE_PRINT:
283		return mmio_print_mark(iter);
284	default:
285		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
286	}
287}
288
/* The mmiotrace tracer, registered with the ftrace core at boot. */
static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.start		= mmio_trace_start,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.print_line	= mmio_print_line,
};
300
/* Register the tracer at device_initcall time. */
__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);
306
/*
 * Reserve a TRACE_MMIO_RW event in the ring buffer, copy @rw into it and
 * commit (unless discarded by event filters). Failed reservations are
 * counted in dropped_count for later reporting via count_overruns().
 */
static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_rw *rw)
{
	struct ftrace_event_call *call = &event_mmiotrace_rw;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry	= ring_buffer_event_data(event);
	entry->rw			= *rw;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, pc);
}
329
/*
 * Record one MMIO read/write event.
 * NOTE(review): smp_processor_id() is used without disabling preemption
 * here (unlike mmio_trace_mapping()) — presumably callers already run in
 * atomic (fault) context; confirm against the mmiotrace callers.
 */
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
	__trace_mmiotrace_rw(tr, data, rw);
}
336
/*
 * Reserve a TRACE_MMIO_MAP event in the ring buffer, copy @map into it
 * and commit (unless discarded by event filters). Failed reservations
 * are counted in dropped_count.
 */
static void __trace_mmiotrace_map(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_map *map)
{
	struct ftrace_event_call *call = &event_mmiotrace_map;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry	= ring_buffer_event_data(event);
	entry->map			= *map;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(buffer, event, 0, pc);
}
359
/*
 * Record an ioremap/iounmap event. Preemption is disabled around the
 * per-CPU lookup so the CPU cannot change under us.
 */
void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}
370
/* Inject a user marker into the trace; printed later as a "MARK" line. */
int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}
Version: v4.17
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Memory mapped I/O tracing
  4 *
  5 * Copyright (C) 2008 Pekka Paalanen <pq@iki.fi>
  6 */
  7
  8#define DEBUG 1
  9
 10#include <linux/kernel.h>
 11#include <linux/mmiotrace.h>
 12#include <linux/pci.h>
 13#include <linux/slab.h>
 14#include <linux/time.h>
 15
 16#include <linux/atomic.h>
 17
 18#include "trace.h"
 19#include "trace_output.h"
 20
/*
 * Iterator state used to emit the PCIDEV header lines at the start of a
 * pipe read; walks the global PCI device list one device per read.
 */
struct header_iter {
	struct pci_dev *dev;	/* next device to print; ref from pci_get_device() */
};
 24
static struct trace_array *mmio_trace_array;	/* set in mmio_trace_init(), cleared in mmio_trace_reset() */
static bool overrun_detected;			/* true once lost events were reported to the reader */
static unsigned long prev_overruns;		/* last ring_buffer_overruns() sample, see count_overruns() */
static atomic_t dropped_count;			/* events lost to failed ring-buffer reservations */
 29
/* Reset overrun bookkeeping and clear the per-CPU trace buffers. */
static void mmio_reset_data(struct trace_array *tr)
{
	overrun_detected = false;
	prev_overruns = 0;

	tracing_reset_online_cpus(&tr->trace_buffer);
}
 37
/*
 * Tracer ->init callback: publish the trace_array for the event
 * producers, discard old data and arm the mmiotrace hooks.
 */
static int mmio_trace_init(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_trace_array = tr;	/* must be visible before events are produced */

	mmio_reset_data(tr);
	enable_mmiotrace();
	return 0;
}
 47
/* Tracer ->reset callback: disarm the hooks first, then drop state. */
static void mmio_trace_reset(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);

	disable_mmiotrace();	/* stop producers before tearing down */
	mmio_reset_data(tr);
	mmio_trace_array = NULL;
}
 56
/* Tracer ->start callback: discard stale data on (re)start. */
static void mmio_trace_start(struct trace_array *tr)
{
	pr_debug("in %s\n", __func__);
	mmio_reset_data(tr);
}
 62
/*
 * Emit one "PCIDEV" header line for @dev into @s: bus/devfn, vendor/device
 * IDs and IRQ, then for each of the seven standard PCI resources the base
 * address (with region flags OR-ed in) followed by the size, and finally
 * the bound driver's name (or a blank field if unbound). Unlike the old
 * version, resource bounds are read directly rather than through
 * pci_resource_to_user(), and trace_seq errors are left to the caller's
 * seq handling.
 */
static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	int i;
	resource_size_t start, end;
	const struct pci_driver *drv = pci_dev_driver(dev);

	trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
			 dev->bus->number, dev->devfn,
			 dev->vendor, dev->device, dev->irq);
	for (i = 0; i < 7; i++) {
		start = dev->resource[i].start;
		trace_seq_printf(s, " %llx",
			(unsigned long long)(start |
			(dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	}
	for (i = 0; i < 7; i++) {
		start = dev->resource[i].start;
		end = dev->resource[i].end;
		/* size is end - start + 1; empty resources print 0 */
		trace_seq_printf(s, " %llx",
			dev->resource[i].start < dev->resource[i].end ?
			(unsigned long long)(end - start) + 1 : 0);
	}
	if (drv)
		trace_seq_printf(s, " %s\n", drv->name);
	else
		trace_seq_puts(s, " \n");
}
 90
 91static void destroy_header_iter(struct header_iter *hiter)
 92{
 93	if (!hiter)
 94		return;
 95	pci_dev_put(hiter->dev);
 96	kfree(hiter);
 97}
 98
/*
 * Tracer ->pipe_open callback: write the protocol version line and set up
 * the PCI header iterator so mmio_read() can emit one PCIDEV line per read.
 * Allocation failure is tolerated — the header section is simply skipped.
 */
static void mmio_pipe_open(struct trace_iterator *iter)
{
	struct header_iter *hiter;
	struct trace_seq *s = &iter->seq;

	trace_seq_puts(s, "VERSION 20070824\n");

	hiter = kzalloc(sizeof(*hiter), GFP_KERNEL);
	if (!hiter)
		return;

	/* First device in the global list; ref dropped in destroy_header_iter(). */
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
	iter->private = hiter;
}
113
/* XXX: This is not called when the pipe is closed! */
/* Tracer ->close callback: tear down the header iterator, if any. */
static void mmio_close(struct trace_iterator *iter)
{
	struct header_iter *hiter = iter->private;
	destroy_header_iter(hiter);	/* tolerates NULL */
	iter->private = NULL;
}
121
122static unsigned long count_overruns(struct trace_iterator *iter)
123{
124	unsigned long cnt = atomic_xchg(&dropped_count, 0);
125	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
126
127	if (over > prev_overruns)
128		cnt += over - prev_overruns;
129	prev_overruns = over;
130	return cnt;
131}
132
/*
 * Tracer ->read callback. Lost events are reported first as a MARK line;
 * otherwise, while the header iterator is live, one PCIDEV line is emitted
 * per call until the PCI device list is exhausted.
 */
static ssize_t mmio_read(struct trace_iterator *iter, struct file *filp,
				char __user *ubuf, size_t cnt, loff_t *ppos)
{
	ssize_t ret;
	struct header_iter *hiter = iter->private;
	struct trace_seq *s = &iter->seq;
	unsigned long n;

	n = count_overruns(iter);
	if (n) {
		/* XXX: This is later than where events were lost. */
		trace_seq_printf(s, "MARK 0.000000 Lost %lu events.\n", n);
		if (!overrun_detected)
			pr_warn("mmiotrace has lost events\n");
		overrun_detected = true;
		goto print_out;
	}

	if (!hiter)
		return 0;

	mmio_print_pcidev(s, hiter->dev);
	/* Advance; pci_get_device() drops the reference on the old device. */
	hiter->dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, hiter->dev);

	if (!hiter->dev) {
		destroy_header_iter(hiter);
		iter->private = NULL;
	}

print_out:
	ret = trace_seq_to_user(s, ubuf, cnt);
	return (ret == -EBUSY) ? 0 : ret;
}
166
/*
 * Format one MMIO read/write event into the seq buffer as an "R", "W" or
 * "UNKNOWN" line of the mmiotrace log format (width, timestamp, map id,
 * physical address, value, pc). trace_handle_return() reports a full seq
 * buffer as TRACE_TYPE_PARTIAL_LINE.
 */
static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_rw *field;
	struct mmiotrace_rw *rw;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;

	trace_assign_type(field, entry);
	rw = &field->rw;

	switch (rw->opcode) {
	case MMIO_READ:
		trace_seq_printf(s,
			"R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_WRITE:
		trace_seq_printf(s,
			"W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
			rw->width, secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			rw->value, rw->pc, 0);
		break;
	case MMIO_UNKNOWN_OP:
		/* value holds three opcode bytes, printed comma-separated */
		trace_seq_printf(s,
			"UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
			"%02lx 0x%lx %d\n",
			secs, usec_rem, rw->map_id,
			(unsigned long long)rw->phys,
			(rw->value >> 16) & 0xff, (rw->value >> 8) & 0xff,
			(rw->value >> 0) & 0xff, rw->pc, 0);
		break;
	default:
		trace_seq_puts(s, "rw what?\n");
		break;
	}

	return trace_handle_return(s);
}
211
/*
 * Format one ioremap/iounmap event as a "MAP" or "UNMAP" line.
 * trace_handle_return() reports a full seq buffer as
 * TRACE_TYPE_PARTIAL_LINE.
 */
static enum print_line_t mmio_print_map(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct trace_mmiotrace_map *field;
	struct mmiotrace_map *m;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;

	trace_assign_type(field, entry);
	m = &field->map;

	switch (m->opcode) {
	case MMIO_PROBE:
		trace_seq_printf(s,
			"MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
			secs, usec_rem, m->map_id,
			(unsigned long long)m->phys, m->virt, m->len,
			0UL, 0);
		break;
	case MMIO_UNPROBE:
		trace_seq_printf(s,
			"UNMAP %u.%06lu %d 0x%lx %d\n",
			secs, usec_rem, m->map_id, 0UL, 0);
		break;
	default:
		trace_seq_puts(s, "map what?\n");
		break;
	}

	return trace_handle_return(s);
}
245
/*
 * Format a user-injected marker (TRACE_PRINT entry) as a "MARK" line.
 * trace_handle_return() reports a full seq buffer as
 * TRACE_TYPE_PARTIAL_LINE.
 */
static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
{
	struct trace_entry *entry = iter->ent;
	struct print_entry *print = (struct print_entry *)entry;
	const char *msg		= print->buf;
	struct trace_seq *s	= &iter->seq;
	unsigned long long t	= ns2usecs(iter->ts);
	unsigned long usec_rem	= do_div(t, USEC_PER_SEC);
	unsigned secs		= (unsigned long)t;

	/* The trailing newline must be in the message. */
	trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);

	return trace_handle_return(s);
}
261
262static enum print_line_t mmio_print_line(struct trace_iterator *iter)
263{
264	switch (iter->ent->type) {
265	case TRACE_MMIO_RW:
266		return mmio_print_rw(iter);
267	case TRACE_MMIO_MAP:
268		return mmio_print_map(iter);
269	case TRACE_PRINT:
270		return mmio_print_mark(iter);
271	default:
272		return TRACE_TYPE_HANDLED; /* ignore unknown entries */
273	}
274}
275
/* The mmiotrace tracer, registered with the ftrace core at boot. */
static struct tracer mmio_tracer __read_mostly =
{
	.name		= "mmiotrace",
	.init		= mmio_trace_init,
	.reset		= mmio_trace_reset,
	.start		= mmio_trace_start,
	.pipe_open	= mmio_pipe_open,
	.close		= mmio_close,
	.read		= mmio_read,
	.print_line	= mmio_print_line,
	.noboot		= true,	/* not usable as a boot-time tracer */
};
288
/* Register the tracer at device_initcall time. */
__init static int init_mmio_trace(void)
{
	return register_tracer(&mmio_tracer);
}
device_initcall(init_mmio_trace);
294
/*
 * Reserve a TRACE_MMIO_RW event in the ring buffer, copy @rw into it and
 * commit (unless discarded by event filters). Failed reservations are
 * counted in dropped_count for later reporting via count_overruns().
 */
static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_rw *rw)
{
	struct trace_event_call *call = &event_mmiotrace_rw;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry	= ring_buffer_event_data(event);
	entry->rw			= *rw;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
}
317
/*
 * Record one MMIO read/write event.
 * NOTE(review): smp_processor_id() is used without disabling preemption
 * here (unlike mmio_trace_mapping()) — presumably callers already run in
 * atomic (fault) context; confirm against the mmiotrace callers.
 */
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
	__trace_mmiotrace_rw(tr, data, rw);
}
324
/*
 * Reserve a TRACE_MMIO_MAP event in the ring buffer, copy @map into it
 * and commit (unless discarded by event filters). Failed reservations
 * are counted in dropped_count.
 */
static void __trace_mmiotrace_map(struct trace_array *tr,
				struct trace_array_cpu *data,
				struct mmiotrace_map *map)
{
	struct trace_event_call *call = &event_mmiotrace_map;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	int pc = preempt_count();

	event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
					  sizeof(*entry), 0, pc);
	if (!event) {
		atomic_inc(&dropped_count);
		return;
	}
	entry	= ring_buffer_event_data(event);
	entry->map			= *map;

	if (!call_filter_check_discard(call, entry, buffer, event))
		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
}
347
/*
 * Record an ioremap/iounmap event. Preemption is disabled around the
 * per-CPU lookup so the CPU cannot change under us.
 */
void mmio_trace_mapping(struct mmiotrace_map *map)
{
	struct trace_array *tr = mmio_trace_array;
	struct trace_array_cpu *data;

	preempt_disable();
	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}
358
/* Inject a user marker into the trace; printed later as a "MARK" line. */
int mmio_trace_printk(const char *fmt, va_list args)
{
	return trace_vprintk(0, fmt, args);
}