/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Intel IOMMU trace support
 *
 * Copyright (C) 2019 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#ifdef CONFIG_INTEL_IOMMU
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel_iommu

#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_IOMMU_H

#include <linux/tracepoint.h>
#include <linux/intel-iommu.h>

DECLARE_EVENT_CLASS(dma_map,
	TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
		 size_t size),

	TP_ARGS(dev, dev_addr, phys_addr, size),

	TP_STRUCT__entry(
		__string(dev_name, dev_name(dev))
		__field(dma_addr_t, dev_addr)
		__field(phys_addr_t, phys_addr)
		__field(size_t, size)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name(dev));
		__entry->dev_addr = dev_addr;
		__entry->phys_addr = phys_addr;
		__entry->size = size;
	),

	TP_printk("dev=%s dev_addr=0x%llx phys_addr=0x%llx size=%zu",
		  __get_str(dev_name),
		  (unsigned long long)__entry->dev_addr,
		  (unsigned long long)__entry->phys_addr,
		  __entry->size)
);
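/*
 * The dma_map class records the device name together with the bus (DMA)
 * address, the backing physical address and the mapping size; the
 * per-event instances defined below reuse this layout.
 */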

DEFINE_EVENT(dma_map, map_single,
	TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
		 size_t size),
	TP_ARGS(dev, dev_addr, phys_addr, size)
);

DEFINE_EVENT(dma_map, map_sg,
	TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
		 size_t size),
	TP_ARGS(dev, dev_addr, phys_addr, size)
);

DEFINE_EVENT(dma_map, bounce_map_single,
	TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
		 size_t size),
	TP_ARGS(dev, dev_addr, phys_addr, size)
);
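
/*
 * Illustrative only: each DEFINE_EVENT above expands to a trace_<name>()
 * helper, so a caller in the Intel IOMMU DMA mapping path would emit an
 * event roughly as below. The exact call sites are an assumption and are
 * not part of this header.
 *
 *	trace_map_single(dev, dev_addr, phys_addr, size);
 *	trace_map_sg(dev, dev_addr, phys_addr, size);
 *	trace_bounce_map_single(dev, dev_addr, phys_addr, size);
 */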

DECLARE_EVENT_CLASS(dma_unmap,
	TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),

	TP_ARGS(dev, dev_addr, size),

	TP_STRUCT__entry(
		__string(dev_name, dev_name(dev))
		__field(dma_addr_t, dev_addr)
		__field(size_t, size)
	),

	TP_fast_assign(
		__assign_str(dev_name, dev_name(dev));
		__entry->dev_addr = dev_addr;
		__entry->size = size;
	),

	TP_printk("dev=%s dev_addr=0x%llx size=%zu",
		  __get_str(dev_name),
		  (unsigned long long)__entry->dev_addr,
		  __entry->size)
);
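/*
 * The dma_unmap class mirrors dma_map but only records the device name,
 * the bus (DMA) address and the size being unmapped.
 */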

DEFINE_EVENT(dma_unmap, unmap_single,
	TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
	TP_ARGS(dev, dev_addr, size)
);

DEFINE_EVENT(dma_unmap, unmap_sg,
	TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
	TP_ARGS(dev, dev_addr, size)
);

DEFINE_EVENT(dma_unmap, bounce_unmap_single,
	TP_PROTO(struct device *dev, dma_addr_t dev_addr, size_t size),
	TP_ARGS(dev, dev_addr, size)
);
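
/*
 * Illustrative only: the generated helpers would be called from the
 * corresponding unmap paths, for example (assumed call sites):
 *
 *	trace_unmap_single(dev, dev_addr, size);
 *	trace_unmap_sg(dev, dev_addr, size);
 *	trace_bounce_unmap_single(dev, dev_addr, size);
 */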

#endif /* _TRACE_INTEL_IOMMU_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
#endif /* CONFIG_INTEL_IOMMU */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Intel IOMMU trace support
 *
 * Copyright (C) 2019 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM intel_iommu

#if !defined(_TRACE_INTEL_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_INTEL_IOMMU_H

#include <linux/tracepoint.h>
#include <linux/intel-iommu.h>

#define MSG_MAX		256

TRACE_EVENT(qi_submit,
	TP_PROTO(struct intel_iommu *iommu, u64 qw0, u64 qw1, u64 qw2, u64 qw3),

	TP_ARGS(iommu, qw0, qw1, qw2, qw3),

	TP_STRUCT__entry(
		__field(u64, qw0)
		__field(u64, qw1)
		__field(u64, qw2)
		__field(u64, qw3)
		__string(iommu, iommu->name)
	),

	TP_fast_assign(
		__assign_str(iommu, iommu->name);
		__entry->qw0 = qw0;
		__entry->qw1 = qw1;
		__entry->qw2 = qw2;
		__entry->qw3 = qw3;
	),

	TP_printk("%s %s: 0x%llx 0x%llx 0x%llx 0x%llx",
		  __print_symbolic(__entry->qw0 & 0xf,
				   { QI_CC_TYPE,	"cc_inv" },
				   { QI_IOTLB_TYPE,	"iotlb_inv" },
				   { QI_DIOTLB_TYPE,	"dev_tlb_inv" },
				   { QI_IEC_TYPE,	"iec_inv" },
				   { QI_IWD_TYPE,	"inv_wait" },
				   { QI_EIOTLB_TYPE,	"p_iotlb_inv" },
				   { QI_PC_TYPE,	"pc_inv" },
				   { QI_DEIOTLB_TYPE,	"p_dev_tlb_inv" },
				   { QI_PGRP_RESP_TYPE,	"page_grp_resp" }),
		  __get_str(iommu),
		  __entry->qw0, __entry->qw1, __entry->qw2, __entry->qw3
	)
);
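
/*
 * Illustrative only: the invalidation queue code would trace each
 * descriptor it submits roughly as below, where "desc" is assumed to be
 * a pointer to a struct qi_desc and is not defined in this header:
 *
 *	trace_qi_submit(iommu, desc->qw0, desc->qw1, desc->qw2, desc->qw3);
 *
 * The low four bits of qw0 carry the descriptor type, which is what the
 * __print_symbolic() above decodes into a readable name.
 */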

TRACE_EVENT(prq_report,
	TP_PROTO(struct intel_iommu *iommu, struct device *dev,
		 u64 dw0, u64 dw1, u64 dw2, u64 dw3,
		 unsigned long seq),

	TP_ARGS(iommu, dev, dw0, dw1, dw2, dw3, seq),

	TP_STRUCT__entry(
		__field(u64, dw0)
		__field(u64, dw1)
		__field(u64, dw2)
		__field(u64, dw3)
		__field(unsigned long, seq)
		__string(iommu, iommu->name)
		__string(dev, dev_name(dev))
		__dynamic_array(char, buff, MSG_MAX)
	),

	TP_fast_assign(
		__entry->dw0 = dw0;
		__entry->dw1 = dw1;
		__entry->dw2 = dw2;
		__entry->dw3 = dw3;
		__entry->seq = seq;
		__assign_str(iommu, iommu->name);
		__assign_str(dev, dev_name(dev));
	),

	TP_printk("%s/%s seq# %ld: %s",
		  __get_str(iommu), __get_str(dev), __entry->seq,
		  decode_prq_descriptor(__get_str(buff), MSG_MAX, __entry->dw0,
					__entry->dw1, __entry->dw2, __entry->dw3)
	)
);
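
/*
 * Illustrative only: a page request queue (PRQ) handler would report
 * each dequeued descriptor roughly as below, passing the four quadwords
 * of the hardware descriptor plus a caller-maintained sequence number:
 *
 *	trace_prq_report(iommu, dev, dw0, dw1, dw2, dw3, seq);
 *
 * decode_prq_descriptor() is expected to be provided elsewhere by the
 * Intel IOMMU driver; it fills the "buff" dynamic array (bounded by
 * MSG_MAX) with a human-readable form at print time.
 */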
#endif /* _TRACE_INTEL_IOMMU_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
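
/*
 * As with any trace header, exactly one compilation unit in the driver
 * is expected to define CREATE_TRACE_POINTS before including this file
 * so that define_trace.h emits the tracepoint definitions, e.g. (file
 * placement assumed):
 *
 *	#define CREATE_TRACE_POINTS
 *	#include <trace/events/intel_iommu.h>
 */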